test_production_ai.py • 3.93 kB
#!/usr/bin/env python3
"""
Test script for production AI configuration with Ollama.
"""

import sys
import os
import asyncio
import json

sys.path.append('src')

from dp_mcp.ai.ai_tools import initialize_ai_tools
from dp_mcp.tools.postgres_tools import execute_query


async def test_production_ai():
    """Test production AI setup with Ollama."""
    print("🧪 Testing Production AI Configuration")
    print("="*50)

    # 1. Initialize AI tools with production environment
    print("1️⃣ Initializing production AI tools...")
    ai_tools = initialize_ai_tools("production")

    if not ai_tools:
        print("❌ AI tools failed to initialize")
        return

    print("✅ Production AI tools initialized")

    # 2. Get AI status
    print("\n2️⃣ AI System Status:")
    status = ai_tools.get_ai_status()
    print(f" Environment: {status.get('environment')}")
    print(f" Privacy Level: {status.get('privacy_level')}")
    print(f" Available Models: {status.get('available_models')}")
    print(f" Default Model: {status.get('default_model')}")

    # 3. Test model availability
    print("\n3️⃣ Testing Model Availability:")
    available_models = ai_tools.get_available_models()
    print(f" Models ready for use: {available_models}")

    # 4. Test direct Ollama interaction
    print("\n4️⃣ Testing Ollama Integration:")
    try:
        # Test if AI manager can communicate with Ollama
        model_manager = ai_tools.ai_manager
        if hasattr(model_manager, 'models') and model_manager.models:
            for model_name, model_config in model_manager.models.items():
                print(f" • {model_name}: {model_config.provider} ({model_config.model_type})")
                if model_config.provider == "ollama":
                    print(f" Base URL: {model_config.base_url}")
                    print(f" Model: {model_config.model}")
        else:
            print(" No models configured")
    except Exception as e:
        print(f" Error: {e}")

    # 5. Test a simple AI query simulation
    print("\n5️⃣ Testing AI Query Processing:")
    try:
        # Test data privacy
        sample_text = "User john.doe@example.com has SSN 123-45-6789"
        privacy_manager = getattr(ai_tools, 'privacy_manager', None)
        if privacy_manager:
            sanitized = privacy_manager.sanitize_text(sample_text)
            print(f" Original: {sample_text}")
            print(f" Sanitized: {sanitized}")
        else:
            print(" Privacy manager not available")
    except Exception as e:
        print(f" Privacy test error: {e}")

    # 6. Test database query
    print("\n6️⃣ Testing Database Integration:")
    try:
        result = await execute_query("SELECT 'AI Test' as message, COUNT(*) as user_count FROM users", 1)
        print(f" Database query result: {result}")
    except Exception as e:
        print(f" Database error: {e}")

    print("\n🎉 Production AI Test Summary:")
    print(" • Server: ✅ Running at http://127.0.0.1:8888/mcp/")
    print(" • AI Environment: production")
    print(f" • Models: {len(available_models)} available")
    print(" • Privacy Protection: ✅ Active")
    print(" • Database Integration: ✅ Ready")

    print("\n📋 Available AI MCP Tools:")
    tools_list = [
        "ask_natural_language_query",
        "explain_query_with_ai",
        "get_ai_data_insights",
        "analyze_table_patterns",
        "generate_ai_data_report",
        "get_ai_system_status"
    ]
    for tool in tools_list:
        print(f" • {tool}")

    print("\n🚀 Next Steps:")
    print(" 1. Use MCP client to call AI tools")
    print(" 2. Add API keys to .env.ai for cloud models")
    print(" 3. Test natural language queries")


if __name__ == "__main__":
    asyncio.run(test_production_ai())
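For the "Use MCP client to call AI tools" step printed at the end of the script, a minimal client sketch is shown below. It assumes the official `mcp` Python SDK with streamable HTTP support is installed; the server URL and the `get_ai_system_status` tool name come from the script above, but the tool's argument schema is not shown there, so it is called with an empty argument dict as an assumption.

#!/usr/bin/env python3
"""Minimal MCP client sketch (assumes the official `mcp` Python SDK is installed)."""

import asyncio

from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client


async def main():
    # Connect to the dp-mcp server the test script reports at this URL.
    async with streamablehttp_client("http://127.0.0.1:8888/mcp/") as (read_stream, write_stream, _):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()

            # List the AI tools exposed by the server.
            tools = await session.list_tools()
            print("Tools:", [tool.name for tool in tools.tools])

            # Call one of the listed tools; empty arguments are an assumption here.
            result = await session.call_tool("get_ai_system_status", {})
            print("Result:", result.content)


if __name__ == "__main__":
    asyncio.run(main())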

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/devraj21/dp-mcp'
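
The same lookup can be made from Python; a small sketch, assuming the `requests` package is installed (the response body is simply printed, since its schema is not documented here):

import requests

# Fetch this server's directory entry from the Glama MCP API.
resp = requests.get("https://glama.ai/api/mcp/v1/servers/devraj21/dp-mcp", timeout=10)
resp.raise_for_status()
print(resp.json())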

If you have feedback or need assistance with the MCP directory API, please join our Discord server.