#!/usr/bin/env python3
"""
Test LLM Fallback System
Tests all configured LLM providers and the fallback mechanism
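
Run directly (the __main__ block drives asyncio); requires provider API keys
to be configured, otherwise the individual provider tests will fail.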
"""
import sys
import asyncio
from pathlib import Path
# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))
from src.utils.config_loader import get_config
from src.utils.llm_manager import LLMManager
def print_header(text: str):
"""Print formatted header"""
print("\n" + "=" * 70)
print(f" {text}")
print("=" * 70 + "\n")
def print_success(text: str):
"""Print success message"""
print(f"✓ {text}")
def print_error(text: str):
"""Print error message"""
print(f"✗ {text}")
def print_info(text: str):
"""Print info message"""
print(f"ℹ {text}")
async def test_single_provider(manager: LLMManager, provider_name: str):
"""Test a single provider"""
print(f"\n--- Testing {provider_name.upper()} ---")
try:
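        # force_provider bypasses the fallback chain so only this provider is exercised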
        response = await manager.generate(
            prompt=f"Say 'Hello from {provider_name}!' in one sentence.",
            force_provider=provider_name
        )
print_success(f"Provider: {response.provider}")
print_success(f"Response: {response.content[:100]}...")
print_success(f"Tokens: {response.usage['total_tokens']}")
print_success(f"Cost: ${response.cost:.6f}")
print_success(f"Latency: {response.latency:.2f}s")
return True
except Exception as e:
print_error(f"Failed: {str(e)}")
return False
async def test_automatic_fallback(manager: LLMManager):
"""Test automatic fallback mechanism"""
print_header("Testing Automatic Fallback")
print_info("Sending request without forcing provider...")
print_info("System will automatically choose best available provider")
try:
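        # No force_provider here: the manager chooses the best available provider
        # and falls back to the next one if it fails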
response = await manager.generate(
prompt="Explain what you are in one sentence.",
system_prompt="You are a helpful AI assistant."
)
print_success(f"Used Provider: {response.provider}")
print_success(f"Response: {response.content}")
print_success(f"Tokens: {response.usage['total_tokens']}")
print_success(f"Cost: ${response.cost:.6f}")
print_success(f"Latency: {response.latency:.2f}s")
return True
except Exception as e:
print_error(f"All providers failed: {str(e)}")
return False
async def test_health_check(manager: LLMManager):
"""Test health check system"""
print_header("Provider Health Check")
try:
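        # health_check() returns aggregate counters plus a per-provider status map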
health = await manager.health_check()
print_success(f"Total Requests: {health['total_requests']}")
print_success(f"Fallback Count: {health['fallback_count']}")
print_success(f"Fallback Rate: {health['fallback_rate']:.1%}")
print_success(f"Last Successful Provider: {health['last_successful_provider']}")
print("\nProvider Status:")
for provider_name, status in health['providers'].items():
status_emoji = "✓" if status.status == "healthy" else "✗"
print(f" {status_emoji} {provider_name.upper()}: {status.status}")
print(f" Success Rate: {status.success_rate:.1%}")
print(f" Total Calls: {status.total_calls}")
print(f" Total Cost: ${status.total_cost:.4f}")
print(f" Circuit: {status.circuit_breaker_state}")
return True
except Exception as e:
print_error(f"Health check failed: {str(e)}")
return False
async def test_usage_metrics(manager: LLMManager):
"""Test usage metrics"""
print_header("Usage Metrics")
try:
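        # Metrics are cumulative for this manager instance, so calls made by the
        # earlier tests are reflected here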
metrics = manager.get_usage_metrics()
print_success(f"Total Requests: {metrics.total_requests}")
print_success(f"Total Calls: {metrics.total_calls}")
print_success(f"Successful Calls: {metrics.successful_calls}")
print_success(f"Failed Calls: {metrics.failed_calls}")
print_success(f"Total Cost: ${metrics.total_cost:.4f}")
print_success(f"Fallback Count: {metrics.fallback_count}")
print("\nPer-Provider Metrics:")
for provider, stats in metrics.per_provider.items():
print(f" {provider.upper()}:")
print(f" Calls: {stats['calls']}")
print(f" Success Rate: {stats['success_rate']:.1%}")
print(f" Cost: ${stats['cost']:.4f}")
return True
except Exception as e:
print_error(f"Metrics failed: {str(e)}")
return False
async def main():
"""Main test function"""
print_header("Enhanced MCP Server - LLM Fallback Test")
# Load configuration
print_info("Loading configuration...")
try:
config = get_config()
print_success("Configuration loaded")
except Exception as e:
print_error(f"Failed to load configuration: {e}")
return
# Get LLM config
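    # Assumed shape of the LLM config section (provider names are illustrative):
    #   providers:
    #     - name: openai
    #       enabled: true
    #     - name: anthropic
    #       enabled: true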
llm_config = config.get_llm_config()
providers = [p['name'] for p in llm_config.get('providers', []) if p.get('enabled', True)]
print_info(f"Configured providers: {', '.join(providers)}")
# Initialize LLM Manager
print_info("Initializing LLM Manager...")
try:
manager = LLMManager(config.yaml_config)
print_success("LLM Manager initialized")
except Exception as e:
print_error(f"Failed to initialize LLM Manager: {e}")
return
# Test results
results = {
'providers_tested': {},
'automatic_fallback': False,
'health_check': False,
'usage_metrics': False
}
# Test each provider individually
print_header("Testing Individual Providers")
for provider in providers:
success = await test_single_provider(manager, provider)
results['providers_tested'][provider] = success
# Test automatic fallback
results['automatic_fallback'] = await test_automatic_fallback(manager)
# Test health check
results['health_check'] = await test_health_check(manager)
# Test usage metrics
results['usage_metrics'] = await test_usage_metrics(manager)
# Print summary
print_header("Test Summary")
providers_working = sum(results['providers_tested'].values())
providers_total = len(results['providers_tested'])
print(f"Providers Working: {providers_working}/{providers_total}")
for provider, success in results['providers_tested'].items():
status = "✓ PASS" if success else "✗ FAIL"
print(f" {status}: {provider.upper()}")
print(f"\nAutomatic Fallback: {'✓ PASS' if results['automatic_fallback'] else '✗ FAIL'}")
print(f"Health Check: {'✓ PASS' if results['health_check'] else '✗ FAIL'}")
print(f"Usage Metrics: {'✓ PASS' if results['usage_metrics'] else '✗ FAIL'}")
# Overall result
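    # Usage metrics are informational; the run passes when at least one provider
    # works and both automatic fallback and the health check succeed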
all_critical_pass = (
providers_working > 0 and
results['automatic_fallback'] and
results['health_check']
)
print_header("Overall Result")
if all_critical_pass:
        print_success("ALL CRITICAL TESTS PASSED!")
print_success("LLM Fallback System is working correctly!")
else:
        print_error("SOME TESTS FAILED")
print_error("Please check configuration and API keys")
# Cleanup
await manager.close()
if __name__ == "__main__":
try:
asyncio.run(main())
except KeyboardInterrupt:
print("\n\nTest interrupted by user")
except Exception as e:
print_error(f"Unexpected error: {e}")
import traceback
traceback.print_exc()