/**
* Llama Maverick Hub MCP Server
*
* Author: Yobie Benjamin
* Version: 0.2
* Date: July 28, 2025
*
* This is the main entry point for the Llama Maverick Hub MCP server.
* It acts as a central orchestrator that:
 * 1. Hosts Llama Maverick as the hub's core AI engine
* 2. Connects to multiple MCP-enabled services (Stripe, GitHub, etc.)
* 3. Routes and orchestrates requests between Llama and various MCP services
* 4. Provides unified tool access across all connected services
*/
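/**
 * Illustrative only: one way an MCP client such as Claude Desktop could
 * register this hub over stdio. The command and script path below are
 * assumptions about the build output, not part of this project's docs.
 *
 *   {
 *     "mcpServers": {
 *       "llama-maverick-hub": {
 *         "command": "node",
 *         "args": ["dist/index.js"]
 *       }
 *     }
 *   }
 */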
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import {
CallToolRequestSchema,
ListResourcesRequestSchema,
ListPromptsRequestSchema,
ListToolsRequestSchema,
ReadResourceRequestSchema,
GetPromptRequestSchema,
CompleteRequestSchema,
ErrorCode,
McpError
} from '@modelcontextprotocol/sdk/types.js';
import winston from 'winston';
import { HubOrchestrator } from './orchestrator/hub-orchestrator.js';
import { ServiceRegistry } from './registry/service-registry.js';
import { LlamaService } from './services/llama-service.js';
import { ConfigManager } from './config/config-manager.js';
import { MCPClientManager } from './clients/mcp-client-manager.js';
/**
* Configure winston logger for debugging and monitoring
* Logs are essential for tracking multi-service orchestration
*/
const logger = winston.createLogger({
level: process.env.LOG_LEVEL || 'info',
format: winston.format.combine(
winston.format.timestamp(),
winston.format.json()
),
transports: [
    new winston.transports.Console({
      format: winston.format.simple(),
      // Route every level to stderr: stdout is reserved for MCP protocol
      // messages when the stdio transport is used.
      stderrLevels: ['error', 'warn', 'info', 'http', 'verbose', 'debug', 'silly']
    })
]
});
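// Example usage (illustrative): set LOG_LEVEL=debug in the environment before
// launching the hub to see the per-request routing logs emitted by the
// handlers below.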
/**
* Main server class that implements the MCP hub
* This server acts as both an MCP server (for Claude) and an MCP client (for other services)
*/
class LlamaMaverickHubServer {
private server: Server;
private orchestrator: HubOrchestrator;
private registry: ServiceRegistry;
private llamaService: LlamaService;
private configManager: ConfigManager;
private mcpClientManager: MCPClientManager;
constructor() {
/**
* Initialize the MCP server instance
* This server will be accessed by Claude Desktop or other MCP clients
*/
this.server = new Server(
{
name: 'llama-maverick-hub',
version: '0.2.0',
},
{
capabilities: {
resources: {},
tools: {},
prompts: {},
        completions: {}
}
}
);
/**
* Initialize core components
* Each component handles a specific aspect of the hub functionality
*/
this.configManager = new ConfigManager();
this.registry = new ServiceRegistry();
this.mcpClientManager = new MCPClientManager(this.registry);
this.llamaService = new LlamaService(this.configManager);
this.orchestrator = new HubOrchestrator(
this.llamaService,
this.registry,
this.mcpClientManager
);
}
/**
* Initialize the server and all its components
* This sets up connections to external MCP services and prepares Llama
*/
async initialize(): Promise<void> {
logger.info('Initializing Llama Maverick Hub MCP Server...');
try {
// Load configuration from environment and config files
await this.configManager.loadConfig();
// Initialize Llama service for AI processing
await this.llamaService.initialize();
// Connect to configured MCP services (Stripe, GitHub, etc.)
await this.mcpClientManager.connectToServices(
this.configManager.getEnabledServices()
);
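      /**
       * Illustrative config shape (an assumption -- the real schema lives in
       * ConfigManager): each enabled service names the command used to spawn
       * its MCP server over stdio, e.g.
       *
       *   { "services": [
       *       { "name": "github", "enabled": true,
       *         "command": "npx", "args": ["-y", "@modelcontextprotocol/server-github"] }
       *   ] }
       */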
// Register all discovered tools from connected services
await this.orchestrator.discoverAndRegisterTools();
// Setup all request handlers
this.setupHandlers();
logger.info('Hub server initialized successfully');
} catch (error) {
logger.error('Failed to initialize hub server:', error);
throw error;
}
}
/**
* Setup all MCP protocol handlers
* These handlers respond to requests from Claude Desktop
*/
private setupHandlers(): void {
/**
* Handle tool listing requests
* Returns aggregated tools from all connected MCP services
*/
this.server.setRequestHandler(ListToolsRequestSchema, async () => {
logger.debug('Listing available tools from all connected services');
const tools = await this.orchestrator.getAggregatedTools();
return {
tools: tools.map(tool => ({
name: tool.name,
description: tool.description,
inputSchema: tool.inputSchema
}))
};
});
/**
* Handle tool execution requests
* Routes tool calls to appropriate MCP service or handles internally
*/
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params;
logger.info(`Executing tool: ${name}`, { args });
try {
// Orchestrator determines which service handles this tool
// and manages the execution flow
const result = await this.orchestrator.executeTool(name, args || {});
return {
content: [
{
type: 'text',
text: JSON.stringify(result, null, 2)
}
]
};
} catch (error) {
logger.error(`Tool execution failed: ${name}`, error);
throw new McpError(
ErrorCode.InternalError,
`Tool execution failed: ${error instanceof Error ? error.message : String(error)}`
);
}
});
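    /**
     * Illustrative round trip for the handler above (the tool name is
     * hypothetical): a client sends
     *   { "method": "tools/call",
     *     "params": { "name": "github_create_issue",
     *                 "arguments": { "title": "Bug report" } } }
     * and receives the orchestrator's result serialized as a single text
     * content block, as returned by executeTool above.
     */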
/**
* Handle resource listing requests
* Resources can come from any connected MCP service
*/
this.server.setRequestHandler(ListResourcesRequestSchema, async () => {
logger.debug('Listing resources from all services');
const resources = await this.orchestrator.getAggregatedResources();
return {
resources: resources
};
});
/**
* Handle resource reading requests
* Routes to the appropriate service that owns the resource
*/
this.server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
const { uri } = request.params;
logger.debug(`Reading resource: ${uri}`);
try {
const content = await this.orchestrator.readResource(uri);
return {
contents: [
{
              uri,
              text: content
}
]
};
} catch (error) {
throw new McpError(
          ErrorCode.InvalidParams,
`Resource not found: ${uri}`
);
}
});
/**
* Handle prompt listing requests
* Prompts can be defined by the hub or come from connected services
*/
this.server.setRequestHandler(ListPromptsRequestSchema, async () => {
logger.debug('Listing available prompts');
const prompts = await this.orchestrator.getAggregatedPrompts();
return {
prompts: prompts
};
});
/**
* Handle prompt retrieval requests
* Returns the full prompt template with arguments
*/
this.server.setRequestHandler(GetPromptRequestSchema, async (request) => {
const { name, arguments: args } = request.params;
logger.debug(`Getting prompt: ${name}`);
try {
const prompt = await this.orchestrator.getPrompt(name, args);
return prompt;
} catch (error) {
throw new McpError(
ErrorCode.InvalidRequest,
`Prompt not found: ${name}`
);
}
});
/**
* Handle completion requests
* Uses Llama Maverick to generate completions with context from all services
*/
this.server.setRequestHandler(CompleteRequestSchema, async (request) => {
const { ref, argument } = request.params;
logger.debug('Handling completion request', { ref, argument });
try {
// Orchestrator coordinates with Llama and relevant services
const completion = await this.orchestrator.complete(ref, argument);
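        // The MCP completion result expects `completion` to be an object of
        // the form { values: string[], total?: number, hasMore?: boolean };
        // the orchestrator is assumed to return that shape.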
return {
completion
};
} catch (error) {
logger.error('Completion failed:', error);
throw new McpError(
ErrorCode.InternalError,
`Completion failed: ${error instanceof Error ? error.message : String(error)}`
);
}
});
}
/**
* Start the server and begin listening for connections
* Uses stdio transport for communication with Claude Desktop
*/
async start(): Promise<void> {
const transport = new StdioServerTransport();
await this.initialize();
await this.server.connect(transport);
logger.info('Llama Maverick Hub MCP Server is running');
logger.info('Connected services:', this.registry.listServices());
}
}
/**
* Main execution entry point
* Handles graceful shutdown and error management
*/
async function main() {
const hubServer = new LlamaMaverickHubServer();
try {
await hubServer.start();
} catch (error) {
logger.error('Failed to start server:', error);
process.exit(1);
}
/**
* Handle graceful shutdown on process termination
 * Logs the shutdown and exits; stdio connections are torn down when the process exits
*/
process.on('SIGINT', async () => {
logger.info('Shutting down Llama Maverick Hub...');
process.exit(0);
});
process.on('SIGTERM', async () => {
logger.info('Shutting down Llama Maverick Hub...');
process.exit(0);
});
}
// Start the server
main().catch((error) => {
logger.error('Unhandled error:', error);
process.exit(1);
});