index.ts (17.8 kB)
#!/usr/bin/env node
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import {
  CallToolRequestSchema,
  ErrorCode,
  ListResourcesRequestSchema,
  ListResourceTemplatesRequestSchema,
  ListToolsRequestSchema,
  McpError,
  ReadResourceRequestSchema,
} from '@modelcontextprotocol/sdk/types.js';
// AzureOpenAI is exported by the 'openai' package, not '@azure/openai'
import { AzureOpenAI } from 'openai';
import { TextAnalyticsClient, AzureKeyCredential } from '@azure/ai-text-analytics';
import { ComputerVisionClient } from '@azure/cognitiveservices-computervision';
import { FaceClient } from '@azure/cognitiveservices-face';
// The legacy @azure/cognitiveservices-* clients take ms-rest-js credentials
import { ApiKeyCredentials } from '@azure/ms-rest-js';
import { BlobServiceClient } from '@azure/storage-blob';
import winston from 'winston';
import dotenv from 'dotenv';
import Joi from 'joi';
import { RateLimiterMemory } from 'rate-limiter-flexible';
import retry from 'retry';

// Load environment variables
dotenv.config();

// Validation schemas
const chatCompletionSchema = Joi.object({
  messages: Joi.array()
    .items(
      Joi.object({
        role: Joi.string().valid('system', 'user', 'assistant').required(),
        content: Joi.string().required(),
      })
    )
    .required(),
  // For Azure OpenAI, 'model' is the deployment name, not the base model name
  model: Joi.string().default('gpt-4'),
  max_tokens: Joi.number().integer().min(1).max(4096).default(1000),
  temperature: Joi.number().min(0).max(2).default(0.7),
});

const textAnalysisSchema = Joi.object({
  text: Joi.string().required(),
  language: Joi.string().default('en'),
  features: Joi.array()
    .items(Joi.string().valid('sentiment', 'entities', 'keyPhrases', 'language'))
    .default(['sentiment', 'entities', 'keyPhrases']),
});

const imageAnalysisSchema = Joi.object({
  imageUrl: Joi.string().uri().required(),
  features: Joi.array()
    .items(
      Joi.string().valid(
        'categories', 'description', 'faces', 'objects',
        'tags', 'adult', 'brands', 'color'
      )
    )
    .default(['description', 'tags', 'objects']),
});

// Environment variable validation
const requiredEnvVars = [
  'AZURE_OPENAI_ENDPOINT',
  'AZURE_OPENAI_API_KEY',
  'AZURE_COGNITIVE_SERVICES_ENDPOINT',
  'AZURE_COGNITIVE_SERVICES_KEY',
  'AZURE_STORAGE_CONNECTION_STRING',
];

for (const envVar of requiredEnvVars) {
  if (!process.env[envVar]) {
    throw new Error(`${envVar} environment variable is required`);
  }
}
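// For reference, dotenv would load values like these from a local .env file
// (placeholders only; the endpoint and key formats are illustrative):
//
//   AZURE_OPENAI_ENDPOINT=https://<resource>.openai.azure.com
//   AZURE_OPENAI_API_KEY=<key>
//   AZURE_COGNITIVE_SERVICES_ENDPOINT=https://<resource>.cognitiveservices.azure.com
//   AZURE_COGNITIVE_SERVICES_KEY=<key>
//   AZURE_STORAGE_CONNECTION_STRING=DefaultEndpointsProtocol=https;AccountName=<name>;AccountKey=<key>;EndpointSuffix=core.windows.net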
// Logger configuration (the File transports assume a logs/ directory exists)
const logger = winston.createLogger({
  level: process.env.LOG_LEVEL || 'info',
  format: winston.format.combine(
    winston.format.timestamp(),
    winston.format.errors({ stack: true }),
    winston.format.json()
  ),
  defaultMeta: { service: 'azure-ai-mcp-server' },
  transports: [
    new winston.transports.File({ filename: 'logs/error.log', level: 'error' }),
    new winston.transports.File({ filename: 'logs/combined.log' }),
    new winston.transports.Console({ format: winston.format.simple() }),
  ],
});

// Rate limiting: 100 requests per 60 seconds, shared across all tool calls
const rateLimiter = new RateLimiterMemory({
  points: 100, // Number of requests
  duration: 60, // Per 60 seconds
});

interface AzureAIClients {
  openai: AzureOpenAI;
  textAnalytics: TextAnalyticsClient;
  computerVision: ComputerVisionClient;
  face: FaceClient;
  blobService: BlobServiceClient;
}

class AzureAIMCPServer {
  private server: Server;
  private clients!: AzureAIClients; // assigned in initializeClients()

  constructor() {
    this.server = new Server(
      {
        name: 'azure-ai-mcp-server',
        version: '1.0.0',
      },
      {
        capabilities: {
          resources: {},
          tools: {},
        },
      }
    );

    this.initializeClients();
    this.setupResourceHandlers();
    this.setupToolHandlers();

    // Error handling
    this.server.onerror = (error) => {
      logger.error('MCP Server Error', { error: error.message, stack: error.stack });
    };

    process.on('SIGINT', async () => {
      logger.info('Shutting down Azure AI MCP Server');
      await this.server.close();
      process.exit(0);
    });

    process.on('uncaughtException', (error) => {
      logger.error('Uncaught Exception', { error: error.message, stack: error.stack });
      process.exit(1);
    });

    process.on('unhandledRejection', (reason, promise) => {
      logger.error('Unhandled Rejection', { reason, promise });
      process.exit(1);
    });
  }

  private initializeClients(): void {
    try {
      // The Computer Vision and Face clients share one Cognitive Services key
      const cognitiveCredentials = new ApiKeyCredentials({
        inHeader: { 'Ocp-Apim-Subscription-Key': process.env.AZURE_COGNITIVE_SERVICES_KEY! },
      });

      this.clients = {
        // Initialize Azure OpenAI client
        openai: new AzureOpenAI({
          endpoint: process.env.AZURE_OPENAI_ENDPOINT!,
          apiKey: process.env.AZURE_OPENAI_API_KEY!,
          apiVersion: '2024-02-01',
        }),

        // Initialize Text Analytics client (@azure/ai-text-analytics takes the
        // endpoint first, then a key or token credential)
        textAnalytics: new TextAnalyticsClient(
          process.env.AZURE_COGNITIVE_SERVICES_ENDPOINT!,
          new AzureKeyCredential(process.env.AZURE_COGNITIVE_SERVICES_KEY!)
        ),

        // Initialize the legacy Cognitive Services clients (credentials first)
        computerVision: new ComputerVisionClient(
          cognitiveCredentials,
          process.env.AZURE_COGNITIVE_SERVICES_ENDPOINT!
        ),

        face: new FaceClient(
          cognitiveCredentials,
          process.env.AZURE_COGNITIVE_SERVICES_ENDPOINT!
        ),

        // Initialize Azure Storage client
        blobService: BlobServiceClient.fromConnectionString(
          process.env.AZURE_STORAGE_CONNECTION_STRING!
        ),
      };

      logger.info('Azure AI clients initialized successfully');
    } catch (error) {
      logger.error('Failed to initialize Azure AI clients', { error });
      throw error;
    }
  }
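  // Untested alternative: with an Azure AD app registration, the key-based
  // Text Analytics credential above could be swapped for a token credential:
  //
  //   import { DefaultAzureCredential } from '@azure/identity';
  //
  //   const textAnalytics = new TextAnalyticsClient(
  //     process.env.AZURE_COGNITIVE_SERVICES_ENDPOINT!,
  //     new DefaultAzureCredential()
  //   );
  //
  // The legacy @azure/cognitiveservices-* clients predate @azure/identity and
  // still expect the ms-rest-js credentials used above.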
  private setupResourceHandlers(): void {
    // List available resources
    this.server.setRequestHandler(ListResourcesRequestSchema, async () => ({
      resources: [
        {
          uri: 'azure://models/available',
          name: 'Available Azure OpenAI Models',
          mimeType: 'application/json',
          description: 'List of available Azure OpenAI models and their capabilities',
        },
        {
          uri: 'azure://services/status',
          name: 'Azure AI Services Status',
          mimeType: 'application/json',
          description: 'Health status of all Azure AI services',
        },
      ],
    }));

    // Resource templates for dynamic resources
    this.server.setRequestHandler(ListResourceTemplatesRequestSchema, async () => ({
      resourceTemplates: [
        {
          uriTemplate: 'azure://logs/{service}/{date}',
          name: 'Service logs for a specific date',
          mimeType: 'application/json',
          description: 'Retrieve logs for a specific Azure AI service and date',
        },
        {
          uriTemplate: 'azure://metrics/{service}/{metric}',
          name: 'Service metrics',
          mimeType: 'application/json',
          description: 'Retrieve metrics for a specific Azure AI service',
        },
      ],
    }));

    // Handle resource reading
    this.server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
      const uri = request.params.uri;

      try {
        if (uri === 'azure://models/available') {
          // Return available models information (a static list; actual
          // deployment names vary per Azure resource)
          return {
            contents: [
              {
                uri,
                mimeType: 'application/json',
                text: JSON.stringify(
                  {
                    models: [
                      { name: 'gpt-4', capabilities: ['chat', 'completion'] },
                      { name: 'gpt-35-turbo', capabilities: ['chat', 'completion'] },
                      { name: 'text-embedding-ada-002', capabilities: ['embedding'] },
                    ],
                    timestamp: new Date().toISOString(),
                  },
                  null,
                  2
                ),
              },
            ],
          };
        }

        if (uri === 'azure://services/status') {
          // Return service health status (hardcoded; a real check would ping
          // each service)
          return {
            contents: [
              {
                uri,
                mimeType: 'application/json',
                text: JSON.stringify(
                  {
                    services: {
                      openai: 'healthy',
                      textAnalytics: 'healthy',
                      computerVision: 'healthy',
                      face: 'healthy',
                      storage: 'healthy',
                    },
                    timestamp: new Date().toISOString(),
                  },
                  null,
                  2
                ),
              },
            ],
          };
        }

        throw new McpError(ErrorCode.InvalidRequest, `Unknown resource: ${uri}`);
      } catch (error) {
        // Preserve intentional MCP errors instead of masking them as internal
        if (error instanceof McpError) {
          throw error;
        }
        logger.error('Error reading resource', { uri, error });
        throw new McpError(ErrorCode.InternalError, `Failed to read resource: ${uri}`);
      }
    });
  }

  private setupToolHandlers(): void {
    // List available tools
    this.server.setRequestHandler(ListToolsRequestSchema, async () => ({
      tools: [
        {
          name: 'chat_completion',
          description: 'Generate chat completions using Azure OpenAI',
          inputSchema: {
            type: 'object',
            properties: {
              messages: {
                type: 'array',
                items: {
                  type: 'object',
                  properties: {
                    role: { type: 'string', enum: ['system', 'user', 'assistant'] },
                    content: { type: 'string' },
                  },
                  required: ['role', 'content'],
                },
              },
              model: { type: 'string', default: 'gpt-4' },
              max_tokens: { type: 'number', minimum: 1, maximum: 4096, default: 1000 },
              temperature: { type: 'number', minimum: 0, maximum: 2, default: 0.7 },
            },
            required: ['messages'],
          },
        },
        {
          name: 'analyze_text',
          description: 'Analyze text using Azure Cognitive Services Text Analytics',
          inputSchema: {
            type: 'object',
            properties: {
              text: { type: 'string' },
              language: { type: 'string', default: 'en' },
              features: {
                type: 'array',
                items: { type: 'string', enum: ['sentiment', 'entities', 'keyPhrases', 'language'] },
                default: ['sentiment', 'entities', 'keyPhrases'],
              },
            },
            required: ['text'],
          },
        },
        {
          name: 'analyze_image',
          description: 'Analyze images using Azure Computer Vision',
          inputSchema: {
            type: 'object',
            properties: {
              imageUrl: { type: 'string', format: 'uri' },
              features: {
                type: 'array',
                items: {
                  type: 'string',
                  enum: ['categories', 'description', 'faces', 'objects', 'tags', 'adult', 'brands', 'color'],
                },
                default: ['description', 'tags', 'objects'],
              },
            },
            required: ['imageUrl'],
          },
        },
        {
          name: 'detect_faces',
          description: 'Detect and analyze faces in images using Azure Face API',
          inputSchema: {
            type: 'object',
            properties: {
              imageUrl: { type: 'string', format: 'uri' },
              returnFaceAttributes: {
                type: 'array',
                items: { type: 'string', enum: ['age', 'gender', 'emotion', 'glasses', 'facialHair'] },
                default: ['age', 'gender', 'emotion'],
              },
            },
            required: ['imageUrl'],
          },
        },
      ],
    }));

    // Handle tool calls
    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      const { name, arguments: args } = request.params;

      try {
        // Apply rate limiting
        await rateLimiter.consume('global');

        switch (name) {
          case 'chat_completion':
            return await this.handleChatCompletion(args);
          case 'analyze_text':
            return await this.handleTextAnalysis(args);
          case 'analyze_image':
            return await this.handleImageAnalysis(args);
          case 'detect_faces':
            return await this.handleFaceDetection(args);
          default:
            throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`);
        }
      } catch (error) {
        logger.error('Tool execution error', { tool: name, error });
        if (error instanceof McpError) {
          throw error;
        }
        return {
          content: [
            {
              type: 'text' as const,
              text: `Error executing ${name}: ${error instanceof Error ? error.message : 'Unknown error'}`,
            },
          ],
          isError: true,
        };
      }
    });
  }

  private async handleChatCompletion(args: any) {
    const { error, value } = chatCompletionSchema.validate(args);
    if (error) {
      throw new McpError(ErrorCode.InvalidParams, `Invalid parameters: ${error.message}`);
    }

    // Retry transient failures with exponential backoff
    const operation = retry.operation({
      retries: 3,
      factor: 2,
      minTimeout: 1000,
      maxTimeout: 5000,
    });

    return new Promise<any>((resolve, reject) => {
      operation.attempt(async (currentAttempt) => {
        try {
          logger.info('Executing chat completion', { attempt: currentAttempt, model: value.model });

          const response = await this.clients.openai.chat.completions.create({
            model: value.model, // the Azure OpenAI deployment name
            messages: value.messages,
            max_tokens: value.max_tokens,
            temperature: value.temperature,
          });

          const result = {
            content: [
              {
                type: 'text' as const,
                text: JSON.stringify(
                  {
                    choices: response.choices,
                    usage: response.usage,
                    model: response.model,
                  },
                  null,
                  2
                ),
              },
            ],
          };

          logger.info('Chat completion successful', {
            model: value.model,
            tokensUsed: response.usage?.total_tokens,
          });
          resolve(result);
        } catch (error) {
          logger.warn('Chat completion attempt failed', {
            attempt: currentAttempt,
            error: error instanceof Error ? error.message : 'Unknown error',
          });
          if (operation.retry(error as Error)) {
            return;
          }
          reject(operation.mainError());
        }
      });
    });
  }

  private async handleTextAnalysis(args: any) {
    const { error, value } = textAnalysisSchema.validate(args);
    if (error) {
      throw new McpError(ErrorCode.InvalidParams, `Invalid parameters: ${error.message}`);
    }

    logger.info('Executing text analysis', { features: value.features });

    const results: any = {};

    // Note: This is a simplified implementation that returns canned values.
    // A real implementation would use this.clients.textAnalytics; the
    // 'language' feature is accepted by the schema but not implemented here.
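    // An untested sketch of the real calls (analyzeSentiment,
    // recognizeEntities, and extractKeyPhrases are the corresponding
    // @azure/ai-text-analytics methods):
    //
    //   if (value.features.includes('sentiment')) {
    //     const [doc] = await this.clients.textAnalytics.analyzeSentiment([value.text], value.language);
    //     if (doc.error === undefined) {
    //       results.sentiment = { sentiment: doc.sentiment, confidenceScores: doc.confidenceScores };
    //     }
    //   }
    //   if (value.features.includes('entities')) {
    //     const [doc] = await this.clients.textAnalytics.recognizeEntities([value.text], value.language);
    //     if (doc.error === undefined) results.entities = doc.entities;
    //   }
    //   if (value.features.includes('keyPhrases')) {
    //     const [doc] = await this.clients.textAnalytics.extractKeyPhrases([value.text], value.language);
    //     if (doc.error === undefined) results.keyPhrases = doc.keyPhrases;
    //   }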
    if (value.features.includes('sentiment')) {
      results.sentiment = {
        sentiment: 'positive',
        confidenceScores: { positive: 0.8, neutral: 0.15, negative: 0.05 },
      };
    }
    if (value.features.includes('entities')) {
      results.entities = [];
    }
    if (value.features.includes('keyPhrases')) {
      results.keyPhrases = [];
    }

    return {
      content: [
        {
          type: 'text' as const,
          text: JSON.stringify(results, null, 2),
        },
      ],
    };
  }

  private async handleImageAnalysis(args: any) {
    const { error, value } = imageAnalysisSchema.validate(args);
    if (error) {
      throw new McpError(ErrorCode.InvalidParams, `Invalid parameters: ${error.message}`);
    }

    logger.info('Executing image analysis', { imageUrl: value.imageUrl, features: value.features });

    // Note: This is a simplified implementation that returns canned values.
    // A real implementation would use this.clients.computerVision.
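    // An untested sketch of the real call. @azure/cognitiveservices-computervision
    // takes capitalized VisualFeatureTypes, so the lowercase tool features would
    // need mapping (e.g. 'description' -> 'Description'):
    //
    //   const visualFeatures = value.features.map(
    //     (f: string) => f.charAt(0).toUpperCase() + f.slice(1)
    //   );
    //   const analysis = await this.clients.computerVision.analyzeImage(
    //     value.imageUrl,
    //     { visualFeatures }
    //   );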
    const results = {
      description: {
        captions: [{ text: 'Sample image description', confidence: 0.95 }],
      },
      tags: [
        { name: 'sample', confidence: 0.9 },
        { name: 'image', confidence: 0.85 },
      ],
      objects: [],
    };

    return {
      content: [
        {
          type: 'text' as const,
          text: JSON.stringify(results, null, 2),
        },
      ],
    };
  }

  private async handleFaceDetection(args: any) {
    logger.info('Executing face detection', { imageUrl: args.imageUrl });

    // Note: This is a simplified implementation that returns canned values.
    // A real implementation would use this.clients.face (see the sketch after
    // this listing).
    const results = {
      faces: [
        {
          faceId: 'sample-face-id',
          faceRectangle: { top: 100, left: 100, width: 200, height: 200 },
          faceAttributes: {
            age: 30,
            gender: 'male',
            emotion: { happiness: 0.8, sadness: 0.1, anger: 0.05, surprise: 0.05 },
          },
        },
      ],
    };

    return {
      content: [
        {
          type: 'text' as const,
          text: JSON.stringify(results, null, 2),
        },
      ],
    };
  }

  async run(): Promise<void> {
    const transport = new StdioServerTransport();
    await this.server.connect(transport);
    logger.info('Azure AI MCP Server running on stdio');
  }
}

const server = new AzureAIMCPServer();
server.run().catch((error) => {
  logger.error('Failed to start server', { error });
  process.exit(1);
});
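The face handler above returns canned data; as an untested sketch (not part of index.ts), the real call with @azure/cognitiveservices-face could look like the standalone helper below, reusing the same endpoint and key environment variables. Note that Microsoft has since placed face attributes such as age, gender, and emotion behind its Limited Access policy, so these options may be rejected on newer resources.

import { FaceClient } from '@azure/cognitiveservices-face';
import { ApiKeyCredentials } from '@azure/ms-rest-js';

// Hypothetical helper, separate from the server class above
async function detectFaces(imageUrl: string) {
  const client = new FaceClient(
    new ApiKeyCredentials({
      inHeader: { 'Ocp-Apim-Subscription-Key': process.env.AZURE_COGNITIVE_SERVICES_KEY! },
    }),
    process.env.AZURE_COGNITIVE_SERVICES_ENDPOINT!
  );
  // Returns one entry per detected face, each with a faceRectangle and any
  // requested faceAttributes
  return client.face.detectWithUrl(imageUrl, {
    returnFaceId: true,
    returnFaceAttributes: ['age', 'gender', 'emotion'],
  });
}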


MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/caiotk/nexguideai-azure-ai-mcp-server'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.