IncrementalImpactAnalyzer.js
import { MCPConfigurationAnalyzer } from './MCPConfigurationAnalyzer.js';
import { MCPSchemaExtractor } from './MCPSchemaExtractor.js';
import { TokenMeasurementEngine } from './TokenMeasurementEngine.js';

/**
 * Analyzes incremental token impact of MCP servers
 */
export class IncrementalImpactAnalyzer {
  constructor() {
    this.configAnalyzer = new MCPConfigurationAnalyzer();
    this.schemaExtractor = new MCPSchemaExtractor();
    this.tokenEngine = new TokenMeasurementEngine();
    this.results = null;
  }

  /**
   * Perform complete incremental analysis
   * @returns {Promise<Object>} Complete analysis results
   */
  async performCompleteAnalysis() {
    console.log('šŸ” Starting MCP Token Analysis...\n');

    // Phase 1: Configuration Analysis
    console.log('šŸ“‹ Phase 1: Analyzing MCP Configuration');
    const configResults = await this.configAnalyzer.analyzeConfiguration();

    if (!configResults.hasConfiguration) {
      return {
        error: 'No MCP configuration found',
        configPaths: this.configAnalyzer.configPaths
      };
    }

    console.log(` āœ… Found ${configResults.totalServers} servers in ${configResults.configPath}`);

    const activeServers = configResults.servers.filter(server => !server.disabled);
    if (activeServers.length === 0) {
      return {
        error: 'No active MCP servers found',
        totalServers: configResults.totalServers,
        disabledServers: configResults.totalServers
      };
    }

    console.log(` āœ… ${activeServers.length} active servers to analyze\n`);

    // Phase 2: Schema Extraction
    console.log('šŸ”Œ Phase 2: Extracting Server Schemas');
    const extractionResults = await this.schemaExtractor.extractMultipleServerSchemas(activeServers);
    const successfulExtractions = extractionResults.filter(r => r.success);

    console.log(` āœ… Successfully extracted ${successfulExtractions.length}/${activeServers.length} server schemas\n`);

    // Phase 3: Token Measurement
    console.log('šŸ”¢ Phase 3: Measuring Token Impact');
    const tokenResults = await this.analyzeTokenImpact(extractionResults);

    console.log(` āœ… Token analysis complete\n`);

    // Phase 4: Incremental Analysis
    console.log('šŸ“Š Phase 4: Incremental Impact Analysis');
    const incrementalResults = await this.analyzeIncrementalImpact(tokenResults);

    console.log(` āœ… Incremental analysis complete\n`);

    this.results = {
      timestamp: new Date().toISOString(),
      configuration: configResults,
      extraction: {
        results: extractionResults,
        summary: this.schemaExtractor.getSummaryStatistics(extractionResults)
      },
      tokens: tokenResults,
      incremental: incrementalResults,
      recommendations: this.generateRecommendations(incrementalResults)
    };

    return this.results;
  }

  /**
   * Analyze token impact for all servers
   * @param {Array} extractionResults Server extraction results
   * @returns {Promise<Object>} Token analysis results
   */
  async analyzeTokenImpact(extractionResults) {
    const serverTokenAnalyses = extractionResults.map(result =>
      this.tokenEngine.countServerTokens(result)
    );

    const baseline = this.tokenEngine.measureBaselineTokens();
    const totalOverhead = this.tokenEngine.calculateTotalOverhead(serverTokenAnalyses);

    return {
      baseline,
      servers: serverTokenAnalyses,
      totalOverhead,
      summary: this.generateTokenSummary(serverTokenAnalyses)
    };
  }

  /**
   * Analyze incremental impact of each server
   * @param {Object} tokenResults Token analysis results
   * @returns {Promise<Object>} Incremental analysis
   */
  async analyzeIncrementalImpact(tokenResults) {
    const servers = tokenResults.servers.filter(s => s.success);

    // Sort servers by token impact
    const serversByImpact = [...servers].sort((a, b) => b.totalTokens - a.totalTokens);

    // Calculate cumulative impact
    let cumulativeTokens = tokenResults.baseline.totalBuiltInTokens;
    const incrementalSteps = serversByImpact.map((server, index) => {
      const beforeTokens = cumulativeTokens;
      cumulativeTokens += server.totalTokens;

      return {
        step: index + 1,
        serverName: server.serverName,
        serverTokens: server.totalTokens,
        toolCount: server.toolCount,
        beforeTotal: beforeTokens,
        afterTotal: cumulativeTokens,
        deltaTokens: server.totalTokens,
        cumulativePercentage: (cumulativeTokens / 200000) * 100
      };
    });

    // Identify optimization opportunities
    const optimizationOpportunities = this.identifyOptimizationOpportunities(servers);

    // Calculate impact scenarios
    const impactScenarios = this.calculateImpactScenarios(servers);

    return {
      incrementalSteps,
      optimizationOpportunities,
      impactScenarios,
      heaviestServers: serversByImpact.slice(0, 3),
      lightestServers: serversByImpact.slice(-3).reverse()
    };
  }

  /**
   * Identify optimization opportunities
   * @param {Array} servers Server token analyses
   * @returns {Array} Optimization opportunities
   */
  identifyOptimizationOpportunities(servers) {
    const opportunities = [];

    // High token servers
    const highTokenServers = servers.filter(s => s.totalTokens > 2000);
    if (highTokenServers.length > 0) {
      opportunities.push({
        type: 'high_token_servers',
        severity: 'high',
        description: 'Servers with high token consumption',
        servers: highTokenServers.map(s => ({ name: s.serverName, tokens: s.totalTokens })),
        recommendation: 'Consider lazy loading or selective tool enabling'
      });
    }

    // Servers with many complex tools
    const complexServers = servers.filter(s => {
      const complexTools = s.tools?.filter(t => t.complexity.complexity === 'complex') || [];
      return complexTools.length > 3;
    });

    if (complexServers.length > 0) {
      opportunities.push({
        type: 'complex_schemas',
        severity: 'medium',
        description: 'Servers with complex tool schemas',
        servers: complexServers.map(s => ({
          name: s.serverName,
          complexTools: s.tools?.filter(t => t.complexity.complexity === 'complex').length || 0
        })),
        recommendation: 'Simplify schemas or break into smaller tools'
      });
    }

    // Servers with verbose descriptions
    const verboseServers = servers.filter(s => {
      const avgDescriptionTokens = s.tools?.reduce((sum, t) => sum + t.tokens.description, 0) / (s.tools?.length || 1) || 0;
      return avgDescriptionTokens > 50;
    });

    if (verboseServers.length > 0) {
      opportunities.push({
        type: 'verbose_descriptions',
        severity: 'low',
        description: 'Servers with verbose tool descriptions',
        servers: verboseServers.map(s => ({ name: s.serverName })),
        recommendation: 'Reduce description verbosity while maintaining clarity'
      });
    }

    return opportunities;
  }

  /**
   * Calculate various impact scenarios
   * @param {Array} servers Server token analyses
   * @returns {Object} Impact scenarios
   */
  calculateImpactScenarios(servers) {
    const totalMcpTokens = servers.reduce((sum, s) => sum + s.totalTokens, 0);
    const baseline = this.tokenEngine.measureBaselineTokens();

    // Scenario 1: Remove heaviest server
    const heaviestServer = servers.reduce((max, s) => s.totalTokens > max.totalTokens ? s : max, servers[0]);
    const withoutHeaviest = totalMcpTokens - heaviestServer.totalTokens;

    // Scenario 2: Keep only top 3 servers
    const top3Servers = [...servers].sort((a, b) => b.totalTokens - a.totalTokens).slice(0, 3);
    const top3Only = top3Servers.reduce((sum, s) => sum + s.totalTokens, 0);

    // Scenario 3: 50% token reduction (simulating optimization)
    const optimized50 = totalMcpTokens * 0.5;

    return {
      current: {
        mcpTokens: totalMcpTokens,
        totalOverhead: baseline.totalBuiltInTokens + totalMcpTokens,
        contextPercentage: ((baseline.totalBuiltInTokens + totalMcpTokens) / 200000) * 100
      },
      withoutHeaviest: {
        mcpTokens: withoutHeaviest,
        totalOverhead: baseline.totalBuiltInTokens + withoutHeaviest,
        contextPercentage: ((baseline.totalBuiltInTokens + withoutHeaviest) / 200000) * 100,
        savings: heaviestServer.totalTokens,
        removedServer: heaviestServer.serverName
      },
      top3Only: {
        mcpTokens: top3Only,
        totalOverhead: baseline.totalBuiltInTokens + top3Only,
        contextPercentage: ((baseline.totalBuiltInTokens + top3Only) / 200000) * 100,
        savings: totalMcpTokens - top3Only,
        keptServers: top3Servers.map(s => s.serverName)
      },
      optimized50: {
        mcpTokens: optimized50,
        totalOverhead: baseline.totalBuiltInTokens + optimized50,
        contextPercentage: ((baseline.totalBuiltInTokens + optimized50) / 200000) * 100,
        savings: totalMcpTokens - optimized50
      }
    };
  }

  /**
   * Generate token summary statistics
   * @param {Array} serverTokenAnalyses Server token analyses
   * @returns {Object} Summary statistics
   */
  generateTokenSummary(serverTokenAnalyses) {
    const successful = serverTokenAnalyses.filter(s => s.success);

    if (successful.length === 0) {
      return { error: 'No successful server analyses' };
    }

    const totalTokens = successful.reduce((sum, s) => sum + s.totalTokens, 0);
    const totalTools = successful.reduce((sum, s) => sum + s.toolCount, 0);

    return {
      totalServers: successful.length,
      totalTokens,
      totalTools,
      averageTokensPerServer: Math.round(totalTokens / successful.length),
      averageTokensPerTool: Math.round(totalTokens / totalTools),
      averageToolsPerServer: Math.round(totalTools / successful.length)
    };
  }

  /**
   * Generate recommendations based on analysis
   * @param {Object} incrementalResults Incremental analysis results
   * @returns {Array} Array of recommendations
   */
  generateRecommendations(incrementalResults) {
    const recommendations = [];

    // High overhead recommendation
    const totalPercentage = incrementalResults.incrementalSteps[incrementalResults.incrementalSteps.length - 1]?.cumulativePercentage || 0;
    if (totalPercentage > 10) {
      recommendations.push({
        priority: 'high',
        category: 'overhead',
        title: 'High Token Overhead Detected',
        description: `MCP servers consume ${totalPercentage.toFixed(1)}% of context window`,
        action: 'Consider implementing lazy loading or reducing active servers'
      });
    }

    // Server-specific recommendations
    if (incrementalResults.heaviestServers.length > 0) {
      const heaviest = incrementalResults.heaviestServers[0];
      recommendations.push({
        priority: 'medium',
        category: 'optimization',
        title: 'Optimize Heavy Server',
        description: `${heaviest.serverName} consumes ${heaviest.totalTokens} tokens`,
        action: 'Review tool schemas for optimization opportunities'
      });
    }

    // Optimization opportunities
    incrementalResults.optimizationOpportunities.forEach(opp => {
      recommendations.push({
        priority: opp.severity,
        category: 'schema',
        title: opp.description,
        description: `${opp.servers.length} servers affected`,
        action: opp.recommendation
      });
    });

    return recommendations;
  }

  /**
   * Get formatted analysis results
   * @returns {Object} Current analysis results
   */
  getResults() {
    return this.results;
  }

  /**
   * Export results to JSON file
   * @param {string} filename Output filename
   * @returns {Promise<void>}
   */
  async exportResults(filename = 'mcp-token-analysis.json') {
    if (!this.results) {
      throw new Error('No analysis results available. Run performCompleteAnalysis() first.');
    }

    const fs = await import('fs/promises');
    await fs.writeFile(filename, JSON.stringify(this.results, null, 2));

    console.log(`šŸ“„ Results exported to ${filename}`);
  }
}
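
Below is a minimal usage sketch, not part of the original file: it assumes the three companion modules imported at the top of IncrementalImpactAnalyzer.js resolve from the same directory and that Node runs the script as an ES module; the entry-point filename is illustrative only.

// run-analysis.js (hypothetical entry point; name not from the source)
// Assumes MCPConfigurationAnalyzer.js, MCPSchemaExtractor.js and
// TokenMeasurementEngine.js sit next to IncrementalImpactAnalyzer.js.
import { IncrementalImpactAnalyzer } from './IncrementalImpactAnalyzer.js';

const analyzer = new IncrementalImpactAnalyzer();

// Runs all four phases: configuration analysis, schema extraction,
// token measurement, and incremental impact analysis.
const results = await analyzer.performCompleteAnalysis();

if (results.error) {
  console.error(`Analysis failed: ${results.error}`);
} else {
  // Recommendations are attached by generateRecommendations() inside performCompleteAnalysis().
  for (const rec of results.recommendations) {
    console.log(`[${rec.priority}] ${rec.title}: ${rec.action}`);
  }
  // Persist the full report; exportResults() defaults to 'mcp-token-analysis.json'.
  await analyzer.exportResults();
}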
