Qwen3-Coder MCP Server

by keithah
qwen3-mcp-server.js (6.83 kB)
#!/usr/bin/env node
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
  CallToolRequestSchema,
  ListToolsRequestSchema,
} from "@modelcontextprotocol/sdk/types.js";
import { spawn } from "child_process";

const server = new Server(
  {
    name: "qwen3-coder-server",
    version: "0.1.0",
    description: "MCP server for Qwen3-Coder integration with Claude Code"
  },
  {
    capabilities: {
      tools: {},
    },
  }
);

// Call Ollama with the Qwen3-Coder model and return its output
async function callQwen3Coder(prompt, options = {}) {
  return new Promise((resolve, reject) => {
    const ollamaProcess = spawn('ollama', ['run', 'qwen3-coder:30b', prompt], {
      stdio: ['pipe', 'pipe', 'pipe']
    });

    let output = '';
    let error = '';

    ollamaProcess.stdout.on('data', (data) => {
      output += data.toString();
    });

    ollamaProcess.stderr.on('data', (data) => {
      error += data.toString();
    });

    // Set a timeout for long-running requests (2 minutes by default)
    const timer = setTimeout(() => {
      ollamaProcess.kill();
      reject(new Error('Request timeout'));
    }, options.timeout || 120000);

    ollamaProcess.on('close', (code) => {
      clearTimeout(timer); // prevent the stale timer from killing a finished process
      if (code === 0) {
        resolve(output.trim());
      } else {
        reject(new Error(`Ollama process exited with code ${code}: ${error}`));
      }
    });
  });
}

// List available tools
server.setRequestHandler(ListToolsRequestSchema, async () => {
  return {
    tools: [
      {
        name: "qwen3_code_review",
        description: "Review code using Qwen3-Coder",
        inputSchema: {
          type: "object",
          properties: {
            code: { type: "string", description: "The code to review" },
            language: { type: "string", description: "Programming language of the code" }
          },
          required: ["code"]
        }
      },
      {
        name: "qwen3_code_explain",
        description: "Explain code using Qwen3-Coder",
        inputSchema: {
          type: "object",
          properties: {
            code: { type: "string", description: "The code to explain" },
            language: { type: "string", description: "Programming language of the code" }
          },
          required: ["code"]
        }
      },
      {
        name: "qwen3_code_generate",
        description: "Generate code using Qwen3-Coder",
        inputSchema: {
          type: "object",
          properties: {
            prompt: { type: "string", description: "Description of what code to generate" },
            language: { type: "string", description: "Target programming language" }
          },
          required: ["prompt"]
        }
      },
      {
        name: "qwen3_code_fix",
        description: "Fix bugs in code using Qwen3-Coder",
        inputSchema: {
          type: "object",
          properties: {
            code: { type: "string", description: "The buggy code to fix" },
            error: { type: "string", description: "Error message or description of the bug" },
            language: { type: "string", description: "Programming language of the code" }
          },
          required: ["code"]
        }
      },
      {
        name: "qwen3_code_optimize",
        description: "Optimize code using Qwen3-Coder",
        inputSchema: {
          type: "object",
          properties: {
            code: { type: "string", description: "The code to optimize" },
            criteria: { type: "string", description: "Optimization criteria (performance, memory, readability, etc.)" },
            language: { type: "string", description: "Programming language of the code" }
          },
          required: ["code"]
        }
      }
    ]
  };
});

// Handle tool calls
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params;

  try {
    let prompt = "";
    let result = "";

    switch (name) {
      case "qwen3_code_review":
        prompt = `Please review the following ${args.language || 'code'} and provide feedback on code quality, potential bugs, best practices, and suggestions for improvement:

\`\`\`${args.language || ''}
${args.code}
\`\`\`

Please provide a detailed code review with specific suggestions.`;
        result = await callQwen3Coder(prompt);
        break;

      case "qwen3_code_explain":
        prompt = `Please explain the following ${args.language || 'code'} in detail, including what it does, how it works, and any important concepts:

\`\`\`${args.language || ''}
${args.code}
\`\`\`

Provide a clear and comprehensive explanation.`;
        result = await callQwen3Coder(prompt);
        break;

      case "qwen3_code_generate":
        prompt = `Generate ${args.language || 'code'} for the following requirement:

${args.prompt}

${args.language ? `Please write the code in ${args.language}.` : ''}

Provide clean, well-documented code with explanations.`;
        result = await callQwen3Coder(prompt);
        break;

      case "qwen3_code_fix":
        prompt = `Fix the following ${args.language || 'code'} that has a bug:

\`\`\`${args.language || ''}
${args.code}
\`\`\`

${args.error ? `Error/Issue: ${args.error}` : ''}

Please provide the corrected code with explanations of what was wrong and how it was fixed.`;
        result = await callQwen3Coder(prompt);
        break;

      case "qwen3_code_optimize":
        prompt = `Optimize the following ${args.language || 'code'} for ${args.criteria || 'performance'}:

\`\`\`${args.language || ''}
${args.code}
\`\`\`

Please provide optimized code with explanations of the improvements made.`;
        result = await callQwen3Coder(prompt);
        break;

      default:
        throw new Error(`Unknown tool: ${name}`);
    }

    return {
      content: [
        {
          type: "text",
          text: result
        }
      ]
    };
  } catch (error) {
    return {
      content: [
        {
          type: "text",
          text: `Error: ${error.message}`
        }
      ],
      isError: true
    };
  }
});

// Start the server over stdio
async function main() {
  const transport = new StdioServerTransport();
  await server.connect(transport);
  console.error("Qwen3-Coder MCP server running on stdio");
}

main().catch(console.error);
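To try the server from an MCP client such as Claude Code or Claude Desktop, it can be registered in the client's MCP configuration. A minimal sketch, assuming Node.js is installed, Ollama is running with the qwen3-coder:30b model pulled, and the file has been saved locally; the "qwen3-coder" entry name and the file path are illustrative placeholders:

{
  "mcpServers": {
    "qwen3-coder": {
      "command": "node",
      "args": ["/path/to/qwen3-mcp-server.js"]
    }
  }
}

Once registered, the five qwen3_* tools listed above become callable from the client over stdio.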

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/keithah/qwen3-coder-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.