MCP Vulnerability Checker Server

by firetix
cvss_calculator.py • 9.41 kB
""" CVSS Calculator Tool This module provides functionality to calculate CVSS (Common Vulnerability Scoring System) scores from vector strings. Supports CVSS v3.0 and v3.1. """ import re from typing import Any, Dict, List import mcp.types as types def parse_cvss_vector(vector: str) -> Dict[str, Any]: """ Parse a CVSS vector string and extract metrics. Args: vector: CVSS vector string (e.g., "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H") Returns: Dictionary containing parsed metrics """ # Clean up vector string vector = vector.strip() # Extract version version_match = re.match(r"^CVSS:(\d+\.\d+)/", vector) if not version_match: raise ValueError("Invalid CVSS vector format. Must start with CVSS:x.x/") version = version_match.group(1) if version not in ["3.0", "3.1"]: raise ValueError( f"Unsupported CVSS version: {version}. Only 3.0 and 3.1 are supported." ) # Parse metrics metrics = {} metrics["version"] = version # Split the vector into components vector_parts = vector.split("/")[1:] # Skip the CVSS:x.x part for part in vector_parts: if ":" not in part: continue key, value = part.split(":", 1) metrics[key] = value return metrics def calculate_cvss3_base_score(metrics: Dict[str, Any]) -> Dict[str, Any]: """ Calculate CVSS v3.0/3.1 base score from parsed metrics. Args: metrics: Parsed CVSS metrics dictionary Returns: Dictionary containing calculated scores and interpretations """ # CVSS v3.x base metric values av_values = {"N": 0.85, "A": 0.62, "L": 0.55, "P": 0.2} ac_values = {"L": 0.77, "H": 0.44} pr_values = { "N": {"unchanged": 0.85, "changed": 0.85}, "L": {"unchanged": 0.62, "changed": 0.68}, "H": {"unchanged": 0.27, "changed": 0.50}, } ui_values = {"N": 0.85, "R": 0.62} s_values = {"U": "unchanged", "C": "changed"} cia_values = {"H": 0.56, "L": 0.22, "N": 0.0} # Extract required metrics try: av = metrics.get("AV", "") ac = metrics.get("AC", "") pr = metrics.get("PR", "") ui = metrics.get("UI", "") s = metrics.get("S", "") c = metrics.get("C", "") i = metrics.get("I", "") a = metrics.get("A", "") # Validate all required metrics are present required = ["AV", "AC", "PR", "UI", "S", "C", "I", "A"] missing = [metric for metric in required if metrics.get(metric, "") == ""] if missing: raise ValueError(f"Missing required metrics: {', '.join(missing)}") # Get numeric values av_score = av_values[av] ac_score = ac_values[ac] ui_score = ui_values[ui] scope = s_values[s] pr_score = pr_values[pr][scope] c_score = cia_values[c] i_score = cia_values[i] a_score = cia_values[a] # Calculate Impact Sub Score (ISS) iss = 1 - ((1 - c_score) * (1 - i_score) * (1 - a_score)) # Calculate Impact Score if scope == "unchanged": impact = 6.42 * iss else: # scope == "changed" impact = 7.52 * (iss - 0.029) - 3.25 * pow(iss - 0.02, 15) # Calculate Exploitability Score exploitability = 8.22 * av_score * ac_score * pr_score * ui_score # Calculate Base Score if impact <= 0: base_score = 0.0 elif scope == "unchanged": base_score = min(10.0, impact + exploitability) else: # scope == "changed" base_score = min(10.0, 1.08 * (impact + exploitability)) # Round to one decimal place base_score = round(base_score, 1) # Determine severity rating if base_score >= 9.0: severity = "CRITICAL" severity_color = "๐Ÿ”ด" elif base_score >= 7.0: severity = "HIGH" severity_color = "๐ŸŸ " elif base_score >= 4.0: severity = "MEDIUM" severity_color = "๐ŸŸก" elif base_score > 0.0: severity = "LOW" severity_color = "๐ŸŸข" else: severity = "NONE" severity_color = "โšช" return { "base_score": base_score, "impact_score": round(impact, 1), 
"exploitability_score": round(exploitability, 1), "severity": severity, "severity_color": severity_color, "scope": scope, "metrics": { "attack_vector": {"value": av, "score": av_score}, "attack_complexity": {"value": ac, "score": ac_score}, "privileges_required": {"value": pr, "score": pr_score}, "user_interaction": {"value": ui, "score": ui_score}, "scope": {"value": s, "description": scope}, "confidentiality": {"value": c, "score": c_score}, "integrity": {"value": i, "score": i_score}, "availability": {"value": a, "score": a_score}, }, } except KeyError as e: raise ValueError(f"Invalid metric value: {e}") async def calculate_cvss_score( vector: str, ) -> List[types.TextContent | types.ImageContent | types.EmbeddedResource]: """ Calculate CVSS base score from a CVSS vector string. Args: vector: CVSS vector string (e.g., "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H") Returns: List of content containing CVSS calculation results or error messages """ try: # Parse the vector metrics = parse_cvss_vector(vector) # Calculate score if metrics["version"] in ["3.0", "3.1"]: result = calculate_cvss3_base_score(metrics) else: return [ types.TextContent( type="text", text=f"Error: CVSS version {metrics['version']} is not supported. Only versions 3.0 and 3.1 are supported.", ) ] # Format the response response = "๐Ÿงฎ **CVSS Score Calculator Results**\n\n" response += f"๐Ÿ“Š **Base Score:** {result['base_score']} / 10.0\n" response += f"โš ๏ธ **Severity:** {result['severity_color']} {result['severity']}\n" response += f"๐Ÿ“ˆ **Impact Score:** {result['impact_score']}\n" response += f"๐ŸŽฏ **Exploitability Score:** {result['exploitability_score']}\n" response += f"๐Ÿ”„ **Scope:** {result['scope'].title()}\n" response += f"๐Ÿ“‹ **CVSS Version:** {metrics['version']}\n\n" response += "๐Ÿ” **Metric Breakdown:**\n" response += f" โ€ข **Attack Vector (AV):** {result['metrics']['attack_vector']['value']} (Score: {result['metrics']['attack_vector']['score']})\n" response += f" โ€ข **Attack Complexity (AC):** {result['metrics']['attack_complexity']['value']} (Score: {result['metrics']['attack_complexity']['score']})\n" response += f" โ€ข **Privileges Required (PR):** {result['metrics']['privileges_required']['value']} (Score: {result['metrics']['privileges_required']['score']})\n" response += f" โ€ข **User Interaction (UI):** {result['metrics']['user_interaction']['value']} (Score: {result['metrics']['user_interaction']['score']})\n" response += f" โ€ข **Scope (S):** {result['metrics']['scope']['value']} ({result['metrics']['scope']['description']})\n" response += f" โ€ข **Confidentiality (C):** {result['metrics']['confidentiality']['value']} (Score: {result['metrics']['confidentiality']['score']})\n" response += f" โ€ข **Integrity (I):** {result['metrics']['integrity']['value']} (Score: {result['metrics']['integrity']['score']})\n" response += f" โ€ข **Availability (A):** {result['metrics']['availability']['value']} (Score: {result['metrics']['availability']['score']})\n\n" response += "๐Ÿ“– **Metric Meanings:**\n" response += " โ€ข **AV** - N:Network, A:Adjacent, L:Local, P:Physical\n" response += " โ€ข **AC** - L:Low, H:High\n" response += " โ€ข **PR** - N:None, L:Low, H:High\n" response += " โ€ข **UI** - N:None, R:Required\n" response += " โ€ข **S** - U:Unchanged, C:Changed\n" response += " โ€ข **C/I/A** - H:High, L:Low, N:None\n\n" response += "๐ŸŽฏ **Severity Ranges:**\n" response += " โ€ข ๐Ÿ”ด **CRITICAL:** 9.0 - 10.0\n" response += " โ€ข ๐ŸŸ  **HIGH:** 7.0 - 8.9\n" response += " โ€ข ๐ŸŸก **MEDIUM:** 
4.0 - 6.9\n" response += " โ€ข ๐ŸŸข **LOW:** 0.1 - 3.9\n" response += " โ€ข โšช **NONE:** 0.0\n\n" response += f"๐Ÿ“š **Original Vector:** `{vector}`\n" response += "๐ŸŒ **CVSS Specification:** https://www.first.org/cvss/specification-document" return [types.TextContent(type="text", text=response)] except ValueError as e: return [ types.TextContent( type="text", text=f"Error: {str(e)}\n\nExample valid vector: CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", ) ] except Exception as e: return [ types.TextContent( type="text", text=f"Error: Failed to calculate CVSS score: {str(e)}", ) ]
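
For reference, a minimal usage sketch of the module above. It assumes the file is importable as cvss_calculator (an assumption; inside the MCP server the async function is exposed as a tool rather than called directly):

import asyncio

from cvss_calculator import (  # assumed import path for the module above
    calculate_cvss3_base_score,
    calculate_cvss_score,
    parse_cvss_vector,
)

# Parse a vector and compute the base score synchronously.
metrics = parse_cvss_vector("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H")
result = calculate_cvss3_base_score(metrics)
print(result["base_score"], result["severity"])  # 9.8 CRITICAL

# Or call the async MCP tool entry point, which returns formatted TextContent.
content = asyncio.run(calculate_cvss_score("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H"))
print(content[0].text)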

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/firetix/vulnerability-intelligence-mcp-server'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.