Skip to main content
Glama
google_llm.py — 1.09 kB
import os

from dotenv import load_dotenv
from langchain_google_genai import GoogleGenerativeAI

# Load environment variables from a local .env file so GOOGLE_API_KEY is available.
load_dotenv()


def ask_google(prompt: str):
    """Send *prompt* to the Google Gemini model via LangChain and return its reply.

    On success, returns the model's text response. On a missing API key or any
    runtime failure, returns a human-readable error string instead of raising,
    so callers never see an exception from this function.
    """
    api_key = os.getenv("GOOGLE_API_KEY")
    if not api_key:
        # Indonesian: "GOOGLE_API_KEY not found. Make sure the .env file is configured correctly."
        return "Error: GOOGLE_API_KEY tidak ditemukan. Pastikan file .env sudah dikonfigurasi dengan benar."

    try:
        # Initialize LLM inside function to avoid blocking at module load
        llm = GoogleGenerativeAI(
            model="models/gemini-2.5-flash",  # Use stable version
            temperature=0.3,
            google_api_key=api_key,
            timeout=30,  # 30 seconds timeout
            max_retries=2,
        )
        print(f" Sending prompt to LLM (length: {len(prompt)} chars)...")
        answer = llm.invoke(prompt)
        print(f" LLM responded (length: {len(answer)} chars)")
        return answer
    except Exception as exc:
        # Surface the failure to the caller as a string rather than propagating.
        error_msg = f"Error LLM: {str(exc)}"
        print(f" ❌ {error_msg}")
        return error_msg

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/jamalexfo/mcp-api-tools'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.