__main__.py (12.2 kB)
# Copyright © 2025 Dr.-Ing. Paul Wilhelm <paul@wilhelm.dev>
# This file is part of Archive Agent. See LICENSE for details.

from archive_agent.util.Informer import Informer
from archive_agent.util.json_util import generate_json_filename, write_to_json

with Informer("Starting…"):
    import typer
    import logging
    import asyncio
    import sys
    import subprocess

    from pathlib import Path
    from typing import List, Optional

    from archive_agent.core.ContextManager import ContextManager
    from archive_agent.mcp_server.McpServer import McpServer

logger = logging.getLogger(__name__)

app = typer.Typer(
    invoke_without_command=True,
    add_completion=False,
    help="Find your files with natural language and ask questions.",
)


@app.callback()
def root(ctx: typer.Context) -> None:
    """
    Root callback that runs when no subcommand is provided.
    """
    if ctx.invoked_subcommand is not None:
        return  # Handle subcommand.

    _context = ContextManager()

    # Show help.
    typer.echo(ctx.get_help())

    if _context.watchlist.isEmpty():
        logger.info("💡 Include your first files ('archive-agent include')")

    raise typer.Exit()


# noinspection PyShadowingNames
@app.command()
def switch(profile_name: str = typer.Argument("")) -> None:
    """
    Create or switch profile.
    """
    logger.info("💡 You can enter an existing or NEW name")
    _context = ContextManager(profile_name=profile_name)


@app.command()
def config() -> None:
    """
    Open current profile config in nano.
    """
    context = ContextManager()
    subprocess.run(["nano", str(context.config.file_path)])


# noinspection PyShadowingNames
@app.command()
def include(patterns: List[str] = typer.Argument(None)) -> None:
    """
    Add included pattern(s).
    """
    context = ContextManager()
    if not patterns:
        patterns = [context.cli.prompt("👉 Include pattern?", is_cmd=True).strip()]
    for pattern in patterns:
        context.watchlist.include(pattern)
    logger.info("💡 Don't forget to track files ('archive-agent track')")


# noinspection PyShadowingNames
@app.command()
def exclude(patterns: List[str] = typer.Argument(None)) -> None:
    """
    Add excluded pattern(s).
    """
    context = ContextManager()
    if not patterns:
        patterns = [context.cli.prompt("👉 Exclude pattern?", is_cmd=True).strip()]
    for pattern in patterns:
        context.watchlist.exclude(pattern)
    logger.info("💡 Don't forget to track files ('archive-agent track')")


# noinspection PyShadowingNames
@app.command()
def remove(patterns: List[str] = typer.Argument(None)) -> None:
    """
    Remove previously included / excluded pattern(s).
    """
    context = ContextManager()
    if not patterns:
        patterns = [context.cli.prompt("👉 Remove pattern?", is_cmd=True).strip()]
    for pattern in patterns:
        context.watchlist.remove(pattern)
    logger.info("💡 Don't forget to track files ('archive-agent track')")


@app.command()
def patterns() -> None:
    """
    Show the list of included / excluded patterns.
    """
    context = ContextManager()
    context.watchlist.patterns()

    if context.watchlist.isEmpty():
        logger.info("💡 Include your first files ('archive-agent include')")


@app.command()
def track() -> None:
    """
    Resolve all patterns and track changed files.
    """
    context = ContextManager()
    n = context.watchlist.track()

    if n > 0:
        logger.info("💡 Commit your tracked files now ('archive-agent commit')")
        logger.info("💡 OR list added/removed/changed ('archive-agent diff')")

    if context.watchlist.isEmpty():
        logger.info("💡 Include your first files ('archive-agent include')")
    else:
        logger.info("💡 Ready to get some answers? ('archive-agent query')")


# noinspection PyShadowingBuiltins
@app.command()
def list() -> None:
    """
    Show the list of tracked files.
    """
    context = ContextManager()
    logger.info("💡 Always track your files first ('archive-agent track')")
    context.watchlist.list()


@app.command()
def diff() -> None:
    """
    Show the list of changed files.
    """
    context = ContextManager()
    logger.info("💡 Always track your files first ('archive-agent track')")
    context.watchlist.diff()


@app.command()
def commit(
    nocache: bool = typer.Option(
        False, "--nocache",
        help="Invalidate the AI cache for this commit."
    ),
    verbose: bool = typer.Option(
        False, "--verbose",
        help="Show additional information (vision, chunking, embedding)."
    ),
    confirm_delete: bool = typer.Option(
        False, "--confirm-delete",
        help="Automatically confirm deleting untracked files from the database."
    ),
) -> None:
    """
    Sync changed files with the Qdrant database.
    """
    context = ContextManager(invalidate_cache=nocache, verbose=verbose)
    logger.info("💡 Always track your files first ('archive-agent track')")
    context.committer.commit(confirm_delete=confirm_delete)
    context.usage()

    if context.watchlist.isEmpty():
        logger.info("💡 Include your first files ('archive-agent include')")
    else:
        logger.info("💡 Ready to get some answers? ('archive-agent query')")


@app.command()
def update(
    nocache: bool = typer.Option(
        False, "--nocache",
        help="Invalidate the AI cache for this commit."
    ),
    verbose: bool = typer.Option(
        False, "--verbose",
        help="Show additional information (vision, chunking, embedding)."
    ),
    confirm_delete: bool = typer.Option(
        False, "--confirm-delete",
        help="Automatically confirm deleting untracked files from the database."
    ),
) -> None:
    """
    `track` and then `commit` in one go.
    """
    context = ContextManager(invalidate_cache=nocache, verbose=verbose)
    context.watchlist.track()
    context.committer.commit(confirm_delete=confirm_delete)
    context.usage()

    if context.watchlist.isEmpty():
        logger.info("💡 Include your first files ('archive-agent include')")
    else:
        logger.info("💡 Ready to get some answers? ('archive-agent query')")


@app.command()
def search(
    question: str = typer.Argument(None),
    nocache: bool = typer.Option(
        False, "--nocache",
        help="Invalidate the AI cache for this search."
    ),
    verbose: bool = typer.Option(
        False, "--verbose",
        help="Show additional information (embedding, retrieval, reranking)."
    ),
) -> None:
    """
    List files relevant to the question.
    """
    context = ContextManager(invalidate_cache=nocache, verbose=verbose)
    logger.info("💡 Ask something — be as specific as possible")

    if question is None:
        question = context.cli.prompt("🧠 Ask Archive Agent…", is_cmd=True)

    _points = asyncio.run(context.qdrant.search(question))
    context.usage()


@app.command()
def query(
    question: str = typer.Argument(None),
    nocache: bool = typer.Option(
        False, "--nocache",
        help="Invalidate the AI cache for this query."
    ),
    verbose: bool = typer.Option(
        False, "--verbose",
        help="Show additional information (embedding, retrieval, reranking, querying)."
    ),
    to_json: str = typer.Option(
        None, "--to-json",
        help="Write answer to JSON file."
    ),
    to_json_auto: Optional[str] = typer.Option(
        None, "--to-json-auto",
        help="Write answer to JSON file in directory with auto-generated filename from question.",
        metavar="DIR", is_flag=False, flag_value="."
    ),
) -> None:
    """
    Get answer to question using RAG.
    """
    to_json_auto_dir = None if to_json_auto is None else Path(to_json_auto).expanduser().resolve()

    context = ContextManager(invalidate_cache=nocache, verbose=verbose, to_json_auto_dir=to_json_auto_dir)
    logger.info("💡 Ask something — be as specific as possible")

    if question is None:
        question = context.cli.prompt("🧠 Ask Archive Agent…", is_cmd=True)

    _query_result, _answer_text = asyncio.run(context.qdrant.query(question))

    # Handle JSON output options
    json_filename = None
    if to_json:
        json_filename = Path(to_json)
    elif to_json_auto and to_json_auto_dir:
        json_filename = to_json_auto_dir / generate_json_filename(question)

    if json_filename:
        write_to_json(
            json_filename=json_filename,
            question=question,
            query_result=_query_result.model_dump(),
            answer_text=_answer_text,
        )

    context.usage()
    logger.info("⚡ Process finished")


@app.command()
def gui(
    nocache: bool = typer.Option(
        False, "--nocache",
        help="Invalidate the AI cache for this query."
    ),
    verbose: bool = typer.Option(
        False, "--verbose",
        help="Show additional information (embedding, retrieval, reranking, querying)."
    ),
    to_json_auto: Optional[str] = typer.Option(
        None, "--to-json-auto",
        help="Write answer to JSON file in directory with auto-generated filename from question.",
        metavar="DIR", is_flag=False, flag_value="."
    ),
) -> None:
    """
    Launch browser-based GUI.

    This runs Streamlit via the current Python interpreter so we use the
    environment created by ``uv sync`` / ``uv run`` rather than a global install.

    Notes
    -----
    Streamlit consumes its own CLI flags. To forward flags to the target script,
    insert ``--`` after the script path; everything following is passed through
    in ``sys.argv`` of the Streamlit app.
    """
    logger.info("💡 GUI is starting…")

    gui_path = Path(__file__).parent / "core" / "GuiManager.py"

    to_json_auto_dir = None if to_json_auto is None else Path(to_json_auto).expanduser().resolve()

    # Collect script-level args
    script_args: List[str] = []
    if nocache:
        script_args.append("--nocache")
    if verbose:
        script_args.append("--verbose")
    if to_json_auto:
        script_args.append("--to-json-auto")
        script_args.append(str(to_json_auto_dir))

    # Build command: put `--` before args so Streamlit forwards them to the script
    cmd: List[str] = [sys.executable, "-m", "streamlit", "run", str(gui_path)]
    if script_args:
        cmd.append("--")
        cmd.extend(script_args)

    subprocess.run(cmd, check=True)


@app.command()
def mcp(
    nocache: bool = typer.Option(
        False, "--nocache",
        help="Invalidate the AI cache for this query."
    ),
    verbose: bool = typer.Option(
        False, "--verbose",
        help="Show additional information (embedding, retrieval, reranking, querying)."
    ),
    to_json_auto: Optional[str] = typer.Option(
        None, "--to-json-auto",
        help="Write answer to JSON file in directory with auto-generated filename from question.",
        metavar="DIR", is_flag=False, flag_value="."
    ),
) -> None:
    """
    Start MCP server.
    """
    to_json_auto_dir = None if to_json_auto is None else Path(to_json_auto).expanduser().resolve()

    context = ContextManager(invalidate_cache=nocache, verbose=verbose, to_json_auto_dir=to_json_auto_dir)
    logger.info("💡 MCP is starting…")

    # TODO: Allow for graceful CTRL+C shutdown without the `asyncio.exceptions.CancelledError`
    mcp_server = McpServer(
        context=context,
        host=context.config.data[context.config.MCP_SERVER_HOST],
        port=context.config.data[context.config.MCP_SERVER_PORT],
    )
    mcp_server.start()

    context.usage()


if __name__ == "__main__":
    app()

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/shredEngineer/Archive-Agent'
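The same endpoint can also be consumed programmatically. Here is a minimal Python sketch, assuming the endpoint is publicly accessible and returns JSON (the response schema is not shown here):

# Minimal sketch: fetch this server's MCP directory entry and pretty-print it.
# Assumes the endpoint is public and returns a JSON document.
import json
import urllib.request

url = "https://glama.ai/api/mcp/v1/servers/shredEngineer/Archive-Agent"
with urllib.request.urlopen(url) as response:
    data = json.load(response)

print(json.dumps(data, indent=2))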

If you have feedback or need assistance with the MCP directory API, please join our Discord server.