ContextManager.py
# Copyright © 2025 Dr.-Ing. Paul Wilhelm <paul@wilhelm.dev>
# This file is part of Archive Agent. See LICENSE for details.

from pathlib import Path
from typing import Optional, Type

from archive_agent.ai.AiManagerFactory import AiManagerFactory
from archive_agent.ai_provider.AiProviderParams import AiProviderParams
from archive_agent.profile.ProfileManager import ProfileManager
from archive_agent.config.ConfigManager import ConfigManager
from archive_agent.core.CacheManager import CacheManager
from archive_agent.core.CliManager import CliManager
from archive_agent.core.ProgressManager import ProgressManager
from archive_agent.config.DecoderSettings import DecoderSettings, OcrStrategy
from archive_agent.watchlist.WatchlistManager import WatchlistManager
from archive_agent.db.QdrantManager import QdrantManager
from archive_agent.core.CommitManager import CommitManager
from archive_agent.ai_provider.ai_provider_registry import ai_provider_registry
from archive_agent.ai_provider.AiProvider import AiProvider


class ContextManager:
    """
    Context manager.
    """

    def __init__(
            self,
            profile_name: Optional[str] = None,
            invalidate_cache: bool = False,
            verbose: bool = False,
            to_json_auto_dir: Optional[Path] = None,
    ):
        """
        Initialize context manager.
        :param profile_name: Optional profile name to create or switch to (or "" to request prompt).
        :param invalidate_cache: Invalidate cache if enabled, probe cache otherwise.
        :param verbose: Set CLI verbosity.
        :param to_json_auto_dir: Optional directory passed via `--to-json-auto` option; answers will be written there.
        """
        self.to_json_auto_dir = to_json_auto_dir
        self.invalidate_cache = invalidate_cache

        settings_path = Path.home() / ".archive-agent-settings"

        self.cli = CliManager(verbose=verbose)

        # Initialize ProgressManager with console access
        self.progress_manager = ProgressManager(self.cli.console)

        self.profile_manager = ProfileManager(
            cli=self.cli,
            settings_path=settings_path,
            profile_name=profile_name,
        )

        self.config = ConfigManager(
            cli=self.cli,
            settings_path=settings_path,
            profile_name=self.profile_manager.get_profile_name(),
        )

        self.watchlist = WatchlistManager(
            cli=self.cli,
            settings_path=settings_path,
            profile_name=self.profile_manager.get_profile_name(),
        )

        self.ai_cache = CacheManager(
            cli=self.cli,
            cache_path=settings_path / self.profile_manager.get_profile_name() / "ai_cache",
            invalidate_cache=self.invalidate_cache,
            verbose=verbose,
        )

        self.ai_factory = AiManagerFactory(
            cli=self.cli,
            chunk_lines_block=self.config.data[self.config.CHUNK_LINES_BLOCK],
            chunk_words_target=self.config.data[self.config.CHUNK_WORDS_TARGET],
            ai_provider_class=self._get_ai_provider_class(),
            ai_cache=self.ai_cache,
            ai_provider_params=self._get_ai_provider_params(),
            invalidate_cache=self.invalidate_cache,
            server_url=self.config.data[self.config.AI_SERVER_URL],
        )

        self.qdrant = QdrantManager(
            cli=self.cli,
            ai_factory=self.ai_factory,
            server_url=self.config.data[self.config.QDRANT_SERVER_URL],
            collection=self.config.data[self.config.QDRANT_COLLECTION],
            vector_size=self.config.data[self.config.AI_VECTOR_SIZE],
            retrieve_score_min=self.config.data[self.config.RETRIEVE_SCORE_MIN],
            retrieve_chunks_max=self.config.data[self.config.RETRIEVE_CHUNKS_MAX],
            rerank_chunks_max=self.config.data[self.config.RERANK_CHUNKS_MAX],
            expand_chunks_radius=self.config.data[self.config.EXPAND_CHUNKS_RADIUS],
        )

        self.decoder_settings = DecoderSettings(
            cli=self.cli,
            ocr_strategy=OcrStrategy(self.config.data[self.config.OCR_STRATEGY]),
            ocr_auto_threshold=self.config.data[self.config.OCR_AUTO_THRESHOLD],
            image_ocr=str(self.config.data[self.config.IMAGE_OCR]).lower().strip() == "true",
            image_entity_extract=str(self.config.data[self.config.IMAGE_ENTITY_EXTRACT]).lower().strip() == "true",
        )

        self.committer = CommitManager(
            cli=self.cli,
            watchlist=self.watchlist,
            ai_factory=self.ai_factory,
            decoder_settings=self.decoder_settings,
            qdrant=self.qdrant,
            progress_manager=self.progress_manager,
            max_workers_ingest=self.config.data[self.config.MAX_WORKERS_INGEST],
            max_workers_vision=self.config.data[self.config.MAX_WORKERS_VISION],
            max_workers_embed=self.config.data[self.config.MAX_WORKERS_EMBED],
        )

    def _get_ai_provider_class(self) -> Type[AiProvider]:
        """
        Get AI provider class from config.
        :return: AI provider class.
        """
        ai_provider_name = self.config.data[self.config.AI_PROVIDER]

        if ai_provider_name not in ai_provider_registry:
            raise ValueError(
                f"Invalid AI provider: '{ai_provider_name}' (must be one of {ai_provider_registry.keys()})"
            )

        ai_server_url = self.config.data[self.config.AI_SERVER_URL]

        self.cli.logger.info(f"Using AI provider: '{ai_provider_name}' @ {ai_server_url}")

        ai_provider_class = ai_provider_registry[ai_provider_name]["class"]
        return ai_provider_class

    def _get_ai_provider_params(self) -> AiProviderParams:
        """
        Get AI provider params.
        :return: AI provider params.
        """
        return AiProviderParams(
            model_chunk=self.config.data[self.config.AI_MODEL_CHUNK],
            model_embed=self.config.data[self.config.AI_MODEL_EMBED],
            model_rerank=self.config.data[self.config.AI_MODEL_RERANK],
            model_query=self.config.data[self.config.AI_MODEL_QUERY],
            model_vision=self.config.data[self.config.AI_MODEL_VISION],
            temperature_query=self.config.data[self.config.AI_TEMPERATURE_QUERY],
        )

    def usage(self):
        """
        Show AI token usage.
        """
        self.cli.usage()
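For reference, a minimal sketch of how this class might be instantiated from a script. The keyword arguments match the constructor above; the import path (archive_agent.core.ContextManager, alongside the other core managers imported above), the profile name "default", and the output directory are assumptions for illustration, not documented usage.

from pathlib import Path

from archive_agent.core.ContextManager import ContextManager  # assumed module path

# Build the full application context for an existing profile; the constructor
# wires up CLI, config, watchlist, AI cache, AI factory, Qdrant, and committer.
context = ContextManager(
    profile_name="default",              # hypothetical profile name
    invalidate_cache=False,              # probe the AI cache instead of wiping it
    verbose=True,
    to_json_auto_dir=Path("./answers"),  # optional; answers would be written here
)

context.usage()  # show AI token usage via the CLI

Note that the constructor eagerly resolves the AI provider from config (via _get_ai_provider_class), so an invalid AI_PROVIDER value raises ValueError at construction time rather than on first use.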

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/shredEngineer/Archive-Agent'
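Equivalently, a minimal Python sketch of the same request, assuming the requests package is installed; the response schema is not documented here, so the payload is simply printed for inspection.

import requests

# Fetch this server's entry from the Glama MCP directory API.
response = requests.get(
    "https://glama.ai/api/mcp/v1/servers/shredEngineer/Archive-Agent",
    timeout=10,
)
response.raise_for_status()
print(response.json())  # schema not documented here; inspect the payload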

If you have feedback or need assistance with the MCP directory API, please join our Discord server.