import os

# --- Prometheus / Alertmanager endpoints ---
# The HPE P5G stack serves Prometheus under the /prometheus base path.
PROMETHEUS_URL = os.getenv("MARVIS_PROMETHEUS_URL", "http://127.0.0.1:9090")
PROMETHEUS_PREFIX = os.getenv("MARVIS_PROMETHEUS_PREFIX", "/prometheus")
ALERTMANAGER_URL = os.getenv("MARVIS_ALERTMANAGER_URL", "http://127.0.0.1:9093")

# --- AI backend selection: "rule" | "openai" | "ollama" ---
AI_MODE = os.getenv("MARVIS_AI_MODE", "rule")

# OpenAI settings. Override the base URL to target any OpenAI-compatible
# local LLM server (llama.cpp, vLLM, LM Studio, etc.).
OPENAI_API_KEY = os.getenv("MARVIS_OPENAI_API_KEY", "")
OPENAI_MODEL = os.getenv("MARVIS_OPENAI_MODEL", "gpt-4o-mini")
OPENAI_BASE_URL = os.getenv("MARVIS_OPENAI_BASE_URL", "https://api.openai.com")

# Ollama settings.
OLLAMA_URL = os.getenv("MARVIS_OLLAMA_URL", "http://localhost:11434")
OLLAMA_MODEL = os.getenv("MARVIS_OLLAMA_MODEL", "llama3")

# Maps the Prometheus `target_type` label to its display name. Every entry
# is just the upper-cased label, so the table is derived from the key list.
TARGET_TYPE_MAP = {
    label: label.upper()
    for label in (
        "amf", "smf", "upf", "udm", "udr", "nrf", "ausf",
        "pcf", "mme", "sgwc", "dra", "dsm", "ncm", "pls",
    )
}

# Display names of all network functions.
# NOTE(review): "NCM" and "PLS" exist in TARGET_TYPE_MAP but are absent
# here — presumably intentional; confirm before deriving this list from
# the map.
ALL_NFS = [
    "AMF", "SMF", "UPF", "UDM", "UDR", "NRF",
    "AUSF", "PCF", "MME", "SGWC", "DRA", "DSM",
]