Updated the repo to use host networking as the default appliance deployment model

This commit is contained in:
Jake Kasper
2026-04-24 11:24:58 -04:00
parent a0e77aabd6
commit a8cf68b6d3
6 changed files with 45 additions and 12 deletions

Binary file not shown.

View File

@@ -1,6 +1,7 @@
import os
# Marvis monitoring endpoints. Every value can be overridden via its
# MARVIS_* environment variable; the literals below are only fallbacks.
# Prometheus — the HPE P5G stack uses /prometheus as base path
# Defaults assume the appliance-style deployment model where Marvis runs with
# host networking and talks to sibling services over host loopback.
PROMETHEUS_URL = os.getenv("MARVIS_PROMETHEUS_URL", "http://127.0.0.1:9090")
PROMETHEUS_PREFIX = os.getenv("MARVIS_PROMETHEUS_PREFIX", "/prometheus")
ALERTMANAGER_URL = os.getenv("MARVIS_ALERTMANAGER_URL", "http://127.0.0.1:9093")
@@ -11,7 +12,7 @@ OPENAI_API_KEY = os.getenv("MARVIS_OPENAI_API_KEY", "")
# LLM backend configuration — OpenAI-compatible endpoint plus local Ollama.
OPENAI_MODEL = os.getenv("MARVIS_OPENAI_MODEL", "gpt-4o-mini")
# Override to use any OpenAI-compatible local LLM (llama.cpp, vLLM, LM Studio, etc.)
OPENAI_BASE_URL = os.getenv("MARVIS_OPENAI_BASE_URL", "https://api.openai.com")
# NOTE(review): the next two lines appear to be this diff's removed/added pair
# with the +/- markers stripped — the commit swaps "localhost" for "127.0.0.1"
# to match the host-loopback defaults above; only the 127.0.0.1 line should
# exist in the actual file. Confirm against the repository.
OLLAMA_URL = os.getenv("MARVIS_OLLAMA_URL", "http://localhost:11434")
OLLAMA_URL = os.getenv("MARVIS_OLLAMA_URL", "http://127.0.0.1:11434")
OLLAMA_MODEL = os.getenv("MARVIS_OLLAMA_MODEL", "llama3")
# Container/runtime integration