diff --git a/archon-ui-main/src/components/settings/OllamaConfigurationPanel.tsx b/archon-ui-main/src/components/settings/OllamaConfigurationPanel.tsx
index 55f2519..965ead9 100644
--- a/archon-ui-main/src/components/settings/OllamaConfigurationPanel.tsx
+++ b/archon-ui-main/src/components/settings/OllamaConfigurationPanel.tsx
@@ -595,7 +595,7 @@ const OllamaConfigurationPanel: React.FC = ({
                 value={tempUrls[instance.id] !== undefined ? tempUrls[instance.id] : instance.baseUrl}
                 onChange={(e) => handleUrlChange(instance.id, e.target.value)}
                 onBlur={() => handleUrlBlur(instance.id)}
-                placeholder="http://localhost:11434"
+                placeholder="http://host.docker.internal:11434"
                 className={cn(
                   "text-sm",
                   tempUrls[instance.id] !== undefined && tempUrls[instance.id] !== instance.baseUrl
diff --git a/archon-ui-main/src/components/settings/RAGSettings.tsx b/archon-ui-main/src/components/settings/RAGSettings.tsx
index 83766b6..8cb721a 100644
--- a/archon-ui-main/src/components/settings/RAGSettings.tsx
+++ b/archon-ui-main/src/components/settings/RAGSettings.tsx
@@ -61,11 +61,11 @@ export const RAGSettings = ({
   // Instance configurations
   const [llmInstanceConfig, setLLMInstanceConfig] = useState({
     name: '',
-    url: ragSettings.LLM_BASE_URL || 'http://localhost:11434/v1'
+    url: ragSettings.LLM_BASE_URL || 'http://host.docker.internal:11434/v1'
   });
   const [embeddingInstanceConfig, setEmbeddingInstanceConfig] = useState({
     name: '',
-    url: ragSettings.OLLAMA_EMBEDDING_URL || 'http://localhost:11434/v1'
+    url: ragSettings.OLLAMA_EMBEDDING_URL || 'http://host.docker.internal:11434/v1'
   });
 
   // Update instance configs when ragSettings change (after loading from database)
@@ -932,7 +932,7 @@ export const RAGSettings = ({
             className="text-green-400 border-green-400 mb-1"
             onClick={() => {
               // Quick setup: configure both instances with default values
-              const defaultUrl = 'http://localhost:11434/v1';
+              const defaultUrl = 'http://host.docker.internal:11434/v1';
               const defaultName = 'Default Ollama';
               setLLMInstanceConfig({ name: defaultName, url: defaultUrl });
               setEmbeddingInstanceConfig({ name: defaultName, url: defaultUrl });
@@ -1680,7 +1680,7 @@ export const RAGSettings = ({
                   });
                 }
              }}
-              placeholder="http://localhost:11434/v1"
+              placeholder="http://host.docker.internal:11434/v1"
             />
 
             {/* Convenience checkbox for single host setup */}
@@ -1753,7 +1753,7 @@ export const RAGSettings = ({
               label="Instance URL"
               value={embeddingInstanceConfig.url}
               onChange={(e) => setEmbeddingInstanceConfig({...embeddingInstanceConfig, url: e.target.value})}
-              placeholder="http://localhost:11434/v1"
+              placeholder="http://host.docker.internal:11434/v1"
             />
diff --git a/docs/docs/rag.mdx b/docs/docs/rag.mdx
index a77167c..82866e2 100644
--- a/docs/docs/rag.mdx
+++ b/docs/docs/rag.mdx
@@ -320,7 +320,7 @@ EMBEDDING_MODEL=text-embedding-004
 ### Ollama (Local/Private)
 ```bash
 LLM_PROVIDER=ollama
-LLM_BASE_URL=http://localhost:11434/v1
+LLM_BASE_URL=http://host.docker.internal:11434/v1
 MODEL_CHOICE=llama2
 EMBEDDING_MODEL=nomic-embed-text
 # Pros: Privacy, no API costs
diff --git a/migration/complete_setup.sql b/migration/complete_setup.sql
index bd9ebd8..322e0b2 100644
--- a/migration/complete_setup.sql
+++ b/migration/complete_setup.sql
@@ -94,7 +94,7 @@ INSERT INTO archon_settings (key, encrypted_value, is_encrypted, category, descr
 
 -- LLM Provider configuration settings
 INSERT INTO archon_settings (key, value, is_encrypted, category, description) VALUES
 ('LLM_PROVIDER', 'openai', false, 'rag_strategy', 'LLM provider to use: openai, ollama, or google'),
-('LLM_BASE_URL', NULL, false, 'rag_strategy', 'Custom base URL for LLM provider (mainly for Ollama, e.g., http://localhost:11434/v1)'),
+('LLM_BASE_URL', NULL, false, 'rag_strategy', 'Custom base URL for LLM provider (mainly for Ollama, e.g., http://host.docker.internal:11434/v1)'),
 ('EMBEDDING_MODEL', 'text-embedding-3-small', false, 'rag_strategy', 'Embedding model for vector search and similarity matching (required for all embedding operations)')
 ON CONFLICT (key) DO NOTHING;
diff --git a/python/src/server/services/credential_service.py b/python/src/server/services/credential_service.py
index a57c1ab..e72ca8a 100644
--- a/python/src/server/services/credential_service.py
+++ b/python/src/server/services/credential_service.py
@@ -475,7 +475,7 @@ class CredentialService:
     def _get_provider_base_url(self, provider: str, rag_settings: dict) -> str | None:
         """Get base URL for provider."""
         if provider == "ollama":
-            return rag_settings.get("LLM_BASE_URL", "http://localhost:11434/v1")
+            return rag_settings.get("LLM_BASE_URL", "http://host.docker.internal:11434/v1")
         elif provider == "google":
             return "https://generativelanguage.googleapis.com/v1beta/openai/"
         return None  # Use default for OpenAI
diff --git a/python/src/server/services/llm_provider_service.py b/python/src/server/services/llm_provider_service.py
index f04f074..10655b2 100644
--- a/python/src/server/services/llm_provider_service.py
+++ b/python/src/server/services/llm_provider_service.py
@@ -203,7 +203,7 @@ async def _get_optimal_ollama_instance(instance_type: str | None = None,
             return embedding_url if embedding_url.endswith('/v1') else f"{embedding_url}/v1"
 
         # Default to LLM base URL for chat operations
-        fallback_url = rag_settings.get("LLM_BASE_URL", "http://localhost:11434")
+        fallback_url = rag_settings.get("LLM_BASE_URL", "http://host.docker.internal:11434")
         return fallback_url if fallback_url.endswith('/v1') else f"{fallback_url}/v1"
 
     except Exception as e:
@@ -211,11 +211,11 @@ async def _get_optimal_ollama_instance(instance_type: str | None = None,
         # Final fallback to localhost only if we can't get RAG settings
         try:
             rag_settings = await credential_service.get_credentials_by_category("rag_strategy")
-            fallback_url = rag_settings.get("LLM_BASE_URL", "http://localhost:11434")
+            fallback_url = rag_settings.get("LLM_BASE_URL", "http://host.docker.internal:11434")
             return fallback_url if fallback_url.endswith('/v1') else f"{fallback_url}/v1"
         except Exception as fallback_error:
             logger.error(f"Could not retrieve fallback configuration: {fallback_error}")
-            return "http://localhost:11434/v1"
+            return "http://host.docker.internal:11434/v1"
 
 
 async def get_embedding_model(provider: str | None = None) -> str:
diff --git a/python/src/server/services/provider_discovery_service.py b/python/src/server/services/provider_discovery_service.py
index e49341c..ccd811d 100644
--- a/python/src/server/services/provider_discovery_service.py
+++ b/python/src/server/services/provider_discovery_service.py
@@ -23,7 +23,7 @@ _provider_cache: dict[str, tuple[Any, float]] = {}
 _CACHE_TTL_SECONDS = 300  # 5 minutes
 
 # Default Ollama instance URL (configurable via environment/settings)
-DEFAULT_OLLAMA_URL = "http://localhost:11434"
+DEFAULT_OLLAMA_URL = "http://host.docker.internal:11434"
 
 # Model pattern detection for dynamic capabilities (no hardcoded model names)
 CHAT_MODEL_PATTERNS = ["llama", "qwen", "mistral", "codellama", "phi", "gemma", "vicuna", "orca"]
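
Not part of the patch itself: a minimal Python sketch of the behavior these hunks standardize on, i.e. defaulting to `http://host.docker.internal:11434` so a containerized Archon server can reach Ollama running on the Docker host, and appending `/v1` when it is missing. The helper names and the `/api/tags` reachability check are illustrative assumptions, not code from the repository.

```python
"""Sketch only: mirrors the URL fallback used in llm_provider_service.py above.
Helper names are illustrative, not part of the codebase."""
import json
import urllib.request

DEFAULT_OLLAMA_URL = "http://host.docker.internal:11434"  # patch default


def resolve_ollama_base_url(rag_settings: dict) -> str:
    """Use LLM_BASE_URL when configured, otherwise the Docker-host default,
    and ensure the OpenAI-compatible /v1 suffix is present."""
    base = rag_settings.get("LLM_BASE_URL", DEFAULT_OLLAMA_URL)
    return base if base.endswith("/v1") else f"{base}/v1"


def ollama_is_reachable(base_url: str = DEFAULT_OLLAMA_URL) -> bool:
    """Ping Ollama's /api/tags endpoint (lists pulled models) as a health check."""
    try:
        with urllib.request.urlopen(f"{base_url}/api/tags", timeout=5) as resp:
            return "models" in json.load(resp)
    except (OSError, ValueError):
        return False


if __name__ == "__main__":
    print(resolve_ollama_base_url({}))  # -> http://host.docker.internal:11434/v1
    print(ollama_is_reachable())        # False unless Ollama is running on the host
```

Note that on Linux, `host.docker.internal` typically resolves only when the container is started with `--add-host=host.docker.internal:host-gateway` (or the equivalent `extra_hosts` entry in Docker Compose).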