feat: Add LM Studio support and improve configuration handling #2890

Open · wants to merge 1 commit into main
12 changes: 7 additions & 5 deletions openmemory/api/app/routers/config.py
@@ -16,6 +16,7 @@ class LLMConfig(BaseModel):
max_tokens: int = Field(..., description="Maximum tokens to generate")
api_key: Optional[str] = Field(None, description="API key or 'env:API_KEY' to use environment variable")
ollama_base_url: Optional[str] = Field(None, description="Base URL for Ollama server (e.g., http://host.docker.internal:11434)")
lmstudio_base_url: Optional[str] = Field(None, description="Base URL for LM Studio server (e.g., http://host.docker.internal:1234)")

class LLMProvider(BaseModel):
provider: str = Field(..., description="LLM provider name")
@@ -25,6 +26,7 @@ class EmbedderConfig(BaseModel):
model: str = Field(..., description="Embedder model name")
api_key: Optional[str] = Field(None, description="API key or 'env:API_KEY' to use environment variable")
ollama_base_url: Optional[str] = Field(None, description="Base URL for Ollama server (e.g., http://host.docker.internal:11434)")
lmstudio_base_url: Optional[str] = Field(None, description="Base URL for LM Studio server (e.g., http://host.docker.internal:1234)")

class EmbedderProvider(BaseModel):
provider: str = Field(..., description="Embedder provider name")
@@ -140,10 +142,10 @@ async def update_configuration(config: ConfigSchema, db: Session = Depends(get_d
if config.openmemory is not None:
if "openmemory" not in updated_config:
updated_config["openmemory"] = {}
updated_config["openmemory"].update(config.openmemory.dict(exclude_none=True))
updated_config["openmemory"].update(config.openmemory.model_dump(exclude_none=True))

# Update mem0 settings
updated_config["mem0"] = config.mem0.dict(exclude_none=True)
updated_config["mem0"] = config.mem0.model_dump(exclude_none=True)

# Save the configuration to database
save_config_to_db(db, updated_config)
@@ -184,7 +186,7 @@ async def update_llm_configuration(llm_config: LLMProvider, db: Session = Depend
current_config["mem0"] = {}

# Update the LLM configuration
current_config["mem0"]["llm"] = llm_config.dict(exclude_none=True)
current_config["mem0"]["llm"] = llm_config.model_dump(exclude_none=True)

# Save the configuration to database
save_config_to_db(db, current_config)
@@ -208,7 +210,7 @@ async def update_embedder_configuration(embedder_config: EmbedderProvider, db: S
current_config["mem0"] = {}

# Update the Embedder configuration
current_config["mem0"]["embedder"] = embedder_config.dict(exclude_none=True)
current_config["mem0"]["embedder"] = embedder_config.model_dump(exclude_none=True)

# Save the configuration to database
save_config_to_db(db, current_config)
@@ -232,7 +234,7 @@ async def update_openmemory_configuration(openmemory_config: OpenMemoryConfig, d
current_config["openmemory"] = {}

# Update the OpenMemory configuration
current_config["openmemory"].update(openmemory_config.dict(exclude_none=True))
current_config["openmemory"].update(openmemory_config.model_dump(exclude_none=True))

# Save the configuration to database
save_config_to_db(db, current_config)
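With the new lmstudio_base_url field in place, a request body accepted by the updated LLMProvider/LLMConfig schema might look like the sketch below. This is an illustration only: the endpoint path, port, and model name are assumptions, not values taken from this diff.

    # Sketch only: endpoint path, port, and model name are assumed for illustration.
    import requests

    payload = {
        "provider": "lmstudio",
        "config": {
            "model": "qwen2.5-7b-instruct",   # whichever model is loaded in LM Studio
            "temperature": 0.1,
            "max_tokens": 2000,
            "lmstudio_base_url": "http://host.docker.internal:1234/v1",
        },
    }

    # Assumed route for the LLM-config endpoint exposed by this router.
    resp = requests.put("http://localhost:8765/api/v1/config/mem0/llm", json=payload)
    resp.raise_for_status()
    print(resp.json())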
13 changes: 11 additions & 2 deletions openmemory/api/app/utils/categorization.py
@@ -1,5 +1,6 @@
import json
import logging
import os

from openai import OpenAI
from typing import List
@@ -9,8 +10,16 @@
from app.utils.prompts import MEMORY_CATEGORIZATION_PROMPT

load_dotenv()
LM_STUDIO_API_KEY = os.getenv("LM_STUDIO_API_KEY")
LM_STUDIO_BASE_URL = os.getenv("LM_STUDIO_BASE_URL")
LM_STUDIO_MODEL = os.getenv("LM_STUDIO_MODEL")

openai_client = OpenAI()
if LM_STUDIO_API_KEY and LM_STUDIO_BASE_URL:
openai_client = OpenAI(base_url=LM_STUDIO_BASE_URL, api_key=LM_STUDIO_API_KEY)
model = LM_STUDIO_MODEL
else:
openai_client = OpenAI()
model = "gpt-4o-mini"


class MemoryCategories(BaseModel):
@@ -22,7 +31,7 @@ def get_categories_for_memory(memory: str) -> List[str]:
"""Get categories for a memory."""
try:
response = openai_client.responses.parse(
model="gpt-4o-mini",
model=model,
instructions=MEMORY_CATEGORIZATION_PROMPT,
input=memory,
temperature=0,
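In effect, the categorizer now prefers a local LM Studio endpoint whenever both LM_STUDIO_API_KEY and LM_STUDIO_BASE_URL are set, and falls back to the default OpenAI client otherwise. A minimal sketch of that selection with placeholder values (LM Studio's local server generally ignores the key, but the OpenAI client still requires a non-empty string):

    # Minimal sketch of the selection logic above; the env values are placeholders.
    import os
    from openai import OpenAI

    os.environ.setdefault("LM_STUDIO_API_KEY", "lm-studio")                  # placeholder key
    os.environ.setdefault("LM_STUDIO_BASE_URL", "http://localhost:1234/v1")  # LM Studio default port
    os.environ.setdefault("LM_STUDIO_MODEL", "qwen2.5-7b-instruct")          # whichever model is loaded

    if os.getenv("LM_STUDIO_API_KEY") and os.getenv("LM_STUDIO_BASE_URL"):
        client = OpenAI(base_url=os.environ["LM_STUDIO_BASE_URL"],
                        api_key=os.environ["LM_STUDIO_API_KEY"])
        model = os.getenv("LM_STUDIO_MODEL")
    else:
        client = OpenAI()          # uses OPENAI_API_KEY from the environment
        model = "gpt-4o-mini"

Note that LM_STUDIO_MODEL is not covered by the same guard: if it is unset while the other two variables are present, model ends up as None and the categorization call will fail.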
49 changes: 34 additions & 15 deletions openmemory/api/app/utils/memory.py
@@ -32,6 +32,7 @@
import hashlib
import socket
import platform
import urllib.parse

from mem0 import Memory
from app.database import SessionLocal
@@ -103,27 +104,41 @@ def _get_docker_host_url():
def _fix_ollama_urls(config_section):
"""
Fix Ollama URLs for Docker environment.
Replaces localhost URLs with appropriate Docker host URLs.
Sets default ollama_base_url if not provided.
Set a default ollama_base_url only if none is provided. If the user provides a value, it is always used as-is.
"""
if not config_section or "config" not in config_section:
return config_section

ollama_config = config_section["config"]

# Set default ollama_base_url if not provided
if "ollama_base_url" not in ollama_config:
ollama_config["ollama_base_url"] = "http://host.docker.internal:11434"
else:
# Check for ollama_base_url and fix if it's localhost
url = ollama_config["ollama_base_url"]
if "localhost" in url or "127.0.0.1" in url:
docker_host = _get_docker_host_url()
if docker_host != "localhost":
new_url = url.replace("localhost", docker_host).replace("127.0.0.1", docker_host)
ollama_config["ollama_base_url"] = new_url
print(f"Adjusted Ollama URL from {url} to {new_url}")
if "ollama_base_url" not in ollama_config or not ollama_config["ollama_base_url"]:
is_docker = os.path.exists('/.dockerenv')
default_url = "http://host.docker.internal:11434" if is_docker else "http://localhost:11434"
ollama_config["ollama_base_url"] = default_url
print(f"Ollama: No base URL provided. Set default to {default_url}")
# else: always use user-supplied value as-is
return config_section


def _fix_lmstudio_urls(config_section):
"""
Fix LM Studio URLs for Docker environment.
Set a default lmstudio_base_url only if none is provided. If the user provides a value, it is always used as-is.
"""
if not config_section:
return config_section

if "config" not in config_section or config_section["config"] is None:
config_section["config"] = {}
lmstudio_config = config_section["config"]

if "lmstudio_base_url" not in lmstudio_config or not lmstudio_config["lmstudio_base_url"]:
is_docker = os.path.exists('/.dockerenv')
default_url = "http://host.docker.internal:1234/v1" if is_docker else "http://localhost:1234/v1"
lmstudio_config["lmstudio_base_url"] = default_url
print(f"LM Studio: No base URL provided. Set default to {default_url}")
# else: always use user-supplied value as-is
return config_section


@@ -235,6 +250,8 @@ def get_memory_client(custom_instructions: str = None):
# Fix Ollama URLs for Docker if needed
if config["llm"].get("provider") == "ollama":
config["llm"] = _fix_ollama_urls(config["llm"])
elif config["llm"].get("provider") == "lmstudio":
config["llm"] = _fix_lmstudio_urls(config["llm"])

# Update Embedder configuration if available
if "embedder" in mem0_config and mem0_config["embedder"] is not None:
@@ -243,6 +260,8 @@
# Fix Ollama URLs for Docker if needed
if config["embedder"].get("provider") == "ollama":
config["embedder"] = _fix_ollama_urls(config["embedder"])
elif config["embedder"].get("provider") == "lmstudio":
config["embedder"] = _fix_lmstudio_urls(config["embedder"])
else:
print("No configuration found in database, using defaults")

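The intended behaviour of the new helper can be summarised in a short sketch (the model name and addresses are placeholders; which default is chosen depends on whether /.dockerenv exists in the running environment):

    # Sketch of _fix_lmstudio_urls behaviour; values are placeholders.
    section = {"provider": "lmstudio", "config": {"model": "qwen2.5-7b-instruct"}}
    section = _fix_lmstudio_urls(section)
    # Outside Docker -> lmstudio_base_url defaults to "http://localhost:1234/v1"
    # Inside Docker  -> lmstudio_base_url defaults to "http://host.docker.internal:1234/v1"

    # A user-supplied URL is passed through untouched, even if it points at localhost:
    section = {"provider": "lmstudio",
               "config": {"lmstudio_base_url": "http://192.168.1.50:1234/v1"}}
    assert _fix_lmstudio_urls(section)["config"]["lmstudio_base_url"] == "http://192.168.1.50:1234/v1"

The import urllib.parse added at the top of this file appears unused in the hunks shown here.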
85 changes: 52 additions & 33 deletions openmemory/ui/components/form-view.tsx
@@ -94,36 +94,37 @@ export function FormView({ settings, onChange }: FormViewProps) {
const isLlmOllama = settings.mem0?.llm?.provider?.toLowerCase() === "ollama"
const isEmbedderOllama = settings.mem0?.embedder?.provider?.toLowerCase() === "ollama"

const LLM_PROVIDERS = [
"OpenAI",
"Anthropic",
"Azure OpenAI",
"Ollama",
"Together",
"Groq",
"Litellm",
"Mistral AI",
"Google AI",
"AWS Bedrock",
"Gemini",
"DeepSeek",
"xAI",
"LM Studio",
"LangChain",
]
const LLM_PROVIDERS = {
"OpenAI": "openai",
"Anthropic": "anthropic",
"Azure OpenAI": "azure_openai",
"Ollama": "ollama",
"Together": "together",
"Groq": "groq",
"Litellm": "litellm",
"Mistral AI": "litellm",
"Google AI": "litellm",
"Gemini": "gemini",
"DeepSeek": "deepseek",
"Sarvam": "sarvam",
"LM Studio": "lmstudio",
"LangChain": "langchain",
"AWS Bedrock": "aws_bedrock",
}


const EMBEDDER_PROVIDERS = [
"OpenAI",
"Azure OpenAI",
"Ollama",
"Hugging Face",
"Vertexai",
"Gemini",
"Lmstudio",
"Together",
"LangChain",
"AWS Bedrock",
]
const EMBEDDER_PROVIDERS = {
"OpenAI": "openai",
"Azure OpenAI": "azure_openai",
"Ollama": "ollama",
"Hugging Face": "huggingface",
"Vertexai": "vertexai",
"Gemini": "gemini",
"Lmstudio": "lmstudio",
"Together": "together",
"LangChain": "langchain",
"AWS Bedrock": "aws_bedrock",
}

return (
<div className="space-y-8">
@@ -167,8 +168,8 @@ export function FormView({ settings, onChange }: FormViewProps) {
<SelectValue placeholder="Select a provider" />
</SelectTrigger>
<SelectContent>
{LLM_PROVIDERS.map((provider) => (
<SelectItem key={provider} value={provider.toLowerCase()}>
{Object.entries(LLM_PROVIDERS).map(([provider, value]) => (
<SelectItem key={provider} value={value}>
{provider}
</SelectItem>
))}
@@ -259,6 +260,24 @@ export function FormView({ settings, onChange }: FormViewProps) {
onChange={(e) => handleLlmConfigChange("max_tokens", Number.parseInt(e.target.value) || "")}
/>
</div>
<div className="space-y-2">
<Label htmlFor="lmstudio-base-url">LM Studio Base URL</Label>
<Input
id="lmstudio-base-url"
placeholder="http://localhost:1234/v1"
value={settings.mem0?.llm?.config?.lmstudio_base_url || ""}
onChange={(e) => handleLlmConfigChange("lmstudio_base_url", e.target.value)}
/>
</div>
<div className="space-y-2">
<Label htmlFor="lmstudio-base-url">LM Studio Base URL</Label>
<Input
id="ollama-base-url"
placeholder="http://localhost:11434"
value={settings.mem0?.llm?.config?.ollama_base_url || ""}
onChange={(e) => handleLlmConfigChange("ollama_base_url", e.target.value)}
/>
</div>
</div>
)}
</CardContent>
@@ -281,8 +300,8 @@ export function FormView({ settings, onChange }: FormViewProps) {
<SelectValue placeholder="Select a provider" />
</SelectTrigger>
<SelectContent>
{EMBEDDER_PROVIDERS.map((provider) => (
<SelectItem key={provider} value={provider.toLowerCase()}>
{Object.entries(EMBEDDER_PROVIDERS).map(([provider, value]) => (
<SelectItem key={value} value={value}>
{provider}
</SelectItem>
))}
6 changes: 6 additions & 0 deletions openmemory/ui/store/configSlice.ts
@@ -6,6 +6,7 @@ export interface LLMConfig {
max_tokens: number;
api_key?: string;
ollama_base_url?: string;
lmstudio_base_url?: string;
}

export interface LLMProvider {
Expand All @@ -17,6 +18,7 @@ export interface EmbedderConfig {
model: string;
api_key?: string;
ollama_base_url?: string;
lmstudio_base_url?: string;
}

export interface EmbedderProvider {
@@ -52,13 +54,17 @@ const initialState: ConfigState = {
temperature: 0.1,
max_tokens: 2000,
api_key: 'env:OPENAI_API_KEY',
lmstudio_base_url: 'http://localhost:1234/v1',
ollama_base_url: 'http://localhost:11434',
},
},
embedder: {
provider: 'openai',
config: {
model: 'text-embedding-3-small',
api_key: 'env:OPENAI_API_KEY',
lmstudio_base_url: 'http://localhost:1234/v1',
ollama_base_url: 'http://localhost:11434',
},
},
},