diff --git a/mcp_server/docker/Dockerfile.standalone b/mcp_server/docker/Dockerfile.standalone
index d5dbac81f..e955ba117 100644
--- a/mcp_server/docker/Dockerfile.standalone
+++ b/mcp_server/docker/Dockerfile.standalone
@@ -46,6 +46,14 @@ RUN sed -i '/\[tool\.uv\.sources\]/,/graphiti-core/d' pyproject.toml && \
 RUN --mount=type=cache,target=/root/.cache/uv \
     uv sync --no-group dev
 
+# Patch graphiti-core: reasoning.effort 'minimal' is not supported by newer
+# OpenAI models (gpt-5.4-mini etc). Change default to 'low'.
+# See: https://github.com/getzep/graphiti/issues/902
+RUN sed -i "s/DEFAULT_REASONING = 'minimal'/DEFAULT_REASONING = 'low'/" \
+    .venv/lib/python*/site-packages/graphiti_core/llm_client/openai_base_client.py && \
+    find .venv/lib/python*/site-packages/graphiti_core/ -name '__pycache__' -type d -exec rm -rf {} + 2>/dev/null; \
+    python -m compileall .venv/lib/python*/site-packages/graphiti_core/ -q || true
+
 # Store graphiti-core version
 RUN echo "${GRAPHITI_CORE_VERSION}" > /app/mcp/.graphiti-core-version
 
diff --git a/mcp_server/src/services/factories.py b/mcp_server/src/services/factories.py
index 1348ac32e..6a6bd1e33 100644
--- a/mcp_server/src/services/factories.py
+++ b/mcp_server/src/services/factories.py
@@ -133,7 +133,7 @@ def create(config: LLMConfig) -> LLMClient:
 
     # Only pass reasoning/verbosity parameters for reasoning models (gpt-5 family)
     if is_reasoning_model:
-        return OpenAIClient(config=llm_config, reasoning='minimal', verbosity='low')
+        return OpenAIClient(config=llm_config, reasoning='low', verbosity='low')
     else:
         # For non-reasoning models, explicitly pass None to disable these parameters
         return OpenAIClient(config=llm_config, reasoning=None, verbosity=None)