From 58bbc7f12351a150753d4ffe237bc3629b3c1a0d Mon Sep 17 00:00:00 2001
From: rudaev
Date: Sun, 5 Apr 2026 11:30:28 +0700
Subject: [PATCH] fix(mcp): correct reasoning.effort value for gpt-4.5-mini
 compatibility
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

gpt-4.5-mini rejects 'minimal' as a reasoning.effort value; the valid
values are 'low', 'medium', and 'high'. This fix has two parts:

1. factories.py: change reasoning.effort from 'minimal' to 'low' so the
   OpenAI client no longer raises a validation error at startup.

2. Dockerfile.standalone: add a sed patch and a bytecode recompile as a
   defense-in-depth measure for environments where the source file may
   differ from what is installed in site-packages.
---
 mcp_server/docker/Dockerfile.standalone | 8 ++++++++
 mcp_server/src/services/factories.py    | 2 +-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/mcp_server/docker/Dockerfile.standalone b/mcp_server/docker/Dockerfile.standalone
index d5dbac81f..e955ba117 100644
--- a/mcp_server/docker/Dockerfile.standalone
+++ b/mcp_server/docker/Dockerfile.standalone
@@ -46,6 +46,14 @@ RUN sed -i '/\[tool\.uv\.sources\]/,/graphiti-core/d' pyproject.toml && \
 RUN --mount=type=cache,target=/root/.cache/uv \
     uv sync --no-group dev
 
+# Patch graphiti-core: reasoning.effort 'minimal' is not supported by newer
+# OpenAI models (gpt-4.5-mini etc.). Change the default to 'low'.
+# See: https://github.com/getzep/graphiti/issues/902
+RUN sed -i "s/DEFAULT_REASONING = 'minimal'/DEFAULT_REASONING = 'low'/" \
+    .venv/lib/python*/site-packages/graphiti_core/llm_client/openai_base_client.py && \
+    find .venv/lib/python*/site-packages/graphiti_core/ -name '__pycache__' -type d -exec rm -rf {} + 2>/dev/null; \
+    python -m compileall .venv/lib/python*/site-packages/graphiti_core/ -q || true
+
 # Store graphiti-core version
 RUN echo "${GRAPHITI_CORE_VERSION}" > /app/mcp/.graphiti-core-version
 
diff --git a/mcp_server/src/services/factories.py b/mcp_server/src/services/factories.py
index 1348ac32e..6a6bd1e33 100644
--- a/mcp_server/src/services/factories.py
+++ b/mcp_server/src/services/factories.py
@@ -133,7 +133,7 @@ def create(config: LLMConfig) -> LLMClient:
 
         # Only pass reasoning/verbosity parameters for reasoning models (gpt-5 family)
         if is_reasoning_model:
-            return OpenAIClient(config=llm_config, reasoning='minimal', verbosity='low')
+            return OpenAIClient(config=llm_config, reasoning='low', verbosity='low')
         else:
             # For non-reasoning models, explicitly pass None to disable these parameters
             return OpenAIClient(config=llm_config, reasoning=None, verbosity=None)
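-- 
Verification sketch (placed below the signature delimiter so git am drops
it). A minimal check that the sed patch in Dockerfile.standalone actually
took effect in the built image; the image tag, venv path, and entrypoint
override here are assumptions, adjust them to your build:

    # Expect the patched graphiti-core default, 'low', to be printed.
    docker run --rm --entrypoint /app/mcp/.venv/bin/python graphiti-mcp:standalone \
        -c "from graphiti_core.llm_client.openai_base_client import DEFAULT_REASONING; print(DEFAULT_REASONING)"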