Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions mcp_server/docker/Dockerfile.standalone
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,14 @@ RUN sed -i '/\[tool\.uv\.sources\]/,/graphiti-core/d' pyproject.toml && \
RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --no-group dev

# Patch graphiti-core: reasoning.effort 'minimal' is not supported by newer
# OpenAI models (gpt-5.4-mini, etc.). Change the default to 'low'.
# See: https://github.com/getzep/graphiti/issues/902
RUN sed -i "s/DEFAULT_REASONING = 'minimal'/DEFAULT_REASONING = 'low'/" \
.venv/lib/python*/site-packages/graphiti_core/llm_client/openai_base_client.py && \
find .venv/lib/python*/site-packages/graphiti_core/ -name '__pycache__' -type d -exec rm -rf {} + 2>/dev/null; \
python -m compileall .venv/lib/python*/site-packages/graphiti_core/ -q || true

# Store graphiti-core version
RUN echo "${GRAPHITI_CORE_VERSION}" > /app/mcp/.graphiti-core-version

Expand Down
2 changes: 1 addition & 1 deletion mcp_server/src/services/factories.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ def create(config: LLMConfig) -> LLMClient:

# Only pass reasoning/verbosity parameters for reasoning models (gpt-5 family)
if is_reasoning_model:
return OpenAIClient(config=llm_config, reasoning='minimal', verbosity='low')
return OpenAIClient(config=llm_config, reasoning='low', verbosity='low')
else:
# For non-reasoning models, explicitly pass None to disable these parameters
return OpenAIClient(config=llm_config, reasoning=None, verbosity=None)
Expand Down
Loading