Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 12 additions & 2 deletions graphiti_core/llm_client/openai_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,8 +90,11 @@ async def _create_structured_completion(
request_kwargs['temperature'] = temperature_value

# Only include reasoning and verbosity parameters for reasoning models
# Map legacy reasoning effort values to current OpenAI API values
_REASONING_MAP = {'minimal': 'low', 'default': 'medium', 'maximum': 'high'}
if is_reasoning_model and reasoning is not None:
request_kwargs['reasoning'] = {'effort': reasoning} # type: ignore
effort = _REASONING_MAP.get(reasoning, reasoning)
request_kwargs['reasoning'] = {'effort': effort} # type: ignore

if is_reasoning_model and verbosity is not None:
request_kwargs['text'] = {'verbosity': verbosity} # type: ignore
Expand All @@ -116,10 +119,17 @@ async def _create_completion(
model.startswith('gpt-5') or model.startswith('o1') or model.startswith('o3')
)

# gpt-5 family requires max_completion_tokens instead of max_tokens
token_kwargs = (
{'max_completion_tokens': max_tokens}
if is_reasoning_model
else {'max_tokens': max_tokens}
)

return await self.client.chat.completions.create(
model=model,
messages=messages,
temperature=temperature if not is_reasoning_model else None,
max_tokens=max_tokens,
response_format={'type': 'json_object'},
**token_kwargs,
)
52 changes: 52 additions & 0 deletions mcp_server/config/config-second-brain-stdio.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
# Second Brain Graphiti MCP Server Configuration (stdio transport)
# NOTE(review): ${VAR:default} appears to mean "env var VAR, with fallback
# default after the colon" — confirm against the MCP server's config loader.

server:
  transport: "stdio"  # communicate over stdin/stdout (no HTTP listener)

# LLM used by Graphiti for entity/relationship extraction.
llm:
  provider: "openai"
  # NOTE(review): "gpt-5.4-mini" does not match a known OpenAI model id —
  # confirm (gpt-5-mini?). The sibling HTTP config uses "gpt-4o-mini".
  model: "gpt-5.4-mini"
  max_tokens: 4096

  providers:
    openai:
      api_key: ${OPENAI_API_KEY}
      api_url: ${OPENAI_API_URL:https://api.openai.com/v1}

# Embedding model backing semantic/hybrid search.
embedder:
  provider: "openai"
  model: "text-embedding-3-small"
  dimensions: 1536

  providers:
    openai:
      api_key: ${OPENAI_API_KEY}
      api_url: ${OPENAI_API_URL:https://api.openai.com/v1}

# Graph database backend (FalkorDB is addressed via a Redis-style URI).
database:
  provider: "falkordb"
  providers:
    falkordb:
      uri: ${FALKORDB_URI:redis://localhost:6379}
      password: ${FALKORDB_PASSWORD:}  # empty default — presumably no auth
      database: ${FALKORDB_DATABASE:second_brain}

graphiti:
  # NOTE(review): default group_id here is "second_brain" (underscore) while
  # the HTTP config defaults to "second-brain" (hyphen) — confirm which is
  # intended; differing group ids would partition the graph.
  group_id: ${GRAPHITI_GROUP_ID:second_brain}
  episode_id_prefix: ${EPISODE_ID_PREFIX:}
  user_id: ${USER_ID:chris}
  # Custom entity types the extractor may assign to nodes.
  entity_types:
    - name: "Project"
      description: "A project, program, client engagement, or initiative"
    - name: "Person"
      description: "A person — stakeholder, collaborator, contact, team member"
    - name: "Organization"
      description: "A company, agency, lab, partner org, or vendor"
    - name: "Decision"
      description: "A significant choice with rationale and impact"
    - name: "Topic"
      description: "A theme or subject that spans multiple projects"
    - name: "Deliverable"
      description: "A specific output — report, presentation, tool, workshop, strategy document"
    - name: "Event"
      description: "A time-bound activity — meeting, workshop, call, conference"
54 changes: 54 additions & 0 deletions mcp_server/config/config-second-brain.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
# Second Brain Graphiti MCP Server Configuration
# NOTE(review): ${VAR:default} appears to mean "env var VAR, with fallback
# default after the colon" — confirm against the MCP server's config loader.

server:
  transport: "http"  # serve over HTTP rather than stdio
  host: "0.0.0.0"    # bind all interfaces
  port: 8000

# LLM used by Graphiti for entity/relationship extraction.
llm:
  provider: "openai"
  model: "gpt-4o-mini"
  max_tokens: 4096

  providers:
    openai:
      api_key: ${OPENAI_API_KEY}
      api_url: ${OPENAI_API_URL:https://api.openai.com/v1}

# Embedding model backing semantic/hybrid search.
embedder:
  provider: "openai"
  model: "text-embedding-3-small"
  dimensions: 1536

  providers:
    openai:
      api_key: ${OPENAI_API_KEY}
      api_url: ${OPENAI_API_URL:https://api.openai.com/v1}

# Graph database backend (FalkorDB is addressed via a Redis-style URI).
database:
  provider: "falkordb"
  providers:
    falkordb:
      uri: ${FALKORDB_URI:redis://localhost:6379}
      password: ${FALKORDB_PASSWORD:}  # empty default — presumably no auth
      database: ${FALKORDB_DATABASE:second_brain}

graphiti:
  # NOTE(review): default group_id here is "second-brain" (hyphen) while the
  # stdio config defaults to "second_brain" (underscore) — confirm which is
  # intended; differing group ids would partition the graph.
  group_id: ${GRAPHITI_GROUP_ID:second-brain}
  episode_id_prefix: ${EPISODE_ID_PREFIX:}
  user_id: ${USER_ID:chris}
  # Custom entity types the extractor may assign to nodes.
  entity_types:
    - name: "Project"
      description: "A project, program, client engagement, or initiative"
    - name: "Person"
      description: "A person — stakeholder, collaborator, contact, team member"
    - name: "Organization"
      description: "A company, agency, lab, partner org, or vendor"
    - name: "Decision"
      description: "A significant choice with rationale and impact"
    - name: "Topic"
      description: "A theme or subject that spans multiple projects"
    - name: "Deliverable"
      description: "A specific output — report, presentation, tool, workshop, strategy document"
    - name: "Event"
      description: "A time-bound activity — meeting, workshop, call, conference"
117 changes: 86 additions & 31 deletions mcp_server/src/graphiti_mcp_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,12 @@ async def initialize(self) -> None:
try:
embedder_client = EmbedderFactory.create(self.config.embedder)
except Exception as e:
logger.warning(f'Failed to create embedder client: {e}')
logger.error(f'Failed to create embedder client: {e}')
raise RuntimeError(
f'Embedder initialization failed: {e}. '
f'Search functionality requires a working embedder. '
f'Check your embedder configuration and API keys.'
) from e

# Get database configuration
db_config = DatabaseDriverFactory.create_config(self.config.database)
Expand All @@ -210,36 +215,56 @@ async def initialize(self) -> None:
self.entity_types = custom_types

# Initialize Graphiti client with appropriate driver
try:
if self.config.database.provider.lower() == 'falkordb':
# For FalkorDB, create a FalkorDriver instance directly
from graphiti_core.driver.falkordb_driver import FalkorDriver

falkor_driver = FalkorDriver(
host=db_config['host'],
port=db_config['port'],
password=db_config['password'],
database=db_config['database'],
)

self.client = Graphiti(
graph_driver=falkor_driver,
llm_client=llm_client,
embedder=embedder_client,
max_coroutines=self.semaphore_limit,
)
else:
# For Neo4j (default), use the original approach
self.client = Graphiti(
uri=db_config['uri'],
user=db_config['user'],
password=db_config['password'],
llm_client=llm_client,
embedder=embedder_client,
max_coroutines=self.semaphore_limit,
)
except Exception as db_error:
# Check for connection errors
# Retry with backoff -- FalkorDB may not be up yet after a system restart
import asyncio as _asyncio
max_retries = 5
retry_delay = 2 # seconds, doubles each attempt
last_error = None

for attempt in range(max_retries):
try:
if self.config.database.provider.lower() == 'falkordb':
# For FalkorDB, create a FalkorDriver instance directly
from graphiti_core.driver.falkordb_driver import FalkorDriver

falkor_driver = FalkorDriver(
host=db_config['host'],
port=db_config['port'],
password=db_config['password'],
database=db_config['database'],
)

self.client = Graphiti(
graph_driver=falkor_driver,
llm_client=llm_client,
embedder=embedder_client,
max_coroutines=self.semaphore_limit,
)
else:
# For Neo4j (default), use the original approach
self.client = Graphiti(
uri=db_config['uri'],
user=db_config['user'],
password=db_config['password'],
llm_client=llm_client,
embedder=embedder_client,
max_coroutines=self.semaphore_limit,
)
last_error = None
break # Connected successfully
except Exception as e:
last_error = e
error_msg = str(e).lower()
if 'connection refused' in error_msg or 'could not connect' in error_msg:
if attempt < max_retries - 1:
wait = retry_delay * (2 ** attempt)
logger.warning(f'Database not ready (attempt {attempt + 1}/{max_retries}), retrying in {wait}s...')
await _asyncio.sleep(wait)
continue
raise # Non-connection error, don't retry

if last_error is not None:
db_error = last_error
error_msg = str(db_error).lower()
if 'connection refused' in error_msg or 'could not connect' in error_msg:
db_provider = self.config.database.provider
Expand Down Expand Up @@ -305,6 +330,22 @@ async def initialize(self) -> None:
logger.info(f'Using database: {self.config.database.provider}')
logger.info(f'Using group_id: {self.config.graphiti.group_id}')

# Verify search pipeline works end-to-end
if embedder_client:
try:
test_vector = await embedder_client.create(input_data=['health check'])
if test_vector and len(test_vector) > 0:
logger.info(
f'Search health check passed: embedder returned {len(test_vector)}-dim vector'
)
else:
logger.warning(
'Search health check: embedder returned empty vector. '
'Search results may be incomplete.'
)
except Exception as e:
logger.warning(f'Search health check failed: {e}. Search may not work correctly.')

except Exception as e:
logger.error(f'Failed to initialize Graphiti client: {e}')
raise
Expand Down Expand Up @@ -441,6 +482,13 @@ async def search_nodes(
node_labels=entity_types,
)

# Verify embedder is available for hybrid search
if client.embedder is None:
return ErrorResponse(
error='Search unavailable: embedder not initialized. '
'Restart the server and check embedder configuration.'
)

# Use the search_ method with node search config
from graphiti_core.search.search_config_recipes import NODE_HYBRID_SEARCH_RRF

Expand Down Expand Up @@ -520,6 +568,13 @@ async def search_memory_facts(
else []
)

# Verify embedder is available for search
if client.embedder is None:
return ErrorResponse(
error='Search unavailable: embedder not initialized. '
'Restart the server and check embedder configuration.'
)

relevant_edges = await client.search(
group_ids=effective_group_ids,
query=query,
Expand Down
Loading