Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 19 additions & 1 deletion graphiti_core/llm_client/openai_base_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,14 @@

DEFAULT_MODEL = 'gpt-4.1-mini'
DEFAULT_SMALL_MODEL = 'gpt-4.1-nano'
DEFAULT_REASONING = 'minimal'
DEFAULT_REASONING = 'low'
DEFAULT_VERBOSITY = 'low'
# Accepted values for the `reasoning` effort option on OpenAI-based clients.
# `None` means the reasoning field is omitted entirely. NOTE(review): 'minimal'
# and 'intense' are presumably retained for backward compatibility with callers
# written before the default moved from 'minimal' to 'low' — confirm against
# the OpenAI reasoning-effort API before pruning them.
VALID_REASONING_VALUES: frozenset[str | None] = frozenset(
    {None, 'none', 'low', 'medium', 'high', 'xhigh', 'minimal', 'intense'}
)
# Accepted values for the `verbosity` option; `None` omits the field.
VALID_VERBOSITY_VALUES: frozenset[str | None] = frozenset(
    {None, 'low', 'medium', 'high'}
)


class BaseOpenAIClient(LLMClient):
Expand All @@ -59,6 +65,18 @@ def __init__(
if cache:
raise NotImplementedError('Caching is not implemented for OpenAI-based clients')

if reasoning not in VALID_REASONING_VALUES:
raise ValueError(
f'Invalid reasoning value: {reasoning!r}. '
f'Must be one of {sorted(v for v in VALID_REASONING_VALUES if v is not None)}'
)

if verbosity not in VALID_VERBOSITY_VALUES:
raise ValueError(
f'Invalid verbosity value: {verbosity!r}. '
f'Must be one of {sorted(v for v in VALID_VERBOSITY_VALUES if v is not None)}'
)

if config is None:
config = LLMConfig()

Expand Down
50 changes: 45 additions & 5 deletions tests/llm_client/test_azure_openai_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ async def test_structured_completion_strips_reasoning_for_unsupported_models():
client = AzureOpenAILLMClient(
azure_client=dummy_client,
config=LLMConfig(),
reasoning='minimal',
reasoning='low',
verbosity='low',
)

Expand All @@ -72,7 +72,7 @@ async def test_structured_completion_strips_reasoning_for_unsupported_models():
temperature=0.4,
max_tokens=64,
response_model=DummyResponseModel,
reasoning='minimal',
reasoning='low',
verbosity='low',
)

Expand All @@ -96,7 +96,7 @@ async def test_reasoning_fields_forwarded_for_supported_models():
client = AzureOpenAILLMClient(
azure_client=dummy_client,
config=LLMConfig(),
reasoning='intense',
reasoning='high',
verbosity='high',
)

Expand All @@ -106,13 +106,13 @@ async def test_reasoning_fields_forwarded_for_supported_models():
temperature=0.7,
max_tokens=128,
response_model=DummyResponseModel,
reasoning='intense',
reasoning='high',
verbosity='high',
)

call_args = dummy_client.responses.parse_calls[0]
assert 'temperature' not in call_args
assert call_args['reasoning'] == {'effort': 'intense'}
assert call_args['reasoning'] == {'effort': 'high'}
assert call_args['text'] == {'verbosity': 'high'}

await client._create_completion(
Expand All @@ -124,3 +124,43 @@ async def test_reasoning_fields_forwarded_for_supported_models():

create_args = dummy_client.chat.completions.create_calls[0]
assert 'temperature' not in create_args


@pytest.mark.parametrize('invalid_reasoning', ['invalid', '', 'NONE'])
def test_invalid_reasoning_raises_error(invalid_reasoning):
    """An unrecognized reasoning effort value must fail fast at construction time."""
    client_kwargs = {
        'azure_client': DummyAzureClient(),
        'config': LLMConfig(),
        'reasoning': invalid_reasoning,
    }
    with pytest.raises(ValueError, match='Invalid reasoning value'):
        AzureOpenAILLMClient(**client_kwargs)


@pytest.mark.parametrize('invalid_verbosity', ['minimal', 'invalid', '', 'LOW'])
def test_invalid_verbosity_raises_error(invalid_verbosity):
    """An unrecognized verbosity value must fail fast at construction time."""
    client_kwargs = {
        'azure_client': DummyAzureClient(),
        'config': LLMConfig(),
        'verbosity': invalid_verbosity,
    }
    with pytest.raises(ValueError, match='Invalid verbosity value'):
        AzureOpenAILLMClient(**client_kwargs)


@pytest.mark.parametrize('valid_reasoning', [None, 'none', 'low', 'medium', 'high', 'xhigh'])
def test_valid_reasoning_accepted(valid_reasoning):
    """Every accepted reasoning effort value is stored on the client unchanged."""
    built = AzureOpenAILLMClient(
        config=LLMConfig(),
        azure_client=DummyAzureClient(),
        reasoning=valid_reasoning,
    )
    assert built.reasoning == valid_reasoning


@pytest.mark.parametrize('valid_verbosity', [None, 'low', 'medium', 'high'])
def test_valid_verbosity_accepted(valid_verbosity):
    """Every accepted verbosity value is stored on the client unchanged."""
    built = AzureOpenAILLMClient(
        config=LLMConfig(),
        azure_client=DummyAzureClient(),
        verbosity=valid_verbosity,
    )
    assert built.verbosity == valid_verbosity
Loading