2 changes: 1 addition & 1 deletion package-lock.json

Some generated files are not rendered by default.

4 changes: 4 additions & 0 deletions packages/botonic-plugin-ai-agents/CHANGELOG.md
@@ -20,6 +20,10 @@ All notable changes to Botonic will be documented in this file.
 
 </details>
 
+## [0.47.2] - 2026-04-15
+
+- [PR-3199](https://github.com/hubtype/botonic/pull/3199): Fix async calls to track LLM runs.
+
 ## [0.47.1] - 2026-03-31
 
 - [PR-3196](https://github.com/hubtype/botonic/pull/3196): Disable global sdk tracing.
2 changes: 1 addition & 1 deletion packages/botonic-plugin-ai-agents/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@botonic/plugin-ai-agents",
-  "version": "0.47.1",
+  "version": "0.47.2",
   "main": "./lib/cjs/index.js",
   "module": "./lib/esm/index.js",
   "description": "Use AI Agents to generate your contents",
8 changes: 4 additions & 4 deletions packages/botonic-plugin-ai-agents/src/guardrails/input.ts
@@ -5,8 +5,8 @@ import {
   type UserMessageItem,
 } from '@openai/agents'
 import { z } from 'zod'
-import { AZURE_OPENAI_API_VERSION, isProd, OPENAI_PROVIDER } from '../constants'
-import type { LLMConfig } from '../llm-config'
+import { isProd } from '../constants'
+import { getApiVersion, type LLMConfig } from '../llm-config'
 import { HubtypeApiClient } from '../services/hubtype-api-client'
 import { TrackFeature, TrackProductName } from '../services/types'
 import type { GuardrailRule, ResultRawResponse } from '../types'
@@ -53,7 +53,7 @@ export function createInputGuardrail(
     const result = await runner.run(agent, [lastMessage], { context })
     const endTime = Date.now()
 
-    void sendGuardrailLlmRunTracking(
+    await sendGuardrailLlmRunTracking(
       result,
       trackingContext,
       llmConfig,
@@ -97,7 +97,7 @@ async function sendGuardrailLlmRunTracking(
   const durationPerCall = Math.round(totalDuration / rawResponses.length)
   const temperature =
     (llmConfig.modelSettings.temperature as number | undefined) ?? 0
-  const apiVersion = OPENAI_PROVIDER === 'azure' ? AZURE_OPENAI_API_VERSION : ''
+  const apiVersion = getApiVersion()
 
   const llmRuns = rawResponses.map(response => ({
     inference_id: trackingContext.inferenceId,
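Note on the `void` → `await` change above: in a serverless runtime, a fire-and-forget promise can be dropped when the handler returns before it settles, so the tracking request silently never completes. A minimal sketch of that failure mode, with a hypothetical `trackRun` helper and endpoint standing in for the plugin's real tracking call (names and URL are illustrative, not from this PR):

```ts
// Hypothetical helper standing in for sendGuardrailLlmRunTracking.
async function trackRun(payload: { durationMs: number }): Promise<void> {
  await fetch('https://api.example.com/llm_runs', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  })
}

export async function handler(): Promise<string> {
  const startTime = Date.now()
  // ... run the agent ...
  const endTime = Date.now()

  // Before: `void trackRun(...)` let the handler return immediately; the
  // runtime could suspend the process with the request still in flight.
  // After: awaiting guarantees the tracking call settles before we return.
  await trackRun({ durationMs: endTime - startTime })

  return 'done'
}
```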
8 changes: 8 additions & 0 deletions packages/botonic-plugin-ai-agents/src/llm-config.ts
@@ -94,3 +94,11 @@ export class LLMConfig {
     throw new Error(`Unsupported model: ${model}`)
   }
 }
+
+export function getApiVersion(): string {
+  // Return NOT_API_VERSION_FOR_OPENAI_PROVIDER when OPENAI_PROVIDER
+  // is not azure, to avoid errors when tracking in the llm_runs endpoint
+  return OPENAI_PROVIDER === 'azure'
+    ? AZURE_OPENAI_API_VERSION
+    : 'NOT_API_VERSION_FOR_OPENAI_PROVIDER'
+}
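For reference, a sketch of what the new helper yields in each case, assuming only the two constants imported from `./constants` (the Azure version string shown is illustrative):

```ts
import { getApiVersion } from './llm-config'

// OPENAI_PROVIDER === 'azure' -> AZURE_OPENAI_API_VERSION (e.g. '2024-06-01')
// any other provider          -> 'NOT_API_VERSION_FOR_OPENAI_PROVIDER'
const apiVersion = getApiVersion()

// Previously the non-azure branch produced '', which caused errors on the
// llm_runs tracking endpoint; the sentinel keeps api_version non-empty.
const llmRunPayload = { api_version: apiVersion }
```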
9 changes: 4 additions & 5 deletions packages/botonic-plugin-ai-agents/src/runner.ts
@@ -9,9 +9,9 @@ import {
   RunToolCallItem,
   RunToolCallOutputItem,
 } from '@openai/agents'
-import { AZURE_OPENAI_API_VERSION, isProd, OPENAI_PROVIDER } from './constants'
+import { isProd, OPENAI_PROVIDER } from './constants'
 import type { DebugLogger } from './debug-logger'
-import type { LLMConfig } from './llm-config'
+import { getApiVersion, type LLMConfig } from './llm-config'
 import { HubtypeApiClient } from './services/hubtype-api-client'
 import { TrackFeature, TrackProductName } from './services/types'
 import { retrieveKnowledge } from './tools'
@@ -90,7 +90,7 @@ export class AIAgentRunner<
 
     const endTime = Date.now()
 
-    void this.sendLlmRunTracking(result, context, startTime, endTime)
+    await this.sendLlmRunTracking(result, context, startTime, endTime)
 
     const outputMessages = result.finalOutput?.messages || []
     const hasExit =
@@ -158,8 +158,7 @@
     const durationPerCall = Math.round(totalDuration / rawResponses.length)
     const temperature =
       (this.llmConfig.modelSettings.temperature as number | undefined) ?? 0
-    const apiVersion =
-      OPENAI_PROVIDER === 'azure' ? AZURE_OPENAI_API_VERSION : ''
+    const apiVersion = getApiVersion()
 
     const llmRuns = rawResponses.map(response => ({
       inference_id: this.inferenceId,