Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 17 additions & 2 deletions apps/client/src/services/llm_chat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ export async function getAvailableModels(): Promise<LlmModelInfo[]> {
export interface StreamCallbacks {
onChunk: (text: string) => void;
onThinking?: (text: string) => void;
onToolUse?: (toolName: string, input: Record<string, unknown>) => void;
onToolUse?: (toolName: string, input: Record<string, unknown>, requiresApproval?: boolean) => void;
onToolResult?: (toolName: string, result: string, isError?: boolean) => void;
onCitation?: (citation: LlmCitation) => void;
onUsage?: (usage: LlmUsage) => void;
Expand Down Expand Up @@ -76,7 +76,7 @@ export async function streamChatCompletion(
callbacks.onThinking?.(data.content);
break;
case "tool_use":
callbacks.onToolUse?.(data.toolName, data.toolInput);
callbacks.onToolUse?.(data.toolName, data.toolInput, data.requiresApproval);
// Yield to force Preact to commit the pending tool call
// state before we process the result.
await new Promise((r) => setTimeout(r, 1));
Expand Down Expand Up @@ -112,3 +112,18 @@ export async function streamChatCompletion(
reader.releaseLock();
}
}

/**
 * Execute a mutating tool call after user approval.
 *
 * @param toolName  Name of the tool the model requested.
 * @param toolInput Arguments for the tool, as received from the model.
 * @returns The tool output rendered as a string; `isError` is set when the
 *          server reported a failure.
 */
export async function executeToolCall(toolName: string, toolInput: Record<string, unknown>): Promise<{ result: string; isError?: boolean }> {
    // The server may return any JSON value here, so type it as `unknown`
    // rather than `object` — the narrower annotation contradicted the
    // `typeof … === "string"` check below.
    const response = await server.post<{ result?: unknown; error?: string }>("llm-chat/execute-tool", { toolName, toolInput });

    if (response.error) {
        return { result: response.error, isError: true };
    }

    // JSON.stringify(undefined) evaluates to the value `undefined`, which would
    // violate the declared `{ result: string }` shape — normalize a missing
    // result to an empty string instead.
    if (response.result === undefined) {
        return { result: "" };
    }

    return {
        result: typeof response.result === "string" ? response.result : JSON.stringify(response.result)
    };
}
6 changes: 5 additions & 1 deletion apps/client/src/translations/en/translation.json
Original file line number Diff line number Diff line change
Expand Up @@ -1664,7 +1664,11 @@
"note_context_enabled": "Click to disable note context: {{title}}",
"note_context_disabled": "Click to include current note in context",
"no_provider_message": "No AI provider configured. Add one to start chatting.",
"add_provider": "Add AI Provider"
"add_provider": "Add AI Provider",
"approve": "Approve",
"reject": "Reject",
"pending_approval": "Pending approval",
"rejected_by_user": "Rejected by user"
},
"sidebar_chat": {
"title": "AI Chat",
Expand Down
22 changes: 18 additions & 4 deletions apps/client/src/widgets/type_widgets/llm_chat/ChatMessage.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,8 @@ function MarkdownContent({ html, isStreaming }: { html: string; isStreaming?: bo
interface Props {
message: StoredMessage;
isStreaming?: boolean;
onApproveToolCall?: (toolCallId: string) => Promise<void>;
onRejectToolCall?: (toolCallId: string) => void;
}

type ContentGroup =
Expand Down Expand Up @@ -127,7 +129,7 @@ function CitationsSection({ citations }: { citations: LlmCitation[] }) {
);
}

export default function ChatMessage({ message, isStreaming }: Props) {
export default function ChatMessage({ message, isStreaming, onApproveToolCall, onRejectToolCall }: Props) {
const isError = message.type === "error";
const isThinking = message.type === "thinking";
const textContent = typeof message.content === "string" ? message.content : getMessageText(message.content);
Expand Down Expand Up @@ -172,7 +174,7 @@ export default function ChatMessage({ message, isStreaming }: Props) {
<div className="llm-chat-message-content">
{message.role === "assistant" && !isError ? (
hasBlockContent ? (
renderContentBlocks(message.content as ContentBlock[], isStreaming)
renderContentBlocks(message.content as ContentBlock[], isStreaming, onApproveToolCall, onRejectToolCall)
) : (
<MarkdownContent html={renderedContent || ""} isStreaming={isStreaming} />
)
Expand Down Expand Up @@ -244,7 +246,12 @@ function groupContentBlocks(blocks: ContentBlock[]): ContentGroup[] {
return groups;
}

function renderContentBlocks(blocks: ContentBlock[], isStreaming?: boolean) {
function renderContentBlocks(
blocks: ContentBlock[],
isStreaming?: boolean,
onApproveToolCall?: (toolCallId: string) => Promise<void>,
onRejectToolCall?: (toolCallId: string) => void
) {
return groupContentBlocks(blocks).map((group) => {
if (group.type === "text") {
const html = renderMarkdown(group.block.content);
Expand All @@ -256,6 +263,13 @@ function renderContentBlocks(blocks: ContentBlock[], isStreaming?: boolean) {
);
}

return <ToolCallCard key={group.index} toolCalls={group.blocks.map((b) => b.toolCall)} />;
return (
<ToolCallCard
key={group.index}
toolCalls={group.blocks.map((b) => b.toolCall)}
onApprove={onApproveToolCall}
onReject={onRejectToolCall}
/>
);
});
}
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,14 @@ interface ExpandableSectionProps {
label: ComponentChildren;
className?: string;
children: ComponentChildren;
/** Whether the section should be expanded by default */
defaultExpanded?: boolean;
}

/** A collapsible section within an ExpandableCard. */
export function ExpandableSection({ icon, label, className, children }: ExpandableSectionProps) {
export function ExpandableSection({ icon, label, className, children, defaultExpanded }: ExpandableSectionProps) {
return (
<details className={`expandable-section ${className ?? ""}`}>
<details className={`expandable-section ${className ?? ""}`} open={defaultExpanded}>
<summary className="expandable-section-summary">
<span className={icon} />
<span className="expandable-section-label">{label}</span>
Expand Down
7 changes: 6 additions & 1 deletion apps/client/src/widgets/type_widgets/llm_chat/LlmChat.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,12 @@ export default function LlmChat({ note, ntxId, noteContext }: TypeWidgetProps) {
/>
)}
{chat.messages.map(msg => (
<ChatMessage key={msg.id} message={msg} />
<ChatMessage
key={msg.id}
message={msg}
onApproveToolCall={chat.approveToolCall}
onRejectToolCall={chat.rejectToolCall}
/>
))}
{chat.isStreaming && chat.streamingThinking && (
<ChatMessage
Expand Down
54 changes: 54 additions & 0 deletions apps/client/src/widgets/type_widgets/llm_chat/ToolCallCard.css
Original file line number Diff line number Diff line change
Expand Up @@ -111,3 +111,57 @@
.llm-chat-tool-call-result-error pre {
color: var(--danger-color, #dc3545);
}

/* Tool call approval UI */
.llm-chat-tool-call-approval {
display: flex;
align-items: center;
justify-content: space-between;
padding: 0.5rem 0.75rem;
border-top: 1px solid var(--main-border-color);
background: color-mix(in srgb, var(--warning-color, #ffc107) 8%, transparent);
}

.llm-chat-tool-call-approval-label {
font-size: 0.8rem;
font-weight: 500;
color: var(--warning-color, #b8860b);
}

.llm-chat-tool-call-approval-actions {
display: flex;
gap: 0.5rem;
}

.llm-chat-tool-call-approve-btn,
.llm-chat-tool-call-reject-btn {
display: inline-flex;
align-items: center;
gap: 0.25rem;
padding: 0.25rem 0.75rem;
border: 1px solid var(--main-border-color);
border-radius: 4px;
font-size: 0.8rem;
cursor: pointer;
transition: background 0.15s, border-color 0.15s;
}

.llm-chat-tool-call-approve-btn {
background: color-mix(in srgb, var(--success-color, #28a745) 15%, transparent);
color: var(--success-color, #28a745);
border-color: var(--success-color, #28a745);
}

.llm-chat-tool-call-approve-btn:hover {
background: color-mix(in srgb, var(--success-color, #28a745) 30%, transparent);
}

.llm-chat-tool-call-reject-btn {
background: color-mix(in srgb, var(--danger-color, #dc3545) 10%, transparent);
color: var(--danger-color, #dc3545);
border-color: var(--danger-color, #dc3545);
}

.llm-chat-tool-call-reject-btn:hover {
background: color-mix(in srgb, var(--danger-color, #dc3545) 25%, transparent);
}
36 changes: 33 additions & 3 deletions apps/client/src/widgets/type_widgets/llm_chat/ToolCallCard.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ function getToolCallContext(toolCall: ToolCall): ToolCallContext {

function toolCallIcon(toolCall: ToolCall): string {
if (toolCall.isError) return "bx bx-error-circle";
if (toolCall.requiresApproval && !toolCall.result) return "bx bx-shield-quarter";
if (!toolCall.result) return "bx bx-loader-alt bx-spin";

const name = toolCall.toolName;
Expand Down Expand Up @@ -178,19 +179,44 @@ function ToolCallLabel({ toolCall }: { toolCall: ToolCall }) {
}

/** A single tool call section within a ToolCallCard. */
function ToolCallSection({ toolCall }: { toolCall: ToolCall }) {
function ToolCallSection({ toolCall, onApprove, onReject }: {
toolCall: ToolCall;
onApprove?: (toolCallId: string) => Promise<void>;
onReject?: (toolCallId: string) => void;
}) {
const hasError = toolCall.isError;
const isPendingApproval = toolCall.requiresApproval && !toolCall.result && !toolCall.rejected;

return (
<ExpandableSection
icon={toolCallIcon(toolCall)}
label={<ToolCallLabel toolCall={toolCall} />}
className={hasError ? "llm-chat-tool-call-error" : ""}
defaultExpanded={isPendingApproval}
>
<div className="llm-chat-tool-call-input">
<strong>{t("llm_chat.input")}</strong>
<KeyValueTable data={toolCall.input} />
</div>
{isPendingApproval && onApprove && onReject && (
<div className="llm-chat-tool-call-approval">
<span className="llm-chat-tool-call-approval-label">{t("llm_chat.pending_approval")}</span>
<div className="llm-chat-tool-call-approval-actions">
<button
className="llm-chat-tool-call-approve-btn"
onClick={() => onApprove(toolCall.id)}
>
<span className="bx bx-check" /> {t("llm_chat.approve")}
</button>
<button
className="llm-chat-tool-call-reject-btn"
onClick={() => onReject(toolCall.id)}
>
<span className="bx bx-x" /> {t("llm_chat.reject")}
</button>
</div>
</div>
)}
{toolCall.result && (
<div className={`llm-chat-tool-call-result ${hasError ? "llm-chat-tool-call-result-error" : ""}`}>
<strong>{hasError ? t("llm_chat.error") : t("llm_chat.result")}</strong>
Expand All @@ -202,11 +228,15 @@ function ToolCallSection({ toolCall }: { toolCall: ToolCall }) {
}

/** A card that groups one or more sequential tool calls together. */
export default function ToolCallCard({ toolCalls }: { toolCalls: ToolCall[] }) {
export default function ToolCallCard({ toolCalls, onApprove, onReject }: {
toolCalls: ToolCall[];
onApprove?: (toolCallId: string) => Promise<void>;
onReject?: (toolCallId: string) => void;
}) {
return (
<ExpandableCard>
{toolCalls.map((tc, idx) => (
<ToolCallSection key={tc.id ?? idx} toolCall={tc} />
<ToolCallSection key={tc.id ?? idx} toolCall={tc} onApprove={onApprove} onReject={onReject} />
))}
</ExpandableCard>
);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ export interface ToolCall {
input: Record<string, unknown>;
result?: string;
isError?: boolean;
requiresApproval?: boolean;
rejected?: boolean;
}

/** A block of text content (rendered as Markdown for assistant messages). */
Expand Down
54 changes: 50 additions & 4 deletions apps/client/src/widgets/type_widgets/llm_chat/useLlmChat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@ import type { LlmCitation, LlmMessage, LlmModelInfo, LlmUsage } from "@triliumne
import { RefObject } from "preact";
import { useCallback, useEffect, useRef, useState } from "preact/hooks";

import { getAvailableModels, streamChatCompletion } from "../../../services/llm_chat.js";
import { executeToolCall, getAvailableModels, streamChatCompletion } from "../../../services/llm_chat.js";
import { t } from "../../../services/i18n.js";
import { randomString } from "../../../services/utils.js";
import type { ContentBlock, LlmChatContent, StoredMessage } from "./llm_chat_types.js";

Expand Down Expand Up @@ -62,6 +63,10 @@ export interface UseLlmChatReturn {
clearMessages: () => void;
/** Refresh the provider/models list */
refreshModels: () => void;
/** Approve a pending mutating tool call */
approveToolCall: (toolCallId: string) => Promise<void>;
/** Reject a pending mutating tool call */
rejectToolCall: (toolCallId: string) => void;
}

export function useLlmChat(
Expand Down Expand Up @@ -267,13 +272,14 @@ export function useLlmChat(
thinkingContent += text;
setStreamingThinking(thinkingContent);
},
onToolUse: (toolName, toolInput) => {
onToolUse: (toolName, toolInput, requiresApproval) => {
contentBlocks.push({
type: "tool_call",
toolCall: {
id: randomString(),
toolName,
input: toolInput
input: toolInput,
requiresApproval
}
});
setStreamingBlocks([...contentBlocks]);
Expand Down Expand Up @@ -365,6 +371,44 @@ export function useLlmChat(
}
}, [handleSubmit]);

/** Approve a pending mutating tool call — execute it server-side and update the message. */
const approveToolCall = useCallback(async (toolCallId: string) => {
    // Find the tool call in messages. Only a call that still requires approval
    // and has no recorded result is eligible, so re-clicking "Approve" after a
    // result has arrived is a no-op.
    for (const msg of messages) {
        if (!Array.isArray(msg.content)) continue;
        for (const block of msg.content) {
            if (block.type === "tool_call" && block.toolCall.id === toolCallId && block.toolCall.requiresApproval && !block.toolCall.result) {
                // Executes server-side; may take a while for slow tools.
                const { result, isError } = await executeToolCall(block.toolCall.toolName, block.toolCall.input);
                // Update the tool call block immutably
                // NOTE(review): `messages` was captured by this closure before the
                // await above; if new messages arrive while the tool executes, the
                // overwrite below could drop them. If `setMessages` is a state
                // setter, a functional update would avoid this — TODO confirm.
                const updatedContent = msg.content.map(b =>
                    b.type === "tool_call" && b.toolCall.id === toolCallId
                        ? { ...b, toolCall: { ...b.toolCall, result, isError } }
                        : b
                );
                const updatedMessages = messages.map(m =>
                    m.id === msg.id ? { ...m, content: updatedContent } : m
                );
                setMessages(updatedMessages);
                // Stop after the first match — tool call ids are unique.
                return;
            }
        }
    }
}, [messages, setMessages]);

/** Mark a pending mutating tool call as rejected, recording a user-facing error result. */
const rejectToolCall = useCallback((toolCallId: string) => {
    setMessages(messages.map(message => {
        // Plain-string contents carry no tool calls; pass them through untouched.
        if (!Array.isArray(message.content)) return message;
        const nextContent = message.content.map(blk => {
            if (blk.type !== "tool_call" || blk.toolCall.id !== toolCallId) return blk;
            // Flag the call as rejected and surface a translated error result.
            return {
                ...blk,
                toolCall: { ...blk.toolCall, rejected: true, result: t("llm_chat.rejected_by_user"), isError: true }
            };
        });
        return { ...message, content: nextContent };
    }));
}, [messages, setMessages]);

return {
// State
messages,
Expand Down Expand Up @@ -402,6 +446,8 @@ export function useLlmChat(
loadFromContent,
getContent,
clearMessages,
refreshModels
refreshModels,
approveToolCall,
rejectToolCall
};
}
Loading
Loading