diff --git a/apps/stage-tamagotchi/package.json b/apps/stage-tamagotchi/package.json index 872d7a880e..cad7d0ab36 100644 --- a/apps/stage-tamagotchi/package.json +++ b/apps/stage-tamagotchi/package.json @@ -113,6 +113,7 @@ "reka-ui": "^2.9.6", "remark-parse": "^11.0.0", "remark-rehype": "^11.1.2", + "replicate": "catalog:", "semver": "^7.7.4", "shiki": "^4.0.2", "splitpanes": "catalog:", diff --git a/apps/stage-tamagotchi/src/main/configs/artistry.ts b/apps/stage-tamagotchi/src/main/configs/artistry.ts new file mode 100644 index 0000000000..af5b752b13 --- /dev/null +++ b/apps/stage-tamagotchi/src/main/configs/artistry.ts @@ -0,0 +1,26 @@ +import { any, array, number, object, optional, string } from 'valibot' + +import { createConfig } from '../libs/electron/persistence' + +export const artistryConfigSchema = object({ + artistryProvider: optional(string(), 'comfyui'), + artistryGlobals: optional(object({ + comfyuiServerUrl: optional(string(), 'http://localhost:8188'), + comfyuiSavedWorkflows: optional(array(any()), []), + comfyuiActiveWorkflow: optional(string(), ''), + replicateApiKey: optional(string(), ''), + replicateDefaultModel: optional(string(), 'black-forest-labs/flux-schnell'), + replicateAspectRatio: optional(string(), '16:9'), + replicateInferenceSteps: optional(number(), 4), + nanobananaApiKey: optional(string(), ''), + nanobananaModel: optional(string(), 'gemini-3.1-flash-image-preview'), + nanobananaResolution: optional(string(), '1K'), + }), {}), +}) + +export function createArtistryConfig() { + const config = createConfig('artistry', 'options.json', artistryConfigSchema) + config.setup() + + return config +} diff --git a/apps/stage-tamagotchi/src/main/index.ts b/apps/stage-tamagotchi/src/main/index.ts index 6b0804f1da..e87d06fc46 100644 --- a/apps/stage-tamagotchi/src/main/index.ts +++ b/apps/stage-tamagotchi/src/main/index.ts @@ -9,9 +9,9 @@ import messages from '@proj-airi/i18n/locales' import { electronApp, optimizer } from 
'@electron-toolkit/utils' import { Format, LogLevel, setGlobalFormat, setGlobalHookPostLog, setGlobalLogLevel, useLogg } from '@guiiai/logg' +import { createContext } from '@moeru/eventa/adapters/electron/main' import { initScreenCaptureForMain } from '@proj-airi/electron-screen-capture/main' import { app, ipcMain } from 'electron' -import { noop } from 'es-toolkit' import { createLoggLogger, injeca, lifecycle } from 'injeca' import { isLinux } from 'std-env' @@ -19,6 +19,7 @@ import icon from '../../resources/icon.png?asset' import { openDebugger, setupDebugger } from './app/debugger' import { nullFileLoggerHandle, setupFileLogger } from './app/file-logger' +import { createArtistryConfig } from './configs/artistry' import { createGlobalAppConfig } from './configs/global' import { emitAppBeforeQuit, emitAppReady, emitAppWindowAllClosed } from './libs/bootkit/lifecycle' import { setElectronMainDirname } from './libs/electron/location' @@ -28,6 +29,7 @@ import { setupServerChannel } from './services/airi/channel-server' import { setupBuiltInServer } from './services/airi/http-server' import { setupMcpStdioManager } from './services/airi/mcp-servers' import { setupPluginHost } from './services/airi/plugins' +import { setupArtistryBridge } from './services/airi/widgets/artistry-bridge' import { setupAutoUpdater } from './services/electron/auto-updater' import { setupTray } from './tray' import { setupAboutWindowReusable } from './windows/about' @@ -101,15 +103,16 @@ app.whenReady().then(async () => { injeca.setLogger(createLoggLogger(useLogg('injeca').useGlobalConfig())) const appConfig = injeca.provide('configs:app', () => createGlobalAppConfig()) + const artistryConfig = injeca.provide('configs:artistry', () => createArtistryConfig()) const electronApp = injeca.provide('host:electron:app', () => app) const autoUpdater = injeca.provide('services:auto-updater', { dependsOn: { appConfig }, build: ({ dependsOn }) => setupAutoUpdater({ getStoredUpdateLane: () => 
dependsOn.appConfig.get()?.updateChannel, setStoredUpdateLane: (lane) => { - const currentConfig = dependsOn.appConfig.get() + const current = dependsOn.appConfig.get() dependsOn.appConfig.update({ - language: currentConfig?.language ?? 'en', + language: current?.language ?? 'en', updateChannel: lane, }) }, @@ -192,8 +195,15 @@ app.whenReady().then(async () => { }) injeca.invoke({ - dependsOn: { mainWindow, tray, serverChannel, airiHttpServer, pluginHost, mcpStdioManager, onboardingWindow: onboardingWindowManager }, - callback: noop, + dependsOn: { mainWindow, tray, serverChannel, airiHttpServer, pluginHost, mcpStdioManager, onboardingWindow: onboardingWindowManager, widgetsWindow: widgetsManager, artistryConfig }, + callback: async (deps) => { + const { context } = createContext(ipcMain) + await setupArtistryBridge({ + widgetsManager: deps.widgetsWindow, + context, + artistryConfig: deps.artistryConfig, + }) + }, }) injeca.start().catch(err => console.error(err)) diff --git a/apps/stage-tamagotchi/src/main/services/airi/i18n/index.ts b/apps/stage-tamagotchi/src/main/services/airi/i18n/index.ts index 19363c24c6..303e08af60 100644 --- a/apps/stage-tamagotchi/src/main/services/airi/i18n/index.ts +++ b/apps/stage-tamagotchi/src/main/services/airi/i18n/index.ts @@ -17,7 +17,10 @@ export async function createI18nService(params: { context: ReturnType { - config.update({ ...config.get(), language: locale }) + const current = config.get() + if (current) { + config.update({ ...current, language: locale as string }) + } params.i18n.locale(locale) }) diff --git a/apps/stage-tamagotchi/src/main/services/airi/widgets/artistry-bridge.ts b/apps/stage-tamagotchi/src/main/services/airi/widgets/artistry-bridge.ts new file mode 100644 index 0000000000..96b32cc6b9 --- /dev/null +++ b/apps/stage-tamagotchi/src/main/services/airi/widgets/artistry-bridge.ts @@ -0,0 +1,550 @@ +import type { createContext as createMainEventaContext } from '@moeru/eventa/adapters/electron/main' +import type 
{ ProvidedBy } from 'injeca' + +import type { artistryConfigSchema } from '../../../configs/artistry' +import type { Config } from '../../../libs/electron/persistence' +import type { WidgetsWindowManager } from '../../../windows/widgets' +import type { ArtistryProvider, ArtistryRequest } from './providers/base' + +import { Buffer } from 'node:buffer' +import { createHash } from 'node:crypto' + +import { useLogg } from '@guiiai/logg' +import { defineInvokeHandler } from '@moeru/eventa' +import { errorMessageFrom } from '@moeru/std' +import { + artistryGenerateHeadless, + artistrySyncConfig, + artistryTestComfyUIConnection, +} from '@proj-airi/stage-shared' +import { injeca } from 'injeca' + +import { ComfyUIProvider } from './providers/comfyui' +import { NanoBananaProvider } from './providers/nanobanana' +import { ReplicateProvider } from './providers/replicate' + +const log = useLogg('artistry-bridge').useGlobalConfig() +const DEFAULT_REMIX_ID = '48250602' + +interface ArtistrySyncSnapshot { + provider?: string + model?: string + promptPrefix?: string + options?: Record + globals?: Record +} + +interface TriggerConfig { + provider?: string + model?: string + promptPrefix?: string + options?: Record + globals?: Record +} + +function robustParse(input: unknown, context?: string): Record { + if (typeof input === 'object' && input !== null) + return input as Record + if (typeof input === 'string' && input.trim()) { + try { + const parsed = JSON.parse(input) + if (typeof parsed === 'object' && parsed !== null) + return parsed as Record + log.warn(`[Artistry Bridge] robustParse(${context || 'unknown'}): Parsed JSON is not an object: ${typeof parsed}`) + return {} + } + catch (e) { + log.warn(`[Artistry Bridge] robustParse(${context || 'unknown'}): JSON parse failed: ${errorMessageFrom(e)} | Input: ${input.slice(0, 100)}`) + return {} + } + } + return {} +} + +const lastTriggerMap = new Map() +const activeRunMap = new Map() + +/** + * Volatile storage for active character 
card artistry defaults. + * Synced from the renderer App.vue whenever the character or settings change. + */ +const cardDefaults: ArtistrySyncSnapshot = { + provider: undefined as string | undefined, + model: undefined as string | undefined, + promptPrefix: undefined as string | undefined, + options: undefined as Record | undefined, + globals: undefined as Record | undefined, +} + +function createRunId(widgetId: string) { + return `${widgetId}:${Date.now()}:${Math.random().toString(36).slice(2, 8)}` +} + +async function downloadImageAsBase64(url: string): Promise { + try { + log.log(`[Artistry Bridge] Downloading image from: ${url}`) + const response = await fetch(url) + if (!response.ok) + throw new Error(`Failed to fetch image: ${response.statusText}`) + const buffer = await response.arrayBuffer() + const base64 = Buffer.from(buffer).toString('base64') + // NOTICE: Downstream renderer paths consume this via fetch(), which requires a data URL. + return `data:image/png;base64,${base64}` + } + catch (error: unknown) { + log.error(`[Artistry Bridge] Failed to download image: ${errorMessageFrom(error)}`) + throw error + } +} + +function supportsJobCallback(provider: ArtistryProvider): provider is ArtistryProvider & Required> { + return typeof provider.setJobCallback === 'function' +} + +// Maintaining a registry of providers +export const artistryProviders = new Map() +artistryProviders.set('comfyui', new ComfyUIProvider()) +artistryProviders.set('replicate', new ReplicateProvider()) +artistryProviders.set('nanobanana', new NanoBananaProvider()) + +// Deduplication map for headless requests +const pendingHeadlessRequests = new Map>() + +export async function generateHeadless(params: { + prompt: string + model?: string + provider?: string + options?: Record + globals?: Record +}): Promise<{ imageUrl?: string, base64?: string, error?: string }> { + // Resolve config and effective globals early to secure the deduplication fingerprint + const { config: artistryConfig } = 
await injeca.resolve({ config: 'configs:artistry' } as { config: ProvidedBy> }) + const activeGlobals = (params.globals || artistryConfig.get()?.artistryGlobals || {}) as Record + + // Create a fingerprint for deduplication + const sourceImage = activeGlobals?.image + const imageHash = typeof sourceImage === 'string' + ? createHash('sha256').update(sourceImage).digest('hex') + : 'NONE' + + // We hash the globals (excluding the heavy image already covered by imageHash) + // to ensure that changing a workflow or provider setting triggers a unique execution. + const { image, ...globalsForFingerprint } = activeGlobals + const globalsHash = createHash('sha256').update(JSON.stringify(globalsForFingerprint)).digest('hex') + + const fingerprint = JSON.stringify({ + p: params.prompt, + m: params.model, + pr: params.provider, + o: params.options, + ih: imageHash, + gh: globalsHash, // Include globals hash (Issue #39) + }) + + if (pendingHeadlessRequests.has(fingerprint)) { + log.log(`[Headless] Deduplicating identical request: ${params.prompt.slice(0, 30)}...`) + return pendingHeadlessRequests.get(fingerprint)! + } + + const executionPromise = (async () => { + const requestedProvider = (params.provider || artistryConfig.get()?.artistryProvider || 'comfyui').trim().toLowerCase() + const provider = artistryProviders.get(requestedProvider) + if (!provider) { + log.error(`[Headless] CRITICAL: Provider '${requestedProvider}' not found in registry! 
fallback to replicate`) + throw new Error(`Provider '${requestedProvider}' not found.`) + } + + // Initialize the provider + if (provider.initialize && activeGlobals) { + log.log(`[Headless] Initializing provider ${requestedProvider} with globals...`) + await provider.initialize(activeGlobals) + } + + log.log(`[Headless] Globals keys: ${Object.keys(activeGlobals || {}).join(', ')}`) + if (activeGlobals?.image) + log.log(`[Headless] Source image length: ${activeGlobals.image.length}`) + + const request: ArtistryRequest = { + prompt: params.prompt, + negativePrompt: params.options?.negativePrompt, + width: typeof params.options?.width === 'number' ? params.options.width : undefined, + height: typeof params.options?.height === 'number' ? params.options.height : undefined, + model: params.model, + extra: { + ...params.options, + image: activeGlobals?.image, + internalJobId: createRunId('headless'), + }, + } + + log.log(`[Headless] Starting generation with provider: ${requestedProvider}, model: ${params.model || 'default'}`) + const job = await provider.generate(request) + log.log(`[Headless] Job created: ${job.jobId}`) + + // Polling/Wait for result + if (!supportsJobCallback(provider)) { + let isDone = false + let lastStatus = await provider.getStatus(job.jobId) + const start = Date.now() + const timeout = 1000 * 60 * 5 // 5 minutes timeout + + while (!isDone) { + if (Date.now() - start > timeout) { + log.error(`[Headless] Job ${job.jobId} timed out after 5 minutes.`) + throw new Error('Image generation timed out after 5 minutes.') + } + + log.log(`[Headless] Polling status for job: ${job.jobId}...`) + lastStatus = await provider.getStatus(job.jobId) + log.log(`[Headless] Status for job ${job.jobId}: ${lastStatus.status}`) + + if (lastStatus.status === 'succeeded' || lastStatus.status === 'failed') { + isDone = true + } + if (!isDone) { + await new Promise(resolve => setTimeout(resolve, 2000)) + } + } + + if (lastStatus.status === 'failed') { + log.error(`[Headless] 
Job ${job.jobId} failed: ${lastStatus.error || 'Unknown error'}`) + throw new Error(lastStatus.error || 'Generation failed') + } + + log.log(`[Headless] Job ${job.jobId} succeeded. Image URL: ${lastStatus.imageUrl}`) + const base64 = lastStatus.imageUrl ? await downloadImageAsBase64(lastStatus.imageUrl) : undefined + return { imageUrl: lastStatus.imageUrl, base64 } + } + else { + // For providers with callbacks (like ComfyUI), we wait for the result via the callback + log.log(`[Headless] Using callback-based wait logic for provider: ${requestedProvider}`) + return new Promise<{ imageUrl?: string, base64?: string }>((resolve, reject) => { + const timeout = 1000 * 60 * 5 // 5 minutes timeout + const timer = setTimeout(() => { + reject(new Error('Image generation timed out after 5 minutes.')) + }, timeout) + + provider.setJobCallback(request.extra?.internalJobId as string, async (status) => { + if (status.status === 'succeeded') { + clearTimeout(timer) + try { + const base64 = status.imageUrl ? await downloadImageAsBase64(status.imageUrl) : undefined + resolve({ imageUrl: status.imageUrl, base64 }) + } + catch (e) { + reject(e) + } + } + else if (status.status === 'failed') { + clearTimeout(timer) + reject(new Error(status.error || 'Generation failed')) + } + }) + }) + } + })() + + pendingHeadlessRequests.set(fingerprint, executionPromise) + + try { + return await executionPromise + } + catch (err) { + return { error: err instanceof Error ? 
err.message : String(err) } + } + finally { + // Remove from map after completion so it can be re-triggered later + pendingHeadlessRequests.delete(fingerprint) + } +} + +async function handleArtistryTrigger(params: { + id: string + componentName?: string + componentProps?: unknown + widgetsManager: WidgetsWindowManager +}) { + if (params.componentName !== 'comfy' && params.componentName !== 'artistry') + return + + log.log(`🔍 Intercepted widget update [${params.id}] for component: ${params.componentName}`) + + const props = robustParse(params.componentProps, 'componentProps') + const payload = robustParse(props.payload, 'payload') + const artistryConfigOverrides = robustParse(props._artistryConfig, '_artistryConfig') + const status = props.status + const prompt = (payload.prompt || props.prompt) as string | undefined + + // Build configuration with fallbacks: + // 1. Explicitly provided in component props (_artistryConfig) + // 2. Character-level defaults synced from renderer (cardDefaults) + const config: TriggerConfig = { + provider: artistryConfigOverrides.provider as string | undefined, + model: (artistryConfigOverrides.model as string | undefined) || cardDefaults.model, + promptPrefix: (artistryConfigOverrides.promptPrefix as string | undefined) || cardDefaults.promptPrefix, + options: { + ...cardDefaults.options, + ...robustParse(artistryConfigOverrides.options, 'artistryOptions'), + }, + // NOTICE: Keep legacy `Globals` fallback while standardizing on `globals`. + // Older widget payloads can still send `Globals`, and dropping it now would break them. 
+ globals: robustParse(artistryConfigOverrides.globals || artistryConfigOverrides.Globals || cardDefaults.globals, 'artistryGlobals'), + } + const { config: artistryConfig } = await injeca.resolve({ config: 'configs:artistry' } as { config: ProvidedBy> }) + const providerId = config.provider || cardDefaults.provider || artistryConfig.get()?.artistryProvider || 'comfyui' + + // [BY DESIGN]: Short-circuit if artistry is explicitly disabled (provider: 'none'). + // This prevents noisy "Provider not found" errors when the feature is intentionally bypassed. + if (providerId === 'none') { + log.log(`[Artistry Bridge] Provider is 'none'. Bypassing generation for widget: ${params.id}`) + return + } + + // Extract options and remix ID fallback + const options = config.options || {} + // TODO: move remix defaults into per-card/provider config to remove this fallback heuristic. + const remixId = (payload.remixId || props.remixId || options.remixId) as string | undefined + || (props.status === 'generating' && !prompt ? DEFAULT_REMIX_ID : undefined) + + const mode = props.mode || (remixId ? 'remix' : 'generate') + const triggerFingerprint = `${mode}:${remixId || ''}:${prompt || ''}` + + // [BY DESIGN]: We only trigger a new generation if the fingerprint (mode + remixId + prompt) + // has actually changed for this specific widget instance. This denotes our stance on the matter: + // it serves as a critical safety guard against redundant, billable API calls triggered + // by reactive UI loops or state synchronization "storms". While this prevents retrying + // the exact same prompt on the same widget instance without a manual modification, + // it protects users from unexpected credit consumption in a high-frequency reactive + // bridge environment. (Refer to Catalog Issue #31). 
+ if (status === 'generating' && lastTriggerMap.get(params.id) !== triggerFingerprint && (prompt || remixId)) { + log.log(`🎯 TRIGGER DETECTED [${params.id}]: ${triggerFingerprint} | Mode: ${mode} | Provider: ${providerId}`) + lastTriggerMap.set(params.id, triggerFingerprint) + const runId = createRunId(params.id) + activeRunMap.set(params.id, runId) + + const provider = artistryProviders.get(providerId) + if (!provider) { + log.error(`🔴 Provider '${providerId}' not found.`) + params.widgetsManager.updateWidget({ + id: params.id, + componentProps: { status: 'error', actionLabel: `Provider '${providerId}' not available` }, + }) + return + } + + // Initialize the provider with global config fallback + const activeGlobals = config.globals || artistryConfig.get()?.artistryGlobals + if (provider.initialize && activeGlobals) { + log.log(`[Artistry Bridge] Initializing provider ${providerId} with ${config.globals ? 'provided' : 'fallback'} globals...`) + await provider.initialize(activeGlobals) + } + + try { + // Build the abstract request + const request: ArtistryRequest = { + prompt: config.promptPrefix ? `${config.promptPrefix} ${prompt}` : (prompt || ''), + model: config.model, + extra: { + ...options, + ...props, // Include root componentProps overrides (template, node overrides) + ...payload, // Payload takes precedence + internalJobId: runId, // Track each generation independently, even on the same widget. + remixId, + }, + } + + const updateIfActive = (statusUpdate: Record) => { + // NOTICE: the same widget can kick off another generation before the previous one fully + // settles. Only the most recent run is allowed to keep updating the widget state. + if (activeRunMap.get(params.id) !== runId) + return + + // [BY DESIGN]: Merging status updates into existing props preserves fields like imageUrl + // that would otherwise be lost when the final 'done' status is sent. 
+ const existing = params.widgetsManager.getWidgetSnapshot(params.id) + params.widgetsManager.updateWidget({ + id: params.id, + componentProps: { + ...(existing?.componentProps as any || {}), + ...statusUpdate, + }, + }) + } + + // If the provider accepts callbacks (like ComfyUI streaming stdout) + if (supportsJobCallback(provider)) { + provider.setJobCallback(runId, (statusUpdate) => { + updateIfActive(statusUpdate as Record) + if (statusUpdate.status === 'succeeded') { + log.log(`🎉 Job complete (via callback) for ${params.id}. Sending final status: done`) + updateIfActive({ status: 'done', progress: 100, actionLabel: undefined }) + } + else if (statusUpdate.status === 'failed') { + log.log(`🔴 Job failed (via callback) for ${params.id}. Preserving error status.`) + // [BY DESIGN]: Don't send status: 'done' here to avoid clearing the error message (Issue #56) + } + }) + } + + const job = await provider.generate(request) + + // Polling loop for providers that don't do callbacks (like Replicate) + if (!supportsJobCallback(provider)) { + let isDone = false + const startTime = Date.now() + const timeoutLength = 1000 * 60 * 5 // 5 minutes timeout (Issue #56) + + while (!isDone) { + // Check for timeout + if (Date.now() - startTime > timeoutLength) { + log.error(`[Artistry Bridge] Job ${job.jobId} timed out after 5 minutes.`) + updateIfActive({ status: 'error', actionLabel: 'Generation timed out' }) + break + } + + // Check if this run is still the active one for this widget. + // If a user started a new generation, we must kill the old polling loop. + if (activeRunMap.get(params.id) !== runId) { + log.log(`[Artistry Bridge] Stale polling loop detected for ${params.id}. 
Aborting background task.`) + break + } + + const status = await provider.getStatus(job.jobId) + if (status.status === 'succeeded' || status.status === 'failed') { + isDone = true + } + + updateIfActive(status as Record) + + if (!isDone) { + await new Promise(resolve => setTimeout(resolve, 2000)) + } + } + + if (isDone) { + const finalStatus = await provider.getStatus(job.jobId) + if (finalStatus.status === 'succeeded') { + log.log(`🎉 Job complete (via polling) for ${params.id}. Sending final status: done`) + updateIfActive({ status: 'done', progress: 100, actionLabel: undefined }) + } + else { + log.log(`🔴 Job failed (via polling) for ${params.id}. Preserving error status.`) + } + } + } + } + catch (error: unknown) { + const message = errorMessageFrom(error) ?? 'Unknown generation error' + log.error(`🔴 Generation failed: ${message}`) + if (activeRunMap.get(params.id) === runId) { + lastTriggerMap.delete(params.id) // [BY DESIGN]: Clear fingerprint on failure to allow retry (Issue #44) + params.widgetsManager.updateWidget({ + id: params.id, + componentProps: { status: 'error', actionLabel: message }, + }) + } + } + } +} + +export async function setupArtistryBridge(params: { + widgetsManager: WidgetsWindowManager + context?: ReturnType['context'] + artistryConfig: Config +}) { + log.log('🚀 Initializing Artistry bridge (Spawn + Update Interceptor + Headless Handler)...') + + if (params.context) { + defineInvokeHandler(params.context, artistryGenerateHeadless, async (payload) => { + log.log(`[Artistry Bridge] [Headless] Received invoke for prompt: ${payload.prompt.slice(0, 50)}...`) + return await generateHeadless(payload) + }) + + defineInvokeHandler(params.context, artistrySyncConfig, (payload) => { + log.log(`🔄 Syncing artistry config to main. 
Provider: ${payload.provider}`) + params.artistryConfig.update({ + artistryProvider: payload.provider || params.artistryConfig.get()?.artistryProvider || 'comfyui', + artistryGlobals: payload.globals || params.artistryConfig.get()?.artistryGlobals || { + comfyuiServerUrl: 'http://localhost:8188', + comfyuiSavedWorkflows: [], + comfyuiActiveWorkflow: '', + replicateApiKey: '', + replicateDefaultModel: 'black-forest-labs/flux-schnell', + replicateAspectRatio: '16:9', + replicateInferenceSteps: 4, + nanobananaApiKey: '', + nanobananaModel: 'gemini-3.1-flash-image-preview', + nanobananaResolution: '1K', + }, + }) + + // Update character-level defaults (volatile only) + cardDefaults.provider = payload.provider + cardDefaults.model = payload.model + cardDefaults.promptPrefix = payload.promptPrefix + cardDefaults.options = payload.options + cardDefaults.globals = payload.globals + }) + + defineInvokeHandler(params.context, artistryTestComfyUIConnection, async (payload) => { + log.log(`🔌 Testing ComfyUI connection at: ${payload.url}`) + try { + const url = payload.url.replace(/\/+$/, '') + const controller = new AbortController() + const id = setTimeout(() => controller.abort(), 10000) + const resp = await fetch(`${url}/system_stats`, { signal: controller.signal }) + clearTimeout(id) + + if (!resp.ok) + throw new Error(`HTTP ${resp.status}`) + const data = await resp.json() as { devices?: Array<{ name?: string, vram_total?: number }> } + const gpus = data.devices?.map(d => d.name).join(', ') || 'Unknown GPU' + const vram = data.devices?.[0]?.vram_total + const vramStr = vram ? `${(vram / 1024 / 1024 / 1024).toFixed(1)} GB` : '' + return { + ok: true, + info: `Connected — ${gpus}${vramStr ? ` (${vramStr} VRAM)` : ''}`, + } + } + catch (e: unknown) { + const message = errorMessageFrom(e) ?? 
'Unknown connection error' + log.error(`🔌 ComfyUI connection test failed: ${message}`) + return { + ok: false, + info: `Failed: ${message}`, + } + } + }) + } + + const originalUpdateWidget = params.widgetsManager.updateWidget + params.widgetsManager.updateWidget = async (payload) => { + const snapshot = params.widgetsManager.getWidgetSnapshot(payload.id) + await originalUpdateWidget.call(params.widgetsManager, payload) + await handleArtistryTrigger({ + id: payload.id, + componentName: snapshot?.componentName, + componentProps: payload.componentProps, + widgetsManager: params.widgetsManager, + }) + } + + const originalPushWidget = params.widgetsManager.pushWidget + params.widgetsManager.pushWidget = async (payload) => { + if (payload.componentName === 'comfy' || payload.componentName === 'artistry') { + log.log(`🖼️ Enabling 'Living Wall' mode for ${payload.id}. Forcing infinite TTL. (Component: ${payload.componentName})`) + payload.ttlMs = 0 + } + + const resultId = await originalPushWidget.call(params.widgetsManager, payload) + + await handleArtistryTrigger({ + id: resultId, + componentName: payload.componentName, + componentProps: payload.componentProps, + widgetsManager: params.widgetsManager, + }) + + return resultId + } +} diff --git a/apps/stage-tamagotchi/src/main/services/airi/widgets/index.ts b/apps/stage-tamagotchi/src/main/services/airi/widgets/index.ts index 913a5e206f..bef0fa4713 100644 --- a/apps/stage-tamagotchi/src/main/services/airi/widgets/index.ts +++ b/apps/stage-tamagotchi/src/main/services/airi/widgets/index.ts @@ -5,7 +5,16 @@ import type { WidgetsWindowManager } from '../../../windows/widgets' import { defineInvokeHandlers } from '@moeru/eventa' -import { widgetsAdd, widgetsClear, widgetsFetch, widgetsOpenWindow, widgetsPrepareWindow, widgetsRemove, widgetsUpdate } from '../../../../shared/eventa' +import { + widgetsAdd, + widgetsClear, + widgetsFetch, + widgetsHideWindow, + widgetsOpenWindow, + widgetsPrepareWindow, + widgetsRemove, + 
widgetsUpdate, +} from '../../../../shared/eventa' import { normalizeOptionalWidgetId, normalizeRequiredWidgetId, @@ -49,6 +58,7 @@ export function createWidgetsService(params: { context: ReturnType { + if (!isFromWindow(options as InvokeOptions, params.window)) + return undefined + return params.widgetsManager!.hideWindow(payload ?? undefined) + }, widgetsAdd: async (payload, options) => { if (!isFromWindow(options as InvokeOptions, params.window)) return undefined diff --git a/apps/stage-tamagotchi/src/main/services/airi/widgets/providers/base.ts b/apps/stage-tamagotchi/src/main/services/airi/widgets/providers/base.ts new file mode 100644 index 0000000000..0905cb61ce --- /dev/null +++ b/apps/stage-tamagotchi/src/main/services/airi/widgets/providers/base.ts @@ -0,0 +1,109 @@ +/** + * Abstract Artistry Provider Interface + * + * All image generation providers (ComfyUI, Replicate, etc.) must implement + * this interface. The bridge dispatches to the active provider based on + * the current AIRI card's artistry settings. + */ + +export interface ArtistryRequest { + /** The text prompt describing the desired image */ + prompt: string + /** Negative prompt — things to avoid (provider support varies) */ + negativePrompt?: string + /** Image width in pixels */ + width?: number + /** Image height in pixels */ + height?: number + /** Provider-specific model identifier */ + model?: string + /** Provider-specific extras (e.g. 
remixId, checkpoint, seed, aspect_ratio) */ + extra?: Record +} + +export interface ArtistryJob { + /** Internal job ID for tracking */ + jobId: string + /** Provider's native job/prediction ID */ + providerJobId: string +} + +export type ArtistryJobStatusType = 'queued' | 'running' | 'succeeded' | 'failed' | 'cancelled' + +export interface ArtistryJobStatus { + status: ArtistryJobStatusType + /** Generation progress 0-100 (not all providers support this) */ + progress?: number + /** Final output image URL */ + imageUrl?: string + /** Error message if failed */ + error?: string + /** Human-readable label of current stage (e.g. "Sampling", "VAE Decode") */ + actionLabel?: string +} + +export interface ArtistryProviderConfig { + /** Unique provider ID (e.g. "comfyui", "replicate") */ + id: string + /** Human-readable display name */ + name: string + /** Provider-specific configuration (API keys, paths, etc.) */ + settings: Record +} + +export interface ArtistryProvider { + /** Unique provider ID */ + readonly id: string + /** Human-readable display name */ + readonly name: string + + /** + * Start an image generation job. + * Returns a job handle for tracking. + */ + generate: (request: ArtistryRequest) => Promise + + /** + * Poll the current status of a running job. + * Returns status, progress, and final image URL when done. + */ + getStatus: (jobId: string) => Promise + + /** + * Cancel a running job (optional — not all providers support this). + */ + cancel?: (jobId: string) => Promise + + /** + * Called when the provider is first initialized with its config. + */ + initialize?: (config: Record) => Promise + + /** + * Optional push callback for providers that stream or callback status updates. + */ + setJobCallback?: (jobId: string, callback: (status: ArtistryJobStatus) => void) => void + + /** + * Clean up resources when the provider is being switched out. 
+ */ + dispose?: () => void +} + +/** + * Per-card artistry settings stored in AiriExtension.modules.artistry + */ +export interface ArtistryModuleSettings { + /** Active provider ID (e.g. "comfyui", "replicate") */ + provider?: string + /** Provider-specific model identifier */ + model?: string + /** String prepended to every LLM-generated prompt for style consistency */ + defaultPromptPrefix?: string + /** + * Free-form provider-specific options as a JSON object. + * For Replicate: { go_fast: true, megapixels: "1", aspect_ratio: "16:9", ... } + * For ComfyUI: { remixId: 48250602, checkpoint: "bunnyMint.safetensors" } + */ + providerOptions?: Record +} diff --git a/apps/stage-tamagotchi/src/main/services/airi/widgets/providers/comfyui.ts b/apps/stage-tamagotchi/src/main/services/airi/widgets/providers/comfyui.ts new file mode 100644 index 0000000000..d4aec9463f --- /dev/null +++ b/apps/stage-tamagotchi/src/main/services/airi/widgets/providers/comfyui.ts @@ -0,0 +1,394 @@ +import type { ArtistryJob, ArtistryJobStatus, ArtistryProvider, ArtistryRequest } from './base' + +import { Buffer } from 'node:buffer' + +import { useLogg } from '@guiiai/logg' + +const log = useLogg('providers-comfyui').useGlobalConfig() + +const POLL_INTERVAL_MS = 5000 +const POLL_TIMEOUT_MS = 1000 * 60 * 5 // 5 minutes + +export class ComfyUIProvider implements ArtistryProvider { + readonly id = 'comfyui' + readonly name = 'ComfyUI (Local)' + + private serverUrl = 'http://localhost:8188' + private savedWorkflows: any[] = [] + private activeWorkflowId = '' + + private jobResults = new Map() + private callbacks = new Map void>() + + private async fetchWithTimeout(url: string, options: RequestInit = {}, timeoutMs = 30000) { + const controller = new AbortController() + const id = setTimeout(() => controller.abort(), timeoutMs) + try { + const response = await fetch(url, { + ...options, + signal: controller.signal, + }) + clearTimeout(id) + return response + } + catch (error) { + clearTimeout(id) 
+ throw error + } + } + + setJobCallback(jobId: string, callback: (status: ArtistryJobStatus) => void) { + this.callbacks.set(jobId, callback) + // If we already have a result, fire it immediately + const result = this.jobResults.get(jobId) + if (result) + callback(result) + } + + private updateStatus(jobId: string, status: ArtistryJobStatus) { + this.jobResults.set(jobId, status) + const callback = this.callbacks.get(jobId) + if (callback) + callback(status) + } + + async initialize(config: any): Promise { + if (config?.comfyuiServerUrl) + this.serverUrl = config.comfyuiServerUrl.replace(/\/+$/, '') // strip trailing slashes + if (config?.comfyuiSavedWorkflows) + this.savedWorkflows = config.comfyuiSavedWorkflows + if (config?.comfyuiActiveWorkflow) + this.activeWorkflowId = config.comfyuiActiveWorkflow + } + + async generate(request: ArtistryRequest): Promise { + const jobId = request.extra?.internalJobId || Math.random().toString(36).slice(2) + + // Resolve which workflow template to use --- per-request template override takes precedence over card model default + const templateId = request.extra?.template || request.model || this.activeWorkflowId + const template = this.savedWorkflows.find((w: any) => w.id === templateId) + + if (!template) { + this.updateStatus(jobId, { + status: 'failed', + error: 'No workflow template configured. Upload a workflow in Settings > Providers > ComfyUI.', + actionLabel: 'Error: No workflow configured', + }) + return { jobId, providerJobId: jobId } + } + + // Start async generation + this.pollForResult(jobId, template, request) + + return { jobId, providerJobId: jobId } + } + + private async pollForResult( + jobId: string, + template: { workflow: Record, exposedFields: Record }, + request: ArtistryRequest, + ) { + this.updateStatus(jobId, { status: 'running', actionLabel: 'Preparing workflow...' }) + + try { + // 0. 
Handle potential image and prompt upload bidirectional flow + const extraStr = JSON.stringify(request.extra || {}) + const workflowStr = JSON.stringify(template.workflow || {}) + const hasImagePlaceholder = extraStr.includes('{{IMAGE}}') || workflowStr.includes('{{IMAGE}}') + const hasPromptPlaceholder = extraStr.includes('{{PROMPT}}') || workflowStr.includes('{{PROMPT}}') + + let uploadedImageName = '' + if (hasImagePlaceholder && request.extra?.image) { + log.log(`[ComfyUI] Bidirectional flow detected. Uploading texture for job ${jobId}...`) + this.updateStatus(jobId, { status: 'running', actionLabel: 'Uploading texture to ComfyUI...' }) + try { + uploadedImageName = await this.uploadImage(request.extra.image) + log.log(`[ComfyUI] Texture uploaded as: ${uploadedImageName}`) + } + catch (e: any) { + log.error(`[ComfyUI] Texture upload failed: ${e.message}`) + } + } + + // 1. Apply overrides to the workflow template (standard injection) + let resolvedPrompt = this.applyOverrides(template, request) + + // 2. Perform final placeholder resolution across the ENTIRE resolved prompt + if (hasImagePlaceholder || hasPromptPlaceholder) { + log.log(`[ComfyUI] Performing final placeholder resolution for ${jobId}...`) + const replacements: Record = { + '{{PROMPT}}': request.prompt || '', + } + if (uploadedImageName) { + replacements['{{IMAGE}}'] = uploadedImageName + } + + resolvedPrompt = this.replacePlaceholders(resolvedPrompt, replacements) + } + + log.log(`[ComfyUI] Resolved prompt for ${jobId}:`, JSON.stringify(resolvedPrompt, null, 2)) + + // 2. POST /prompt to queue the workflow + this.updateStatus(jobId, { status: 'running', actionLabel: 'Queuing in ComfyUI...' 
}) + + let queueResp: Response + try { + queueResp = await this.fetchWithTimeout(`${this.serverUrl}/prompt`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ prompt: resolvedPrompt }), + }, 15000) + } + catch (e: any) { + throw new Error(`Cannot connect to ComfyUI at ${this.serverUrl}: ${e.message}`) + } + + if (!queueResp.ok) { + const errorBody = await queueResp.text() + throw new Error(`Workflow error: ${errorBody.slice(0, 200)}`) + } + + const queueData = await queueResp.json() + const promptId = queueData.prompt_id + if (!promptId) { + throw new Error('ComfyUI returned no prompt_id') + } + + log.log(`[ComfyUI] Queued prompt ${promptId} for job ${jobId}`) + this.updateStatus(jobId, { status: 'running', actionLabel: 'Generating...' }) + + // 3. Poll /history/{prompt_id} until completion + let historyDone = false + let attempt = 0 + const startTime = Date.now() + + while (!historyDone) { + await new Promise(r => setTimeout(r, POLL_INTERVAL_MS)) + attempt++ + + if (Date.now() - startTime > POLL_TIMEOUT_MS) { + throw new Error('Generation timed out after 5 minutes') + } + + if (attempt % 3 === 0) { + log.log(`[ComfyUI] Polling history for ${promptId}... attempt ${attempt}`) + } + + let histResp: Response + try { + histResp = await this.fetchWithTimeout(`${this.serverUrl}/history/${promptId}`, {}, 10000) + } + catch (e: any) { + throw new Error(`ComfyUI disconnected during polling: ${e.message}`) + } + + if (histResp.ok) { + const histData = await histResp.json() + if (histData[promptId]) { + let outputs = histData[promptId].outputs + const stats = histData[promptId].status + + // 3.1. Race condition protection: If outputs are missing, wait a beat and retry once + if ((!outputs || Object.keys(outputs).length === 0) && !historyDone) { + log.warn(`[ComfyUI] Job ${jobId} finished but outputs are empty. 
Retrying history in 1s...`) + await new Promise(r => setTimeout(r, 1000)) + const retryResp = await this.fetchWithTimeout(`${this.serverUrl}/history/${promptId}`, {}, 10000) + if (retryResp.ok) { + const retryData = await retryResp.json() + if (retryData[promptId] && retryData[promptId].outputs) { + log.log(`[ComfyUI] Retry successful for ${jobId}. Managed to find outputs!`) + outputs = retryData[promptId].outputs + } + } + } + + // Log raw history if no images found or if there are status messages + if (stats?.messages && stats.messages.length > 0) { + log.warn(`[ComfyUI] History messages for ${promptId}:`, stats.messages) + } + + // Find first image in any node's output + for (const nodeId in outputs) { + const nodeOutput = outputs[nodeId] + if (nodeOutput.images && nodeOutput.images.length > 0) { + const img = nodeOutput.images[0] + const imageUrl = `${this.serverUrl}/view?filename=${encodeURIComponent(img.filename)}&subfolder=${encodeURIComponent(img.subfolder || '')}&type=${encodeURIComponent(img.type || 'output')}` + log.log(`[ComfyUI] Generation complete for job ${jobId}. Image: ${imageUrl}`) + this.updateStatus(jobId, { status: 'succeeded', progress: 100, imageUrl }) + historyDone = true + break + } + } + + // Job finished but no images + if (!historyDone) { + log.error(`[ComfyUI] Job finished for ${jobId} (Prompt ${promptId}) but no output images found. Raw History:`, JSON.stringify(histData[promptId], null, 2)) + this.updateStatus(jobId, { + status: 'failed', + error: 'Job completed but no images were generated', + actionLabel: 'Error: No images generated', + }) + historyDone = true + } + } + } + } + } + catch (error: any) { + const errorMessage = error.message || String(error) + log.error(`[ComfyUI] Generation failed for job ${jobId}: ${errorMessage}`) + this.updateStatus(jobId, { + status: 'failed', + error: errorMessage, + actionLabel: `Error: ${errorMessage.slice(0, 50)}${errorMessage.length > 50 ? '...' 
: ''}`, + }) + } + finally { + // Clean up callback and job result after completion to prevent memory leaks + setTimeout(() => { + this.callbacks.delete(jobId) + this.jobResults.delete(jobId) + }, 10000) + } + } + + /** + * Apply request overrides to a workflow template. + * Matches nodes by _meta.title and overwrites exposed input fields. + * Mirrors the logic from CUIPP's getComfyTemplate.js. + */ + private applyOverrides( + template: { workflow: Record, exposedFields: Record }, + request: ArtistryRequest, + ): Record { + // Deep clone the workflow so we don't mutate the stored template + const prompt = JSON.parse(JSON.stringify(template.workflow)) + + // Build overrides from the request + const overrides: Record> = {} + + // The main prompt text goes into the first exposed "text" field we find + // COMPAT: If the user ALREADY used a {{PROMPT}} placeholder in the extra params, we skip this auto-injection + const hasPromptPlaceholder = JSON.stringify(request.extra).includes('{{PROMPT}}') + if (request.prompt && !hasPromptPlaceholder) { + for (const [nodeTitle, fields] of Object.entries(template.exposedFields)) { + if (fields.includes('text')) { + if (!overrides[nodeTitle]) + overrides[nodeTitle] = {} + overrides[nodeTitle].text = request.prompt + break // Only inject into the first text field + } + } + } + + // Merge in any explicit per-node overrides from request.extra + // We skip known reserved keys and look for keys that might be node titles + const reservedKeys = ['template', 'internalJobId', 'remixId', 'options'] + if (request.extra) { + for (const [key, value] of Object.entries(request.extra)) { + if (reservedKeys.includes(key)) + continue + + // If it's an object, treat it as a potential node override + if (typeof value === 'object' && value !== null && !Array.isArray(value)) { + if (!overrides[key]) + overrides[key] = {} + Object.assign(overrides[key], value) + } + } + } + + // Still support legacy .options nesting just in case + if 
(request.extra?.options) { + for (const [nodeTitle, fields] of Object.entries(request.extra.options as Record>)) { + if (!overrides[nodeTitle]) + overrides[nodeTitle] = {} + Object.assign(overrides[nodeTitle], fields) + } + } + + // Apply overrides to matching nodes + for (const nodeId in prompt) { + const node = prompt[nodeId] + const title = node._meta?.title + if (title && overrides[title]) { + const nodeOverrides = overrides[title] + for (const [field, value] of Object.entries(nodeOverrides)) { + // Only override exposed fields (security boundary) + if (template.exposedFields[title]?.includes(field)) { + node.inputs[field] = value + } + } + } + } + + // Auto-randomize seed if it's exposed and not explicitly set + for (const [nodeTitle, fields] of Object.entries(template.exposedFields)) { + if (fields.includes('seed') && (overrides[nodeTitle]?.seed === undefined || overrides[nodeTitle]?.seed === null)) { + for (const nodeId in prompt) { + const node = prompt[nodeId] + if (node._meta?.title === nodeTitle) { + node.inputs.seed = Math.floor(Math.random() * 1e15) + break + } + } + } + } + + return prompt + } + + async getStatus(jobId: string): Promise { + return this.jobResults.get(jobId) || { status: 'queued' } + } + + private async uploadImage(base64Data: string): Promise { + // 1. Clean data URL prefix if present + const base64 = base64Data.replace(/^data:image\/\w+;base64,/, '') + const buffer = Buffer.from(base64, 'base64') + + // 2. 
Prepare multipart form data + const formData = new FormData() + const fileName = `vhack_${Date.now()}.png` + + // Electron/Node 18+ fetch handles Blobs in FormData + const blob = new Blob([buffer], { type: 'image/png' }) + formData.append('image', blob, fileName) + formData.append('overwrite', 'true') + + const response = await this.fetchWithTimeout(`${this.serverUrl}/upload/image`, { + method: 'POST', + body: formData, + }, 60000) // 1 minute timeout for uploads + + if (!response.ok) { + const error = await response.text() + throw new Error(`ComfyUI upload failed: ${error}`) + } + + const data = await response.json() + return data.name // Returns the filename in ComfyUI's input folder + } + + private replacePlaceholders(obj: any, replacements: Record): any { + if (typeof obj === 'string') { + let result = obj + for (const [placeholder, value] of Object.entries(replacements)) { + result = result.replace(new RegExp(placeholder.replace(/[.*+?^${}()|[\]\\/]/g, '\\$&'), 'g'), value) + } + return result + } + + if (Array.isArray(obj)) + return obj.map(item => this.replacePlaceholders(item, replacements)) + + if (obj !== null && typeof obj === 'object') { + const newObj: any = {} + for (const [key, value] of Object.entries(obj)) { + newObj[key] = this.replacePlaceholders(value, replacements) + } + return newObj + } + return obj + } +} diff --git a/apps/stage-tamagotchi/src/main/services/airi/widgets/providers/nanobanana.ts b/apps/stage-tamagotchi/src/main/services/airi/widgets/providers/nanobanana.ts new file mode 100644 index 0000000000..a93ea8243a --- /dev/null +++ b/apps/stage-tamagotchi/src/main/services/airi/widgets/providers/nanobanana.ts @@ -0,0 +1,115 @@ +import type { ArtistryJob, ArtistryJobStatus, ArtistryProvider, ArtistryRequest } from './base' + +import { useLogg } from '@guiiai/logg' + +const log = useLogg('providers-nanobanana').useGlobalConfig() + +export class NanoBananaProvider implements ArtistryProvider { + readonly id = 'nanobanana' + readonly name 
= 'Nano Banana (Google AI Studio)' + private apiKey = '' + private defaultModel = 'gemini-1.5-flash' + private defaultResolution = '1K' + + private jobResults = new Map() + private callbacks = new Map void>() + + setJobCallback(jobId: string, callback: (status: ArtistryJobStatus) => void) { + this.callbacks.set(jobId, callback) + const result = this.jobResults.get(jobId) + if (result) + callback(result) + } + + private updateStatus(jobId: string, status: ArtistryJobStatus) { + this.jobResults.set(jobId, status) + const callback = this.callbacks.get(jobId) + if (callback) + callback(status) + } + + async initialize(config: any) { + this.apiKey = config.nanobananaApiKey || config.apiKey || '' + if (config.nanobananaModel) + this.defaultModel = config.nanobananaModel + if (config.nanobananaResolution) + this.defaultResolution = config.nanobananaResolution + log.log(`[Nano Banana] Initialized. API Key present: ${!!this.apiKey}`) + } + + async generate(request: ArtistryRequest): Promise { + if (!this.apiKey) { + throw new Error('Nano Banana API Key not configured') + } + + const jobId = request.extra?.internalJobId || `nanobanana-${Date.now()}` + const model = request.model || this.defaultModel + const resolution = request.extra?.resolution || this.defaultResolution + + // Robust image extraction & cleansing + let base64Image = request.extra?.image || request.extra?.providerOptions?.image || '' + if (base64Image.includes('base64,')) + base64Image = base64Image.split('base64,')[1] + + this.runGeneration(jobId, model, resolution, request.prompt, base64Image) + + return { + jobId, + providerJobId: jobId, + } + } + + private async runGeneration(jobId: string, model: string, resolution: string, prompt: string, base64Image: string) { + this.updateStatus(jobId, { status: 'running', actionLabel: 'Inscribing with Nano Banana...' 
}) + + try { + const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${this.apiKey}` + const generationParts: any[] = [{ text: prompt }] + if (base64Image) { + generationParts.push({ inline_data: { mime_type: 'image/jpeg', data: base64Image } }) + } + + const response = await fetch(url, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + contents: [{ parts: generationParts }], + generationConfig: { imageConfig: { aspectRatio: '1:1', imageSize: resolution } }, + }), + }) + + const json = await response.json() + if (json.error) { + throw new Error(json.error.message || 'Nano Banana API Error') + } + + // Search all parts for the first image + const responseParts = json.candidates?.[0]?.content?.parts || [] + const imagePart = responseParts.find((p: any) => p.inlineData?.data) + const inlineData = imagePart?.inlineData + + if (inlineData?.data) { + const dataUrl = `data:${inlineData.mimeType};base64,${inlineData.data}` + this.updateStatus(jobId, { status: 'succeeded', progress: 100, imageUrl: dataUrl }) + } + else { + throw new Error('No image data returned from Nano Banana') + } + } + catch (e: any) { + log.error(`[Nano Banana] Generation failed: ${e.message}`) + this.updateStatus(jobId, { status: 'failed', error: e.message }) + } + finally { + // Clean up callback and job result after completion to prevent memory leaks + setTimeout(() => { + this.callbacks.delete(jobId) + this.jobResults.delete(jobId) + }, 10000) + } + } + + async getStatus(jobId: string): Promise { + return this.jobResults.get(jobId) || { status: 'queued' } + } +} diff --git a/apps/stage-tamagotchi/src/main/services/airi/widgets/providers/replicate.ts b/apps/stage-tamagotchi/src/main/services/airi/widgets/providers/replicate.ts new file mode 100644 index 0000000000..76cb1610e1 --- /dev/null +++ b/apps/stage-tamagotchi/src/main/services/airi/widgets/providers/replicate.ts @@ -0,0 +1,202 @@ +import type { 
ArtistryJob, ArtistryJobStatus, ArtistryProvider, ArtistryRequest } from './base' + +import Replicate from 'replicate' + +import { useLogg } from '@guiiai/logg' + +const log = useLogg('providers-replicate').useGlobalConfig() + +export class ReplicateProvider implements ArtistryProvider { + readonly id = 'replicate' + readonly name = 'Replicate.ai (Cloud)' + + private apiKey = '' + private defaultModel = 'black-forest-labs/flux-schnell' + private aspectRatio = '16:9' + private inferenceSteps = 4 + private replicate: Replicate | null = null + + private jobResults = new Map() + private callbacks = new Map void>() + + setJobCallback(jobId: string, callback: (status: ArtistryJobStatus) => void) { + this.callbacks.set(jobId, callback) + const result = this.jobResults.get(jobId) + if (result) + callback(result) + } + + private updateStatus(jobId: string, status: ArtistryJobStatus) { + this.jobResults.set(jobId, status) + const callback = this.callbacks.get(jobId) + if (callback) + callback(status) + } + + async initialize(config: any): Promise { + if (config?.replicateApiKey) { + this.apiKey = config.replicateApiKey + this.replicate = new Replicate({ auth: this.apiKey }) + } + else { + this.apiKey = '' + this.replicate = null + } + if (config?.replicateDefaultModel) + this.defaultModel = config.replicateDefaultModel + if (config?.replicateAspectRatio) + this.aspectRatio = config.replicateAspectRatio + if (config?.replicateInferenceSteps) + this.inferenceSteps = config.replicateInferenceSteps + } + + async generate(request: ArtistryRequest): Promise { + if (!this.replicate) { + throw new Error('Replicate provider is not configured. Missing API Key.') + } + + const model = (request.model || request.extra?.model || this.defaultModel) as `${string}/${string}` + const base64Image = request.extra?.image || '' + + // 1. 
Start with defaults + const hasPromptPlaceholder = JSON.stringify(request.extra).includes('{{PROMPT}}') + let inputOptions: Record = { + go_fast: request.extra?.go_fast ?? true, + aspect_ratio: request.extra?.aspect_ratio ?? this.aspectRatio, + output_format: request.extra?.output_format ?? 'png', + output_quality: request.extra?.output_quality ?? 80, + num_inference_steps: request.extra?.num_inference_steps ?? this.inferenceSteps, + } + + // Default prompt injection if NO placeholder is used in overrides + if (request.prompt && !hasPromptPlaceholder) { + inputOptions.prompt = request.prompt + } + + // 2. Merge overrides from the "JSON Parameters" textarea if present + if (request.extra) { + const { image, internalJobId, remixId, ...rest } = request.extra + // [BY DESIGN]: Strip 'prompt' from rest to avoid overwriting the prefixed version from the bridge. + const { prompt: _overriddenPrompt, ...safeRest } = rest as any + inputOptions = { ...inputOptions, ...safeRest } + } + + // 3. Recursive placeholder replacement for {{IMAGE}} and {{PROMPT}} + const replacePlaceholders = (obj: any): any => { + if (typeof obj === 'string') { + let result = obj + // Handle image replacement + if (result.includes('{{IMAGE}}')) { + const dataUrl = base64Image.startsWith('data:') ? 
base64Image : `data:image/jpeg;base64,${base64Image}` + result = result.replace(/\{\{IMAGE\}\}/g, dataUrl) + } + // Handle prompt replacement + if (result.includes('{{PROMPT}}')) { + const truncatedPrompt = this.truncatePrompt(request.prompt || '') + result = result.replace(/\{\{PROMPT\}\}/g, truncatedPrompt) + } + return result + } + if (Array.isArray(obj)) + return obj.map(replacePlaceholders) + if (typeof obj === 'object' && obj !== null) { + const newObj: any = {} + for (const key in obj) + newObj[key] = replacePlaceholders(obj[key]) + return newObj + } + return obj + } + + inputOptions = replacePlaceholders(inputOptions) + + // Ensure main prompt is also truncated if not using a placeholder + if (inputOptions.prompt && !hasPromptPlaceholder) { + inputOptions.prompt = this.truncatePrompt(inputOptions.prompt) + } + + log.log(`[Replicate] Generating with model ${model}. Input keys: ${Object.keys(inputOptions).join(', ')}`) + + // We don't await the result here because the interface expects us to return an ArtistryJob immediately. + // However, replicate.run() blocks until completion. We'll run it in the background and store the result. + const jobId = request.extra?.internalJobId || Math.random().toString(36).slice(2) + + // Start generation asynchronously + this.runGeneration(jobId, model, inputOptions) + + return { jobId, providerJobId: jobId } + } + + private async runGeneration(jobId: string, model: `${string}/${string}`, input: object) { + this.updateStatus(jobId, { status: 'running', actionLabel: 'Requesting cloud generation...' }) + + try { + const output = await this.replicate!.run(model, { input }) + + if (!output) { + throw new Error('No output received from Replicate.') + } + + log.log(`[Replicate] Raw output type: ${typeof output}, isArray: ${Array.isArray(output)}`) + + // Replicate's run() can return a single string, an array of strings, or an array of FileUpload objects + const items = Array.isArray(output) ? 
output : [output] + if (items.length > 0) { + const first = items[0] + let imageUrl: string | undefined + + // Case 1: FileUpload object with .url() method (common in recent SDK versions) + if (typeof first === 'object' && first !== null && 'url' in first && typeof (first as any).url === 'function') { + imageUrl = (first as any).url().href + } + // Case 2: Object with url property as a string + else if (typeof first === 'object' && first !== null && 'url' in first && typeof (first as any).url === 'string') { + imageUrl = (first as any).url + } + // Case 3: Simple string (the URL itself) + else if (typeof first === 'string') { + imageUrl = first + } + + if (imageUrl && (imageUrl.startsWith('http') || imageUrl.startsWith('data:'))) { + log.log(`[Replicate] EXTRACTED IMAGE: ${imageUrl.startsWith('data:') ? 'DATA_URL' : imageUrl}`) + this.updateStatus(jobId, { status: 'succeeded', progress: 100, imageUrl }) + } + else { + log.error(`[Replicate] Failed to extract URL from output: ${JSON.stringify(first)}`) + throw new Error('Output does not contain a recognizable image URL.') + } + } + else { + throw new Error('Replicate returned an empty output array.') + } + } + catch (error: any) { + const errorMessage = error.message || (typeof error === 'object' ? JSON.stringify(error) : String(error)) + log.error(`[Replicate] Generation Failed for ${jobId}: ${errorMessage}`) + this.updateStatus(jobId, { + status: 'failed', + error: errorMessage, + actionLabel: `Error: ${errorMessage.slice(0, 50)}${errorMessage.length > 50 ? '...' 
: ''}`, + }) + } + finally { + // Clean up callback and job result after completion to prevent memory leaks + setTimeout(() => { + this.callbacks.delete(jobId) + this.jobResults.delete(jobId) + }, 10000) + } + } + + async getStatus(jobId: string): Promise { + return this.jobResults.get(jobId) || { status: 'queued' } + } + + private truncatePrompt(prompt: string, maxChars: number = 380): string { + if (prompt.length <= maxChars) + return prompt + log.log(`[Replicate] Truncating prompt from ${prompt.length} to ${maxChars} chars.`) + return `${prompt.slice(0, maxChars)}...` + } +} diff --git a/apps/stage-tamagotchi/src/main/windows/widgets/index.ts b/apps/stage-tamagotchi/src/main/windows/widgets/index.ts index 5c788e3078..c3a4c18406 100644 --- a/apps/stage-tamagotchi/src/main/windows/widgets/index.ts +++ b/apps/stage-tamagotchi/src/main/windows/widgets/index.ts @@ -124,6 +124,7 @@ export interface WidgetsWindowManager { * - Resolves after the registry, renderer, and child windows have been cleared */ clearWidgets: () => Promise + hideWindow: (params?: { id?: string }) => Promise /** * Reads the current snapshot for a single widget id. * @@ -654,6 +655,14 @@ export function setupWidgetsWindowManager(params: { return toSnapshot(record) } + async function hideWindow(params?: { id?: string }) { + const id = params?.id + const context = id ? 
windowContexts.get(id) : undefined + const window = context?.window || activeWidgetsWindow + if (window && !window.isDestroyed()) + window.hide() + } + widgetsManager = { getWindow, openWindow, @@ -661,6 +670,7 @@ export function setupWidgetsWindowManager(params: { updateWidget, removeWidget, clearWidgets, + hideWindow, getWidgetSnapshot, prepareWidgetWindow, } diff --git a/apps/stage-tamagotchi/src/renderer/App.vue b/apps/stage-tamagotchi/src/renderer/App.vue index 749ebcaf2a..ec95379305 100644 --- a/apps/stage-tamagotchi/src/renderer/App.vue +++ b/apps/stage-tamagotchi/src/renderer/App.vue @@ -2,6 +2,7 @@ import { defineInvokeHandler } from '@moeru/eventa' import { useElectronEventaContext, useElectronEventaInvoke } from '@proj-airi/electron-vueuse' import { themeColorFromValue, useThemeColor } from '@proj-airi/stage-layouts/composables/theme-color' +import { artistrySyncConfig } from '@proj-airi/stage-shared' import { ToasterRoot } from '@proj-airi/stage-ui/components' import { useInferencePreload } from '@proj-airi/stage-ui/composables' import { useSharedAnalyticsStore } from '@proj-airi/stage-ui/stores/analytics' @@ -12,6 +13,7 @@ import { useDisplayModelsStore } from '@proj-airi/stage-ui/stores/display-models import { useModsServerChannelStore } from '@proj-airi/stage-ui/stores/mods/api/channel-server' import { useContextBridgeStore } from '@proj-airi/stage-ui/stores/mods/api/context-bridge' import { useAiriCardStore } from '@proj-airi/stage-ui/stores/modules/airi-card' +import { useArtistryStore } from '@proj-airi/stage-ui/stores/modules/artistry' import { usePerfTracerBridgeStore } from '@proj-airi/stage-ui/stores/perf-tracer-bridge' import { listProvidersForPluginHost, shouldPublishPluginHostCapabilities } from '@proj-airi/stage-ui/stores/plugin-host-capabilities' import { useSettings, useSettingsAudioDevice } from '@proj-airi/stage-ui/stores/settings' @@ -71,6 +73,8 @@ const mcpToolsStore = useTamagotchiMcpToolsStore() const pluginToolsStore = 
useTamagotchiPluginToolsStore() const stageWindowLifecycleStore = useStageWindowLifecycleStore() const settingsAudioDeviceStore = useSettingsAudioDevice() +const artistryStore = useArtistryStore() +const { activeProvider, artistryGlobals, activeModel, defaultPromptPrefix, providerOptions } = storeToRefs(artistryStore) const context = useElectronEventaContext() usePerfTracerBridgeStore() initializeStageThreeRuntimeTraceBridge() @@ -87,6 +91,7 @@ const inspectPluginHost = useElectronEventaInvoke(electronPluginInspect) const startTrackingCursorPoint = useElectronEventaInvoke(electronStartTrackMousePosition) const reportPluginCapability = useElectronEventaInvoke(electronPluginUpdateCapability) const setLocale = useElectronEventaInvoke(i18nSetLocale) +const syncArtistryConfig = useElectronEventaInvoke(artistrySyncConfig) const isChatWindowRoute = () => route.path === '/chat' const isWidgetsWindowRoute = () => route.path === '/widgets' @@ -138,10 +143,22 @@ void mcpToolsStore.refresh().catch((error) => { void refreshPluginRuntimeTools() watch(language, () => { - i18n.locale.value = language.value - setLocale(language.value) + i18n.locale.value = language.value || 'en' + setLocale(language.value || 'en') }) +watch([activeProvider, artistryGlobals, activeModel, defaultPromptPrefix, providerOptions], () => { + if (activeProvider.value) { + void syncArtistryConfig({ + provider: activeProvider.value as string, + globals: JSON.parse(JSON.stringify(artistryGlobals.value)), + model: activeModel.value, + promptPrefix: defaultPromptPrefix.value, + options: providerOptions.value, + }) + } +}, { deep: true, immediate: true }) + const { updateThemeColor } = useThemeColor(themeColorFromValue({ light: 'rgb(255 255 255)', dark: 'rgb(18 18 18)' })) watch(dark, () => updateThemeColor(), { immediate: true }) watch(route, () => updateThemeColor(), { immediate: true }) diff --git a/apps/stage-tamagotchi/src/renderer/components/InteractiveArea.vue 
b/apps/stage-tamagotchi/src/renderer/components/InteractiveArea.vue index 53954857ed..0cd08f6a5a 100644 --- a/apps/stage-tamagotchi/src/renderer/components/InteractiveArea.vue +++ b/apps/stage-tamagotchi/src/renderer/components/InteractiveArea.vue @@ -2,43 +2,66 @@ import type { ChatHistoryItem } from '@proj-airi/stage-ui/types/chat' import { errorMessageFrom } from '@moeru/std' -import { ChatHistory } from '@proj-airi/stage-ui/components' +import { ChatHistory, JournalPreviewModal } from '@proj-airi/stage-ui/components' +import { useBackgroundStore } from '@proj-airi/stage-ui/stores/background' import { useChatOrchestratorStore } from '@proj-airi/stage-ui/stores/chat' import { useChatSessionStore } from '@proj-airi/stage-ui/stores/chat/session-store' import { useChatStreamStore } from '@proj-airi/stage-ui/stores/chat/stream-store' +import { useJournalPreviewStore } from '@proj-airi/stage-ui/stores/journal-preview' +import { useAiriCardStore } from '@proj-airi/stage-ui/stores/modules/airi-card' import { BasicTextarea } from '@proj-airi/ui' import { useLocalStorage } from '@vueuse/core' import { storeToRefs } from 'pinia' import { DropdownMenuContent, DropdownMenuItem, DropdownMenuPortal, DropdownMenuRoot, DropdownMenuTrigger } from 'reka-ui' -import { computed, ref, watch } from 'vue' +import { computed, onMounted, ref, watch } from 'vue' import { useI18n } from 'vue-i18n' +import { useRouter } from 'vue-router' import { useChatSyncStore } from '../stores/chat-sync' +const router = useRouter() const messageInput = ref('') +const lastEnterTime = ref(0) const attachments = ref<{ type: 'image', data: string, mimeType: string, url: string }[]>([]) const chatOrchestrator = useChatOrchestratorStore() const chatSession = useChatSessionStore() const chatStream = useChatStreamStore() const chatSyncStore = useChatSyncStore() +const backgroundStore = useBackgroundStore() +const journalPreviewStore = useJournalPreviewStore() +const airiCardStore = useAiriCardStore() + const { 
messages } = storeToRefs(chatSession) const { streamingMessage } = storeToRefs(chatStream) const { sending } = storeToRefs(chatOrchestrator) +const { activeCardId } = storeToRefs(airiCardStore) const { t } = useI18n() +const { openImagePreview } = journalPreviewStore const isComposing = ref(false) const DOUBLE_ENTER_INTERVAL_MS = 300 const TRAILING_NEWLINES_REGEX = /[\r\n]+$/ const SEND_MODES = ['enter', 'ctrl-enter', 'double-enter'] as const type SendMode = (typeof SEND_MODES)[number] const sendMode = useLocalStorage('ui/chat/settings/send-mode', 'enter') -const lastEnterTime = ref(0) const sendModeLabels = computed>(() => ({ 'enter': t('stage.send-mode.enter'), 'ctrl-enter': t('stage.send-mode.ctrl-enter'), 'double-enter': t('stage.send-mode.double-enter'), })) +const latestImageEntries = computed(() => { + if (!activeCardId.value) + return [] + return backgroundStore.journalEntries.slice(0, 3) +}) + +function navigateToImageJournal() { + if (!activeCardId.value) + return + router.push(`/settings/airi-card?cardId=${activeCardId.value}&tab=gallery`) +} + async function handleSend() { if (isComposing.value) { return @@ -59,7 +82,7 @@ async function handleSend() { await chatSyncStore.requestIngest({ text: textToSend, attachments: attachmentsToSend, - toolset: 'widgets', + toolset: 'artistry', }) attachmentsToSend.forEach(att => URL.revokeObjectURL(att.url)) @@ -67,10 +90,7 @@ async function handleSend() { catch (error) { // restore on failure messageInput.value = textToSend - attachments.value = attachmentsToSend.map(att => ({ - ...att, - url: URL.createObjectURL(new Blob([Uint8Array.from(atob(att.data), c => c.charCodeAt(0))], { type: att.mimeType })), - })) + attachments.value = attachmentsToSend chatSession.setSessionMessages(chatSession.activeSessionId, [ ...messages.value, { @@ -86,6 +106,19 @@ function sendFromKeyboard() { void handleSend() } +const fileInput = ref(null) + +function handleManualAttach() { + fileInput.value?.click() +} + +function 
handleFileSelect(event: Event) { + const target = event.target as HTMLInputElement + if (target.files?.length) { + handleFilePaste(Array.from(target.files)) + } +} + function handleMessageInputKeydown(event: KeyboardEvent) { if (isComposing.value || event.key !== 'Enter') return @@ -159,6 +192,10 @@ async function handleDeleteMessage(index: number) { await chatSyncStore.requestDeleteMessage({ index }) } +onMounted(() => { + backgroundStore.initializeStore() +}) + async function handleRetryMessage(index: number) { await chatSyncStore.requestRetry({ sessionId: chatSession.activeSessionId, @@ -178,6 +215,37 @@ async function handleRetryMessage(index: number) { @retry-message="handleRetryMessage($event.index)" /> + + +
+
+ +
+ {{ entry.title }} +
+ + + +
+
+ + + + + + +
+ + + diff --git a/apps/stage-tamagotchi/src/renderer/pages/widgets.vue b/apps/stage-tamagotchi/src/renderer/pages/widgets.vue index 52d7468e37..2224abb331 100644 --- a/apps/stage-tamagotchi/src/renderer/pages/widgets.vue +++ b/apps/stage-tamagotchi/src/renderer/pages/widgets.vue @@ -163,6 +163,7 @@ const Registry: Record> = { 'extension-ui': defineAsyncComponent(async () => (await import('../widgets/extension-ui')).ExtensionUi), 'map': defineAsyncComponent(async () => (await import('../widgets/map')).Map), 'weather': defineAsyncComponent(async () => (await import('../widgets/weather')).Weather), + 'artistry': defineAsyncComponent(async () => (await import('../widgets/artistry')).Artistry), } const GenericWidget = defineComponent({ @@ -221,6 +222,7 @@ function handleClose() {
} function resolveTools(toolset?: ToolsetId) { - if (toolset === 'widgets') { - return async () => { - const [widgetTools, weatherToolset] = await Promise.all([ + const toolsetRegistry: Record Promise> = { + widgets: async () => { + const [w, we] = await Promise.all([widgetsTools(), weatherTools()]) + return [...w, ...we] + }, + artistry: async () => { + const [ai, wi, we] = await Promise.all([ + imageJournalTools(), widgetsTools(), weatherTools(), ]) + return [...ai, ...wi, ...we] + }, + } - return [ - ...widgetTools, - ...weatherToolset, - ] - } + if (toolset && toolsetRegistry[toolset]) { + return toolsetRegistry[toolset] } return undefined diff --git a/apps/stage-tamagotchi/src/renderer/stores/tools/builtin/image-journal.test.ts b/apps/stage-tamagotchi/src/renderer/stores/tools/builtin/image-journal.test.ts new file mode 100644 index 0000000000..c5a4669a8c --- /dev/null +++ b/apps/stage-tamagotchi/src/renderer/stores/tools/builtin/image-journal.test.ts @@ -0,0 +1,42 @@ +import { resolveArtistryConfigFromStore } from '@proj-airi/stage-ui/stores/modules/artistry' +import { describe, expect, it } from 'vitest' + +describe('image_journal config snapshot', () => { + it('extracts plain values instead of leaking Ref objects', () => { + const config = resolveArtistryConfigFromStore({ + activeProvider: { value: 'comfyui' }, + activeModel: { value: 'flux' }, + defaultPromptPrefix: { value: 'anime style' }, + providerOptions: { value: { seed: 42 } }, + comfyuiServerUrl: { value: 'http://localhost:8188' }, + comfyuiSavedWorkflows: { value: [{ id: 'wf-1' }] }, + comfyuiActiveWorkflow: { value: 'wf-1' }, + replicateApiKey: { value: 'r8_xxx' }, + replicateDefaultModel: { value: 'black-forest-labs/flux-schnell' }, + replicateAspectRatio: { value: '16:9' }, + replicateInferenceSteps: { value: 4 }, + nanobananaApiKey: { value: 'AIza-test' }, + nanobananaModel: { value: 'gemini-3.1-flash-image-preview' }, + nanobananaResolution: { value: '1K' }, + }) + + expect(config).toEqual({ 
+ provider: 'comfyui', + model: 'flux', + promptPrefix: 'anime style', + options: { seed: 42 }, + globals: { + comfyuiServerUrl: 'http://localhost:8188', + comfyuiSavedWorkflows: [{ id: 'wf-1' }], + comfyuiActiveWorkflow: 'wf-1', + replicateApiKey: 'r8_xxx', + replicateDefaultModel: 'black-forest-labs/flux-schnell', + replicateAspectRatio: '16:9', + replicateInferenceSteps: 4, + nanobananaApiKey: 'AIza-test', + nanobananaModel: 'gemini-3.1-flash-image-preview', + nanobananaResolution: '1K', + }, + }) + }) +}) diff --git a/apps/stage-tamagotchi/src/renderer/stores/tools/builtin/image-journal.ts b/apps/stage-tamagotchi/src/renderer/stores/tools/builtin/image-journal.ts new file mode 100644 index 0000000000..880b136481 --- /dev/null +++ b/apps/stage-tamagotchi/src/renderer/stores/tools/builtin/image-journal.ts @@ -0,0 +1,210 @@ +import type { ResolvedArtistryConfig } from '@proj-airi/stage-ui/stores/modules/artistry' +import type { Tool } from '@xsai/shared-chat' + +import { defineInvoke } from '@moeru/eventa' +import { createContext } from '@moeru/eventa/adapters/electron/renderer' +import { artistryGenerateHeadless } from '@proj-airi/stage-shared' +import { useBackgroundStore } from '@proj-airi/stage-ui/stores/background' +import { useAiriCardStore } from '@proj-airi/stage-ui/stores/modules/airi-card' +import { resolveArtistryConfigFromStore, useArtistryStore } from '@proj-airi/stage-ui/stores/modules/artistry' +import { tool } from '@xsai/tool' +import { z } from 'zod' + +import { widgetsAdd } from '../../../../shared/eventa' + +export function getArtistryConfig(): ResolvedArtistryConfig { + return resolveArtistryConfigFromStore(useArtistryStore()) +} + +function createInvokers() { + const { context } = createContext(window.electron.ipcRenderer) + return { + generateHeadless: defineInvoke(context, artistryGenerateHeadless), + addWidget: defineInvoke(context, widgetsAdd), + } +} + +type Invokers = ReturnType +let invokeCache: Invokers | undefined + +function 
getInvokers(): Invokers { + if (!invokeCache) + invokeCache = createInvokers() + return invokeCache +} + +const imageJournalParams = z.object({ + action: z.enum(['create', 'apply']).describe('Choose "create" to generate a new image, or "apply" to use an existing one.'), + prompt: z.string().optional().describe('Description for the image (required for "create").'), + title: z.string().optional().describe('Label for the entry (optional).'), + query: z.string().optional().describe('Search term for existing images (required for "apply").'), + mode: z.enum(['inline', 'widget', 'bg', 'bg_widget']).optional().describe('Display mode: "inline" (in chat), "widget" (overlay), "bg" (environment), or "bg_widget" (both). Defaults to character preference.'), +}) + +async function executeCreateImageJournalEntry(params: { prompt?: string, title?: string, mode?: 'inline' | 'widget' | 'bg' | 'bg_widget' }) { + if (!params.prompt?.trim()) + throw new Error('prompt is required for image_journal.create') + + const backgroundStore = useBackgroundStore() + const cardStore = useAiriCardStore() + const activeCard = cardStore.activeCard + const globalArtistryConfig = getArtistryConfig() + + const airiExt = activeCard?.extensions?.airi + const cardArtistry = airiExt?.modules?.artistry + const artistryConfig = { + provider: cardArtistry?.provider || globalArtistryConfig.provider, + model: cardArtistry?.model || globalArtistryConfig.model, + promptPrefix: cardArtistry?.promptPrefix || globalArtistryConfig.promptPrefix, + options: cardArtistry?.options || globalArtistryConfig.options, + globals: globalArtistryConfig.globals, + } + + const title = params.title || `Generation ${new Date().toLocaleString()}` + + // Resolve mode: explicit param > character fallback > global default (inline) + const spawnMode = cardArtistry?.spawnMode + const mode = params.mode || spawnMode || 'inline' + + const { addWidget, generateHeadless } = getInvokers() + + try { + const artistryResult = await 
generateHeadless({ + prompt: artistryConfig.promptPrefix ? `${artistryConfig.promptPrefix} ${params.prompt}` : params.prompt as string, + model: artistryConfig.model as string, + provider: artistryConfig.provider as string, + options: JSON.parse(JSON.stringify(artistryConfig.options || {})), + globals: JSON.parse(JSON.stringify(artistryConfig.globals || {})), + }) + + if (artistryResult.error || (!artistryResult.base64 && !artistryResult.imageUrl)) { + throw new Error(`Failed to generate image: ${artistryResult.error || 'No output received'}`) + } + + let blob: Blob + if (artistryResult.base64) { + const response = await fetch(artistryResult.base64) + blob = await response.blob() + } + else { + const response = await fetch(artistryResult.imageUrl!) + blob = await response.blob() + } + + const entryId = await backgroundStore.addBackground('journal', blob, title, params.prompt, cardStore.activeCardId) + + // Handle Application Logic based on Mode + if (mode === 'bg' || mode === 'bg_widget') { + const cardId = cardStore.activeCardId + if (cardId) { + const card = cardStore.cards.get(cardId) + if (card) { + const extension = JSON.parse(JSON.stringify(card.extensions || {})) + if (!extension.airi) + extension.airi = {} + if (!extension.airi.modules) + extension.airi.modules = {} + extension.airi.modules.activeBackgroundId = entryId + cardStore.updateCard(cardId, { ...card, extensions: extension }) + } + } + } + + if (mode === 'widget' || mode === 'bg_widget') { + try { + await addWidget({ + componentName: 'artistry', + componentProps: { + status: 'done', + entryId, + imageUrl: artistryResult.imageUrl || artistryResult.base64, + prompt: params.prompt as string, + title, + _skipIngestion: true, + }, + size: 'm', + ttlMs: 0, + }) + } + catch (e) { + console.warn('[ImageJournalTool] Failed to spawn Result widget', e) + } + } + + // Return structured result for UI rendering + return JSON.stringify({ + message: `Image created in ${mode} mode${mode === 'bg' || mode === 
'bg_widget' ? ' and set as background' : ''}.`, + entryId, + imageUrl: artistryResult.imageUrl || artistryResult.base64, + title, + prompt: params.prompt, + mode, + }) + } + catch (e) { + console.error('[ImageJournalTool] Failed to create entry', e) + return `Error: ${e instanceof Error ? e.message : String(e)}` + } +} + +async function executeSetAsBackground(params: { query?: string }) { + if (!params.query?.trim()) + return 'Error: query is required for image_journal.apply. Provide a title or ID to search for.' + + const backgroundStore = useBackgroundStore() + const cardStore = useAiriCardStore() + const cardId = cardStore.activeCardId + const query = params.query.toLowerCase().trim() + + const entries = Array.from(backgroundStore.entries.values()) + .filter(e => e.characterId === null || e.characterId === cardId) + + let entry = entries.find(e => e.type === 'journal' && (e.id === query || e.id.toLowerCase().includes(query))) + if (!entry) + entry = entries.find(e => e.type === 'journal' && e.title.toLowerCase().includes(query)) + if (!entry) + entry = entries.find(e => e.type !== 'journal' && e.title.toLowerCase().includes(query)) + + if (entry) { + try { + if (cardId) { + const card = cardStore.cards.get(cardId) + if (card) { + const extension = JSON.parse(JSON.stringify(card.extensions || {})) + if (!extension.airi) + extension.airi = {} + if (!extension.airi.modules) + extension.airi.modules = {} + extension.airi.modules.activeBackgroundId = entry.id + cardStore.updateCard(cardId, { ...card, extensions: extension }) + } + } + return `Background set to "${entry.title}".` + } + catch (e) { + return `Error applying "${entry.title}": ${e instanceof Error ? e.message : String(e)}` + } + } + + const available = entries.filter(e => e.type === 'journal').map(e => e.title).slice(0, 10) + return `No match for "${params.query}".${available.length > 0 ? 
` Try: ${available.join(', ')}` : ''}` +} + +async function executeImageJournalAction(params: any) { + if (params.action === 'create') + return await executeCreateImageJournalEntry(params) + if (params.action === 'apply' || params.action === 'set_as_background') + return await executeSetAsBackground(params) + return 'No action performed.' +} + +const tools: Promise[] = [ + tool({ + name: 'image_journal', + description: 'Manage AI-generated images. Use "create" to generate and display images. An optional "mode" (inline, widget, bg, bg_widget) can override the default character routing preference. Use "apply" to switch to an existing image from the journal.', + execute: params => executeImageJournalAction(params), + parameters: imageJournalParams, + }), +] + +export const imageJournalTools = async () => Promise.all(tools) diff --git a/apps/stage-tamagotchi/src/renderer/widgets/artistry/components/Comfy.vue b/apps/stage-tamagotchi/src/renderer/widgets/artistry/components/Comfy.vue new file mode 100644 index 0000000000..aeb4511e5a --- /dev/null +++ b/apps/stage-tamagotchi/src/renderer/widgets/artistry/components/Comfy.vue @@ -0,0 +1,349 @@ + + + + + diff --git a/apps/stage-tamagotchi/src/renderer/widgets/artistry/index.ts b/apps/stage-tamagotchi/src/renderer/widgets/artistry/index.ts new file mode 100644 index 0000000000..fbab7238af --- /dev/null +++ b/apps/stage-tamagotchi/src/renderer/widgets/artistry/index.ts @@ -0,0 +1 @@ +export { default as Artistry } from './components/Comfy.vue' diff --git a/apps/stage-tamagotchi/src/shared/eventa/index.ts b/apps/stage-tamagotchi/src/shared/eventa/index.ts index e9a6f3f54a..1ea26fdb24 100644 --- a/apps/stage-tamagotchi/src/shared/eventa/index.ts +++ b/apps/stage-tamagotchi/src/shared/eventa/index.ts @@ -53,6 +53,8 @@ export const electronSetUpdaterPreferences = defineInvokeEventa('eventa:event:electron:windows:caption-overlay:is-following-window-changed') export const captionGetIsFollowingWindow = 
defineInvokeEventa('eventa:invoke:electron:windows:caption-overlay:get-is-following-window') +export const electronCaptionToggleVisibility = defineInvokeEventa('eventa:invoke:electron:windows:caption:toggle-visibility') +export const electronCaptionSyncDocking = defineInvokeEventa('eventa:invoke:electron:windows:caption:sync-docking') export type RequestWindowActionDefault = 'confirm' | 'cancel' | 'close' export interface RequestWindowPayload { @@ -180,8 +182,12 @@ export const electronMcpApplyAndRestart = defineInvokeEventa('eventa:invoke:electron:mcp:get-runtime-status') export const electronMcpListTools = defineInvokeEventa('eventa:invoke:electron:mcp:list-tools') export const electronMcpCallTool = defineInvokeEventa('eventa:invoke:electron:mcp:call-tool') +export const electronMcpGetConfig = defineInvokeEventa('eventa:invoke:electron:mcp:get-config') +export const electronMcpUpdateConfig = defineInvokeEventa>('eventa:invoke:electron:mcp:update-config') +export const electronMcpConfigChanged = defineEventa('eventa:event:electron:mcp:config-changed') export const widgetsOpenWindow = defineInvokeEventa('eventa:invoke:electron:windows:widgets:open') +export const widgetsHideWindow = defineInvokeEventa('eventa:invoke:electron:windows:widgets:hide') export const widgetsAdd = defineInvokeEventa('eventa:invoke:electron:windows:widgets:add') export const widgetsRemove = defineInvokeEventa('eventa:invoke:electron:windows:widgets:remove') export const widgetsClear = defineInvokeEventa('eventa:invoke:electron:windows:widgets:clear') @@ -245,6 +251,8 @@ export const widgetsUpdateEvent = defineEventa('eventa:eve // Onboarding window events export const electronOnboardingClose = defineInvokeEventa('eventa:invoke:electron:windows:onboarding:close') +export const electronOnboardingCompleted = defineInvokeEventa('eventa:invoke:electron:windows:onboarding:completed') +export const electronOnboardingSkipped = defineInvokeEventa('eventa:invoke:electron:windows:onboarding:skipped') 
export const electronOpenOnboarding = defineInvokeEventa('eventa:invoke:electron:windows:onboarding:open') // Auth — OIDC Authorization Code + PKCE flow via system browser diff --git a/fetch_unresolved.cjs b/fetch_unresolved.cjs new file mode 100644 index 0000000000..ab7675b877 --- /dev/null +++ b/fetch_unresolved.cjs @@ -0,0 +1,40 @@ +const { spawnSync } = require('node:child_process') + +const query = ` +query { + repository(owner: "moeru-ai", name: "airi") { + pullRequest(number: 1636) { + reviewThreads(last: 80) { + nodes { + id + isResolved + comments(last: 1) { + nodes { + body + author { + login + } + } + } + } + } + } + } +} +` + +const result = spawnSync('gh', ['api', 'graphql', '-f', `query=${query}`], { + encoding: 'utf8', + maxBuffer: 10 * 1024 * 1024, +}) + +if (result.error) { + console.error(result.error) + process.exit(1) +} + +const data = JSON.parse(result.stdout) +const threads = data.data.repository.pullRequest.reviewThreads.nodes +const unresolved = threads.filter(t => !t.isResolved) + +console.log(JSON.stringify(unresolved, null, 2)) diff --git a/packages/i18n/src/locales/en/settings.yaml b/packages/i18n/src/locales/en/settings.yaml index 4745c8d4cb..529c769ca6 100644 --- a/packages/i18n/src/locales/en/settings.yaml +++ b/packages/i18n/src/locales/en/settings.yaml @@ -70,7 +70,8 @@ dialogs: stateNotGranted: Not granted bug-report: title: Bug report (´;ω;`)ヾ(・∀・`) - subtitle: Oops, sorry we made something wrong. Would you mind telling us what happened? + subtitle: Oops, sorry we made something wrong. Would you mind telling us what + happened? trigger-label: Report a Bug submit-label: Send Bug Report triage-description: Include screenshot and page context to help us triage this issue. @@ -234,6 +235,7 @@ pages: scenario: 'Error: A scenario is required.' systemprompt: 'Error: Please, provide a system prompt.' posthistoryinstructions: 'Error: Post history prompt is required.' 
+ invalid_artistry_json: 'Error: Artistry provider options contains invalid JSON.' modules: Modules name_asc: Name (A-Z) name_desc: Name (Z-A) @@ -372,6 +374,68 @@ pages: warmup: description: Whether to warm up before detecting beats for better accuracy. label: Warmup + artistry: + title: Artistry + description: Image generation, scene background and drawing. + page: + title: Artistry Provider Configuration + description: Select the active backend provider for image generation. Characters can override this in their Card settings. + providers: + none: + name: None + description: Bypass and disable the image generation module globally. + comfyui: + name: ComfyUI (Local) + description: Use a local ComfyUI instance via WSL for image generation. + replicate: + name: Replicate.ai (Cloud) + description: Use cloud-based models via the Replicate API. + nanobanana: + name: Nano Banana (Preview) + description: Use Google AI Studio for image preview and reactions. + card: + description: Configure how AIRI generates images and visual content. + comfyui_empty: No ComfyUI workflows configured. Go to Settings → Providers → ComfyUI to upload a workflow template. + exposed_fields: '{count} exposed fields' + open_on_replicate: Open on Replicate + instruction_sync: + title: ComfyUI Instruction Sync + description: A specialized prompt is ready for your {workflowName} workflow. Applying this will overwrite current widget instructions so the AI knows how to use this specific template. + apply: Apply Recommended Prompt + keep: Keep Existing + provider: Artistry Provider + spawn_mode: + label: Manifestation Mode (Spawn Mode) + description: Choose how images are delivered to the interface. + options: + bg: Background Environment + inline: Inline Chat + widget: Overlay Widget + bg_widget: Dual (Background + Widget) + autonomous: + title: Cinematic Autonomy (Autonomous Artist) + description: When enabled, the "Producer" runs in parallel to the character to decide if a visual is needed. 
This prevents the character from forgetting to manifest scenes. + threshold: Manifestation Threshold + threshold_description: Use {min} for aggressive scene creation or {max} to require stronger evidence before generating. + threshold_min: Always Generate (0%) + threshold_max: Strict (100%) + model: + label: Artistry Model (Optional Override) + description: Model identifier if needed by provider + prompt-prefix: + label: Artistry Prompt Default Prefix + description: Pre-pended to every prompt sent to the image generator. + options: + label: Artistry Provider Options (JSON) + widget-instructions: + label: Widget System Prompt + description: Custom instructions for the AI on how to use the generation + capabilities. + categories: + essential: Essential + messaging: Messaging + gaming: Gaming + artistry: Artistry consciousness: description: Personality, desired model, etc. sections: @@ -594,18 +658,6 @@ pages: buy: Charge description: Flux packages to choose from. providers: - explained: - chat: Text generation model providers. e.g. OpenRouter, OpenAI, Ollama. - Speech: Speech (text-to-speech) model providers. e.g. ElevenLabs, Azure Speech. - Transcription: >- - Transcription (speech-to-text) model providers. e.g. Whisper.cpp, - OpenAI, Azure Speech - helpinfo: - title: First time here? - description: > - AIRI requires at least one {chat} provider to be configured to think, - and behave properly. You could think of it as the brain of the - characters living in AIRI system. catalog: edit: config-id-not-found: Provider configuration not found. @@ -711,6 +763,51 @@ pages: title: Basic voice: title: Voice Settings + labels: + recommended: Recommended + filters: + pricing: Pricing + deployment: Deployment + all: All + free: Free + paid: Paid + internal: Internal + local: Local + cloud: Cloud + categories: + chat: + title: Chat + description: Text generation model providers. e.g. OpenRouter, OpenAI, Ollama. 
+ speech: + title: Speech + description: Speech (text-to-speech) model providers. e.g. ElevenLabs, Azure Speech. + transcription: + title: Transcription + description: >- + Transcription (speech-to-text) model providers. e.g. Whisper.cpp, + OpenAI, Azure Speech + artistry: + title: Artistry + description: Image generation and design model providers. e.g. ComfyUI, Replicate. + items: + comfyui: + description: Local image generation runner. + replicate: + description: Cloud-based model inference service. + nanobanana: + description: Google AI Studio Image Preview. + helpinfo: + title: First time here? + description: > + AIRI requires at least one {chat} provider to be configured to think, + and behave properly. You could think of it as the brain of the + characters living in AIRI system. + explained: + chat: Text generation model providers. e.g. OpenRouter, OpenAI, Ollama. + Speech: Speech (text-to-speech) model providers. e.g. ElevenLabs, Azure Speech. + Transcription: >- + Transcription (speech-to-text) model providers. e.g. Whisper.cpp, + OpenAI, Azure Speech description: LLMs, speech providers, etc. provider: app-local-audio-transcription: @@ -789,6 +886,98 @@ pages: speech-noop: title: None description: No speech output. + comfyui: + settings: + title: ComfyUI (Local) + heading: ComfyUI Native API + description: Connect to your local ComfyUI and bring your own workflows. + info: + what_you_need: + label: What You Need + value: ComfyUI running locally or on your network. + how_to_export: + label: How To Export + value: Enable Dev Mode → "Save (API Format)". + scope_boundary: + label: Scope Boundary + value: Model downloads & node installs are your job. + connection: + title: Connection + connected: Connected + failed: Connection failed + error_prefix: Error + test: Test + testing: Testing... 
+ unknown_error: Unknown connection error + unknown_gpu: Unknown GPU + server_url: + label: Server URL + description: The address where ComfyUI is running + placeholder: http://localhost:8188 + cors: + title: CORS Block Detected + description: ComfyUI blocks requests from other applications by default. To allow AIRI to connect, you must start ComfyUI with the `--enable-cors-header "*"` flag. + command: python main.py --enable-cors-header "*" + workflows: + title: Workflow Templates + upload: Upload Workflow + cancel_upload: Cancel + empty: No workflows uploaded yet. Click "Upload Workflow" to import a workflow_api.json from ComfyUI. + exposed_parameters: Exposed Parameters + summary: '{nodes} nodes · {fields} exposed fields' + remove: Remove + config_snippet: Artistry Config Snippet + copy_json: Copy JSON + paste_hint: Paste this into your AIRI Card artistry config to override these nodes. + upload: + prompt: Drop or select a workflow_api.json file + invalid_json: Invalid JSON + workflow_name: + label: Workflow Name + description: Give this workflow a recognizable name + placeholder: e.g. Anime Text2Img + select_fields: 'Select fields to expose to the AI agent:' + fields_exposed: '{count} field(s) exposed' + save: Save Workflow + replicate: + settings: + title: Replicate.ai + heading: Replicate.ai Configuration + description: Configure your cloud image generation settings. + api_key: + label: API Key + description: Your Replicate API token (starts with r8_) + placeholder: r8_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + default_model: + label: Default Model + description: Fallback owner/model string to use if the character card doesn't specify one + placeholder: black-forest-labs/flux-schnell + aspect_ratio: + label: Aspect Ratio + description: Default image aspect ratio (e.g. 
16:9, 1:1, 9:16) + placeholder: '16:9' + inference_steps: + label: Inference Steps + description: Number of steps for the diffusion process (lower is faster, higher is better quality) + nanobanana: + settings: + title: Nano Banana + heading: Nano Banana (Google AI Studio) + description: Configure Google Gemini's native image generation capabilities. + api_key: + label: API Key + description: Your Google AI Studio API Key + placeholder: AIpk... + preferred_model: + label: Preferred Model + description: The specific Gemini image preview model to use + default_resolution: + label: Default Resolution + description: The target resolution for generated images + model_options: + nano_banana_2: Nano Banana 2 (Gemini 3.1 Flash Image) + nano_banana_pro: Nano Banana Pro (Gemini 3 Pro Image) + nano_banana: Nano Banana (Gemini 2.5 Flash Image) deepseek: description: deepseek.com title: DeepSeek @@ -1039,9 +1228,6 @@ pages: errors: title: QR scan failed failed: Failed to scan or connect with the QR code. - scene: - description: Configure the environment where the character lives - title: Scene system: color-scheme: description: Change the color scheme of the stage. @@ -1193,6 +1379,32 @@ pages: button: Open credits: buy: Buy + scene: + title: Scenes + description: Customize the virtual environment for your characters. + beta_label: Scenes System + beta_description: Each character card specifies its own preferred background + from this gallery. Setting it here will set it as the default for the + currently active character. + background_image: + title: Active Character Background + no_background: No background active for this character. Upload a square or + landscape image for best results. + upload: Upload to Gallery + change: Change Background + clear: Clear Default + gallery: + title: Scene Gallery + empty: No images in gallery yet. Upload one above! 
+ set_as_global: Set as Character Default + delete: Delete from Gallery + global_badge: Character Default + active_badge: Current Scene + delete_confirm: Are you sure you want to delete this background? + tip: + label: Tip! + description: Using a square image will leverage cover cropping in + portrait mode, focusing on the center of the scene. sections: section: general: diff --git a/packages/stage-pages/src/pages/settings/airi-card/components/CardCreationDialog.vue b/packages/stage-pages/src/pages/settings/airi-card/components/CardCreationDialog.vue index 51230c602d..128f58ae02 100644 --- a/packages/stage-pages/src/pages/settings/airi-card/components/CardCreationDialog.vue +++ b/packages/stage-pages/src/pages/settings/airi-card/components/CardCreationDialog.vue @@ -4,8 +4,10 @@ import type { AiriExtension } from '@proj-airi/stage-ui/stores/modules/airi-card import kebabcase from '@stdlib/string-base-kebabcase' +import { DEFAULT_ARTISTRY_WIDGET_INSTRUCTION } from '@proj-airi/stage-ui/constants/prompts/artistry-instruction' import { useDisplayModelsStore } from '@proj-airi/stage-ui/stores/display-models' import { useAiriCardStore } from '@proj-airi/stage-ui/stores/modules/airi-card' +import { useArtistryStore } from '@proj-airi/stage-ui/stores/modules/artistry' import { useConsciousnessStore } from '@proj-airi/stage-ui/stores/modules/consciousness' import { useSpeechStore } from '@proj-airi/stage-ui/stores/modules/speech' import { useProvidersStore } from '@proj-airi/stage-ui/stores/providers' @@ -23,9 +25,27 @@ import { import { computed, ref, toRaw, watch } from 'vue' import { useI18n } from 'vue-i18n' +import CardCreationTabArtistry from './tabs/CardCreationTabArtistry.vue' + interface Props { modelValue: boolean cardId?: string // If provided, edit mode; otherwise create mode + initialTab?: string +} + +interface LegacyArtistrySettings { + provider?: string + model?: string + promptPrefix?: string + widgetInstruction?: string + options?: Record +} + +type 
AiriExtensionWithLegacyArtistry = AiriExtension & { + artistry?: LegacyArtistrySettings + modules?: AiriExtension['modules'] & { + artistry?: LegacyArtistrySettings + } } const props = defineProps() @@ -42,11 +62,13 @@ const speechStore = useSpeechStore() const providersStore = useProvidersStore() const displayModelsStore = useDisplayModelsStore() const stageModelStore = useSettingsStageModel() +const artistryStore = useArtistryStore() const { activeProvider: consciousnessProvider, activeModel: defaultConsciousnessModel } = storeToRefs(consciousnessStore) const { activeSpeechProvider: speechProvider, activeSpeechModel: defaultSpeechModel, activeSpeechVoiceId: defaultSpeechVoiceId } = storeToRefs(speechStore) const { displayModels } = storeToRefs(displayModelsStore) const { stageModelSelected: defaultDisplayModelId } = storeToRefs(stageModelStore) +const { activeProvider: defaultArtistryProvider } = storeToRefs(artistryStore) // Determine if we're in edit mode const isEditMode = computed(() => !!props.cardId) @@ -59,6 +81,16 @@ const selectedSpeechModel = ref('') const selectedSpeechVoiceId = ref('') const selectedDisplayModelId = ref('') +// Artistry configuration +const selectedArtistryProvider = ref('') +const selectedArtistryModel = ref('') +const selectedArtistryPromptPrefix = ref('') +const selectedArtistryWidgetInstruction = ref('') +const selectedArtistrySpawnMode = ref<'bg' | 'widget' | 'inline' | 'bg_widget'>('bg_widget') +const selectedArtistryAutonomousEnabled = ref(false) +const selectedArtistryAutonomousThreshold = ref(70) +const selectedArtistryConfigStr = ref('{\n \n}') + // Computed: available display model options const displayModelOptions = computed(() => displayModels.value.map(model => ({ @@ -119,6 +151,16 @@ const speechVoiceOptions = computed(() => { })) }) +// Computed: available artistry provider options +const artistryProviderOptions = computed(() => { + return [ + { value: 'none', label: 'None (Disabled)' }, + { value: 'replicate', label: 
'Replicate' }, + { value: 'comfyui', label: 'ComfyUI' }, + { value: 'nanobanana', label: 'Nano Banana' }, + ] +}) + // Load models for current providers on init watch(() => [consciousnessProvider.value, speechProvider.value], async ([consProvider, spProvider]) => { if (consProvider) { @@ -184,6 +226,7 @@ const tabs: Tab[] = [ { id: 'identity', label: t('settings.pages.card.creation.identity'), icon: 'i-solar:emoji-funny-square-bold-duotone' }, { id: 'behavior', label: t('settings.pages.card.creation.behavior'), icon: 'i-solar:chat-round-line-bold-duotone' }, { id: 'modules', label: t('settings.pages.card.modules'), icon: 'i-solar:widget-4-bold-duotone' }, + { id: 'artistry', label: t('settings.pages.modules.artistry.title'), icon: 'i-solar:gallery-bold-duotone' }, { id: 'settings', label: t('settings.pages.card.creation.settings'), icon: 'i-solar:settings-bold-duotone' }, ] @@ -191,8 +234,11 @@ const tabs: Tab[] = [ const activeTab = computed({ get: () => { // If current active tab is not in available tabs, reset to first tab - if (!tabs.some(tab => tab.id === activeTabId.value)) + if (!tabs.some(tab => tab.id === activeTabId.value)) { + if (props.initialTab && tabs.some(tab => tab.id === props.initialTab)) + return props.initialTab return tabs[0]?.id || '' + } return activeTabId.value }, set: (value: string) => { @@ -200,6 +246,16 @@ const activeTab = computed({ }, }) +// Reset active tab when dialog opens +watch(() => props.modelValue, (isOpen) => { + if (isOpen) { + if (props.initialTab && tabs.some(tab => tab.id === props.initialTab)) + activeTabId.value = props.initialTab + else + activeTabId.value = '' // Let computed handle default + } +}) + // Check for errors, and save built Cards : const showError = ref(false) @@ -251,8 +307,36 @@ function saveCard(card: Card): boolean { errorMessage.value = t('settings.pages.card.creation.errors.posthistoryinstructions') return false } + + // Validate Artistry JSON if provided + if 
(selectedArtistryConfigStr.value.trim()) { + try { + const parsed = JSON.parse(selectedArtistryConfigStr.value) + if (typeof parsed !== 'object' || parsed === null || Array.isArray(parsed)) { + throw new Error('Not an object') + } + } + catch (e) { + showError.value = true + errorMessage.value = t('settings.pages.card.creation.errors.invalid_artistry_json') + return false + } + } + + showError.value = false + + // Build options with final safety parse + let artistryOptions: Record<string, unknown> | undefined + if (selectedArtistryConfigStr.value.trim()) { + try { + artistryOptions = JSON.parse(selectedArtistryConfigStr.value) + } + catch { + // Should not happen due to validation above + artistryOptions = undefined + } + } + + // Build card with modules extension + const cardWithModules = { ...rawCard, @@ -270,12 +354,21 @@ function saveCard(card: Card): boolean { voice_id: selectedSpeechVoiceId.value || defaultSpeechVoiceId.value, }, displayModelId: selectedDisplayModelId.value || defaultDisplayModelId.value, + artistry: { + provider: selectedArtistryProvider.value || defaultArtistryProvider.value, + model: selectedArtistryModel.value, + promptPrefix: selectedArtistryPromptPrefix.value, + widgetInstruction: selectedArtistryWidgetInstruction.value, + spawnMode: selectedArtistrySpawnMode.value, + options: artistryOptions, + autonomousEnabled: selectedArtistryAutonomousEnabled.value, + autonomousThreshold: selectedArtistryAutonomousThreshold.value, + }, }, agents: {}, } as AiriExtension, }, } - if (isEditMode.value && props.cardId) { // Edit mode: update existing card cardStore.updateCard(props.cardId, cardWithModules) @@ -295,7 +388,7 @@ function saveCard(card: Card): boolean { function initializeCard(): Card { // Extract existing card data if in edit mode const existingCard = (isEditMode.value && props.cardId) ?
cardStore.getCard(props.cardId) : undefined - const airiExt = existingCard?.extensions?.airi as AiriExtension | undefined + const airiExt = existingCard?.extensions?.airi as AiriExtensionWithLegacyArtistry | undefined // Initialize module selections with fallback logic (handles all cases: create, edit with/without extension) selectedConsciousnessProvider.value = airiExt?.modules?.consciousness?.provider || consciousnessProvider.value @@ -305,6 +398,23 @@ function initializeCard(): Card { selectedSpeechVoiceId.value = airiExt?.modules?.speech?.voice_id || defaultSpeechVoiceId.value selectedDisplayModelId.value = airiExt?.modules?.displayModelId || defaultDisplayModelId.value + // NOTICE: keep legacy `extensions.airi.artistry` fallback so existing cards continue to load. + const artistrySettings = airiExt?.modules?.artistry || airiExt?.artistry + selectedArtistryProvider.value = artistrySettings?.provider || defaultArtistryProvider.value + selectedArtistryModel.value = artistrySettings?.model || '' + selectedArtistryPromptPrefix.value = artistrySettings?.promptPrefix || '' + selectedArtistryWidgetInstruction.value = artistrySettings?.widgetInstruction || DEFAULT_ARTISTRY_WIDGET_INSTRUCTION + selectedArtistrySpawnMode.value = (artistrySettings as any)?.spawnMode || 'bg_widget' + selectedArtistryAutonomousEnabled.value = (artistrySettings as any)?.autonomousEnabled ?? false + selectedArtistryAutonomousThreshold.value = (artistrySettings as any)?.autonomousThreshold ?? 70 + + try { + selectedArtistryConfigStr.value = artistrySettings?.options ? JSON.stringify(artistrySettings.options, null, 2) : '{\n \n}' + } + catch { + selectedArtistryConfigStr.value = '{\n \n}' + } + // Return existing card data or defaults if (existingCard) { return { ...toRaw(existingCard) } @@ -546,6 +656,20 @@ function getDefaultPlaceholder(defaultValue: string | undefined): string {
+ +
+ + +
+ +
+
+
+

+ Pinned Background +

+

+ Select the image to show when this character is active. +

+
+ +
+
+