diff --git a/apps/dashboard/src/pages/PipelineDetail.tsx b/apps/dashboard/src/pages/PipelineDetail.tsx
index 1aed546a..281deaa8 100644
--- a/apps/dashboard/src/pages/PipelineDetail.tsx
+++ b/apps/dashboard/src/pages/PipelineDetail.tsx
@@ -13,15 +13,14 @@ interface StreamProgress {
status: string
cumulative_record_count: number
run_record_count: number
- records_per_second?: number
errors?: Array<{ message: string; failure_type?: string }>
}
interface GlobalProgress {
elapsed_ms: number
run_record_count: number
- rows_per_second: number
- window_rows_per_second: number
+ records_per_second: number
+ window_records_per_second: number
state_checkpoint_count: number
}
@@ -201,11 +200,11 @@ export function PipelineDetail({ id, onBack }: PipelineDetailProps) {
diff --git a/apps/engine/package.json b/apps/engine/package.json
index ee97502b..883e5574 100644
--- a/apps/engine/package.json
+++ b/apps/engine/package.json
@@ -56,10 +56,12 @@
"dotenv": "^16.4.7",
"googleapis": "^148.0.0",
"hono": "^4",
+ "ink": "^7.0.0",
"openapi-fetch": "^0.17.0",
"pg": "^8.16.3",
"pino": "^10",
"pino-pretty": "^13",
+ "react": "^19.2.4",
"ws": "^8.18.0",
"zod": "^4.3.6"
},
@@ -67,6 +69,7 @@
"@hyperjump/json-schema": "^1.17.5",
"@types/node": "^24.10.1",
"@types/pg": "^8.15.4",
+ "@types/react": "^19.2.14",
"openapi-typescript": "^7.13.0",
"vitest": "^3.2.4"
},
diff --git a/apps/engine/src/__generated__/openapi.d.ts b/apps/engine/src/__generated__/openapi.d.ts
index 126953a8..085fda15 100644
--- a/apps/engine/src/__generated__/openapi.d.ts
+++ b/apps/engine/src/__generated__/openapi.d.ts
@@ -465,7 +465,7 @@ export interface components {
* @enum {string}
*/
type: "trace";
- /** @description Diagnostic/status payload with subtypes for error, stream status, estimates, and progress. */
+ /** @description Diagnostic/status payload with subtypes for error, stream status, estimates, and global progress. */
trace: {
/** @constant */
trace_type: "error";
@@ -486,25 +486,19 @@ export interface components {
} | {
/** @constant */
trace_type: "stream_status";
- /** @description Per-stream status update. Sources emit the minimal form (stream + status). The engine emits enriched versions with record counts and throughput rates. */
+ /** @description Per-stream status update. Sources emit the minimal form (stream + status). The engine enriches with record counts. Only emitted on status transitions. */
stream_status: {
/** @description Stream being reported on. */
stream: string;
/**
- * @description Current phase of the stream within this sync run.
+ * @description Lifecycle status. Errors are orthogonal — a stream can be complete with errors. Sources may store richer error statuses internally for retry logic.
* @enum {string}
*/
- status: "started" | "running" | "complete" | "transient_error" | "system_error" | "config_error" | "auth_error";
+ status: "started" | "complete";
/** @description Cumulative records synced for this stream across all sync runs. Monotonically increasing; initialized from engine state on resume. Set by the engine, not the source. */
cumulative_record_count?: number;
/** @description Records synced for this stream in the current sync run. Set by the engine. */
run_record_count?: number;
- /** @description Records synced since the last stream_status emission for this stream. Set by the engine. Used for instantaneous per-stream throughput. */
- window_record_count?: number;
- /** @description Average records per second for this stream over the entire run: run_record_count / elapsed seconds. Set by the engine. */
- records_per_second?: number;
- /** @description Average API requests per second for this stream over the entire run. Set by the engine from source-reported request counts. */
- requests_per_second?: number;
};
} | {
/** @constant */
@@ -520,19 +514,27 @@ export interface components {
};
} | {
/** @constant */
- trace_type: "progress";
- /** @description Periodic global sync progress emitted by the engine. Aggregate stats only — per-stream detail is in stream_status messages. Each emission is a full replacement. */
- progress: {
+ trace_type: "global_progress";
+ /** @description Global sync progress emitted by the engine, co-emitted with every stream_status trace. Aggregate stats only — per-stream detail is in stream_status messages. Each emission is a full replacement. */
+ global_progress: {
/** @description Wall-clock milliseconds since the sync run started. */
elapsed_ms: number;
/** @description Total records synced across all streams in this run. */
run_record_count: number;
+ /** @description Total records synced across all streams across all runs. */
+ cumulative_record_count?: number;
/** @description Overall throughput for the entire run: run_record_count / elapsed seconds. */
- rows_per_second: number;
+ records_per_second: number;
/** @description Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval. */
- window_rows_per_second: number;
+ window_records_per_second: number;
/** @description Total source_state messages observed so far in this sync run. */
state_checkpoint_count: number;
+ /** @description Total API requests made by the source in this run. */
+ request_count?: number;
+ /** @description Total API requests across all runs. */
+ cumulative_request_count?: number;
+ /** @description Total wall-clock time across all runs. */
+ cumulative_elapsed_ms?: number;
};
};
};
@@ -642,35 +644,39 @@ export interface components {
elapsed_ms?: number;
/** @description Full sync state at the end of the run. source: accumulated from source_state messages; engine: updated cumulative record counts; destination: reserved. Consumers can persist this directly and pass it back on resume. */
state?: components["schemas"]["SyncState"];
- /** @description Final global aggregates. Same shape as trace/progress. */
+ /** @description Final global aggregates. Same shape as trace/global_progress. */
global_progress?: {
/** @description Wall-clock milliseconds since the sync run started. */
elapsed_ms: number;
/** @description Total records synced across all streams in this run. */
run_record_count: number;
+ /** @description Total records synced across all streams across all runs. */
+ cumulative_record_count?: number;
/** @description Overall throughput for the entire run: run_record_count / elapsed seconds. */
- rows_per_second: number;
+ records_per_second: number;
/** @description Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval. */
- window_rows_per_second: number;
+ window_records_per_second: number;
/** @description Total source_state messages observed so far in this sync run. */
state_checkpoint_count: number;
+ /** @description Total API requests made by the source in this run. */
+ request_count?: number;
+ /** @description Total API requests across all runs. */
+ cumulative_request_count?: number;
+ /** @description Total wall-clock time across all runs. */
+ cumulative_elapsed_ms?: number;
};
/** @description Per-stream end-of-sync summary. Errors only appear here, not in stream_status messages. */
stream_progress?: {
[key: string]: {
/**
- * @description Final stream status.
+ * @description Lifecycle status. Errors are orthogonal — a stream can be complete with errors.
* @enum {string}
*/
- status: "started" | "running" | "complete" | "transient_error" | "system_error" | "config_error" | "auth_error";
+ status: "started" | "complete";
/** @description Cumulative records synced for this stream across all runs. */
cumulative_record_count: number;
/** @description Records synced in this run. */
run_record_count: number;
- /** @description Average records/sec for this stream over the run. */
- records_per_second?: number;
- /** @description Average requests/sec for this stream over the run. */
- requests_per_second?: number;
/** @description All accumulated errors for this stream during this run. */
errors?: {
/** @description Human-readable error description. */
@@ -770,7 +776,7 @@ export interface components {
Message: components["schemas"]["RecordMessage"] | components["schemas"]["SourceStateMessage"] | components["schemas"]["CatalogMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["TraceMessage"] | components["schemas"]["SpecMessage"] | components["schemas"]["ConnectionStatusMessage"] | components["schemas"]["ControlMessage"] | components["schemas"]["EofMessage"];
DiscoverOutput: components["schemas"]["CatalogMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["TraceMessage"];
DestinationOutput: components["schemas"]["SourceStateMessage"] | components["schemas"]["TraceMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["EofMessage"];
- SyncOutput: components["schemas"]["SourceStateMessage"] | components["schemas"]["TraceMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["EofMessage"] | components["schemas"]["ControlMessage"];
+ SyncOutput: components["schemas"]["SourceStateMessage"] | components["schemas"]["CatalogMessage"] | components["schemas"]["TraceMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["EofMessage"] | components["schemas"]["ControlMessage"];
CheckOutput: components["schemas"]["ConnectionStatusMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["TraceMessage"];
SetupOutput: components["schemas"]["ControlMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["TraceMessage"];
TeardownOutput: components["schemas"]["LogMessage"] | components["schemas"]["TraceMessage"];
diff --git a/apps/engine/src/__generated__/openapi.json b/apps/engine/src/__generated__/openapi.json
index 14172963..f427c24e 100644
--- a/apps/engine/src/__generated__/openapi.json
+++ b/apps/engine/src/__generated__/openapi.json
@@ -1523,14 +1523,9 @@
"type": "string",
"enum": [
"started",
- "running",
- "complete",
- "transient_error",
- "system_error",
- "config_error",
- "auth_error"
+ "complete"
],
- "description": "Current phase of the stream within this sync run."
+ "description": "Lifecycle status. Errors are orthogonal — a stream can be complete with errors. Sources may store richer error statuses internally for retry logic."
},
"cumulative_record_count": {
"description": "Cumulative records synced for this stream across all sync runs. Monotonically increasing; initialized from engine state on resume. Set by the engine, not the source.",
@@ -1543,27 +1538,13 @@
"type": "integer",
"minimum": -9007199254740991,
"maximum": 9007199254740991
- },
- "window_record_count": {
- "description": "Records synced since the last stream_status emission for this stream. Set by the engine. Used for instantaneous per-stream throughput.",
- "type": "integer",
- "minimum": -9007199254740991,
- "maximum": 9007199254740991
- },
- "records_per_second": {
- "description": "Average records per second for this stream over the entire run: run_record_count / elapsed seconds. Set by the engine.",
- "type": "number"
- },
- "requests_per_second": {
- "description": "Average API requests per second for this stream over the entire run. Set by the engine from source-reported request counts.",
- "type": "number"
}
},
"required": [
"stream",
"status"
],
- "description": "Per-stream status update. Sources emit the minimal form (stream + status). The engine emits enriched versions with record counts and throughput rates."
+ "description": "Per-stream status update. Sources emit the minimal form (stream + status). The engine enriches with record counts. Only emitted on status transitions."
}
},
"required": [
@@ -1614,9 +1595,9 @@
"properties": {
"trace_type": {
"type": "string",
- "const": "progress"
+ "const": "global_progress"
},
- "progress": {
+ "global_progress": {
"type": "object",
"properties": {
"elapsed_ms": {
@@ -1631,11 +1612,17 @@
"maximum": 9007199254740991,
"description": "Total records synced across all streams in this run."
},
- "rows_per_second": {
+ "cumulative_record_count": {
+ "description": "Total records synced across all streams across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "records_per_second": {
"type": "number",
"description": "Overall throughput for the entire run: run_record_count / elapsed seconds."
},
- "window_rows_per_second": {
+ "window_records_per_second": {
"type": "number",
"description": "Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval."
},
@@ -1644,25 +1631,43 @@
"minimum": -9007199254740991,
"maximum": 9007199254740991,
"description": "Total source_state messages observed so far in this sync run."
+ },
+ "request_count": {
+ "description": "Total API requests made by the source in this run.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_request_count": {
+ "description": "Total API requests across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_elapsed_ms": {
+ "description": "Total wall-clock time across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
}
},
"required": [
"elapsed_ms",
"run_record_count",
- "rows_per_second",
- "window_rows_per_second",
+ "records_per_second",
+ "window_records_per_second",
"state_checkpoint_count"
],
- "description": "Periodic global sync progress emitted by the engine. Aggregate stats only — per-stream detail is in stream_status messages. Each emission is a full replacement."
+ "description": "Global sync progress emitted by the engine, co-emitted with every stream_status trace. Aggregate stats only — per-stream detail is in stream_status messages. Each emission is a full replacement."
}
},
"required": [
"trace_type",
- "progress"
+ "global_progress"
]
}
],
- "description": "Diagnostic/status payload with subtypes for error, stream status, estimates, and progress.",
+ "description": "Diagnostic/status payload with subtypes for error, stream status, estimates, and global progress.",
"type": "object",
"discriminator": {
"propertyName": "trace_type"
@@ -1893,7 +1898,7 @@
"$ref": "#/components/schemas/SyncState"
},
"global_progress": {
- "description": "Final global aggregates. Same shape as trace/progress.",
+ "description": "Final global aggregates. Same shape as trace/global_progress.",
"type": "object",
"properties": {
"elapsed_ms": {
@@ -1908,11 +1913,17 @@
"maximum": 9007199254740991,
"description": "Total records synced across all streams in this run."
},
- "rows_per_second": {
+ "cumulative_record_count": {
+ "description": "Total records synced across all streams across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "records_per_second": {
"type": "number",
"description": "Overall throughput for the entire run: run_record_count / elapsed seconds."
},
- "window_rows_per_second": {
+ "window_records_per_second": {
"type": "number",
"description": "Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval."
},
@@ -1921,13 +1932,31 @@
"minimum": -9007199254740991,
"maximum": 9007199254740991,
"description": "Total source_state messages observed so far in this sync run."
+ },
+ "request_count": {
+ "description": "Total API requests made by the source in this run.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_request_count": {
+ "description": "Total API requests across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_elapsed_ms": {
+ "description": "Total wall-clock time across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
}
},
"required": [
"elapsed_ms",
"run_record_count",
- "rows_per_second",
- "window_rows_per_second",
+ "records_per_second",
+ "window_records_per_second",
"state_checkpoint_count"
]
},
@@ -1944,14 +1973,9 @@
"type": "string",
"enum": [
"started",
- "running",
- "complete",
- "transient_error",
- "system_error",
- "config_error",
- "auth_error"
+ "complete"
],
- "description": "Final stream status."
+ "description": "Lifecycle status. Errors are orthogonal — a stream can be complete with errors."
},
"cumulative_record_count": {
"type": "integer",
@@ -1965,14 +1989,6 @@
"maximum": 9007199254740991,
"description": "Records synced in this run."
},
- "records_per_second": {
- "description": "Average records/sec for this stream over the run.",
- "type": "number"
- },
- "requests_per_second": {
- "description": "Average requests/sec for this stream over the run.",
- "type": "number"
- },
"errors": {
"description": "All accumulated errors for this stream during this run.",
"type": "array",
@@ -2374,6 +2390,9 @@
{
"$ref": "#/components/schemas/SourceStateMessage"
},
+ {
+ "$ref": "#/components/schemas/CatalogMessage"
+ },
{
"$ref": "#/components/schemas/TraceMessage"
},
@@ -2392,6 +2411,7 @@
"propertyName": "type",
"mapping": {
"source_state": "#/components/schemas/SourceStateMessage",
+ "catalog": "#/components/schemas/CatalogMessage",
"trace": "#/components/schemas/TraceMessage",
"log": "#/components/schemas/LogMessage",
"eof": "#/components/schemas/EofMessage",
diff --git a/apps/engine/src/api/app.ts b/apps/engine/src/api/app.ts
index 79221dd0..ea92a9eb 100644
--- a/apps/engine/src/api/app.ts
+++ b/apps/engine/src/api/app.ts
@@ -10,6 +10,7 @@ import { HTTPException } from 'hono/http-exception'
import pg from 'pg'
import type { Message, ConnectorResolver, TraceMessage } from '../lib/index.js'
import type { EofPayload } from '@stripe/sync-protocol'
+import { renderSyncProgress } from '../lib/sync-progress-state.js'
import {
createEngine,
createConnectorSchemas,
@@ -123,101 +124,8 @@ async function* logApiStream(
const dangerouslyVerbose = process.env.DANGEROUSLY_VERBOSE_LOGGING === 'true'
-const REASON_EMOJI: Record = {
- complete: '✅',
- time_limit: '⏱️',
- state_limit: '📦',
- error: '❌',
- aborted: '🛑',
-}
-
-const STATUS_EMOJI: Record = {
- complete: '✅',
- started: '🔄',
- running: '🔄',
- transient_error: '⚠️',
- system_error: '❌',
- config_error: '❌',
- auth_error: '🔒',
-}
-
function formatEof(eof: EofPayload): string {
- const emoji = REASON_EMOJI[eof.reason] ?? '❓'
- const elapsed = eof.global_progress?.elapsed_ms
- ? `${(eof.global_progress.elapsed_ms / 1000).toFixed(1)}s`
- : ''
- const totalRows = eof.global_progress?.run_record_count ?? 0
- const rps = eof.global_progress?.rows_per_second?.toFixed(1) ?? '0'
- const checkpoints = eof.global_progress?.state_checkpoint_count ?? 0
-
- const lines: string[] = []
- lines.push(
- `${emoji} Sync ${eof.reason}${elapsed ? ` (${elapsed}` : ''}${totalRows ? ` | ${totalRows} rows, ${rps} rows/s` : ''}${checkpoints ? `, ${checkpoints} checkpoints` : ''}${elapsed ? ')' : ''}`
- )
-
- const sp = eof.stream_progress
- if (sp) {
- let complete = 0
- let inProgress = 0
- let errored = 0
- let pending = 0
- const errorStreams: string[] = []
- const activeStreams: { name: string; rows: number; rps: string }[] = []
-
- for (const [name, s] of Object.entries(sp)) {
- if (s.status === 'complete') {
- complete++
- if (s.run_record_count > 0) {
- activeStreams.push({
- name,
- rows: s.run_record_count,
- rps: s.records_per_second?.toFixed(1) ?? '0',
- })
- }
- } else if (s.status === 'started' || s.status === 'running') {
- inProgress++
- if (s.run_record_count > 0) {
- activeStreams.push({
- name,
- rows: s.run_record_count,
- rps: s.records_per_second?.toFixed(1) ?? '0',
- })
- }
- } else if (
- s.status === 'transient_error' ||
- s.status === 'system_error' ||
- s.status === 'config_error' ||
- s.status === 'auth_error'
- ) {
- errored++
- const errMsg = s.errors?.[0]?.message ?? s.status
- errorStreams.push(`${STATUS_EMOJI[s.status]} ${name}: ${errMsg}`)
- } else {
- pending++
- }
- }
-
- // Show streams that synced rows this run
- for (const s of activeStreams.sort((a, b) => b.rows - a.rows)) {
- lines.push(` ✅ ${s.name}: ${s.rows} rows @ ${s.rps} rows/s`)
- }
-
- // Show errored streams
- for (const e of errorStreams) {
- lines.push(` ${e}`)
- }
-
- // Summary line
- const parts: string[] = []
- if (complete) parts.push(`${complete} complete`)
- if (inProgress) parts.push(`${inProgress} in progress`)
- if (errored) parts.push(`${errored} errored`)
- if (pending) parts.push(`${pending} pending`)
- parts.push(`${totalRows} total rows this run`)
- lines.push(` 📊 ${parts.join(', ')}`)
- }
-
- return lines.join('\n')
+ return renderSyncProgress(eof, [], true).join('\n')
}
/**
diff --git a/apps/engine/src/cli/sync-ui.tsx b/apps/engine/src/cli/sync-ui.tsx
new file mode 100644
index 00000000..d10072b9
--- /dev/null
+++ b/apps/engine/src/cli/sync-ui.tsx
@@ -0,0 +1,306 @@
+import React from 'react'
+import { Box, Text } from 'ink'
+import type { EofPayload, EofStreamProgress } from '@stripe/sync-protocol'
+
+// ── Formatting helpers ────────────────────────────────────────────
+
+function fmt(n: number): string {
+ return n.toLocaleString('en-US')
+}
+
+function fmtDuration(ms: number): string {
+ if (ms < 1000) return `${ms}ms`
+ if (ms < 60_000) return `${(ms / 1000).toFixed(1)}s`
+ const mins = Math.floor(ms / 60_000)
+ const secs = Math.round((ms % 60_000) / 1000)
+ if (mins < 60) return secs > 0 ? `${mins}m ${secs}s` : `${mins}m`
+ const hrs = Math.floor(mins / 60)
+ const rm = mins % 60
+ return rm > 0 ? `${hrs}h ${rm}m` : `${hrs}h`
+}
+
+function fmtRate(rps: number): string {
+ return rps >= 1000 ? `${(rps / 1000).toFixed(1)}k/s` : `${rps.toFixed(1)}/s`
+}
+
+// ── Constants ─────────────────────────────────────────────────────
+
+const REASON_COLOR: Record = {
+ complete: 'green',
+ time_limit: 'yellow',
+ state_limit: 'blue',
+ error: 'red',
+ aborted: 'red',
+}
+
+const REASON_LABEL: Record = {
+ complete: 'Sync complete',
+ time_limit: 'Time limit reached',
+ state_limit: 'State limit reached',
+ error: 'Sync failed',
+ aborted: 'Sync aborted',
+}
+
+const ERROR_COLOR: Record = {
+ transient_error: 'yellow',
+ system_error: 'red',
+ config_error: 'magenta',
+ auth_error: 'red',
+}
+
+const ERROR_LABEL: Record = {
+ transient_error: 'transient',
+ system_error: 'system',
+ config_error: 'config',
+ auth_error: 'auth',
+}
+
+// ── Sub-components ────────────────────────────────────────────────
+
+function Divider({ width = 60 }: { width?: number }) {
+ return {'─'.repeat(width)}
+}
+
+function StatRow({
+ label,
+ value,
+ dimLabel = false,
+}: {
+ label: string
+ value: string
+ dimLabel?: boolean
+}) {
+ return (
+
+ {label}
+ {value}
+
+ )
+}
+
+function StreamRow({
+ name,
+ info,
+ nameWidth,
+ running,
+}: {
+ name: string
+ info: EofStreamProgress
+ nameWidth: number
+ running: boolean
+}) {
+ const cum = info.cumulative_record_count
+ const run = info.run_record_count
+ const isComplete = info.status === 'complete'
+ const hasErrors = (info.errors?.length ?? 0) > 0
+
+ return (
+
+
+ {/* Status dot */}
+
+ {isComplete ? (
+ {'✓ '}
+ ) : running ? (
+ {'▶ '}
+ ) : (
+ {'· '}
+ )}
+
+ {/* Stream name */}
+
+
+ {name.padEnd(nameWidth)}
+
+
+ {/* Cumulative count */}
+
+ {cum > 0 ? fmt(cum) : '—'}
+
+ {/* Run delta */}
+
+ {run > 0 ? +{fmt(run)} : null}
+
+
+
+ {/* Per-stream errors */}
+ {(info.errors ?? []).map((err, i) => (
+
+
+ [{ERROR_LABEL[err.failure_type ?? 'system_error'] ?? 'error'}] {err.message}
+
+
+ ))}
+
+ )
+}
+
+// ── Main component ────────────────────────────────────────────────
+
+export interface SyncProgressProps {
+ eof: EofPayload
+ catalog: string[]
+ final: boolean
+ /** Number of pipeline_sync calls made so far (backfill mode) */
+ attempt?: number
+}
+
+export function SyncProgressUI({ eof, catalog, final, attempt }: SyncProgressProps) {
+ const gp = eof.global_progress
+ const sp = eof.stream_progress ?? {}
+
+ // Partition streams
+ const complete: [string, EofStreamProgress][] = []
+ const started: [string, EofStreamProgress][] = []
+ const pending: string[] = []
+
+ const known = new Set(Object.keys(sp))
+ for (const [name, info] of Object.entries(sp)) {
+ if (info.status === 'complete') complete.push([name, info])
+ else started.push([name, info])
+ }
+ for (const name of catalog) {
+ if (!known.has(name)) pending.push(name)
+ }
+
+ complete.sort((a, b) => b[1].cumulative_record_count - a[1].cumulative_record_count)
+ started.sort((a, b) => b[1].cumulative_record_count - a[1].cumulative_record_count)
+
+ const allStreamNames = [...complete.map((c) => c[0]), ...started.map((s) => s[0]), ...pending]
+ const nameWidth = Math.max(...allStreamNames.map((n) => n.length), 12)
+
+ const errCount = Object.values(sp).filter((i) => (i.errors?.length ?? 0) > 0).length
+ const reasonColor = REASON_COLOR[eof.reason] ?? 'white'
+ const reasonLabel = REASON_LABEL[eof.reason] ?? eof.reason
+
+ return (
+
+ {/* ── Header ── */}
+
+ {final ? (
+
+
+ {reasonLabel}
+
+ {attempt != null && attempt > 1 && ({attempt} attempts)}
+
+ ) : (
+
+
+ Syncing
+
+ {attempt != null && attempt > 1 && · attempt {attempt}}
+ {gp && (
+
+ {' '}
+ · {fmtDuration(gp.elapsed_ms)}
+ {gp.window_records_per_second != null && gp.window_records_per_second > 0
+ ? ` · ${fmtRate(gp.window_records_per_second)}`
+ : ''}
+
+ )}
+
+ )}
+
+
+ {/* ── Global stats ── */}
+ {gp && (
+
+
+
+ 0 && (gp.cumulative_record_count ?? 0) > gp.run_record_count
+ ? ` (+${fmt(gp.run_record_count)} this run)`
+ : ''
+ }`}
+ />
+
+
+ {final && gp.records_per_second > 0 && (
+
+ )}
+
+
+
+ )}
+
+ {/* ── Stream table header ── */}
+ {allStreamNames.length > 0 && (
+
+
+
+
+ {'stream'.padEnd(nameWidth)}
+
+
+
+
+ total
+
+
+
+
+ this run
+
+
+
+ )}
+
+ {/* ── Complete streams ── */}
+ {complete.length > 0 && (
+ 0 || pending.length > 0 ? 1 : 0}>
+ {complete.map(([name, info]) => (
+
+ ))}
+
+ )}
+
+ {/* ── In-progress streams ── */}
+ {started.length > 0 && (
+ 0 ? 1 : 0}>
+ {started.map(([name, info]) => (
+
+ ))}
+
+ )}
+
+ {/* ── Pending streams (collapsed) ── */}
+ {pending.length > 0 && (
+
+
+ {'· '}
+
+
+ {pending.length} pending: {pending.slice(0, 5).join(', ')}
+ {pending.length > 5 ? ` +${pending.length - 5} more` : ''}
+
+
+ )}
+
+ {/* ── Footer summary ── */}
+ {complete.length + started.length + pending.length > 0 && (
+
+
+
+ )}
+
+ {complete.length > 0 && {complete.length} complete}
+ {started.length > 0 && {started.length} in progress}
+ {pending.length > 0 && {pending.length} pending}
+ {errCount > 0 && {errCount} with errors}
+ {gp && gp.run_record_count > 0 && (
+ +{fmt(gp.run_record_count)} records this run
+ )}
+ {eof.cutoff && cutoff: {eof.cutoff}}
+
+
+ )
+}
diff --git a/apps/engine/src/cli/sync.ts b/apps/engine/src/cli/sync.ts
index 5dbd9674..f31256c9 100644
--- a/apps/engine/src/cli/sync.ts
+++ b/apps/engine/src/cli/sync.ts
@@ -1,8 +1,12 @@
+import React from 'react'
+import { render } from 'ink'
import { defineCommand } from 'citty'
import type { Engine } from '../lib/engine.js'
import type { ConnectorResolver } from '../lib/index.js'
import { readonlyStateStore, type StateStore } from '../lib/state-store.js'
import { type PipelineConfig, type SyncState, emptySyncState } from '@stripe/sync-protocol'
+import { createSyncDisplayState } from '../lib/sync-progress-state.js'
+import { SyncProgressUI } from './sync-ui.js'
export function createSyncCmd(engine: Engine, _resolver: ConnectorResolver) {
return defineCommand({
@@ -46,6 +50,16 @@ export function createSyncCmd(engine: Engine, _resolver: ConnectorResolver) {
type: 'string',
description: 'Stop after N seconds',
},
+ baseUrl: {
+ type: 'string',
+ description:
+ 'Stripe API base URL (or STRIPE_API_BASE env, default: https://api.stripe.com)',
+ },
+ progress: {
+ type: 'boolean',
+ default: false,
+ description: 'Force progress display (auto-enabled when stderr is a TTY)',
+ },
live: {
type: 'boolean',
default: false,
@@ -84,6 +98,10 @@ export function createSyncCmd(engine: Engine, _resolver: ConnectorResolver) {
// Inject optional source config overrides
const stripeConfig = pipeline.source.stripe as Record
+ const baseUrl = args.baseUrl || process.env.STRIPE_API_BASE
+ if (baseUrl) {
+ stripeConfig.base_url = baseUrl
+ }
if (backfillLimit) {
stripeConfig.backfill_limit = backfillLimit
}
@@ -101,7 +119,21 @@ export function createSyncCmd(engine: Engine, _resolver: ConnectorResolver) {
: undefined
const output = engine.pipeline_sync(pipeline, { state: syncState, time_limit: timeLimit })
- // Persist state checkpoints and stream NDJSON to stdout
+ const showProgress = args.progress || process.stderr.isTTY
+ const display = showProgress ? createSyncDisplayState() : null
+
+ // Mount Ink UI on stderr if showing progress
+ const inkInstance = display
+ ? render(
+ React.createElement(SyncProgressUI, {
+ eof: display.state.eof,
+ catalog: display.state.catalog,
+ final: false,
+ }),
+ { stdout: process.stderr }
+ )
+ : null
+
for await (const msg of output) {
if (msg.type === 'source_state') {
if (msg.source_state.state_type === 'global') {
@@ -110,9 +142,26 @@ export function createSyncCmd(engine: Engine, _resolver: ConnectorResolver) {
await store.set(msg.source_state.stream, msg.source_state.data)
}
}
- process.stdout.write(JSON.stringify(msg) + '\n')
+
+ if (display && inkInstance) {
+ const changed = display.update(msg)
+ if (changed) {
+ const final = msg.type === 'eof'
+ inkInstance.rerender(
+ React.createElement(SyncProgressUI, {
+ eof: display.state.eof,
+ catalog: display.state.catalog,
+ final,
+ })
+ )
+ }
+ } else if (!display) {
+ process.stdout.write(JSON.stringify(msg) + '\n')
+ }
}
+ inkInstance?.unmount()
+
if ('close' in store && typeof store.close === 'function') {
await store.close()
}
diff --git a/apps/engine/src/lib/engine.test.ts b/apps/engine/src/lib/engine.test.ts
index 0f46285f..b50a2cde 100644
--- a/apps/engine/src/lib/engine.test.ts
+++ b/apps/engine/src/lib/engine.test.ts
@@ -252,7 +252,7 @@ describe('protocol schemas', () => {
trace_type: 'stream_status',
stream_status: {
stream: 'customers',
- status: 'running',
+ status: 'started',
},
},
})
@@ -1072,7 +1072,7 @@ describe('engine.pipeline_sync() pipeline', () => {
trace_type: 'stream_status' as const,
stream_status: {
stream: 'customers',
- status: 'running' as const,
+ status: 'started' as const,
},
},
}
diff --git a/apps/engine/src/lib/engine.ts b/apps/engine/src/lib/engine.ts
index 91a8bd9d..9f742590 100644
--- a/apps/engine/src/lib/engine.ts
+++ b/apps/engine/src/lib/engine.ts
@@ -14,6 +14,7 @@ import {
SyncState,
RecordMessage,
SourceStateMessage,
+ EofMessage,
coerceSyncState,
collectFirst,
split,
@@ -578,6 +579,7 @@ export async function createEngine(resolver: ConnectorResolver): Promise
yield* trackProgress({
initial_state: normalizedState,
+ catalog: filteredCatalog,
recordCounter,
})(limited)
})()
diff --git a/apps/engine/src/lib/pipeline.test.ts b/apps/engine/src/lib/pipeline.test.ts
index 752827e1..d5a43887 100644
--- a/apps/engine/src/lib/pipeline.test.ts
+++ b/apps/engine/src/lib/pipeline.test.ts
@@ -304,13 +304,13 @@ describe('log()', () => {
type: 'trace',
trace: {
trace_type: 'stream_status',
- stream_status: { stream: 'orders', status: 'running' },
+ stream_status: { stream: 'orders', status: 'started' },
},
},
]
await drain(log(toAsync(msgs)))
expect(logger.info).toHaveBeenCalledWith(
- { stream: 'orders', status: 'running' },
+ { stream: 'orders', status: 'started' },
'stream_status'
)
})
diff --git a/apps/engine/src/lib/progress.test.ts b/apps/engine/src/lib/progress.test.ts
index 896b8401..2212ab07 100644
--- a/apps/engine/src/lib/progress.test.ts
+++ b/apps/engine/src/lib/progress.test.ts
@@ -72,8 +72,14 @@ describe('trackProgress', () => {
const outputs = await collect(
trackProgress({
- interval_ms: 0,
- initial_cumulative_counts: { customers: 5 },
+ initial_state: {
+ source: { streams: {}, global: {} },
+ destination: { streams: {}, global: {} },
+ engine: {
+ streams: { customers: { cumulative_record_count: 5 } },
+ global: {},
+ },
+ },
recordCounter: counter,
})(
toAsync([
@@ -100,10 +106,10 @@ describe('trackProgress', () => {
)
)
- const progressTraces = outputs.filter(
- (m) => m.type === 'trace' && m.trace.trace_type === 'progress'
+ const globalProgressTraces = outputs.filter(
+ (m) => m.type === 'trace' && m.trace.trace_type === 'global_progress'
)
- expect(progressTraces.length).toBeGreaterThan(0)
+ expect(globalProgressTraces.length).toBeGreaterThan(0)
const eof = outputs.find((m) => m.type === 'eof')
expect(eof).toBeDefined()
@@ -118,7 +124,7 @@ describe('trackProgress', () => {
},
destination: { streams: {}, global: {} },
engine: {
- streams: { customers: { cumulative_record_count: 7 } },
+ streams: { customers: { cumulative_record_count: 7, status: 'complete' } },
global: {},
},
},
@@ -138,6 +144,137 @@ describe('trackProgress', () => {
})
})
+ it('emits stream_status only on transitions, not periodically', async () => {
+ const counter = createRecordCounter()
+ const outputs = await collect(
+ trackProgress({
+ recordCounter: counter,
+ })(
+ toAsync([
+ {
+ type: 'source_state',
+ source_state: { state_type: 'stream', stream: 'customers', data: { cursor: '1' } },
+ },
+ // Second source_state for same stream should NOT emit another stream_status
+ {
+ type: 'source_state',
+ source_state: { state_type: 'stream', stream: 'customers', data: { cursor: '2' } },
+ },
+ {
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'customers', status: 'complete' },
+ },
+ },
+ { type: 'eof', eof: { reason: 'complete' } },
+ ])
+ )
+ )
+
+ const streamStatusTraces = outputs.filter(
+ (m) =>
+ m.type === 'trace' && m.trace.trace_type === 'stream_status' && m._emitted_by === 'engine'
+ )
+ // First source_state → started transition + complete transition + final on EOF = 3
+ // The second source_state should NOT trigger another (already started)
+ const statusValues = streamStatusTraces.map((m) => (m as any).trace.stream_status.status)
+ // started (from first source_state), complete (from stream_status trace), complete (final on EOF)
+ expect(statusValues).toEqual(['started', 'complete', 'complete'])
+ })
+
+ it('co-emits global_progress with every stream_status', async () => {
+ const counter = createRecordCounter()
+ const outputs = await collect(
+ trackProgress({
+ recordCounter: counter,
+ })(
+ toAsync([
+ {
+ type: 'source_state',
+ source_state: { state_type: 'stream', stream: 'customers', data: { cursor: '1' } },
+ },
+ { type: 'eof', eof: { reason: 'complete' } },
+ ])
+ )
+ )
+
+ // Every stream_status from engine should be followed by a global_progress
+ const engineTraces = outputs.filter((m) => m.type === 'trace' && m._emitted_by === 'engine')
+ for (let i = 0; i < engineTraces.length - 1; i++) {
+ const current = engineTraces[i] as any
+ const next = engineTraces[i + 1] as any
+ if (current.trace.trace_type === 'stream_status') {
+ expect(next.trace.trace_type).toBe('global_progress')
+ }
+ }
+ })
+
+ it('emits catalog as first message when provided', async () => {
+ const counter = createRecordCounter()
+ const outputs = await collect(
+ trackProgress({
+ recordCounter: counter,
+ catalog: {
+ streams: [
+ {
+ stream: { name: 'customers', primary_key: [['id']] },
+ sync_mode: 'incremental',
+ destination_sync_mode: 'append',
+ },
+ ],
+ },
+ })(toAsync([{ type: 'eof', eof: { reason: 'complete' } }]))
+ )
+
+ expect(outputs[0]).toMatchObject({
+ type: 'catalog',
+ catalog: { streams: [{ name: 'customers', primary_key: [['id']] }] },
+ })
+ })
+
+ it('errors are orthogonal to lifecycle status', async () => {
+ const counter = createRecordCounter()
+ const outputs = await collect(
+ trackProgress({
+ recordCounter: counter,
+ })(
+ toAsync([
+ {
+ type: 'source_state',
+ source_state: { state_type: 'stream', stream: 'customers', data: { cursor: '1' } },
+ },
+ {
+ type: 'trace',
+ trace: {
+ trace_type: 'error',
+ error: {
+ message: 'rate limited',
+ failure_type: 'transient_error',
+ stream: 'customers',
+ },
+ },
+ },
+ {
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'customers', status: 'complete' },
+ },
+ },
+ { type: 'eof', eof: { reason: 'complete' } },
+ ])
+ )
+ )
+
+ const eof = outputs.find((m) => m.type === 'eof') as any
+ // Stream is complete AND has errors — they're orthogonal
+ expect(eof.eof.stream_progress.customers.status).toBe('complete')
+ expect(eof.eof.stream_progress.customers.errors).toEqual([
+ { message: 'rate limited', failure_type: 'transient_error' },
+ ])
+ })
+
it('aggregates multiple stream states and global state into EOF', async () => {
const counter = createRecordCounter()
await collect(
@@ -165,7 +302,6 @@ describe('trackProgress', () => {
const outputs = await collect(
trackProgress({
- interval_ms: 0,
recordCounter: counter,
})(
toAsync([
@@ -210,8 +346,8 @@ describe('trackProgress', () => {
destination: { streams: {}, global: {} },
engine: {
streams: {
- customers: { cumulative_record_count: 1 },
- invoices: { cumulative_record_count: 1 },
+ customers: { cumulative_record_count: 1, status: 'started' },
+ invoices: { cumulative_record_count: 1, status: 'started' },
},
global: {},
},
@@ -239,7 +375,6 @@ describe('trackProgress', () => {
const outputs = await collect(
trackProgress({
- interval_ms: 0,
initial_state: {
source: {
streams: {
@@ -294,10 +429,9 @@ describe('trackProgress', () => {
},
engine: {
streams: {
- customers: { cumulative_record_count: 6, note: 'keep-me' },
+ customers: { cumulative_record_count: 6, note: 'keep-me', status: 'started' },
invoices: { cumulative_record_count: 2, untouched: true },
},
- global: { sync_id: 'prev' },
},
},
},
@@ -322,30 +456,43 @@ describe('trackProgress', () => {
const outputs = await collect(
trackProgress({
- interval_ms: 0,
initial_state: initialState,
recordCounter: createRecordCounter(),
})(toAsync([{ type: 'eof', eof: { reason: 'complete' } }]))
)
const eof = outputs.find((m) => m.type === 'eof')
+ // Engine global is enriched with cumulative totals, so partial match
expect(eof).toMatchObject({
type: 'eof',
- eof: { state: initialState },
+ eof: {
+ state: {
+ source: initialState.source,
+ destination: initialState.destination,
+ engine: {
+ streams: initialState.engine.streams,
+ },
+ },
+ },
})
})
- it('omits state from EOF when no source_state messages were emitted', async () => {
+ it('includes engine global cumulative stats even when no source_state messages were emitted', async () => {
const counter = createRecordCounter()
const outputs = await collect(
trackProgress({
- interval_ms: 0,
recordCounter: counter,
})(toAsync([{ type: 'eof', eof: { reason: 'complete' } }]))
)
- const eof = outputs.find((m) => m.type === 'eof')
+ const eof = outputs.find((m) => m.type === 'eof') as any
expect(eof).toBeDefined()
- expect((eof as any).eof.state).toBeUndefined()
+ // Engine global always has cumulative stats (zeroed out for fresh runs)
+ expect(eof.eof.state.engine.global).toMatchObject({
+ cumulative_record_count: 0,
+ cumulative_request_count: 0,
+ })
+ // No source or destination state since no messages were emitted
+ expect(Object.keys(eof.eof.state.source.streams)).toHaveLength(0)
})
})
diff --git a/apps/engine/src/lib/progress.ts b/apps/engine/src/lib/progress.ts
index b316323c..9d764216 100644
--- a/apps/engine/src/lib/progress.ts
+++ b/apps/engine/src/lib/progress.ts
@@ -3,9 +3,10 @@ import type {
SyncState,
SyncOutput,
TraceStreamStatus,
- TraceProgress,
+ TraceGlobalProgress,
EofPayload,
EofStreamProgress,
+ ConfiguredCatalog,
} from '@stripe/sync-protocol'
import { emptySyncState } from '@stripe/sync-protocol'
@@ -37,15 +38,14 @@ export function createRecordCounter() {
}
export function trackProgress(opts: {
- interval_ms?: number
initial_state?: SyncState
- initial_cumulative_counts?: Record
+ /** Configured catalog — emitted as the first message so the UI knows all streams upfront. */
+ catalog?: ConfiguredCatalog
/** Shared counter fed by createRecordCounter().tap() on the data path. */
recordCounter?: ReturnType
}): (msgs: AsyncIterable) => AsyncIterable {
- const intervalMs = opts.interval_ms ?? 2000
-
return async function* (messages) {
+ // Initialize cumulative counts from engine state
const initialCumulativeCounts = opts.initial_state?.engine?.streams
? Object.fromEntries(
Object.entries(opts.initial_state.engine.streams)
@@ -55,33 +55,59 @@ export function trackProgress(opts: {
])
.filter(([, v]) => typeof v === 'number' && v >= 0)
)
- : (opts.initial_cumulative_counts ?? {})
+ : {}
const cumulativeRecordCount = new Map(Object.entries(initialCumulativeCounts))
- const prevSnapshotCounts = new Map()
+
+ // Initialize cumulative global stats from engine state
+ const engineGlobal = (opts.initial_state?.engine?.global ?? {}) as Record<string, unknown>
+ let cumulativeGlobalRecordCount = (engineGlobal.cumulative_record_count as number) ?? 0
+ let cumulativeRequestCount = (engineGlobal.cumulative_request_count as number) ?? 0
+ let cumulativeElapsedMs = (engineGlobal.cumulative_elapsed_ms as number) ?? 0
+
let stateCheckpointCount = 0
const streamStatus = new Map()
+ const lastEmittedStatus = new Map<string, Status>()
// Restore stream statuses: engine state first, then source state overrides
// (source state is authoritative — streams the source skips emit no messages)
if (opts.initial_state?.engine?.streams) {
for (const [stream, data] of Object.entries(opts.initial_state.engine.streams)) {
const status = (data as { status?: Status })?.status
- if (status) streamStatus.set(stream, status)
+ if (status === 'started' || status === 'complete') {
+ streamStatus.set(stream, status)
+ }
}
}
if (opts.initial_state?.source?.streams) {
for (const [stream, data] of Object.entries(opts.initial_state.source.streams)) {
- const status = (data as { status?: string })?.status
- if (status) streamStatus.set(stream, status as Status)
+ const srcStatus = (data as { status?: string })?.status
+ // Map source error statuses to lifecycle status for the engine
+ if (srcStatus === 'complete') {
+ streamStatus.set(stream, 'complete')
+ } else if (
+ srcStatus === 'pending' ||
+ srcStatus === 'transient_error' ||
+ srcStatus === 'system_error' ||
+ srcStatus === 'config_error' ||
+ srcStatus === 'auth_error'
+ ) {
+ // Source hasn't completed — keep as started (or don't set if not started yet)
+ if (streamStatus.has(stream)) {
+ // Engine state already recorded a status for this stream — keep it as-is;
+ // source error statuses never override an engine-recorded status.
+ } else if (srcStatus !== 'pending') {
+ streamStatus.set(stream, 'started')
+ }
+ }
}
}
+
const streamErrors = new Map()
const hadInitialState = opts.initial_state != null
const finalState: SyncState = structuredClone(opts.initial_state ?? emptySyncState())
const startedAt = Date.now()
let lastWindowAt = startedAt
- let lastEmitAt = startedAt
let prevWindowTotal = 0
function elapsedMs() {
@@ -103,10 +129,6 @@ export function trackProgress(opts: {
return sum
}
- function windowRecordCount(stream: string): number {
- return runRecordCount(stream) - (prevSnapshotCounts.get(stream) ?? 0)
- }
-
function totalWindowRecords(): number {
return totalRunRecords() - prevWindowTotal
}
@@ -122,12 +144,8 @@ export function trackProgress(opts: {
}
function snapshotWindow() {
- if (opts.recordCounter) {
- for (const [k, v] of opts.recordCounter.counts) prevSnapshotCounts.set(k, v)
- }
prevWindowTotal = totalRunRecords()
lastWindowAt = Date.now()
- lastEmitAt = Date.now()
}
function buildStreamStatus(stream: string): SyncOutput | undefined {
@@ -144,8 +162,6 @@ export function trackProgress(opts: {
status,
cumulative_record_count: cumulative,
run_record_count: run,
- window_record_count: windowRecordCount(stream),
- records_per_second: run / elapsedSec(),
},
},
_emitted_by: 'engine',
@@ -155,31 +171,49 @@ export function trackProgress(opts: {
function buildGlobalProgress(): SyncOutput {
const windowDuration = Math.max((Date.now() - lastWindowAt) / 1000, 0.001)
- const progress: TraceProgress = {
+ const runRecords = totalRunRecords()
+ const globalProgress: TraceGlobalProgress = {
elapsed_ms: elapsedMs(),
- run_record_count: totalRunRecords(),
- rows_per_second: totalRunRecords() / elapsedSec(),
- window_rows_per_second: totalWindowRecords() / windowDuration,
+ run_record_count: runRecords,
+ cumulative_record_count: cumulativeGlobalRecordCount + runRecords,
+ records_per_second: runRecords / elapsedSec(),
+ window_records_per_second: totalWindowRecords() / windowDuration,
state_checkpoint_count: stateCheckpointCount,
+ cumulative_request_count: cumulativeRequestCount,
+ cumulative_elapsed_ms: cumulativeElapsedMs + elapsedMs(),
}
return {
type: 'trace',
- trace: { trace_type: 'progress' as const, progress },
+ trace: { trace_type: 'global_progress' as const, global_progress: globalProgress },
_emitted_by: 'engine',
_ts: new Date().toISOString(),
} as SyncOutput
}
- function buildStreamProgress(stream: string): EofStreamProgress | undefined {
+ /** Emit stream_status + global_progress pair if status changed. */
+ function* emitIfStatusChanged(stream: string): Iterable<SyncOutput> {
+ const current = streamStatus.get(stream)
+ if (!current) return
+ if (lastEmittedStatus.get(stream) === current) return
+
+ lastEmittedStatus.set(stream, current)
+ const ss = buildStreamStatus(stream)
+ if (ss) yield ss
+ yield buildGlobalProgress()
+ snapshotWindow()
+ }
+
+ function buildStreamProgress(stream: string, finalEof = false): EofStreamProgress | undefined {
const status = streamStatus.get(stream)
if (!status) return undefined
const run = runRecordCount(stream)
const cumulative = (cumulativeRecordCount.get(stream) ?? 0) + run
+ // NOTE(review): promotes 'started' → 'complete' at EOF — confirm this is safe for non-'complete' reasons (time_limit/state_limit/aborted), where a stream may genuinely be unfinished
+ const resolvedStatus = finalEof && status === 'started' ? 'complete' : status
return {
- status,
+ status: resolvedStatus,
cumulative_record_count: cumulative,
run_record_count: run,
- records_per_second: run / elapsedSec(),
errors: streamErrors.has(stream) ? streamErrors.get(stream) : undefined,
}
}
@@ -199,6 +233,15 @@ export function trackProgress(opts: {
}
}
+ // Update engine global state with cumulative totals
+ const runRecords = totalRunRecords()
+ finalState.engine.global = {
+ ...finalState.engine.global,
+ cumulative_record_count: cumulativeGlobalRecordCount + runRecords,
+ cumulative_request_count: cumulativeRequestCount,
+ cumulative_elapsed_ms: cumulativeElapsedMs + elapsedMs(),
+ }
+
const hasAnyState =
Object.keys(finalState.source.streams).length > 0 ||
Object.keys(finalState.source.global).length > 0 ||
@@ -215,18 +258,22 @@ export function trackProgress(opts: {
const streams = allStreams()
const streamProgressMap: Record = {}
for (const s of streams) {
- const sp = buildStreamProgress(s)
+ const sp = buildStreamProgress(s, true)
if (sp) streamProgressMap[s] = sp
}
+ const runRecords = totalRunRecords()
const eof: EofPayload = {
reason,
state: buildAccumulatedState(),
global_progress: {
elapsed_ms: elapsedMs(),
- run_record_count: totalRunRecords(),
- rows_per_second: totalRunRecords() / elapsedSec(),
- window_rows_per_second: totalWindowRecords() / windowDuration,
+ run_record_count: runRecords,
+ cumulative_record_count: cumulativeGlobalRecordCount + runRecords,
+ records_per_second: runRecords / elapsedSec(),
+ window_records_per_second: totalWindowRecords() / windowDuration,
state_checkpoint_count: stateCheckpointCount,
+ cumulative_request_count: cumulativeRequestCount,
+ cumulative_elapsed_ms: cumulativeElapsedMs + elapsedMs(),
},
stream_progress: Object.keys(streamProgressMap).length > 0 ? streamProgressMap : undefined,
}
@@ -238,16 +285,14 @@ export function trackProgress(opts: {
} as SyncOutput
}
- function* maybeEmitProgress(): Iterable {
- const now = Date.now()
- if (now - lastEmitAt < intervalMs) return
-
- for (const stream of allStreams()) {
- const ss = buildStreamStatus(stream)
- if (ss) yield ss
- }
- yield buildGlobalProgress()
- snapshotWindow()
+ // Emit catalog as first message so the UI knows all streams upfront
+ if (opts.catalog) {
+ yield {
+ type: 'catalog',
+ catalog: { streams: opts.catalog.streams.map((cs) => cs.stream) },
+ _emitted_by: 'engine',
+ _ts: new Date().toISOString(),
+ } as SyncOutput
}
for await (const msg of messages) {
@@ -256,28 +301,35 @@ export function trackProgress(opts: {
if (msg.source_state.state_type === 'stream') {
const stream = msg.source_state.stream
finalState.source.streams[stream] = msg.source_state.data
- if (!streamStatus.has(stream)) streamStatus.set(stream, 'started')
+ if (!streamStatus.has(stream)) {
+ streamStatus.set(stream, 'started')
+ yield* emitIfStatusChanged(stream)
+ }
} else if (msg.source_state.state_type === 'global') {
finalState.source.global = msg.source_state.data as Record
}
} else if (msg.type === 'trace') {
if (msg.trace.trace_type === 'stream_status') {
const ss = msg.trace.stream_status
- streamStatus.set(ss.stream, ss.status)
+ const newStatus = ss.status as Status
+ // Only accept valid lifecycle statuses
+ if (newStatus === 'started' || newStatus === 'complete') {
+ streamStatus.set(ss.stream, newStatus)
+ yield* emitIfStatusChanged(ss.stream)
+ }
} else if (msg.trace.trace_type === 'error') {
const err = msg.trace.error
if (err.stream) {
const errs = streamErrors.get(err.stream) ?? []
errs.push({ message: err.message, failure_type: err.failure_type as FailureType })
streamErrors.set(err.stream, errs)
- if (err.failure_type && streamStatus.get(err.stream) !== 'complete') {
- streamStatus.set(err.stream, err.failure_type as Status)
- }
+ // Errors don't change lifecycle status — they're orthogonal
}
}
}
if (msg.type === 'eof') {
+ // Emit final stream_status + global_progress for all streams
for (const stream of allStreams()) {
const ss = buildStreamStatus(stream)
if (ss) yield ss
@@ -287,8 +339,12 @@ export function trackProgress(opts: {
return
}
+ // Suppress upstream stream_status traces — the engine re-emits enriched versions
+ if (msg.type === 'trace' && msg.trace.trace_type === 'stream_status') {
+ continue
+ }
+
yield msg
- yield* maybeEmitProgress()
}
}
}
diff --git a/apps/engine/src/lib/sync-progress-state.ts b/apps/engine/src/lib/sync-progress-state.ts
new file mode 100644
index 00000000..8d35b8aa
--- /dev/null
+++ b/apps/engine/src/lib/sync-progress-state.ts
@@ -0,0 +1,226 @@
+import type {
+ SyncOutput,
+ EofPayload,
+ EofStreamProgress,
+ TraceGlobalProgress,
+} from '@stripe/sync-protocol'
+
+// ── Reducer: SyncOutput messages → EofPayload ────────────────────
+//
+// At any point during a sync, the accumulated state is a valid EofPayload.
+// The final EOF message from the engine replaces it wholesale.
+// Display is purely a function of (EofPayload, catalog).
+
+export interface SyncDisplayState {
+ catalog: string[]
+ eof: EofPayload
+}
+
+export function createSyncDisplayState(): {
+ state: SyncDisplayState
+ /** Returns true if the message changed the display state. */
+ update: (msg: SyncOutput) => boolean
+} {
+ const state: SyncDisplayState = {
+ catalog: [],
+ eof: { reason: 'complete' },
+ }
+
+ function ensureStream(name: string): EofStreamProgress {
+ if (!state.eof.stream_progress) state.eof.stream_progress = {}
+ if (!state.eof.stream_progress[name]) {
+ state.eof.stream_progress[name] = {
+ status: 'started',
+ cumulative_record_count: 0,
+ run_record_count: 0,
+ }
+ }
+ return state.eof.stream_progress[name]
+ }
+
+ function update(msg: SyncOutput): boolean {
+ if (msg.type === 'catalog') {
+ state.catalog = (msg.catalog as { streams: Array<{ name: string }> }).streams.map(
+ (s) => s.name
+ )
+ return true
+ }
+
+ if (msg.type === 'trace') {
+ const t = msg.trace
+ if (t.trace_type === 'stream_status') {
+ const ss = t.stream_status
+ const sp = ensureStream(ss.stream)
+ sp.status = ss.status as 'started' | 'complete'
+ if (ss.cumulative_record_count != null)
+ sp.cumulative_record_count = ss.cumulative_record_count
+ if (ss.run_record_count != null) sp.run_record_count = ss.run_record_count
+ return true
+ }
+ if (t.trace_type === 'global_progress') {
+ state.eof.global_progress = (
+ t as { trace_type: 'global_progress'; global_progress: TraceGlobalProgress }
+ ).global_progress
+ return false // rendered with preceding stream_status
+ }
+ if (t.trace_type === 'error') {
+ const err = (
+ t as {
+ trace_type: 'error'
+ error: { message: string; failure_type?: string; stream?: string }
+ }
+ ).error
+ if (err.stream) {
+ const sp = ensureStream(err.stream)
+ if (!sp.errors) sp.errors = []
+ sp.errors.push({
+ message: err.message,
+ failure_type: err.failure_type as
+ | 'config_error'
+ | 'system_error'
+ | 'transient_error'
+ | 'auth_error'
+ | undefined,
+ })
+ }
+ return false
+ }
+ }
+
+ if (msg.type === 'eof') {
+ // The engine's EOF is authoritative — replace everything
+ state.eof = msg.eof
+ return true
+ }
+
+ return false
+ }
+
+ return { state, update }
+}
+
+// ── Renderer: (EofPayload, catalog) → string[] ──────────────────
+
+const ERROR_EMOJI: Record<string, string> = {
+ transient_error: '⚠️',
+ system_error: '❌',
+ config_error: '⚙️',
+ auth_error: '🔒',
+}
+
+const REASON_EMOJI: Record<string, string> = {
+ complete: '✅',
+ time_limit: '⏱️',
+ state_limit: '📦',
+ error: '❌',
+ aborted: '🛑',
+}
+
+function fmt(n: number): string {
+ return n.toLocaleString('en-US')
+}
+
+function fmtDuration(ms: number): string {
+ if (ms < 1000) return `${ms}ms`
+ if (ms < 60_000) return `${(ms / 1000).toFixed(1)}s`
+ const mins = Math.floor(ms / 60_000)
+ const secs = Math.round((ms % 60_000) / 1000)
+ if (mins < 60) return secs > 0 ? `${mins}m ${secs}s` : `${mins}m`
+ const hrs = Math.floor(mins / 60)
+ const rm = mins % 60
+ return rm > 0 ? `${hrs}h ${rm}m` : `${hrs}h`
+}
+
+/**
+ * Render a sync progress table from an EofPayload and optional catalog.
+ * Pure function — no side effects. Returns an array of lines.
+ *
+ * @param eof - The current (or final) EOF payload
+ * @param catalog - Stream names from the catalog (to derive pending streams)
+ * @param final - True when rendering after the actual EOF message (changes header)
+ */
+export function renderSyncProgress(
+ eof: EofPayload,
+ catalog: string[] = [],
+ final = false
+): string[] {
+ const lines: string[] = []
+ const gp = eof.global_progress
+
+ // Header
+ if (final) {
+ lines.push(`${REASON_EMOJI[eof.reason] ?? '❓'} Sync ${eof.reason}`)
+ } else {
+ lines.push('🔄 Syncing...')
+ }
+
+ if (gp) {
+ const cumRecords = gp.cumulative_record_count ?? gp.run_record_count
+ const cumElapsed = gp.cumulative_elapsed_ms ?? gp.elapsed_ms
+ lines.push(
+ ` Total: ${fmt(cumRecords)} records | ${fmt(gp.cumulative_request_count ?? 0)} requests | ${fmtDuration(cumElapsed)}`
+ )
+ lines.push(
+ ` This run: +${fmt(gp.run_record_count)} records | ${fmt(gp.request_count ?? 0)} requests | ${fmtDuration(gp.elapsed_ms)} | ${gp.records_per_second.toFixed(1)} records/s`
+ )
+ }
+
+ // Group streams by status
+ const sp = eof.stream_progress ?? {}
+ const complete: [string, EofStreamProgress][] = []
+ const started: [string, EofStreamProgress][] = []
+ const pending: string[] = []
+
+ const known = new Set(Object.keys(sp))
+ for (const [name, info] of Object.entries(sp)) {
+ if (info.status === 'complete') complete.push([name, info])
+ else started.push([name, info])
+ }
+ for (const name of catalog) {
+ if (!known.has(name)) pending.push(name)
+ }
+
+ complete.sort((a, b) => b[1].cumulative_record_count - a[1].cumulative_record_count)
+ started.sort((a, b) => b[1].cumulative_record_count - a[1].cumulative_record_count)
+
+ const allNames = [...complete.map((c) => c[0]), ...started.map((s) => s[0]), ...pending]
+ const maxName = Math.max(...allNames.map((n) => n.length), 10)
+
+ function streamLine(name: string, info: EofStreamProgress) {
+ const cum = info.cumulative_record_count
+ const run = info.run_record_count
+ const countStr = cum > 0 ? `${fmt(cum).padStart(10)}${run > 0 ? ` (+${fmt(run)})` : ''}` : ''
+ lines.push(` ${name.padEnd(maxName)} ${countStr}`)
+ for (const err of info.errors ?? []) {
+ const emoji = ERROR_EMOJI[err.failure_type ?? 'system_error'] ?? '❌'
+ lines.push(` ${emoji} ${err.message}${err.failure_type ? ` (${err.failure_type})` : ''}`)
+ }
+ }
+
+ lines.push('')
+ if (complete.length > 0) {
+ lines.push(` ✅ Complete (${complete.length}):`)
+ for (const [name, info] of complete) streamLine(name, info)
+ }
+ if (started.length > 0) {
+ lines.push(` 🔄 Started (${started.length}):`)
+ for (const [name, info] of started) streamLine(name, info)
+ }
+ if (pending.length > 0) {
+ lines.push(` ⏳ Pending (${pending.length}):`)
+ lines.push(` ${pending.join(', ')}`)
+ }
+
+ // Summary
+ const errCount = Object.values(sp).filter((i) => (i.errors?.length ?? 0) > 0).length
+ lines.push('')
+ const parts: string[] = []
+ if (complete.length) parts.push(`${complete.length} complete`)
+ if (started.length) parts.push(`${started.length} started`)
+ if (pending.length) parts.push(`${pending.length} pending`)
+ if (errCount) parts.push(`${errCount} with errors`)
+ parts.push(`+${fmt(gp?.run_record_count ?? 0)} records this run`)
+ lines.push(` 📊 ${parts.join(' | ')}`)
+
+ return lines
+}
diff --git a/apps/engine/tsconfig.json b/apps/engine/tsconfig.json
index 2481fe54..a7aaf861 100644
--- a/apps/engine/tsconfig.json
+++ b/apps/engine/tsconfig.json
@@ -2,7 +2,8 @@
"extends": "../../tsconfig.base.json",
"compilerOptions": {
"outDir": "dist",
- "rootDir": "src"
+ "rootDir": "src",
+ "jsx": "react-jsx"
},
"include": ["src/**/*"],
"exclude": ["src/**/*.test.ts", "src/**/__tests__/**"]
diff --git a/apps/service/src/__generated__/openapi.d.ts b/apps/service/src/__generated__/openapi.d.ts
index 9e00a197..6c1e87c4 100644
--- a/apps/service/src/__generated__/openapi.d.ts
+++ b/apps/service/src/__generated__/openapi.d.ts
@@ -317,35 +317,39 @@ export interface operations {
elapsed_ms?: number;
/** @description Full sync state at the end of the run. source: accumulated from source_state messages; engine: updated cumulative record counts; destination: reserved. Consumers can persist this directly and pass it back on resume. */
state?: components["schemas"]["SyncState"];
- /** @description Final global aggregates. Same shape as trace/progress. */
+ /** @description Final global aggregates. Same shape as trace/global_progress. */
global_progress?: {
/** @description Wall-clock milliseconds since the sync run started. */
elapsed_ms: number;
/** @description Total records synced across all streams in this run. */
run_record_count: number;
+ /** @description Total records synced across all streams across all runs. */
+ cumulative_record_count?: number;
/** @description Overall throughput for the entire run: run_record_count / elapsed seconds. */
- rows_per_second: number;
+ records_per_second: number;
/** @description Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval. */
- window_rows_per_second: number;
+ window_records_per_second: number;
/** @description Total source_state messages observed so far in this sync run. */
state_checkpoint_count: number;
+ /** @description Total API requests made by the source in this run. */
+ request_count?: number;
+ /** @description Total API requests across all runs. */
+ cumulative_request_count?: number;
+ /** @description Total wall-clock time across all runs. */
+ cumulative_elapsed_ms?: number;
};
/** @description Per-stream end-of-sync summary. Errors only appear here, not in stream_status messages. */
stream_progress?: {
[key: string]: {
/**
- * @description Final stream status.
+ * @description Lifecycle status. Errors are orthogonal — a stream can be complete with errors.
* @enum {string}
*/
- status: "started" | "running" | "complete" | "transient_error" | "system_error" | "config_error" | "auth_error";
+ status: "started" | "complete";
/** @description Cumulative records synced for this stream across all runs. */
cumulative_record_count: number;
/** @description Records synced in this run. */
run_record_count: number;
- /** @description Average records/sec for this stream over the run. */
- records_per_second?: number;
- /** @description Average requests/sec for this stream over the run. */
- requests_per_second?: number;
/** @description All accumulated errors for this stream during this run. */
errors?: {
/** @description Human-readable error description. */
@@ -445,35 +449,39 @@ export interface operations {
elapsed_ms?: number;
/** @description Full sync state at the end of the run. source: accumulated from source_state messages; engine: updated cumulative record counts; destination: reserved. Consumers can persist this directly and pass it back on resume. */
state?: components["schemas"]["SyncState"];
- /** @description Final global aggregates. Same shape as trace/progress. */
+ /** @description Final global aggregates. Same shape as trace/global_progress. */
global_progress?: {
/** @description Wall-clock milliseconds since the sync run started. */
elapsed_ms: number;
/** @description Total records synced across all streams in this run. */
run_record_count: number;
+ /** @description Total records synced across all streams across all runs. */
+ cumulative_record_count?: number;
/** @description Overall throughput for the entire run: run_record_count / elapsed seconds. */
- rows_per_second: number;
+ records_per_second: number;
/** @description Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval. */
- window_rows_per_second: number;
+ window_records_per_second: number;
/** @description Total source_state messages observed so far in this sync run. */
state_checkpoint_count: number;
+ /** @description Total API requests made by the source in this run. */
+ request_count?: number;
+ /** @description Total API requests across all runs. */
+ cumulative_request_count?: number;
+ /** @description Total wall-clock time across all runs. */
+ cumulative_elapsed_ms?: number;
};
/** @description Per-stream end-of-sync summary. Errors only appear here, not in stream_status messages. */
stream_progress?: {
[key: string]: {
/**
- * @description Final stream status.
+ * @description Lifecycle status. Errors are orthogonal — a stream can be complete with errors.
* @enum {string}
*/
- status: "started" | "running" | "complete" | "transient_error" | "system_error" | "config_error" | "auth_error";
+ status: "started" | "complete";
/** @description Cumulative records synced for this stream across all runs. */
cumulative_record_count: number;
/** @description Records synced in this run. */
run_record_count: number;
- /** @description Average records/sec for this stream over the run. */
- records_per_second?: number;
- /** @description Average requests/sec for this stream over the run. */
- requests_per_second?: number;
/** @description All accumulated errors for this stream during this run. */
errors?: {
/** @description Human-readable error description. */
@@ -565,35 +573,39 @@ export interface operations {
elapsed_ms?: number;
/** @description Full sync state at the end of the run. source: accumulated from source_state messages; engine: updated cumulative record counts; destination: reserved. Consumers can persist this directly and pass it back on resume. */
state?: components["schemas"]["SyncState"];
- /** @description Final global aggregates. Same shape as trace/progress. */
+ /** @description Final global aggregates. Same shape as trace/global_progress. */
global_progress?: {
/** @description Wall-clock milliseconds since the sync run started. */
elapsed_ms: number;
/** @description Total records synced across all streams in this run. */
run_record_count: number;
+ /** @description Total records synced across all streams across all runs. */
+ cumulative_record_count?: number;
/** @description Overall throughput for the entire run: run_record_count / elapsed seconds. */
- rows_per_second: number;
+ records_per_second: number;
/** @description Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval. */
- window_rows_per_second: number;
+ window_records_per_second: number;
/** @description Total source_state messages observed so far in this sync run. */
state_checkpoint_count: number;
+ /** @description Total API requests made by the source in this run. */
+ request_count?: number;
+ /** @description Total API requests across all runs. */
+ cumulative_request_count?: number;
+ /** @description Total wall-clock time across all runs. */
+ cumulative_elapsed_ms?: number;
};
/** @description Per-stream end-of-sync summary. Errors only appear here, not in stream_status messages. */
stream_progress?: {
[key: string]: {
/**
- * @description Final stream status.
+ * @description Lifecycle status. Errors are orthogonal — a stream can be complete with errors.
* @enum {string}
*/
- status: "started" | "running" | "complete" | "transient_error" | "system_error" | "config_error" | "auth_error";
+ status: "started" | "complete";
/** @description Cumulative records synced for this stream across all runs. */
cumulative_record_count: number;
/** @description Records synced in this run. */
run_record_count: number;
- /** @description Average records/sec for this stream over the run. */
- records_per_second?: number;
- /** @description Average requests/sec for this stream over the run. */
- requests_per_second?: number;
/** @description All accumulated errors for this stream during this run. */
errors?: {
/** @description Human-readable error description. */
@@ -746,35 +758,39 @@ export interface operations {
elapsed_ms?: number;
/** @description Full sync state at the end of the run. source: accumulated from source_state messages; engine: updated cumulative record counts; destination: reserved. Consumers can persist this directly and pass it back on resume. */
state?: components["schemas"]["SyncState"];
- /** @description Final global aggregates. Same shape as trace/progress. */
+ /** @description Final global aggregates. Same shape as trace/global_progress. */
global_progress?: {
/** @description Wall-clock milliseconds since the sync run started. */
elapsed_ms: number;
/** @description Total records synced across all streams in this run. */
run_record_count: number;
+ /** @description Total records synced across all streams across all runs. */
+ cumulative_record_count?: number;
/** @description Overall throughput for the entire run: run_record_count / elapsed seconds. */
- rows_per_second: number;
+ records_per_second: number;
/** @description Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval. */
- window_rows_per_second: number;
+ window_records_per_second: number;
/** @description Total source_state messages observed so far in this sync run. */
state_checkpoint_count: number;
+ /** @description Total API requests made by the source in this run. */
+ request_count?: number;
+ /** @description Total API requests across all runs. */
+ cumulative_request_count?: number;
+ /** @description Total wall-clock time across all runs. */
+ cumulative_elapsed_ms?: number;
};
/** @description Per-stream end-of-sync summary. Errors only appear here, not in stream_status messages. */
stream_progress?: {
[key: string]: {
/**
- * @description Final stream status.
+ * @description Lifecycle status. Errors are orthogonal — a stream can be complete with errors.
* @enum {string}
*/
- status: "started" | "running" | "complete" | "transient_error" | "system_error" | "config_error" | "auth_error";
+ status: "started" | "complete";
/** @description Cumulative records synced for this stream across all runs. */
cumulative_record_count: number;
/** @description Records synced in this run. */
run_record_count: number;
- /** @description Average records/sec for this stream over the run. */
- records_per_second?: number;
- /** @description Average requests/sec for this stream over the run. */
- requests_per_second?: number;
/** @description All accumulated errors for this stream during this run. */
errors?: {
/** @description Human-readable error description. */
diff --git a/apps/service/src/__generated__/openapi.json b/apps/service/src/__generated__/openapi.json
index fc895615..ce140883 100644
--- a/apps/service/src/__generated__/openapi.json
+++ b/apps/service/src/__generated__/openapi.json
@@ -157,7 +157,7 @@
"$ref": "#/components/schemas/SyncState"
},
"global_progress": {
- "description": "Final global aggregates. Same shape as trace/progress.",
+ "description": "Final global aggregates. Same shape as trace/global_progress.",
"type": "object",
"properties": {
"elapsed_ms": {
@@ -172,11 +172,17 @@
"maximum": 9007199254740991,
"description": "Total records synced across all streams in this run."
},
- "rows_per_second": {
+ "cumulative_record_count": {
+ "description": "Total records synced across all streams across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "records_per_second": {
"type": "number",
"description": "Overall throughput for the entire run: run_record_count / elapsed seconds."
},
- "window_rows_per_second": {
+ "window_records_per_second": {
"type": "number",
"description": "Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval."
},
@@ -185,13 +191,31 @@
"minimum": -9007199254740991,
"maximum": 9007199254740991,
"description": "Total source_state messages observed so far in this sync run."
+ },
+ "request_count": {
+ "description": "Total API requests made by the source in this run.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_request_count": {
+ "description": "Total API requests across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_elapsed_ms": {
+ "description": "Total wall-clock time across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
}
},
"required": [
"elapsed_ms",
"run_record_count",
- "rows_per_second",
- "window_rows_per_second",
+ "records_per_second",
+ "window_records_per_second",
"state_checkpoint_count"
],
"additionalProperties": false
@@ -209,14 +233,9 @@
"type": "string",
"enum": [
"started",
- "running",
- "complete",
- "transient_error",
- "system_error",
- "config_error",
- "auth_error"
+ "complete"
],
- "description": "Final stream status."
+ "description": "Lifecycle status. Errors are orthogonal — a stream can be complete with errors."
},
"cumulative_record_count": {
"type": "integer",
@@ -230,14 +249,6 @@
"maximum": 9007199254740991,
"description": "Records synced in this run."
},
- "records_per_second": {
- "description": "Average records/sec for this stream over the run.",
- "type": "number"
- },
- "requests_per_second": {
- "description": "Average requests/sec for this stream over the run.",
- "type": "number"
- },
"errors": {
"description": "All accumulated errors for this stream during this run.",
"type": "array",
@@ -468,7 +479,7 @@
"$ref": "#/components/schemas/SyncState"
},
"global_progress": {
- "description": "Final global aggregates. Same shape as trace/progress.",
+ "description": "Final global aggregates. Same shape as trace/global_progress.",
"type": "object",
"properties": {
"elapsed_ms": {
@@ -483,11 +494,17 @@
"maximum": 9007199254740991,
"description": "Total records synced across all streams in this run."
},
- "rows_per_second": {
+ "cumulative_record_count": {
+ "description": "Total records synced across all streams across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "records_per_second": {
"type": "number",
"description": "Overall throughput for the entire run: run_record_count / elapsed seconds."
},
- "window_rows_per_second": {
+ "window_records_per_second": {
"type": "number",
"description": "Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval."
},
@@ -496,13 +513,31 @@
"minimum": -9007199254740991,
"maximum": 9007199254740991,
"description": "Total source_state messages observed so far in this sync run."
+ },
+ "request_count": {
+ "description": "Total API requests made by the source in this run.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_request_count": {
+ "description": "Total API requests across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_elapsed_ms": {
+ "description": "Total wall-clock time across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
}
},
"required": [
"elapsed_ms",
"run_record_count",
- "rows_per_second",
- "window_rows_per_second",
+ "records_per_second",
+ "window_records_per_second",
"state_checkpoint_count"
],
"additionalProperties": false
@@ -520,14 +555,9 @@
"type": "string",
"enum": [
"started",
- "running",
- "complete",
- "transient_error",
- "system_error",
- "config_error",
- "auth_error"
+ "complete"
],
- "description": "Final stream status."
+ "description": "Lifecycle status. Errors are orthogonal — a stream can be complete with errors."
},
"cumulative_record_count": {
"type": "integer",
@@ -541,14 +571,6 @@
"maximum": 9007199254740991,
"description": "Records synced in this run."
},
- "records_per_second": {
- "description": "Average records/sec for this stream over the run.",
- "type": "number"
- },
- "requests_per_second": {
- "description": "Average requests/sec for this stream over the run.",
- "type": "number"
- },
"errors": {
"description": "All accumulated errors for this stream during this run.",
"type": "array",
@@ -747,7 +769,7 @@
"$ref": "#/components/schemas/SyncState"
},
"global_progress": {
- "description": "Final global aggregates. Same shape as trace/progress.",
+ "description": "Final global aggregates. Same shape as trace/global_progress.",
"type": "object",
"properties": {
"elapsed_ms": {
@@ -762,11 +784,17 @@
"maximum": 9007199254740991,
"description": "Total records synced across all streams in this run."
},
- "rows_per_second": {
+ "cumulative_record_count": {
+ "description": "Total records synced across all streams across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "records_per_second": {
"type": "number",
"description": "Overall throughput for the entire run: run_record_count / elapsed seconds."
},
- "window_rows_per_second": {
+ "window_records_per_second": {
"type": "number",
"description": "Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval."
},
@@ -775,13 +803,31 @@
"minimum": -9007199254740991,
"maximum": 9007199254740991,
"description": "Total source_state messages observed so far in this sync run."
+ },
+ "request_count": {
+ "description": "Total API requests made by the source in this run.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_request_count": {
+ "description": "Total API requests across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_elapsed_ms": {
+ "description": "Total wall-clock time across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
}
},
"required": [
"elapsed_ms",
"run_record_count",
- "rows_per_second",
- "window_rows_per_second",
+ "records_per_second",
+ "window_records_per_second",
"state_checkpoint_count"
],
"additionalProperties": false
@@ -799,14 +845,9 @@
"type": "string",
"enum": [
"started",
- "running",
- "complete",
- "transient_error",
- "system_error",
- "config_error",
- "auth_error"
+ "complete"
],
- "description": "Final stream status."
+ "description": "Lifecycle status. Errors are orthogonal — a stream can be complete with errors."
},
"cumulative_record_count": {
"type": "integer",
@@ -820,14 +861,6 @@
"maximum": 9007199254740991,
"description": "Records synced in this run."
},
- "records_per_second": {
- "description": "Average records/sec for this stream over the run.",
- "type": "number"
- },
- "requests_per_second": {
- "description": "Average requests/sec for this stream over the run.",
- "type": "number"
- },
"errors": {
"description": "All accumulated errors for this stream during this run.",
"type": "array",
@@ -1080,7 +1113,7 @@
"$ref": "#/components/schemas/SyncState"
},
"global_progress": {
- "description": "Final global aggregates. Same shape as trace/progress.",
+ "description": "Final global aggregates. Same shape as trace/global_progress.",
"type": "object",
"properties": {
"elapsed_ms": {
@@ -1095,11 +1128,17 @@
"maximum": 9007199254740991,
"description": "Total records synced across all streams in this run."
},
- "rows_per_second": {
+ "cumulative_record_count": {
+ "description": "Total records synced across all streams across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "records_per_second": {
"type": "number",
"description": "Overall throughput for the entire run: run_record_count / elapsed seconds."
},
- "window_rows_per_second": {
+ "window_records_per_second": {
"type": "number",
"description": "Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval."
},
@@ -1108,13 +1147,31 @@
"minimum": -9007199254740991,
"maximum": 9007199254740991,
"description": "Total source_state messages observed so far in this sync run."
+ },
+ "request_count": {
+ "description": "Total API requests made by the source in this run.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_request_count": {
+ "description": "Total API requests across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_elapsed_ms": {
+ "description": "Total wall-clock time across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
}
},
"required": [
"elapsed_ms",
"run_record_count",
- "rows_per_second",
- "window_rows_per_second",
+ "records_per_second",
+ "window_records_per_second",
"state_checkpoint_count"
],
"additionalProperties": false
@@ -1132,14 +1189,9 @@
"type": "string",
"enum": [
"started",
- "running",
- "complete",
- "transient_error",
- "system_error",
- "config_error",
- "auth_error"
+ "complete"
],
- "description": "Final stream status."
+ "description": "Lifecycle status. Errors are orthogonal — a stream can be complete with errors."
},
"cumulative_record_count": {
"type": "integer",
@@ -1153,14 +1205,6 @@
"maximum": 9007199254740991,
"description": "Records synced in this run."
},
- "records_per_second": {
- "description": "Average records/sec for this stream over the run.",
- "type": "number"
- },
- "requests_per_second": {
- "description": "Average requests/sec for this stream over the run.",
- "type": "number"
- },
"errors": {
"description": "All accumulated errors for this stream during this run.",
"type": "array",
diff --git a/apps/service/src/__tests__/workflow.test.ts b/apps/service/src/__tests__/workflow.test.ts
index c3cd8016..42bbec5d 100644
--- a/apps/service/src/__tests__/workflow.test.ts
+++ b/apps/service/src/__tests__/workflow.test.ts
@@ -17,8 +17,12 @@ const emptyState = {
engine: { streams: {}, global: {} },
}
const noErrors: RunResult = { errors: [], state: emptyState }
-const permanentSyncError: RunResult = {
- errors: [{ message: 'permanent sync failure', failure_type: 'auth_error', stream: 'customers' }],
+const globalPermanentSyncError: RunResult = {
+ errors: [{ message: 'bad API key', failure_type: 'auth_error' }],
+ state: emptyState,
+}
+const streamPermanentSyncError: RunResult = {
+ errors: [{ message: 'stream auth failure', failure_type: 'auth_error', stream: 'customers' }],
state: emptyState,
}
@@ -341,7 +345,7 @@ describe('pipelineWorkflow (unit — stubbed activities)', () => {
})
})
- it('transitions to error instead of ready when reconcile returns permanent sync errors', async () => {
+ it('transitions to error when reconcile returns global permanent sync errors', async () => {
const statusWrites: string[] = []
const worker = await Worker.create({
@@ -354,7 +358,7 @@ describe('pipelineWorkflow (unit — stubbed activities)', () => {
},
pipelineSync: async (_pipelineId: string, opts?) => {
if (opts?.input) return noErrors
- return { ...permanentSyncError, eof: { reason: 'complete' as const } }
+ return { ...globalPermanentSyncError, eof: { reason: 'complete' as const } }
},
}),
})
@@ -375,6 +379,41 @@ describe('pipelineWorkflow (unit — stubbed activities)', () => {
})
})
+ it('does not park workflow when reconcile returns stream-scoped permanent errors', async () => {
+ const statusWrites: string[] = []
+
+ const worker = await Worker.create({
+ connection: testEnv.nativeConnection,
+ taskQueue: 'test-queue-3b-stream-error',
+ workflowsPath,
+ activities: stubActivities({
+ updatePipelineStatus: async (_id: string, status: string) => {
+ statusWrites.push(status)
+ },
+ pipelineSync: async (_pipelineId: string, opts?) => {
+ if (opts?.input) return noErrors
+ return { ...streamPermanentSyncError, eof: { reason: 'complete' as const } }
+ },
+ }),
+ })
+
+ await worker.runUntil(async () => {
+ const handle = await testEnv.client.workflow.start('pipelineWorkflow', {
+ args: [testPipelineId],
+ workflowId: 'test-sync-3b-stream-error',
+ taskQueue: 'test-queue-3b-stream-error',
+ })
+
+ await new Promise((r) => setTimeout(r, 500))
+ await signalDelete(handle)
+ await handle.result()
+
+ // Stream-scoped permanent errors don't park the workflow
+ expect(statusWrites).toContain('ready')
+ expect(statusWrites).not.toContain('error')
+ })
+ })
+
it('retries transient sync activity failures and still reaches ready', async () => {
const statusWrites: string[] = []
let reconcileCalls = 0
diff --git a/apps/service/src/temporal/activities/_shared.ts b/apps/service/src/temporal/activities/_shared.ts
index f00e4b11..20587659 100644
--- a/apps/service/src/temporal/activities/_shared.ts
+++ b/apps/service/src/temporal/activities/_shared.ts
@@ -86,12 +86,9 @@ export async function drainMessages(
count++
if (message.type === 'eof') {
eof = message.eof
- if (eof.stream_progress) {
- const engineStreams: Record<string, { cumulative_record_count: number }> = { ...state.engine.streams }
- for (const [name, sp] of Object.entries(eof.stream_progress)) {
- engineStreams[name] = { cumulative_record_count: sp.cumulative_record_count }
- }
- state = { ...state, engine: { ...state.engine, streams: engineStreams } }
+ // eof.state is authoritative — built by trackProgress with full accumulated state
+ if (eof.state) {
+ state = eof.state
}
} else if (message.type === 'control') {
if (message.control.control_type === 'source_config') {
diff --git a/apps/service/src/temporal/sync-errors.test.ts b/apps/service/src/temporal/sync-errors.test.ts
new file mode 100644
index 00000000..d52ba1c7
--- /dev/null
+++ b/apps/service/src/temporal/sync-errors.test.ts
@@ -0,0 +1,71 @@
+import { describe, it, expect } from 'vitest'
+import { classifySyncErrors } from './sync-errors.js'
+
+describe('classifySyncErrors', () => {
+ it('classifies global permanent errors (no stream field)', () => {
+ const result = classifySyncErrors([{ message: 'bad key', failure_type: 'auth_error' }])
+
+ expect(result.globalPermanent).toHaveLength(1)
+ expect(result.streamPermanent).toHaveLength(0)
+ expect(result.permanent).toHaveLength(1)
+ expect(result.transient).toHaveLength(0)
+ })
+
+ it('classifies stream-scoped permanent errors', () => {
+ const result = classifySyncErrors([
+ { message: 'stream auth fail', failure_type: 'auth_error', stream: 'treasury' },
+ ])
+
+ expect(result.globalPermanent).toHaveLength(0)
+ expect(result.streamPermanent).toHaveLength(1)
+ expect(result.permanent).toHaveLength(1)
+ expect(result.transient).toHaveLength(0)
+ })
+
+ it('classifies transient errors regardless of stream scope', () => {
+ const result = classifySyncErrors([
+ { message: 'rate limit', failure_type: 'transient_error', stream: 'customers' },
+ { message: 'timeout', failure_type: 'transient_error' },
+ ])
+
+ expect(result.transient).toHaveLength(2)
+ expect(result.permanent).toHaveLength(0)
+ expect(result.globalPermanent).toHaveLength(0)
+ expect(result.streamPermanent).toHaveLength(0)
+ })
+
+ it('classifies system_error as permanent', () => {
+ const result = classifySyncErrors([
+ { message: 'deterministic failure', failure_type: 'system_error', stream: 'treasury' },
+ ])
+
+ expect(result.permanent).toHaveLength(1)
+ expect(result.streamPermanent).toHaveLength(1)
+ expect(result.transient).toHaveLength(0)
+ })
+
+ it('separates mixed errors into correct buckets', () => {
+ const result = classifySyncErrors([
+ { message: 'bad key', failure_type: 'auth_error' },
+ { message: 'feature gate', failure_type: 'config_error', stream: 'treasury' },
+ { message: 'rate limit', failure_type: 'transient_error' },
+ ])
+
+ expect(result.globalPermanent).toHaveLength(1)
+ expect(result.globalPermanent[0].message).toBe('bad key')
+
+ expect(result.streamPermanent).toHaveLength(1)
+ expect(result.streamPermanent[0].message).toBe('feature gate')
+
+ expect(result.permanent).toHaveLength(2)
+ expect(result.transient).toHaveLength(1)
+ expect(result.transient[0].message).toBe('rate limit')
+ })
+
+ it('treats unknown failure_type as transient', () => {
+ const result = classifySyncErrors([{ message: 'unknown' }])
+
+ expect(result.transient).toHaveLength(1)
+ expect(result.permanent).toHaveLength(0)
+ })
+})
diff --git a/apps/service/src/temporal/sync-errors.ts b/apps/service/src/temporal/sync-errors.ts
index 7f9a74fe..c9a1baef 100644
--- a/apps/service/src/temporal/sync-errors.ts
+++ b/apps/service/src/temporal/sync-errors.ts
@@ -7,23 +7,34 @@ export type SyncRunError = {
export type ClassifiedSyncErrors = {
transient: SyncRunError[]
permanent: SyncRunError[]
+ /** Permanent errors without a stream scope — bad API key, invalid config. Parks the workflow. */
+ globalPermanent: SyncRunError[]
+ /** Permanent errors scoped to a single stream — feature gate, per-stream auth. Stream is skipped on resume. */
+ streamPermanent: SyncRunError[]
}
-const PERMANENT_FAILURE_TYPES = new Set(['config_error', 'auth_error'])
+const PERMANENT_FAILURE_TYPES = new Set(['config_error', 'auth_error', 'system_error'])
export function classifySyncErrors(errors: SyncRunError[]): ClassifiedSyncErrors {
const transient: SyncRunError[] = []
const permanent: SyncRunError[] = []
+ const globalPermanent: SyncRunError[] = []
+ const streamPermanent: SyncRunError[] = []
for (const error of errors) {
if (PERMANENT_FAILURE_TYPES.has(error.failure_type ?? '')) {
permanent.push(error)
+ if (error.stream) {
+ streamPermanent.push(error)
+ } else {
+ globalPermanent.push(error)
+ }
} else {
transient.push(error)
}
}
- return { transient, permanent }
+ return { transient, permanent, globalPermanent, streamPermanent }
}
export function summarizeSyncErrors(errors: SyncRunError[]): string {
diff --git a/apps/service/src/temporal/workflows/pipeline-workflow.ts b/apps/service/src/temporal/workflows/pipeline-workflow.ts
index 07851631..721d09f5 100644
--- a/apps/service/src/temporal/workflows/pipeline-workflow.ts
+++ b/apps/service/src/temporal/workflows/pipeline-workflow.ts
@@ -130,9 +130,10 @@ export async function pipelineWorkflow(
const events = await waitForLiveEvents()
if (!events) return
- const result = await pipelineSync(pipelineId, { input: events })
+ const result = await pipelineSync(pipelineId, { state: syncState, input: events })
operationCount++
- if (classifySyncErrors(result.errors).permanent.length > 0) {
+ syncState = result.state
+ if (classifySyncErrors(result.errors).globalPermanent.length > 0) {
await markPermanentError()
return
}
@@ -166,7 +167,7 @@ export async function pipelineWorkflow(
})
operationCount++
syncState = result.state
- if (classifySyncErrors(result.errors).permanent.length > 0) {
+ if (classifySyncErrors(result.errors).globalPermanent.length > 0) {
await markPermanentError()
return
}
diff --git a/docs/engine/sync-lifecycle-source-stripe.md b/docs/engine/sync-lifecycle-source-stripe.md
new file mode 100644
index 00000000..e14bb1a5
--- /dev/null
+++ b/docs/engine/sync-lifecycle-source-stripe.md
@@ -0,0 +1,326 @@
+# Sync Lifecycle — Stripe Source
+
+How the Stripe source manages pagination within a `time_range` assigned by the
+engine. For the overall sync lifecycle and protocol, see
+[sync-lifecycle.md](./sync-lifecycle.md).
+
+## Overview
+
+The engine assigns a `time_range` per stream via the configured catalog. The
+Stripe source paginates all records within that range using an n-ary search
+algorithm: start with the full range, paginate, and subdivide if the range
+takes more than one request to complete. No upfront density probing — the source discovers
+the right granularity by doing the work.
+
+## Source State
+
+```ts
+type StripeStreamState = {
+ remaining: Array<{
+ gte: string // ISO 8601 — inclusive lower bound
+ lt: string // ISO 8601 — exclusive upper bound
+ cursor: string | null // Stripe pagination cursor; null = not yet started
+ }>
+}
+```
+
+- `cursor: null` → range planned but first page not yet fetched.
+- `cursor: "cus_abc"` → resume pagination after this object.
+- Range removed from list → complete.
+- `remaining: []` → source is done with the assigned `time_range`.
+
+## Algorithm
+
+### 1. Initialization (no existing state)
+
+The source receives `time_range` from the catalog and has no state. It starts
+with the full range as a single entry:
+
+```
+Engine assigns: time_range { gte: "2018-01-01", lt: "2024-04-17" }
+
+state: {
+ remaining: [
+ { gte: "2018-01-01", lt: "2024-04-17", cursor: null }
+ ]
+}
+```
+
+### 2. Pagination
+
+The source picks a range from `remaining` and paginates it:
+
+1. Call the Stripe list API with `created[gte]` and `created[lt]` filters,
+ plus `starting_after` if cursor is set.
+2. Emit records.
+3. Update cursor in state, emit `source_state`.
+4. When a range is exhausted (`has_more: false`), remove it from `remaining`.
+
+```
+First page fetched, got cursor:
+
+state: {
+ remaining: [
+ { gte: "2018-01-01", lt: "2024-04-17", cursor: "cus_abc" }
+ ]
+}
+→ emit source_state
+
+Pagination exhausted, range complete:
+
+state: {
+ remaining: []
+}
+→ emit source_state (done)
+```
+
+### 3. Subdivision (n-ary search)
+
+If a range didn't complete in the previous request, the source subdivides it
+at the start of the next request. The source knows the `created` timestamp of
+the last record it paginated (from the cursor). It splits the unpaginated
+portion into N parts (where N = `max_segments_per_stream`):
+
+```
+Previous request ended with:
+ remaining: [{ gte: "2018-01-01", lt: "2024-04-17", cursor: "cus_xyz" }]
+
+Last record seen had created=2020-06-15. Range didn't complete → subdivide.
+The paginated portion [2018, 2020-06-15) keeps its cursor.
+The unpaginated portion [2020-06-15, 2024-04-17) splits into N=2:
+
+ remaining: [
+ { gte: "2018-01-01", lt: "2020-06-15", cursor: "cus_xyz" },
+ { gte: "2020-06-15", lt: "2022-05-16", cursor: null },
+ { gte: "2022-05-16", lt: "2024-04-17", cursor: null }
+ ]
+```
+
+**When to subdivide:** At the start of a request, if any range in `remaining`
+has a cursor (meaning it was in progress last request but didn't complete).
+Subdivision happens between requests, not mid-request.
+
+**Recursive:** If a subdivided range still doesn't complete in one request,
+it gets split again next time. Each pass narrows the ranges until they're
+small enough to complete in a single request.
+
+### 4. Resumption (existing state)
+
+If the source has existing state (from a previous request in the same sync
+run), it resumes directly from `remaining`:
+
+```
+Source receives time_range { gte: "2018-01-01", lt: "2024-04-17" }
+Existing state: {
+ remaining: [
+ { gte: "2022-05-16", lt: "2024-04-17", cursor: "cus_xyz" }
+ ]
+}
+
+→ Resume paginating from cus_xyz in [2022-05-16, 2024-04-17)
+→ No re-initialization
+```
+
+### 5. Completion
+
+When a sub-range is exhausted, the source removes it from `remaining` and
+emits a `stream_status: range_complete`:
+
+```
+→ emit trace { stream_status: { stream: 'customers', status: 'range_complete',
+ range_complete: { gte: '2018-01-01', lt: '2019-06-01' } } }
+```
+
+The engine merges this into `completed_ranges`.
+
+When all sub-ranges are done (`remaining: []`), the source emits
+`stream_status: complete` for the stream.
+
+## Full Example
+
+Shows the messages emitted by the source during a two-request backfill of
+`customers` with `time_range: [2018, 2024)`.
+
+### Request 1 — full range, doesn't complete
+
+Stripe returns max 100 records per page. Each page = 1 API request = 1 state
+checkpoint.
+
+```
+Source initializes: remaining: [{ gte: "2018", lt: "2024", cursor: null }]
+
+← trace { stream_status: { stream: "customers", status: "started" } }
+← record { stream: "customers", data: { id: "cus_001", ... } }
+ ... 100 records (page 1) ...
+← state { stream: "customers", data: { remaining: [{ gte: "2018", lt: "2024", cursor: "cus_100" }] } }
+← record { stream: "customers", data: { ... } }
+ ... 100 records (page 2) ...
+← state { stream: "customers", data: { remaining: [{ gte: "2018", lt: "2024", cursor: "cus_200" }] } }
+ ... pages 3-50 (5000 records total) ...
+← state { stream: "customers", data: { remaining: [{ gte: "2018", lt: "2024", cursor: "cus_5000" }] } }
+ ... source cut off (time limit / state limit) ...
+
+← end { has_more: true }
+```
+
+Range didn't complete in one request → source will subdivide on next request.
+
+### Request 2 — source subdivides, finishes first sub-range
+
+```
+Source resumes, sees remaining: [{ gte: "2018", lt: "2024", cursor: "cus_5000" }]
+Last record had created=2019-03. Range didn't complete → subdivide:
+ remaining: [
+ { gte: "2018", lt: "2019-03", cursor: "cus_5000" }, // current (has cursor)
+ { gte: "2019-03", lt: "2021-09", cursor: null }, // new
+ { gte: "2021-09", lt: "2024", cursor: null } // new
+ ]
+
+← record { stream: "customers", data: { ... } }
+ ... 100 records (page) ...
+← state { ... }
+ ... finishes [2018, 2019-03) after a few more pages ...
+← trace { stream_status: { stream: "customers", status: "range_complete",
+ range_complete: { gte: "2018", lt: "2019-03" } } }
+← state { stream: "customers", data: { remaining: [
+ { gte: "2019-03", lt: "2021-09", cursor: null },
+ { gte: "2021-09", lt: "2024", cursor: null }
+ ] } }
+ ... starts [2019-03, 2021-09), paginates several pages ...
+← state { stream: "customers", data: { remaining: [
+ { gte: "2019-03", lt: "2021-09", cursor: "cus_8000" },
+ { gte: "2021-09", lt: "2024", cursor: null }
+ ] } }
+ ... cut off ...
+
+← end { has_more: true }
+```
+
+### Request 3 — finishes remaining ranges
+
+```
+Source resumes: remaining: [
+ { gte: "2019-03", lt: "2021-09", cursor: "cus_8000" },
+ { gte: "2021-09", lt: "2024", cursor: null }
+]
+These ranges made progress last request — no further subdivision, resume.
+
+ ... paginates [2019-03, 2021-09) page by page ...
+← trace { stream_status: { stream: "customers", status: "range_complete",
+ range_complete: { gte: "2019-03", lt: "2021-09" } } }
+ ... paginates [2021-09, 2024) page by page ...
+← trace { stream_status: { stream: "customers", status: "range_complete",
+ range_complete: { gte: "2021-09", lt: "2024" } } }
+← state { stream: "customers", data: { remaining: [] } }
+← trace { stream_status: { stream: "customers", status: "complete" } }
+
+← end { has_more: false }
+```
+
+Engine's `completed_ranges` for customers after merging all `range_complete` messages:
+`[{ gte: "2018", lt: "2024" }]`
+
+## State on the Wire
+
+Source state is opaque to the engine. The engine learns about range completion
+via `stream_status: range_complete` messages, not by inspecting source state:
+
+```ts
+{
+ type: 'source_state',
+ source_state: {
+ state_type: 'stream',
+ stream: 'customers',
+ time_range: { gte: '2018-01-01T00:00:00Z', lt: '2024-04-17T00:00:00Z' },
+ data: {
+ remaining: [
+ { gte: '2022-05-16T00:00:00Z', lt: '2024-04-17T00:00:00Z', cursor: 'cus_xyz' }
+ ]
+ }
+ }
+}
+```
+
+## Concurrency
+
+Three controls govern how the source uses the Stripe API:
+
+```ts
+// Source config — only max_concurrent_streams is user-configurable
+type StripeSourceConfig = {
+ api_key: string
+ account_id?: string
+ max_concurrent_streams?: number // default 5
+}
+
+// Derived internally by the source:
+// live_mode = inferred from api_key prefix (sk_live_ vs sk_test_)
+// max_requests_per_second = live_mode ? 20 : 10
+// effective_streams = min(max_concurrent_streams, configured_stream_count)
+// max_segments_per_stream = floor(max_requests_per_second / effective_streams)
+```
+
+| Control | What it controls | How it's set |
+| ------------------------- | -------------------------------------------- | ------------------------------------------ |
+| `max_concurrent_streams` | Streams paginating in parallel | Config (default 5), capped at catalog size |
+| `max_requests_per_second` | Global rate limit across all activity | Inferred from API key mode |
+| `max_segments_per_stream` | Sub-ranges per stream (n-ary search fan-out) | Derived: rps / concurrent streams |
+
+### Examples
+
+| Scenario | Mode | Streams | `effective_streams` | `rps` | `max_segments_per_stream` | Max concurrent requests |
+| ---------------- | ---- | ------- | ------------------- | ----- | ------------------------- | ----------------------- |
+| 20 streams, live | live | 20 | 5 | 20 | 4 | 20 |
+| 20 streams, test | test | 20 | 5 | 10 | 2 | 10 |
+| 3 streams, live | live | 3 | 3 | 20 | 6 | 18 |
+| 1 stream, live | live | 1 | 1 | 20 | 20 | 20 |
+| 1 stream, test | test | 1 | 1 | 10 | 10 | 10 |
+
+When fewer streams are configured, each stream gets more segments — the full
+rate limit budget is distributed across whatever streams exist. A single-stream
+sync gets the entire budget.
+
+## Parallel Pagination
+
+The source paginates up to `max_segments_per_stream` ranges from `remaining`
+concurrently per stream, and up to `effective_streams` streams in parallel.
+Records from different ranges/streams are interleaved on the output stream.
+State checkpoints are emitted after each page, reflecting the current state
+of all ranges. This ensures resumability if the source is cut off mid-run.
+
+The global rate limiter (`max_requests_per_second`) governs all API calls
+regardless of which stream or segment they belong to.
+
+## Source Logs
+
+The Stripe source emits `log` messages for real-time operational visibility.
+These are passed through by the engine.
+
+| Level | Message | When |
+|---|---|---|
+| info | `{stream}: {rps} requests/sec` | Periodically during pagination |
+| warn | `rate limited: retrying in {n}s` | Stripe returned 429 |
+| warn | `retry {n}/{max}: {status} {message}` | Request failed, retrying |
+
+## Error Handling
+
+- **Transient errors** (rate limits, 5xx, timeouts): Retried at the HTTP
+ layer with exponential backoff. Emit a `transient` error trace for
+ observability regardless of whether the retry succeeded.
+- **Stream errors** (resource not available, permission denied): Emit a
+ `stream` error trace, stop this stream, move to the next.
+- **Global errors** (invalid API key): Emit a `global` error trace, stop.
+
+The source does not store error state. If a range fails after all retries,
+the range stays in `remaining` with its cursor for the next attempt.
+
+## Events
+
+The `/events` endpoint is treated as just another stream in the catalog —
+same `time_range` model, same `remaining`-based pagination. No special
+incremental mode or live polling by default.
+
+For experimental live event polling (using events as a webhook replacement),
+an opt-in flag stores cursor state in `source.global`, which is completely
+separate from the per-stream backfill cursor logic. This is not enabled by
+default.
diff --git a/docs/engine/sync-lifecycle.md b/docs/engine/sync-lifecycle.md
new file mode 100644
index 00000000..201b7641
--- /dev/null
+++ b/docs/engine/sync-lifecycle.md
@@ -0,0 +1,474 @@
+# Sync Lifecycle
+
+How finite sync runs work: run identity, opaque state, optional time ranges, and
+terminal stream status. For message types and connector interfaces, see
+[protocol.md](./protocol.md).
+
+## Scope
+
+This design is intentionally narrow:
+
+- Incremental backfills only.
+- Finite reads only.
+- `full_refresh` is out of scope.
+- Live `/events` polling is out of scope.
+- Generic stall detection is out of scope.
+
+## Removed From This Protocol
+
+To keep lifecycle semantics tight, this protocol explicitly removes these ideas:
+
+- **No `full_refresh` lifecycle.** `sync_mode: 'full_refresh'` and
+ `destination_sync_mode: 'overwrite'` are not part of this protocol. They need
+ separate semantics because "done for this run" and "historical coverage" mean
+ different things for a full reread.
+- **No `range_complete`-driven terminality.** `range_complete` remains optional
+ progress telemetry only. It does not drive `has_more`.
+- **No cross-request range subdivision in the protocol.** The protocol does not
+ assume that a partially paginated time range can be split into smaller ranges
+ between requests.
+
+## Motivation
+
+The base protocol treats each `read()` call as independent. The caller manages
+pagination, upper bounds, and continuation externally. That creates three
+problems:
+
+1. **Backfill bounds shift between calls.** A stream that derives its own upper
+ bound from `now()` can chase a moving target forever.
+2. **No run identity.** Multiple requests that belong to one logical backfill
+ have no shared context.
+3. **Completion is ambiguous.** If the engine inspects source-specific state to
+ guess whether a stream is done, protocol behavior depends on connector
+ internals instead of explicit source signals.
+
+This design introduces **sync runs** as a first-class concept. The engine owns
+run identity and optional outer time bounds. The source owns pagination and
+emits explicit lifecycle signals.
+
+---
+
+## Layers
+
+```
+CLIENT ←—start/end—→ ENGINE ←—iterator—→ SOURCE
+```
+
+| Concern | Client | Engine | Source |
+| --- | --- | --- | --- |
+| What to sync | Provides catalog | Passes catalog through, may inject `time_range` | Syncs what it's given |
+| When to sync | Decides | — | — |
+| Run identity | Generates `sync_run_id` | Tracks run continuity | Unaware |
+| Time range bounds | — | Freezes `started_at`, injects `time_range` when supported | Respects `time_range` if present |
+| Internal pagination | — | — | Manages `starting_after` / equivalent |
+| Stream lifecycle | Consumes | Tracks terminal streams | Emits `started`, optional `range_complete`, `complete` |
+| Progress reporting | Consumes | Emits run-level snapshots | Emits records, checkpoints, traces |
+| Error reporting | Decides retry policy above the engine | Passes through, stops on `global` | Classifies and emits trace errors |
+| State | Opaque round-trip | Manages engine section | Manages source section |
+| `has_more` | Reads, acts | Derives from explicit terminal stream state | — |
+
+---
+
+## Core Rule
+
+The engine trusts only explicit stream status messages for lifecycle:
+
+- `started` means the stream is active for this request.
+- `range_complete` is progress telemetry only.
+- `complete` is the only terminal signal.
+
+The engine does **not** inspect source state to infer completion. Source state is
+opaque cursor data.
+
+---
+
+## Messages
+
+### `start` — client → engine
+
+Begins or continues a sync run. See [Types](#types) for `StartPayload`.
+
+### `end` — engine → client
+
+The request is done. See [Types](#types) for `EndPayload`.
+
+`has_more: true` means at least one configured stream has not emitted
+`stream_status: complete` for this run yet. Continue by sending another `start`
+with the same `sync_run_id` and the previous `ending_state` as the next
+`starting_state`.
+
+`has_more: false` means every configured stream is terminal for this run. The
+next sync should use a new `sync_run_id`.
+
+### Source → engine
+
+Sources are iterators that yield these message types:
+
+```ts
+// Data record
+{ type: 'record', record: { stream: string, data: Record<string, unknown>, emitted_at: string } }
+
+// Checkpoint. Data is opaque to the engine.
+{ type: 'source_state', source_state: { state_type: 'stream', stream: string, data: unknown } }
+
+// Global checkpoint for source-wide state.
+{ type: 'source_state', source_state: { state_type: 'global', data: unknown } }
+
+// Stream status
+{ type: 'trace', trace: { trace_type: 'stream_status', stream_status: StreamStatus } }
+
+// Error
+{ type: 'trace', trace: { trace_type: 'error', error: SyncError & { stack_trace?: string } } }
+
+// Diagnostic log
+{ type: 'log', log: { level: 'debug' | 'info' | 'warn' | 'error', message: string } }
+```
+
+### Engine → client
+
+The engine emits four message types: `progress`, `record`, `log`, and `end`.
+
+```ts
+{
+ type: 'progress',
+ progress: {
+ elapsed_ms: number,
+ global_state_count: number,
+ derived: {
+ records_per_second: number,
+ states_per_second: number,
+ },
+    streams: Record<string, StreamProgress>,
+ errors: SyncError[]
+ }
+}
+
+{ type: 'record', record: { stream: string, data: Record<string, unknown>, emitted_at: string } }
+
+{ type: 'log', log: { level: 'info' | 'warn' | 'error', message: string } }
+
+{
+ type: 'end',
+ end: {
+ has_more: boolean,
+ ending_state: SyncState,
+ request_progress: ProgressPayload,
+ }
+}
+```
+
+The engine does not pass trace messages through to the client. It folds them
+into `progress` and `log`.
+
+---
+
+## Stream Status
+
+`stream_status` is a discriminated union on `status`:
+
+```ts
+type StreamStatus =
+ | { stream: string; status: 'started' }
+ | { stream: string; status: 'range_complete'; range_complete: { gte: string; lt: string } }
+ | { stream: string; status: 'complete' }
+```
+
+| Status | Meaning | Engine action |
+| --- | --- | --- |
+| `started` | Stream is active | Mark stream active for progress |
+| `range_complete` | A time range finished | Update progress only |
+| `complete` | Stream is terminal for this run | Mark stream terminal |
+
+`range_complete` is optional and only meaningful for streams that support
+engine-assigned `time_range`. It is not used to derive `has_more`.
+
+A source that decides to stop a stream after a stream-level error should still
+emit `complete` for that stream. That keeps lifecycle semantics explicit:
+errors explain *why* the stream stopped; `complete` says it is terminal.
+
+---
+
+## Types
+
+### Configured catalog (client → engine → source)
+
+The client provides the catalog. The engine may inject `time_range` into
+streams that support it.
+
+```ts
+type ConfiguredStream = {
+ name: string
+ primary_key: string[][]
+  json_schema?: Record<string, unknown>
+ sync_mode: 'incremental'
+ destination_sync_mode: 'append' | 'append_dedup'
+ cursor_field?: string[]
+ backfill_limit?: number
+
+ // Source capability from discover/spec.
+ supports_time_range?: boolean
+
+ // Set by engine only when supports_time_range is true.
+ time_range?: {
+ gte?: string
+ lt: string
+ }
+}
+
+type ConfiguredCatalog = {
+ streams: ConfiguredStream[]
+}
+```
+
+### Start message (client → engine)
+
+```ts
+type StartPayload = {
+ sync_run_id: string
+  source_config: Record<string, unknown>
+  destination_config: Record<string, unknown>
+ configured_catalog: ConfiguredCatalog
+ starting_state?: SyncState
+}
+```
+
+### End message (engine → client)
+
+```ts
+type EndPayload = {
+ has_more: boolean
+ ending_state: SyncState
+ request_progress: ProgressPayload
+}
+```
+
+### Progress message (engine → client)
+
+```ts
+type SyncError =
+ | { error_level: 'global'; message: string }
+ | { error_level: 'stream'; message: string; stream: string }
+ | { error_level: 'transient'; message: string; stream?: string }
+
+type StreamProgress = {
+ state_count: number
+ record_count: number
+ completed_ranges?: Array<{ gte: string; lt: string }>
+ terminal: boolean
+}
+
+type ProgressPayload = {
+ elapsed_ms: number
+ global_state_count: number
+ derived: {
+ records_per_second: number
+ states_per_second: number
+ }
+  streams: Record<string, StreamProgress>
+ errors: SyncError[]
+}
+```
+
+`completed_ranges` is progress data only. It does not determine completion.
+
+### SyncState (round-tripped between start and end)
+
+```ts
+type SyncState = {
+ source: SourceState
+ engine: EngineState
+}
+
+type SourceState = {
+  streams: Record<string, unknown>
+  global: Record<string, unknown>
+}
+
+type EngineState = {
+ sync_run_id: string
+ started_at: string
+ terminal_streams: string[]
+ run_progress: ProgressPayload
+}
+```
+
+### Source state — Stripe example
+
+Source state is opaque to the engine. For Stripe list endpoints, the source can
+store the last emitted object ID as `starting_after`:
+
+```ts
+type StripeStreamState = {
+ starting_after: string | null
+}
+```
+
+For time-range streams, the assigned `time_range` lives in the catalog, not in
+source state.
+
+---
+
+## Sync Runs
+
+A sync run is identified by `sync_run_id`. Within a run, `started_at` is frozen.
+
+### New run
+
+1. Client sends `start` with a new `sync_run_id`.
+2. Engine freezes `started_at = now()` and stores it in engine state.
+3. For each configured stream where `supports_time_range` is true, the engine
+ injects `time_range.lt = started_at`.
+4. Source runs, emits records, checkpoints, and explicit stream statuses.
+5. Engine emits progress, forwards records to the destination, and returns
+ `end`.
+
+### Continuation
+
+1. Client sends `start` with the same `sync_run_id` and previous `ending_state`.
+2. Engine preserves `started_at` from engine state.
+3. The engine re-injects the same `time_range` into streams that support it.
+4. Source resumes from its opaque cursor state.
+
+### Completion
+
+When `has_more: false`:
+
+- Every configured stream is present in `engine.terminal_streams`.
+- The client should start the next sync with a new `sync_run_id`.
+
+### Example
+
+```
+sync_run_id: "sr_1"
+ request 1: customers [2018, 2024) → timed out → end { has_more: true }
+ request 2: customers [2018, 2024) → complete → end { has_more: false }
+```
+
+The range is stable across requests. The source resumes within that range using
+its own cursor state.
+
+---
+
+## Time Ranges
+
+Time range support is optional per stream.
+
+### Streams with `supports_time_range: true`
+
+- The engine injects `time_range`.
+- `time_range.lt` is frozen to `started_at` for the duration of the run.
+- The source resumes within that range using opaque source state.
+- The source may emit `range_complete` for progress reporting.
+
+### Streams with `supports_time_range: false`
+
+- The engine does not inject `time_range`.
+- The source paginates using its own cursor semantics only.
+- No coverage accounting is implied.
+
+### Why this matters
+
+- Frozen upper bounds prevent moving-target backfills for eligible streams.
+- Streams without time filtering still fit the same continuation contract.
+- The engine never needs to understand source-specific pagination tokens.
+
+---
+
+## `has_more` Derivation
+
+The engine derives `has_more` from explicit terminal stream state:
+
+```ts
+has_more = configured_catalog.streams.some(
+ (stream) => !engine.terminal_streams.includes(stream.name)
+)
+```
+
+`completed_ranges` and source-state shape do not participate in this decision.
+
+---
+
+## Error Handling
+
+### Error levels
+
+| `error_level` | Blast radius | Engine action | Example |
+| --- | --- | --- | --- |
+| `global` | Entire sync | Abort all streams, `has_more: false` | Invalid API key |
+| `stream` | One stream | Keep processing other streams | Resource unavailable |
+| `transient` | One request or page | Informational | Rate limited, retried |
+
+### Source → engine error flow
+
+```ts
+{ type: 'trace', trace: { trace_type: 'error', error: SyncError } }
+```
+
+### Engine behavior
+
+- `global`: stop immediately and emit `end { has_more: false }`
+- `stream`: record the error and continue with other streams
+- `transient`: record the error only
+
+Errors are not stored in source state. They are separate from lifecycle.
+
+---
+
+## Engine Logs
+
+The engine emits `log` messages for anomalies and failures only.
+
+### warn
+
+| Message | When |
+| --- | --- |
+| `state before started: {stream}` | Source emitted `source_state` before `stream_status: started` |
+| `state after complete: {stream}` | Source emitted `source_state` after `stream_status: complete` |
+| `duplicate started: {stream}` | Source emitted `stream_status: started` twice |
+| `unknown stream: {stream}` | Source emitted a message for a stream not in the catalog |
+
+### error
+
+| Message | When |
+| --- | --- |
+| `global error: {message}` | Source emitted `error_level: global` |
+| `stream error: {stream}: {message}` | Source emitted `error_level: stream` |
+| `source crashed: {message}` | Source iterator threw |
+
+---
+
+## Wire Format
+
+NDJSON. One message per line.
+
+```json
+{"type":"start","sync_run_id":"sr_abc","source_config":{},"configured_catalog":{"streams":[{"name":"customers","sync_mode":"incremental","supports_time_range":true}]}}
+{"type":"progress","progress":{"elapsed_ms":100,"global_state_count":0,"derived":{"records_per_second":0,"states_per_second":0},"streams":{"customers":{"state_count":0,"record_count":0,"completed_ranges":[],"terminal":false}},"errors":[]}}
+{"type":"record","record":{"stream":"customers","data":{"id":"cus_123"}}}
+{"type":"progress","progress":{"elapsed_ms":1600,"global_state_count":1,"derived":{"records_per_second":1562,"states_per_second":0.6},"streams":{"customers":{"state_count":1,"record_count":2500,"completed_ranges":[],"terminal":false}},"errors":[]}}
+{"type":"progress","progress":{"elapsed_ms":3200,"global_state_count":2,"derived":{"records_per_second":1562,"states_per_second":0.6},"streams":{"customers":{"state_count":2,"record_count":5000,"completed_ranges":[{"gte":"2018-01-01T00:00:00Z","lt":"2024-04-17T00:00:00Z"}],"terminal":true}},"errors":[]}}
+{"type":"end","end":{"has_more":false,"ending_state":{"source":{"streams":{"customers":{"starting_after":null}},"global":{}},"engine":{"sync_run_id":"sr_abc","started_at":"2024-04-17T00:00:00Z","terminal_streams":["customers"],"run_progress":{"elapsed_ms":3200,"global_state_count":2,"derived":{"records_per_second":1562,"states_per_second":0.6},"streams":{"customers":{"state_count":2,"record_count":5000,"completed_ranges":[{"gte":"2018-01-01T00:00:00Z","lt":"2024-04-17T00:00:00Z"}],"terminal":true}},"errors":[]}}},"request_progress":{"elapsed_ms":3200,"global_state_count":2,"derived":{"records_per_second":1562,"states_per_second":0.6},"streams":{"customers":{"state_count":2,"record_count":5000,"completed_ranges":[{"gte":"2018-01-01T00:00:00Z","lt":"2024-04-17T00:00:00Z"}],"terminal":true}},"errors":[]}}}
+```
+
+---
+
+## Client Loop
+
+```ts
+let state = undefined
+const syncRunId = crypto.randomUUID()
+
+do {
+ const { end } = await engine.sync({
+ sync_run_id: syncRunId,
+ source_config,
+ destination_config,
+ configured_catalog,
+ starting_state: state,
+ })
+ state = end.ending_state
+} while (end.has_more)
+```
+
+The client does not need to interpret source state. It only needs to round-trip
+`ending_state` and continue until `has_more` is false.
diff --git a/packages/protocol/src/helpers.ts b/packages/protocol/src/helpers.ts
index 6fad2240..1ccaa8dc 100644
--- a/packages/protocol/src/helpers.ts
+++ b/packages/protocol/src/helpers.ts
@@ -108,13 +108,16 @@ export function isTraceStreamStatus(
return msg.type === 'trace' && msg.trace.trace_type === 'stream_status'
}
-/** Type guard for trace progress messages. */
-export function isTraceProgress(
+/** Type guard for trace global_progress messages. */
+export function isTraceGlobalProgress(
msg: Message
-): msg is TraceMessage & { trace: { trace_type: 'progress' } } {
- return msg.type === 'trace' && msg.trace.trace_type === 'progress'
+): msg is TraceMessage & { trace: { trace_type: 'global_progress' } } {
+ return msg.type === 'trace' && msg.trace.trace_type === 'global_progress'
}
+/** @deprecated Use isTraceGlobalProgress. */
+export const isTraceProgress = isTraceGlobalProgress
+
export function emptySectionState(): SectionState {
return { streams: {}, global: {} }
}
diff --git a/packages/protocol/src/index.ts b/packages/protocol/src/index.ts
index 87f84a3d..f82718f9 100644
--- a/packages/protocol/src/index.ts
+++ b/packages/protocol/src/index.ts
@@ -23,6 +23,7 @@ export {
isDataMessage,
isTraceError,
isTraceStreamStatus,
+ isTraceGlobalProgress,
isTraceProgress,
// State constructors
coerceSyncState,
diff --git a/packages/protocol/src/protocol.ts b/packages/protocol/src/protocol.ts
index 4bcd91d0..2c1238ba 100644
--- a/packages/protocol/src/protocol.ts
+++ b/packages/protocol/src/protocol.ts
@@ -260,16 +260,11 @@ export const TraceStreamStatus = z
.object({
stream: z.string().describe('Stream being reported on.'),
status: z
- .enum([
- 'started',
- 'running',
- 'complete',
- 'transient_error',
- 'system_error',
- 'config_error',
- 'auth_error',
- ])
- .describe('Current phase of the stream within this sync run.'),
+ .enum(['started', 'complete'])
+ .describe(
+ 'Lifecycle status. Errors are orthogonal — a stream can be complete with errors. ' +
+ 'Sources may store richer error statuses internally for retry logic.'
+ ),
cumulative_record_count: z
.number()
.int()
@@ -284,32 +279,10 @@ export const TraceStreamStatus = z
.int()
.optional()
.describe('Records synced for this stream in the current sync run. Set by the engine.'),
- window_record_count: z
- .number()
- .int()
- .optional()
- .describe(
- 'Records synced since the last stream_status emission for this stream. ' +
- 'Set by the engine. Used for instantaneous per-stream throughput.'
- ),
- records_per_second: z
- .number()
- .optional()
- .describe(
- 'Average records per second for this stream over the entire run: ' +
- 'run_record_count / elapsed seconds. Set by the engine.'
- ),
- requests_per_second: z
- .number()
- .optional()
- .describe(
- 'Average API requests per second for this stream over the entire run. ' +
- 'Set by the engine from source-reported request counts.'
- ),
})
.describe(
'Per-stream status update. Sources emit the minimal form (stream + status). ' +
- 'The engine emits enriched versions with record counts and throughput rates.'
+ 'The engine enriches with record counts. Only emitted on status transitions.'
)
export type TraceStreamStatus = z.infer<typeof TraceStreamStatus>
@@ -322,17 +295,22 @@ export const TraceEstimate = z
.describe('Sync progress estimate for a stream.')
export type TraceEstimate = z.infer<typeof TraceEstimate>
-export const TraceProgress = z
+export const TraceGlobalProgress = z
.object({
elapsed_ms: z.number().int().describe('Wall-clock milliseconds since the sync run started.'),
run_record_count: z
.number()
.int()
.describe('Total records synced across all streams in this run.'),
- rows_per_second: z
+ cumulative_record_count: z
+ .number()
+ .int()
+ .optional()
+ .describe('Total records synced across all streams across all runs.'),
+ records_per_second: z
.number()
.describe('Overall throughput for the entire run: run_record_count / elapsed seconds.'),
- window_rows_per_second: z
+ window_records_per_second: z
.number()
.describe(
'Instantaneous throughput: total records in last window / window duration. ' +
@@ -342,13 +320,33 @@ export const TraceProgress = z
.number()
.int()
.describe('Total source_state messages observed so far in this sync run.'),
+ request_count: z
+ .number()
+ .int()
+ .optional()
+ .describe('Total API requests made by the source in this run.'),
+ cumulative_request_count: z
+ .number()
+ .int()
+ .optional()
+ .describe('Total API requests across all runs.'),
+ cumulative_elapsed_ms: z
+ .number()
+ .int()
+ .optional()
+ .describe('Total wall-clock time across all runs.'),
})
.describe(
- 'Periodic global sync progress emitted by the engine. ' +
+ 'Global sync progress emitted by the engine, co-emitted with every stream_status trace. ' +
'Aggregate stats only — per-stream detail is in stream_status messages. ' +
'Each emission is a full replacement.'
)
-export type TraceProgress = z.infer<typeof TraceProgress>
+export type TraceGlobalProgress = z.infer<typeof TraceGlobalProgress>
+
+/** @deprecated Use TraceGlobalProgress. */
+export const TraceProgress = TraceGlobalProgress
+/** @deprecated Use TraceGlobalProgress. */
+export type TraceProgress = TraceGlobalProgress
export const TracePayload = z
.discriminatedUnion('trace_type', [
@@ -365,43 +363,27 @@ export const TracePayload = z
estimate: TraceEstimate,
}),
z.object({
- trace_type: z.literal('progress'),
- progress: TraceProgress,
+ trace_type: z.literal('global_progress'),
+ global_progress: TraceGlobalProgress,
}),
])
.describe(
- 'Diagnostic/status payload with subtypes for error, stream status, estimates, and progress.'
+ 'Diagnostic/status payload with subtypes for error, stream status, estimates, and global progress.'
)
export type TracePayload = z.infer<typeof TracePayload>
-// MARK: - EOF payload (depends on TraceProgress)
+// MARK: - EOF payload (depends on TraceGlobalProgress)
export const EofStreamProgress = z
.object({
status: z
- .enum([
- 'started',
- 'running',
- 'complete',
- 'transient_error',
- 'system_error',
- 'config_error',
- 'auth_error',
- ])
- .describe('Final stream status.'),
+ .enum(['started', 'complete'])
+ .describe('Lifecycle status. Errors are orthogonal — a stream can be complete with errors.'),
cumulative_record_count: z
.number()
.int()
.describe('Cumulative records synced for this stream across all runs.'),
run_record_count: z.number().int().describe('Records synced in this run.'),
- records_per_second: z
- .number()
- .optional()
- .describe('Average records/sec for this stream over the run.'),
- requests_per_second: z
- .number()
- .optional()
- .describe('Average requests/sec for this stream over the run.'),
errors: z
.array(
z.object({
@@ -440,8 +422,8 @@ export const EofPayload = z
'engine: updated cumulative record counts; destination: reserved. ' +
'Consumers can persist this directly and pass it back on resume.'
),
- global_progress: TraceProgress.optional().describe(
- 'Final global aggregates. Same shape as trace/progress.'
+ global_progress: TraceGlobalProgress.optional().describe(
+ 'Final global aggregates. Same shape as trace/global_progress.'
),
stream_progress: z
.record(z.string(), EofStreamProgress)
export type DestinationOutput = z.infer<typeof DestinationOutput>
export const SyncOutput = z
.discriminatedUnion('type', [
SourceStateMessage,
+ CatalogMessage,
TraceMessage,
LogMessage,
EofMessage,
diff --git a/packages/source-stripe/src/client.ts b/packages/source-stripe/src/client.ts
index 7696f723..81464fba 100644
--- a/packages/source-stripe/src/client.ts
+++ b/packages/source-stripe/src/client.ts
@@ -98,7 +98,7 @@ export function makeClient(
params?: Record
): Promise {
if (method === 'GET') {
- return withHttpRetry(() => request(method, path, params), { label: `${method} ${path}`, signal: pipelineSignal })
+ return withHttpRetry(() => request(method, path, params), { signal: pipelineSignal })
}
return request(method, path, params)
}
diff --git a/packages/source-stripe/src/index.test.ts b/packages/source-stripe/src/index.test.ts
index 645c9746..436b1100 100644
--- a/packages/source-stripe/src/index.test.ts
+++ b/packages/source-stripe/src/index.test.ts
@@ -544,8 +544,8 @@ describe('StripeSource', () => {
source.read({ config, catalog: catalog({ name: 'customers', primary_key: [['id']] }) })
)
- // trace(stream_status started) + trace(error) + source_state(transient_error)
- expect(messages).toHaveLength(3)
+ // trace(stream_status started) + trace(error) + source_state(transient_error) + trace(stream_status complete)
+ expect(messages).toHaveLength(4)
expect(messages[0]).toMatchObject({
type: 'trace',
trace: {
@@ -576,6 +576,14 @@ describe('StripeSource', () => {
data: { status: 'transient_error' },
},
})
+
+ expect(messages[3]).toMatchObject({
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'customers', status: 'complete' },
+ },
+ })
})
it('emits TraceMessage error with failure_type config_error for unknown stream', async () => {
@@ -628,7 +636,8 @@ describe('StripeSource', () => {
source.read({ config, catalog: catalog({ name: 'customers', primary_key: [['id']] }) })
)
- expect(messages).toHaveLength(3)
+ // started + error + state(system_error) + complete
+ expect(messages).toHaveLength(4)
const errorMsg = messages[1] as TraceMessage
expect(errorMsg.type).toBe('trace')
expect(errorMsg.trace.trace_type).toBe('error')
@@ -646,6 +655,14 @@ describe('StripeSource', () => {
data: { status: 'system_error' },
},
})
+
+ expect(messages[3]).toMatchObject({
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'customers', status: 'complete' },
+ },
+ })
})
it('proceeds with backfill using fallback timestamp when getAccount fails (fault-tolerant)', async () => {
@@ -731,7 +748,8 @@ describe('StripeSource', () => {
source.read({ config, catalog: catalog({ name: 'tax_ids', primary_key: [['id']] }) })
)
- expect(messages).toHaveLength(3)
+ // started + error + state(auth_error) + complete
+ expect(messages).toHaveLength(4)
const errorMsg = messages[1] as TraceMessage
expect(errorMsg.trace.trace_type).toBe('error')
const traceError = (
@@ -748,6 +766,14 @@ describe('StripeSource', () => {
type: 'source_state',
source_state: { state_type: 'stream', stream: 'tax_ids', data: { status: 'auth_error' } },
})
+
+ expect(messages[3]).toMatchObject({
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'tax_ids', status: 'complete' },
+ },
+ })
})
it('does not treat near-miss auth errors as skippable', async () => {
@@ -768,7 +794,8 @@ describe('StripeSource', () => {
source.read({ config, catalog: catalog({ name: 'customers', primary_key: [['id']] }) })
)
- expect(messages).toHaveLength(3)
+ // started + error + state(system_error) + complete
+ expect(messages).toHaveLength(4)
expect(messages[1]).toMatchObject({
type: 'trace',
trace: {
@@ -787,6 +814,13 @@ describe('StripeSource', () => {
data: { status: 'system_error' },
},
})
+ expect(messages[3]).toMatchObject({
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'customers', status: 'complete' },
+ },
+ })
})
it('marks known skippable Stripe list errors as complete without emitting error traces', async () => {
@@ -824,6 +858,52 @@ describe('StripeSource', () => {
})
})
+ it('treats Unrecognized request URL as skippable (feature not available)', async () => {
+ const listFn = vi
+ .fn()
+ .mockRejectedValueOnce(
+ new Error(
+ 'Unrecognized request URL (GET: /v1/treasury/financial_accounts). Please see https://stripe.com/docs'
+ )
+ )
+
+ const registry: Record = {
+ treasury_financial_accounts: makeConfig({
+ order: 1,
+ tableName: 'treasury_financial_accounts',
+ listFn: listFn as ResourceConfig['listFn'],
+ }),
+ }
+
+ vi.mocked(buildResourceRegistry).mockReturnValue(registry as any)
+ const messages = await collect(
+ source.read({
+ config,
+ catalog: catalog({
+ name: 'treasury_financial_accounts',
+ primary_key: [['id']],
+ }),
+ })
+ )
+
+ // Skippable: started + complete, no error trace
+ expect(messages).toHaveLength(2)
+ expect(messages[0]).toMatchObject({
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'treasury_financial_accounts', status: 'started' },
+ },
+ })
+ expect(messages[1]).toMatchObject({
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'treasury_financial_accounts', status: 'complete' },
+ },
+ })
+ })
+
it('continues to next stream after error on previous stream', async () => {
const failingListFn = vi.fn().mockRejectedValueOnce(new Error('Connection refused'))
const successListFn = vi.fn().mockResolvedValueOnce({
@@ -855,11 +935,11 @@ describe('StripeSource', () => {
})
)
- // customers: started + error + error_state = 3
+ // customers: started + error + error_state + complete = 4
// invoices: started + record + state + complete = 4
- expect(messages).toHaveLength(7)
+ expect(messages).toHaveLength(8)
- // Customers errored
+ // Customers errored but still emits terminal stream_status
expect(messages[0]).toMatchObject({
type: 'trace',
trace: {
@@ -879,16 +959,23 @@ describe('StripeSource', () => {
data: { status: 'system_error' },
},
})
+ expect(messages[3]).toMatchObject({
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'customers', status: 'complete' },
+ },
+ })
// Invoices succeeded
- expect(messages[3]).toMatchObject({
+ expect(messages[4]).toMatchObject({
type: 'trace',
trace: {
trace_type: 'stream_status',
stream_status: { stream: 'invoices', status: 'started' },
},
})
- expect(messages[6]).toMatchObject({
+ expect(messages[7]).toMatchObject({
type: 'trace',
trace: {
trace_type: 'stream_status',
diff --git a/packages/source-stripe/src/resourceRegistry.ts b/packages/source-stripe/src/resourceRegistry.ts
index 604753d4..f3d55db7 100644
--- a/packages/source-stripe/src/resourceRegistry.ts
+++ b/packages/source-stripe/src/resourceRegistry.ts
@@ -119,14 +119,14 @@ export function buildResourceRegistry(
supportsForwardPagination: isV2 || endpoint.supportsStartingAfter,
sync: true,
dependencies: [],
- listFn: buildSpecAwareListFn((params) => withHttpRetry(() => rawListFn(params), { label: `LIST ${endpoint.apiPath} (${tableName})` }), {
+ listFn: buildSpecAwareListFn((params) => withHttpRetry(() => rawListFn(params)), {
isV2,
supportsLimit: endpoint.supportsLimit,
supportsStartingAfter: endpoint.supportsStartingAfter,
supportsEndingBefore: endpoint.supportsEndingBefore,
supportsCreatedFilter: endpoint.supportsCreatedFilter,
}),
- retrieveFn: (id) => withHttpRetry(() => rawRetrieveFn(id), { label: `GET ${endpoint.apiPath}/${id} (${tableName})` }),
+ retrieveFn: (id) => withHttpRetry(() => rawRetrieveFn(id)),
nestedResources: children.length > 0 ? children : undefined,
}
registry[tableName] = config
diff --git a/packages/source-stripe/src/retry.ts b/packages/source-stripe/src/retry.ts
index d33fafd5..e15589e8 100644
--- a/packages/source-stripe/src/retry.ts
+++ b/packages/source-stripe/src/retry.ts
@@ -15,8 +15,6 @@ const RETRYABLE_NETWORK_CODES = new Set([
])
export type HttpRetryOptions = {
- /** Human-readable label for log messages (e.g. "GET /v1/customers") */
- label?: string
maxRetries?: number
baseDelayMs?: number
maxDelayMs?: number
@@ -129,9 +127,8 @@ export async function withHttpRetry(
const status = getHttpErrorStatus(err)
const errName = err instanceof Error ? err.name : 'UnknownError'
const errMsg = err instanceof Error ? err.message : String(err)
- const labelPart = opts.label ? ` ${opts.label}` : ''
console.error(
- `[source-stripe] retry${labelPart} attempt=${attempt + 1}/${maxRetries} delay=${delayMs}ms status=${status ?? 'n/a'} error=${errName}: ${errMsg}`
+ `[source-stripe] retry attempt=${attempt + 1}/${maxRetries} delay=${delayMs}ms status=${status ?? 'n/a'} error=${errName}: ${errMsg}`
)
await sleep(delayMs, opts.signal)
diff --git a/packages/source-stripe/src/src-list-api.ts b/packages/source-stripe/src/src-list-api.ts
index c063ca2c..972a264b 100644
--- a/packages/source-stripe/src/src-list-api.ts
+++ b/packages/source-stripe/src/src-list-api.ts
@@ -6,6 +6,7 @@ import type { SegmentState, BackfillState } from './index.js'
import type { RateLimiter } from './rate-limiter.js'
import { MAX_SEGMENTS, MAX_CONCURRENCY } from './rate-limiter.js'
import { StripeApiRequestError } from '@stripe/sync-openapi'
+import { isRetryableHttpError } from './retry.js'
import type { StripeClient } from './client.js'
// MARK: - Rate-limit wrapper
@@ -40,9 +41,15 @@ function withRateLimit(listFn: ListFn, rateLimiter: RateLimiter, signal?: AbortS
}
export function getFailureType(err: unknown): 'transient_error' | 'system_error' | 'auth_error' {
- const isRateLimit = err instanceof Error && err.message.includes('Rate limit')
- const isAuth = err instanceof StripeApiRequestError && (err.status === 401 || err.status === 403)
- return isRateLimit ? 'transient_error' : isAuth ? 'auth_error' : 'system_error'
+ if (err instanceof StripeApiRequestError && (err.status === 401 || err.status === 403)) {
+ return 'auth_error'
+ }
+ // Rate limit message check (belt + suspenders alongside HTTP status check)
+ if (err instanceof Error && err.message.includes('Rate limit')) {
+ return 'transient_error'
+ }
+ // 429, 5xx, network errors, timeouts → retriable; everything else → permanent
+ return isRetryableHttpError(err) ? 'transient_error' : 'system_error'
}
export function errorToTrace(err: unknown, stream: string): TraceMessage {
@@ -83,6 +90,7 @@ const SKIPPABLE_ERROR_PATTERNS = [
'Must provide customer',
'Must provide ',
'not set up to use',
+ 'Unrecognized request URL',
]
// MARK: - Compact state (generative — O(concurrency) not O(total segments))
@@ -696,6 +704,14 @@ export async function* listApiBackfill(opts: {
...(streamState?.backfill ? { backfill: streamState.backfill } : {}),
},
})
+ // Errors are orthogonal to lifecycle — always emit terminal status
+ yield {
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: stream.name, status: 'complete' },
+ },
+ } satisfies TraceMessage
}
}
}
diff --git a/packages/source-stripe/src/transport.test.ts b/packages/source-stripe/src/transport.test.ts
index d6290466..488d07b0 100644
--- a/packages/source-stripe/src/transport.test.ts
+++ b/packages/source-stripe/src/transport.test.ts
@@ -77,6 +77,7 @@ describe('parsePositiveInteger', () => {
})
+
describe('getHttpsProxyAgentForTarget', () => {
it('returns an agent only when the target should use the proxy', () => {
expect(
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 2ed8e41d..358149a4 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -118,7 +118,7 @@ importers:
version: link:../engine
'@stripe/sync-service':
specifier: workspace:*
- version: file:apps/service(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(tslib@2.8.1)
+ version: file:apps/service(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)(tslib@2.8.1)
'@stripe/sync-source-stripe':
specifier: workspace:*
version: link:../../packages/source-stripe
@@ -194,6 +194,9 @@ importers:
hono:
specifier: ^4
version: 4.12.8
+ ink:
+ specifier: ^7.0.0
+ version: 7.0.0(@types/react@19.2.14)(react@19.2.4)
openapi-fetch:
specifier: ^0.17.0
version: 0.17.0
@@ -206,6 +209,9 @@ importers:
pino-pretty:
specifier: ^13
version: 13.1.3
+ react:
+ specifier: ^19.2.4
+ version: 19.2.4
ws:
specifier: ^8.18.0
version: 8.18.3
@@ -222,6 +228,9 @@ importers:
'@types/pg':
specifier: ^8.15.4
version: 8.15.6
+ '@types/react':
+ specifier: ^19.2.14
+ version: 19.2.14
openapi-typescript:
specifier: ^7.13.0
version: 7.13.0(typescript@5.9.3)
@@ -680,6 +689,10 @@ importers:
packages:
+ '@alcalzone/ansi-tokenize@0.3.0':
+ resolution: {integrity: sha512-p+CMKJ93HFmLkjXKlXiVGlMQEuRb6H0MokBSwUsX+S6BRX8eV5naFZpQJFfJHjRZY0Hmnqy1/r6UWl3x+19zYA==}
+ engines: {node: '>=18'}
+
'@alloc/quick-lru@5.2.0':
resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==}
engines: {node: '>=10'}
@@ -2824,6 +2837,10 @@ packages:
resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==}
engines: {node: '>=6'}
+ ansi-escapes@7.3.0:
+ resolution: {integrity: sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg==}
+ engines: {node: '>=18'}
+
ansi-regex@5.0.1:
resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==}
engines: {node: '>=8'}
@@ -2861,6 +2878,10 @@ packages:
resolution: {integrity: sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==}
engines: {node: '>=8.0.0'}
+ auto-bind@5.0.1:
+ resolution: {integrity: sha512-ooviqdwwgfIfNmDwo94wlshcdzfO64XV0Cg6oDsDYBJfITDz1EngD2z7DkbvCWn+XIMsIqW27sEVF6qcpJrRcg==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
autoprefixer@10.4.27:
resolution: {integrity: sha512-NP9APE+tO+LuJGn7/9+cohklunJsXWiaWEfV3si4Gi/XHDwVNgkwr1J3RQYFIvPy76GmJ9/bW8vyoU1LcxwKHA==}
engines: {node: ^10 || ^12 || >=14}
@@ -2949,6 +2970,10 @@ packages:
resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
engines: {node: '>=10'}
+ chalk@5.6.2:
+ resolution: {integrity: sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==}
+ engines: {node: ^12.17.0 || ^14.13 || >=16.0.0}
+
change-case@5.4.4:
resolution: {integrity: sha512-HRQyTk2/YPEkt9TnUPbOpr64Uw3KOicFWPVBb+xiHvd6eBx/qPr9xqfBFDT8P2vWsvvz4jbEkfDe71W3VyNu2w==}
@@ -2966,6 +2991,18 @@ packages:
class-variance-authority@0.7.1:
resolution: {integrity: sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==}
+ cli-boxes@4.0.1:
+ resolution: {integrity: sha512-5IOn+jcCEHEraYolBPs/sT4BxYCe2nHg374OPiItB1O96KZFseS2gthU4twyYzeDcFew4DaUM/xwc5BQf08JJw==}
+ engines: {node: '>=18.20 <19 || >=20.10'}
+
+ cli-cursor@4.0.0:
+ resolution: {integrity: sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
+ cli-truncate@6.0.0:
+ resolution: {integrity: sha512-3+YKIUFsohD9MIoOFPFBldjAlnfCmCDcqe6aYGFqlDTRKg80p4wg35L+j83QQ63iOlKRccEkbn8IuM++HsgEjA==}
+ engines: {node: '>=22'}
+
client-only@0.0.1:
resolution: {integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==}
@@ -2977,6 +3014,10 @@ packages:
resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==}
engines: {node: '>=6'}
+ code-excerpt@4.0.0:
+ resolution: {integrity: sha512-xxodCmBen3iy2i0WtAK8FlFNrRzjUqjRsMfho58xT/wvZU1YTM3fCnRjcy1gJPMepaRlgm/0e6w8SpWHpn3/cA==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
codemirror@6.0.2:
resolution: {integrity: sha512-VhydHotNW5w1UGK0Qj96BwSk/Zqbp9WbnyK2W/eVMv4QyF41INRGpjUhFJY7/uDNuudSc33a/PKr4iDqRduvHw==}
@@ -3032,6 +3073,10 @@ packages:
convert-source-map@2.0.0:
resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==}
+ convert-to-spaces@2.0.1:
+ resolution: {integrity: sha512-rcQ1bsQO9799wq24uE5AM2tAILy4gXGIK/njFWcVQkGNZ96edlpY+A7bjwvzjYvLDyzmG1MmMLZhpcsb+klNMQ==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
crelt@1.0.6:
resolution: {integrity: sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==}
@@ -3129,6 +3174,10 @@ packages:
resolution: {integrity: sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA==}
engines: {node: '>=10.13.0'}
+ environment@1.1.0:
+ resolution: {integrity: sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==}
+ engines: {node: '>=18'}
+
es-define-property@1.0.1:
resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==}
engines: {node: '>= 0.4'}
@@ -3151,6 +3200,9 @@ packages:
resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==}
engines: {node: '>= 0.4'}
+ es-toolkit@1.45.1:
+ resolution: {integrity: sha512-/jhoOj/Fx+A+IIyDNOvO3TItGmlMKhtX8ISAHKE90c4b/k1tqaqEZ+uUqfpU8DMnW5cgNJv606zS55jGvza0Xw==}
+
esbuild@0.28.0:
resolution: {integrity: sha512-sNR9MHpXSUV/XB4zmsFKN+QgVG82Cc7+/aaxJ8Adi8hyOac+EXptIp45QBPaVyX3N70664wRbTcLTOemCAnyqw==}
engines: {node: '>=18'}
@@ -3160,6 +3212,10 @@ packages:
resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==}
engines: {node: '>=6'}
+ escape-string-regexp@2.0.0:
+ resolution: {integrity: sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==}
+ engines: {node: '>=8'}
+
escape-string-regexp@4.0.0:
resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==}
engines: {node: '>=10'}
@@ -3379,6 +3435,10 @@ packages:
resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==}
engines: {node: 6.* || 8.* || >= 10.*}
+ get-east-asian-width@1.5.0:
+ resolution: {integrity: sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA==}
+ engines: {node: '>=18'}
+
get-intrinsic@1.3.0:
resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==}
engines: {node: '>= 0.4'}
@@ -3525,6 +3585,10 @@ packages:
resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
engines: {node: '>=0.8.19'}
+ indent-string@5.0.0:
+ resolution: {integrity: sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==}
+ engines: {node: '>=12'}
+
index-to-position@1.2.0:
resolution: {integrity: sha512-Yg7+ztRkqslMAS2iFaU+Oa4KTSidr63OsFGlOrJoW981kIYO3CGCS3wA95P1mUi/IVSJkn0D479KTJpVpvFNuw==}
engines: {node: '>=18'}
@@ -3532,6 +3596,19 @@ packages:
indexof@0.0.1:
resolution: {integrity: sha512-i0G7hLJ1z0DE8dsqJa2rycj9dBmNKgXBvotXtZYXakU9oivfB9Uj2ZBC27qqef2U58/ZLwalxa1X/RDCdkHtVg==}
+ ink@7.0.0:
+ resolution: {integrity: sha512-fMie5/VwIYXofMyND0s+fOVhwVBBPYx+uuqJ6V6rUBGjui+2UYp+0fWtvhSeKT4z+X1uH98a4ge5Vj3aTlL6mg==}
+ engines: {node: '>=22'}
+ peerDependencies:
+ '@types/react': '>=19.2.0'
+ react: '>=19.2.0'
+ react-devtools-core: '>=6.1.2'
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ react-devtools-core:
+ optional: true
+
is-extglob@2.1.1:
resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==}
engines: {node: '>=0.10.0'}
@@ -3540,10 +3617,19 @@ packages:
resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==}
engines: {node: '>=8'}
+ is-fullwidth-code-point@5.1.0:
+ resolution: {integrity: sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==}
+ engines: {node: '>=18'}
+
is-glob@4.0.3:
resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
engines: {node: '>=0.10.0'}
+ is-in-ci@2.0.0:
+ resolution: {integrity: sha512-cFeerHriAnhrQSbpAxL37W1wcJKUUX07HyLWZCW1URJT/ra3GyUTzBgUnh24TMVfNTV2Hij2HLxkPHFZfOZy5w==}
+ engines: {node: '>=20'}
+ hasBin: true
+
is-number@7.0.0:
resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==}
engines: {node: '>=0.12.0'}
@@ -3777,6 +3863,10 @@ packages:
resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==}
engines: {node: '>= 0.6'}
+ mimic-fn@2.1.0:
+ resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==}
+ engines: {node: '>=6'}
+
minimatch@10.1.1:
resolution: {integrity: sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==}
engines: {node: 20 || >=22}
@@ -3877,6 +3967,10 @@ packages:
once@1.4.0:
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
+ onetime@5.1.2:
+ resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==}
+ engines: {node: '>=6'}
+
openapi-fetch@0.13.8:
resolution: {integrity: sha512-yJ4QKRyNxE44baQ9mY5+r/kAzZ8yXMemtNAOFwOzRXJscdjSxxzWSNlyBAr+o5JjkUw9Lc3W7OIoca0cY3PYnQ==}
@@ -3924,6 +4018,10 @@ packages:
parseuri@0.0.6:
resolution: {integrity: sha512-AUjen8sAkGgao7UyCX6Ahv0gIK2fABKmYjvP4xmy5JaKvcbTRueIqIPHLAfq30xJddqSE033IOMUSOMCcK3Sow==}
+ patch-console@2.0.0:
+ resolution: {integrity: sha512-0YNdUceMdaQwoKce1gatDScmMo5pu/tfABfnzEqeG0gtTmd7mh/WcwgUjtAeOU7N8nFFlbQBnFK2gXW5fGvmMA==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
path-exists@4.0.0:
resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==}
engines: {node: '>=8'}
@@ -4116,6 +4214,12 @@ packages:
peerDependencies:
react: ^19.2.4
+ react-reconciler@0.33.0:
+ resolution: {integrity: sha512-KetWRytFv1epdpJc3J4G75I4WrplZE5jOL7Yq0p34+OVOKF4Se7WrdIdVC45XsSSmUTlht2FM/fM1FZb1mfQeA==}
+ engines: {node: '>=0.10.0'}
+ peerDependencies:
+ react: ^19.2.0
+
react-refresh@0.17.0:
resolution: {integrity: sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==}
engines: {node: '>=0.10.0'}
@@ -4177,6 +4281,10 @@ packages:
resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
+ restore-cursor@4.0.0:
+ resolution: {integrity: sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
reusify@1.1.0:
resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==}
engines: {iojs: '>=1.0.0', node: '>=0.10.0'}
@@ -4261,10 +4369,17 @@ packages:
siginfo@2.0.0:
resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==}
+ signal-exit@3.0.7:
+ resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==}
+
signal-exit@4.1.0:
resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==}
engines: {node: '>=14'}
+ slice-ansi@9.0.0:
+ resolution: {integrity: sha512-SO/3iYL5S3W57LLEniscOGPZgOqZUPCx6d3dB+52B80yJ0XstzsC/eV8gnA4tM3MHDrKz+OCFSLNjswdSC+/bA==}
+ engines: {node: '>=22'}
+
socket.io-client@2.5.0:
resolution: {integrity: sha512-lOO9clmdgssDykiOmVQQitwBAF3I6mYcQAo7hQ7AM6Ny5X7fp8hIJ3HcQs3Rjz4SoggoxA1OgrQyY8EgTbcPYw==}
@@ -4302,6 +4417,10 @@ packages:
resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==}
engines: {node: '>= 10.x'}
+ stack-utils@2.0.6:
+ resolution: {integrity: sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==}
+ engines: {node: '>=10'}
+
stackback@0.0.2:
resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==}
@@ -4316,6 +4435,10 @@ packages:
resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==}
engines: {node: '>=12'}
+ string-width@8.2.0:
+ resolution: {integrity: sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw==}
+ engines: {node: '>=20'}
+
string_decoder@1.3.0:
resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==}
@@ -4405,6 +4528,10 @@ packages:
resolution: {integrity: sha512-MeQTA1r0litLUf0Rp/iisCaL8761lKAZHaimlbGK4j0HysC4PLfqygQj9srcs0m2RdtDYnF8UuYyKpbjHYp7Jw==}
engines: {node: ^14.18.0 || >=16.0.0}
+ tagged-tag@1.0.0:
+ resolution: {integrity: sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng==}
+ engines: {node: '>=20'}
+
tailwind-merge@3.5.0:
resolution: {integrity: sha512-I8K9wewnVDkL1NTGoqWmVEIlUcB9gFriAEkXkfCjX5ib8ezGxtR3xD7iZIxrfArjEsH7F1CHD4RFUtxefdqV/A==}
@@ -4415,6 +4542,10 @@ packages:
resolution: {integrity: sha512-1MOpMXuhGzGL5TTCZFItxCc0AARf1EZFQkGqMm7ERKj8+Hgr5oLvJOVFcC+lRmR8hCe2S3jC4T5D7Vg/d7/fhA==}
engines: {node: '>=6'}
+ terminal-size@4.0.1:
+ resolution: {integrity: sha512-avMLDQpUI9I5XFrklECw1ZEUPJhqzcwSWsyyI8blhRLT+8N1jLJWLWWYQpB2q2xthq8xDvjZPISVh53T/+CLYQ==}
+ engines: {node: '>=18'}
+
terser-webpack-plugin@5.4.0:
resolution: {integrity: sha512-Bn5vxm48flOIfkdl5CaD2+1CiUVbonWQ3KQPyP7/EuIl9Gbzq/gQFOzaMFUEgVjB1396tcK0SG8XcNJ/2kDH8g==}
engines: {node: '>= 10.13.0'}
@@ -4512,6 +4643,10 @@ packages:
resolution: {integrity: sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==}
engines: {node: '>=16'}
+ type-fest@5.5.0:
+ resolution: {integrity: sha512-PlBfpQwiUvGViBNX84Yxwjsdhd1TUlXr6zjX7eoirtCPIr08NAmxwa+fcYBTeRQxHo9YC9wwF3m9i700sHma8g==}
+ engines: {node: '>=20'}
+
typescript@5.9.3:
resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
engines: {node: '>=14.17'}
@@ -4730,10 +4865,18 @@ packages:
engines: {node: '>=8'}
hasBin: true
+ widest-line@6.0.0:
+ resolution: {integrity: sha512-U89AsyEeAsyoF0zVJBkG9zBgekjgjK7yk9sje3F4IQpXBJ10TF6ByLlIfjMhcmHMJgHZI4KHt4rdNfktzxIAMA==}
+ engines: {node: '>=20'}
+
word-wrap@1.2.5:
resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==}
engines: {node: '>=0.10.0'}
+ wrap-ansi@10.0.0:
+ resolution: {integrity: sha512-SGcvg80f0wUy2/fXES19feHMz8E0JoXv2uNgHOu4Dgi2OrCy1lqwFYEJz1BLbDI0exjPMe/ZdzZ/YpGECBG/aQ==}
+ engines: {node: '>=20'}
+
wrap-ansi@7.0.0:
resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==}
engines: {node: '>=10'}
@@ -4769,6 +4912,18 @@ packages:
utf-8-validate:
optional: true
+ ws@8.20.0:
+ resolution: {integrity: sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==}
+ engines: {node: '>=10.0.0'}
+ peerDependencies:
+ bufferutil: ^4.0.1
+ utf-8-validate: '>=5.0.2'
+ peerDependenciesMeta:
+ bufferutil:
+ optional: true
+ utf-8-validate:
+ optional: true
+
xmlhttprequest-ssl@1.6.3:
resolution: {integrity: sha512-3XfeQE/wNkvrIktn2Kf0869fC0BN6UpydVasGIeSm2B1Llihf7/0UfZM+eCkOw3P7bP4+qPgqhm7ZoxuJtFU0Q==}
engines: {node: '>=0.4.0'}
@@ -4807,6 +4962,9 @@ packages:
resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==}
engines: {node: '>=10'}
+ yoga-layout@3.2.1:
+ resolution: {integrity: sha512-0LPOt3AxKqMdFBZA3HBAt/t/8vIKq7VaQYbuA8WxCgung+p9TVyKRYdpvCb80HcdTN2NkbIKbhNwKUfm3tQywQ==}
+
zhead@2.2.4:
resolution: {integrity: sha512-8F0OI5dpWIA5IGG5NHUg9staDwz/ZPxZtvGVf01j7vHqSyZ0raHY+78atOVxRqb73AotX22uV1pXt3gYSstGag==}
@@ -4824,6 +4982,11 @@ packages:
snapshots:
+ '@alcalzone/ansi-tokenize@0.3.0':
+ dependencies:
+ ansi-styles: 6.2.3
+ is-fullwidth-code-point: 5.1.0
+
'@alloc/quick-lru@5.2.0': {}
'@aws-crypto/sha256-browser@5.2.0':
@@ -6738,14 +6901,14 @@ snapshots:
transitivePeerDependencies:
- pg-native
- '@stripe/sync-engine@file:apps/engine(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)':
+ '@stripe/sync-engine@file:apps/engine(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)':
dependencies:
'@hono/node-server': 1.19.11(hono@4.12.8)
'@scalar/hono-api-reference': 0.6.0(hono@4.12.8)
'@stripe/sync-destination-google-sheets': file:packages/destination-google-sheets
'@stripe/sync-destination-postgres': file:packages/destination-postgres(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)
'@stripe/sync-hono-zod-openapi': file:packages/hono-zod-openapi
- '@stripe/sync-integration-supabase': file:apps/supabase(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)
+ '@stripe/sync-integration-supabase': file:apps/supabase(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)
'@stripe/sync-protocol': file:packages/protocol
'@stripe/sync-source-stripe': file:packages/source-stripe
'@stripe/sync-state-postgres': file:packages/state-postgres
@@ -6755,19 +6918,23 @@ snapshots:
dotenv: 16.6.1
googleapis: 148.0.0
hono: 4.12.8
+ ink: 7.0.0(@types/react@19.2.14)(react@19.2.4)
openapi-fetch: 0.17.0
pg: 8.16.3
pino: 10.1.0
pino-pretty: 13.1.3
+ react: 19.2.4
ws: 8.18.3
zod: 4.3.6
transitivePeerDependencies:
- '@aws-sdk/client-sts'
- '@aws-sdk/rds-signer'
+ - '@types/react'
- bufferutil
- debug
- encoding
- pg-native
+ - react-devtools-core
- supports-color
- utf-8-validate
@@ -6778,10 +6945,10 @@ snapshots:
zod: 4.3.6
zod-openapi: 5.4.6(zod@4.3.6)
- '@stripe/sync-integration-supabase@file:apps/supabase(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)':
+ '@stripe/sync-integration-supabase@file:apps/supabase(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)':
dependencies:
'@stripe/sync-destination-postgres': file:packages/destination-postgres(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)
- '@stripe/sync-engine': file:apps/engine(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)
+ '@stripe/sync-engine': file:apps/engine(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)
'@stripe/sync-protocol': file:packages/protocol
'@stripe/sync-source-stripe': file:packages/source-stripe
'@stripe/sync-state-postgres': file:packages/state-postgres
@@ -6789,10 +6956,12 @@ snapshots:
transitivePeerDependencies:
- '@aws-sdk/client-sts'
- '@aws-sdk/rds-signer'
+ - '@types/react'
- bufferutil
- debug
- encoding
- pg-native
+ - react-devtools-core
- supports-color
- utf-8-validate
@@ -6805,13 +6974,13 @@ snapshots:
citty: 0.1.6
zod: 4.3.6
- '@stripe/sync-service@file:apps/service(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(tslib@2.8.1)':
+ '@stripe/sync-service@file:apps/service(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)(tslib@2.8.1)':
dependencies:
'@hono/node-server': 1.19.11(hono@4.12.8)
'@scalar/hono-api-reference': 0.6.0(hono@4.12.8)
'@stripe/sync-destination-google-sheets': file:packages/destination-google-sheets
'@stripe/sync-destination-postgres': file:packages/destination-postgres(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)
- '@stripe/sync-engine': file:apps/engine(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)
+ '@stripe/sync-engine': file:apps/engine(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)
'@stripe/sync-hono-zod-openapi': file:packages/hono-zod-openapi
'@stripe/sync-protocol': file:packages/protocol
'@stripe/sync-source-stripe': file:packages/source-stripe
@@ -6831,10 +7000,12 @@ snapshots:
- '@aws-sdk/client-sts'
- '@aws-sdk/rds-signer'
- '@swc/helpers'
+ - '@types/react'
- bufferutil
- encoding
- esbuild
- pg-native
+ - react-devtools-core
- supports-color
- tslib
- uglify-js
@@ -7479,6 +7650,10 @@ snapshots:
ansi-colors@4.1.3: {}
+ ansi-escapes@7.3.0:
+ dependencies:
+ environment: 1.1.0
+
ansi-regex@5.0.1: {}
ansi-regex@6.2.2: {}
@@ -7503,6 +7678,8 @@ snapshots:
atomic-sleep@1.0.0: {}
+ auto-bind@5.0.1: {}
+
autoprefixer@10.4.27(postcss@8.5.8):
dependencies:
browserslist: 4.28.1
@@ -7595,6 +7772,8 @@ snapshots:
ansi-styles: 4.3.0
supports-color: 7.2.0
+ chalk@5.6.2: {}
+
change-case@5.4.4: {}
check-error@2.1.1: {}
@@ -7609,6 +7788,17 @@ snapshots:
dependencies:
clsx: 2.1.1
+ cli-boxes@4.0.1: {}
+
+ cli-cursor@4.0.0:
+ dependencies:
+ restore-cursor: 4.0.0
+
+ cli-truncate@6.0.0:
+ dependencies:
+ slice-ansi: 9.0.0
+ string-width: 8.2.0
+
client-only@0.0.1: {}
cliui@8.0.1:
@@ -7619,6 +7809,10 @@ snapshots:
clsx@2.1.1: {}
+ code-excerpt@4.0.0:
+ dependencies:
+ convert-to-spaces: 2.0.1
+
codemirror@6.0.2:
dependencies:
'@codemirror/autocomplete': 6.20.1
@@ -7670,6 +7864,8 @@ snapshots:
convert-source-map@2.0.0: {}
+ convert-to-spaces@2.0.1: {}
+
crelt@1.0.6: {}
cross-spawn@7.0.6:
@@ -7763,6 +7959,8 @@ snapshots:
graceful-fs: 4.2.11
tapable: 2.3.2
+ environment@1.1.0: {}
+
es-define-property@1.0.1: {}
es-errors@1.3.0: {}
@@ -7782,6 +7980,8 @@ snapshots:
has-tostringtag: 1.0.2
hasown: 2.0.2
+ es-toolkit@1.45.1: {}
+
esbuild@0.28.0:
optionalDependencies:
'@esbuild/aix-ppc64': 0.28.0
@@ -7813,6 +8013,8 @@ snapshots:
escalade@3.2.0: {}
+ escape-string-regexp@2.0.0: {}
+
escape-string-regexp@4.0.0: {}
eslint-config-prettier@10.1.8(eslint@9.39.1(jiti@2.6.1)):
@@ -8038,6 +8240,8 @@ snapshots:
get-caller-file@2.0.5: {}
+ get-east-asian-width@1.5.0: {}
+
get-intrinsic@1.3.0:
dependencies:
call-bind-apply-helpers: 1.0.2
@@ -8199,18 +8403,60 @@ snapshots:
imurmurhash@0.1.4: {}
+ indent-string@5.0.0: {}
+
index-to-position@1.2.0: {}
indexof@0.0.1: {}
+ ink@7.0.0(@types/react@19.2.14)(react@19.2.4):
+ dependencies:
+ '@alcalzone/ansi-tokenize': 0.3.0
+ ansi-escapes: 7.3.0
+ ansi-styles: 6.2.3
+ auto-bind: 5.0.1
+ chalk: 5.6.2
+ cli-boxes: 4.0.1
+ cli-cursor: 4.0.0
+ cli-truncate: 6.0.0
+ code-excerpt: 4.0.0
+ es-toolkit: 1.45.1
+ indent-string: 5.0.0
+ is-in-ci: 2.0.0
+ patch-console: 2.0.0
+ react: 19.2.4
+ react-reconciler: 0.33.0(react@19.2.4)
+ scheduler: 0.27.0
+ signal-exit: 3.0.7
+ slice-ansi: 9.0.0
+ stack-utils: 2.0.6
+ string-width: 8.2.0
+ terminal-size: 4.0.1
+ type-fest: 5.5.0
+ widest-line: 6.0.0
+ wrap-ansi: 10.0.0
+ ws: 8.20.0
+ yoga-layout: 3.2.1
+ optionalDependencies:
+ '@types/react': 19.2.14
+ transitivePeerDependencies:
+ - bufferutil
+ - utf-8-validate
+
is-extglob@2.1.1: {}
is-fullwidth-code-point@3.0.0: {}
+ is-fullwidth-code-point@5.1.0:
+ dependencies:
+ get-east-asian-width: 1.5.0
+
is-glob@4.0.3:
dependencies:
is-extglob: 2.1.1
+ is-in-ci@2.0.0: {}
+
is-number@7.0.0: {}
is-stream@2.0.1: {}
@@ -8402,6 +8648,8 @@ snapshots:
dependencies:
mime-db: 1.52.0
+ mimic-fn@2.1.0: {}
+
minimatch@10.1.1:
dependencies:
'@isaacs/brace-expansion': 5.0.0
@@ -8482,6 +8730,10 @@ snapshots:
dependencies:
wrappy: 1.0.2
+ onetime@5.1.2:
+ dependencies:
+ mimic-fn: 2.1.0
+
openapi-fetch@0.13.8:
dependencies:
openapi-typescript-helpers: 0.0.15
@@ -8537,6 +8789,8 @@ snapshots:
parseuri@0.0.6: {}
+ patch-console@2.0.0: {}
+
path-exists@4.0.0: {}
path-expression-matcher@1.1.3: {}
@@ -8743,6 +8997,11 @@ snapshots:
react: 19.2.4
scheduler: 0.27.0
+ react-reconciler@0.33.0(react@19.2.4):
+ dependencies:
+ react: 19.2.4
+ scheduler: 0.27.0
+
react-refresh@0.17.0: {}
react-remove-scroll-bar@2.3.8(@types/react@19.2.14)(react@19.2.4):
@@ -8792,6 +9051,11 @@ snapshots:
resolve-pkg-maps@1.0.0: {}
+ restore-cursor@4.0.0:
+ dependencies:
+ onetime: 5.1.2
+ signal-exit: 3.0.7
+
reusify@1.1.0: {}
rimraf@6.1.0:
@@ -8926,8 +9190,15 @@ snapshots:
siginfo@2.0.0: {}
+ signal-exit@3.0.7: {}
+
signal-exit@4.1.0: {}
+ slice-ansi@9.0.0:
+ dependencies:
+ ansi-styles: 6.2.3
+ is-fullwidth-code-point: 5.1.0
+
socket.io-client@2.5.0:
dependencies:
backo2: 1.0.2
@@ -8981,6 +9252,10 @@ snapshots:
split2@4.2.0: {}
+ stack-utils@2.0.6:
+ dependencies:
+ escape-string-regexp: 2.0.0
+
stackback@0.0.2: {}
std-env@3.9.0: {}
@@ -8997,6 +9272,11 @@ snapshots:
emoji-regex: 9.2.2
strip-ansi: 7.1.2
+ string-width@8.2.0:
+ dependencies:
+ get-east-asian-width: 1.5.0
+ strip-ansi: 7.1.2
+
string_decoder@1.3.0:
dependencies:
safe-buffer: 5.2.1
@@ -9068,12 +9348,16 @@ snapshots:
dependencies:
'@pkgr/core': 0.2.9
+ tagged-tag@1.0.0: {}
+
tailwind-merge@3.5.0: {}
tailwindcss@4.2.2: {}
tapable@2.3.2: {}
+ terminal-size@4.0.1: {}
+
terser-webpack-plugin@5.4.0(@swc/core@1.15.21)(webpack@5.105.4(@swc/core@1.15.21)):
dependencies:
'@jridgewell/trace-mapping': 0.3.31
@@ -9151,6 +9435,10 @@ snapshots:
type-fest@4.41.0: {}
+ type-fest@5.5.0:
+ dependencies:
+ tagged-tag: 1.0.0
+
typescript@5.9.3: {}
undici-types@6.21.0: {}
@@ -9432,8 +9720,18 @@ snapshots:
siginfo: 2.0.0
stackback: 0.0.2
+ widest-line@6.0.0:
+ dependencies:
+ string-width: 8.2.0
+
word-wrap@1.2.5: {}
+ wrap-ansi@10.0.0:
+ dependencies:
+ ansi-styles: 6.2.3
+ string-width: 8.2.0
+ strip-ansi: 7.1.2
+
wrap-ansi@7.0.0:
dependencies:
ansi-styles: 4.3.0
@@ -9452,6 +9750,8 @@ snapshots:
ws@8.18.3: {}
+ ws@8.20.0: {}
+
xmlhttprequest-ssl@1.6.3: {}
xtend@4.0.2: {}
@@ -9481,6 +9781,8 @@ snapshots:
yocto-queue@0.1.0: {}
+ yoga-layout@3.2.1: {}
+
zhead@2.2.4: {}
zod-openapi@5.4.6(zod@4.3.6):