diff --git a/apps/dashboard/src/pages/PipelineDetail.tsx b/apps/dashboard/src/pages/PipelineDetail.tsx
index 1aed546a..281deaa8 100644
--- a/apps/dashboard/src/pages/PipelineDetail.tsx
+++ b/apps/dashboard/src/pages/PipelineDetail.tsx
@@ -13,15 +13,14 @@ interface StreamProgress {
status: string
cumulative_record_count: number
run_record_count: number
- records_per_second?: number
errors?: Array<{ message: string; failure_type?: string }>
}
interface GlobalProgress {
elapsed_ms: number
run_record_count: number
- rows_per_second: number
- window_rows_per_second: number
+ records_per_second: number
+ window_records_per_second: number
state_checkpoint_count: number
}
@@ -201,11 +200,11 @@ export function PipelineDetail({ id, onBack }: PipelineDetailProps) {
diff --git a/apps/engine/package.json b/apps/engine/package.json
index ee97502b..883e5574 100644
--- a/apps/engine/package.json
+++ b/apps/engine/package.json
@@ -56,10 +56,12 @@
"dotenv": "^16.4.7",
"googleapis": "^148.0.0",
"hono": "^4",
+ "ink": "^7.0.0",
"openapi-fetch": "^0.17.0",
"pg": "^8.16.3",
"pino": "^10",
"pino-pretty": "^13",
+ "react": "^19.2.4",
"ws": "^8.18.0",
"zod": "^4.3.6"
},
@@ -67,6 +69,7 @@
"@hyperjump/json-schema": "^1.17.5",
"@types/node": "^24.10.1",
"@types/pg": "^8.15.4",
+ "@types/react": "^19.2.14",
"openapi-typescript": "^7.13.0",
"vitest": "^3.2.4"
},
diff --git a/apps/engine/src/__generated__/openapi.d.ts b/apps/engine/src/__generated__/openapi.d.ts
index 33acd7db..7711ab08 100644
--- a/apps/engine/src/__generated__/openapi.d.ts
+++ b/apps/engine/src/__generated__/openapi.d.ts
@@ -467,7 +467,7 @@ export interface components {
* @enum {string}
*/
type: "trace";
- /** @description Diagnostic/status payload with subtypes for error, stream status, estimates, and progress. */
+ /** @description Diagnostic/status payload with subtypes for error, stream status, estimates, and global progress. */
trace: {
/** @constant */
trace_type: "error";
@@ -488,15 +488,15 @@ export interface components {
} | {
/** @constant */
trace_type: "stream_status";
- /** @description Per-stream status update. Sources emit the minimal form (stream + status). The engine emits enriched versions with record counts and throughput rates. */
+ /** @description Per-stream status update. Sources emit the minimal form (stream + status). The engine enriches with record counts. Only emitted on status transitions. */
stream_status: {
/** @description Stream being reported on. */
stream: string;
/**
- * @description Current phase of the stream within this sync run.
+ * @description Lifecycle status. Errors are orthogonal — a stream can be complete with errors. Sources may store richer error statuses internally for retry logic.
* @enum {string}
*/
- status: "start" | "running" | "complete" | "range_complete";
+ status: "started" | "complete";
/** @description Present when status is range_complete. The sub-range that finished. */
range_complete?: {
/** @description Inclusive lower bound (ISO 8601). */
@@ -508,12 +508,6 @@ export interface components {
cumulative_record_count?: number;
/** @description Records synced for this stream in the current sync run. Set by the engine. */
run_record_count?: number;
- /** @description Records synced since the last stream_status emission for this stream. Set by the engine. Used for instantaneous per-stream throughput. */
- window_record_count?: number;
- /** @description Average records per second for this stream over the entire run: run_record_count / elapsed seconds. Set by the engine. */
- records_per_second?: number;
- /** @description Average API requests per second for this stream over the entire run. Set by the engine from source-reported request counts. */
- requests_per_second?: number;
};
} | {
/** @constant */
@@ -529,19 +523,27 @@ export interface components {
};
} | {
/** @constant */
- trace_type: "progress";
- /** @description Periodic global sync progress emitted by the engine. Aggregate stats only — per-stream detail is in stream_status messages. Each emission is a full replacement. */
- progress: {
+ trace_type: "global_progress";
+ /** @description Global sync progress emitted by the engine, co-emitted with every stream_status trace. Aggregate stats only — per-stream detail is in stream_status messages. Each emission is a full replacement. */
+ global_progress: {
/** @description Wall-clock milliseconds since the sync run started. */
elapsed_ms: number;
/** @description Total records synced across all streams in this run. */
run_record_count: number;
+ /** @description Total records synced across all streams across all runs. */
+ cumulative_record_count?: number;
/** @description Overall throughput for the entire run: run_record_count / elapsed seconds. */
- rows_per_second: number;
+ records_per_second: number;
/** @description Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval. */
- window_rows_per_second: number;
+ window_records_per_second: number;
/** @description Total source_state messages observed so far in this sync run. */
state_checkpoint_count: number;
+ /** @description Total API requests made by the source in this run. */
+ request_count?: number;
+ /** @description Total API requests across all runs. */
+ cumulative_request_count?: number;
+ /** @description Total wall-clock time across all runs. */
+ cumulative_elapsed_ms?: number;
};
};
};
@@ -651,35 +653,39 @@ export interface components {
elapsed_ms?: number;
/** @description Full sync state at the end of the run. source: accumulated from source_state messages; engine: updated cumulative record counts; destination: reserved. Consumers can persist this directly and pass it back on resume. */
state?: components["schemas"]["SyncState"];
- /** @description Final global aggregates. Same shape as trace/progress. */
+ /** @description Final global aggregates. Same shape as trace/global_progress. */
global_progress?: {
/** @description Wall-clock milliseconds since the sync run started. */
elapsed_ms: number;
/** @description Total records synced across all streams in this run. */
run_record_count: number;
+ /** @description Total records synced across all streams across all runs. */
+ cumulative_record_count?: number;
/** @description Overall throughput for the entire run: run_record_count / elapsed seconds. */
- rows_per_second: number;
+ records_per_second: number;
/** @description Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval. */
- window_rows_per_second: number;
+ window_records_per_second: number;
/** @description Total source_state messages observed so far in this sync run. */
state_checkpoint_count: number;
+ /** @description Total API requests made by the source in this run. */
+ request_count?: number;
+ /** @description Total API requests across all runs. */
+ cumulative_request_count?: number;
+ /** @description Total wall-clock time across all runs. */
+ cumulative_elapsed_ms?: number;
};
/** @description Per-stream end-of-sync summary. Errors only appear here, not in stream_status messages. */
stream_progress?: {
[key: string]: {
/**
- * @description Final stream status.
+ * @description Lifecycle status. Errors are orthogonal — a stream can be complete with errors.
* @enum {string}
*/
- status: "start" | "running" | "complete" | "range_complete";
+ status: "started" | "complete";
/** @description Cumulative records synced for this stream across all runs. */
cumulative_record_count: number;
/** @description Records synced in this run. */
run_record_count: number;
- /** @description Average records/sec for this stream over the run. */
- records_per_second?: number;
- /** @description Average requests/sec for this stream over the run. */
- requests_per_second?: number;
/** @description All accumulated errors for this stream during this run. */
errors?: {
/** @description Human-readable error description. */
@@ -779,7 +785,7 @@ export interface components {
Message: components["schemas"]["RecordMessage"] | components["schemas"]["SourceStateMessage"] | components["schemas"]["CatalogMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["TraceMessage"] | components["schemas"]["SpecMessage"] | components["schemas"]["ConnectionStatusMessage"] | components["schemas"]["ControlMessage"] | components["schemas"]["EofMessage"];
DiscoverOutput: components["schemas"]["CatalogMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["TraceMessage"];
DestinationOutput: components["schemas"]["SourceStateMessage"] | components["schemas"]["TraceMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["EofMessage"];
- SyncOutput: components["schemas"]["SourceStateMessage"] | components["schemas"]["TraceMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["EofMessage"] | components["schemas"]["ControlMessage"];
+ SyncOutput: components["schemas"]["SourceStateMessage"] | components["schemas"]["CatalogMessage"] | components["schemas"]["TraceMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["EofMessage"] | components["schemas"]["ControlMessage"];
CheckOutput: components["schemas"]["ConnectionStatusMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["TraceMessage"];
SetupOutput: components["schemas"]["ControlMessage"] | components["schemas"]["LogMessage"] | components["schemas"]["TraceMessage"];
TeardownOutput: components["schemas"]["LogMessage"] | components["schemas"]["TraceMessage"];
diff --git a/apps/engine/src/__generated__/openapi.json b/apps/engine/src/__generated__/openapi.json
index 84e5b14b..05b07b36 100644
--- a/apps/engine/src/__generated__/openapi.json
+++ b/apps/engine/src/__generated__/openapi.json
@@ -1528,12 +1528,10 @@
"status": {
"type": "string",
"enum": [
- "start",
- "running",
- "complete",
- "range_complete"
+ "started",
+ "complete"
],
- "description": "Current phase of the stream within this sync run."
+ "description": "Lifecycle status. Errors are orthogonal — a stream can be complete with errors. Sources may store richer error statuses internally for retry logic."
},
"range_complete": {
"description": "Present when status is range_complete. The sub-range that finished.",
@@ -1564,27 +1562,13 @@
"type": "integer",
"minimum": -9007199254740991,
"maximum": 9007199254740991
- },
- "window_record_count": {
- "description": "Records synced since the last stream_status emission for this stream. Set by the engine. Used for instantaneous per-stream throughput.",
- "type": "integer",
- "minimum": -9007199254740991,
- "maximum": 9007199254740991
- },
- "records_per_second": {
- "description": "Average records per second for this stream over the entire run: run_record_count / elapsed seconds. Set by the engine.",
- "type": "number"
- },
- "requests_per_second": {
- "description": "Average API requests per second for this stream over the entire run. Set by the engine from source-reported request counts.",
- "type": "number"
}
},
"required": [
"stream",
"status"
],
- "description": "Per-stream status update. Sources emit the minimal form (stream + status). The engine emits enriched versions with record counts and throughput rates."
+ "description": "Per-stream status update. Sources emit the minimal form (stream + status). The engine enriches with record counts. Only emitted on status transitions."
}
},
"required": [
@@ -1635,9 +1619,9 @@
"properties": {
"trace_type": {
"type": "string",
- "const": "progress"
+ "const": "global_progress"
},
- "progress": {
+ "global_progress": {
"type": "object",
"properties": {
"elapsed_ms": {
@@ -1652,11 +1636,17 @@
"maximum": 9007199254740991,
"description": "Total records synced across all streams in this run."
},
- "rows_per_second": {
+ "cumulative_record_count": {
+ "description": "Total records synced across all streams across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "records_per_second": {
"type": "number",
"description": "Overall throughput for the entire run: run_record_count / elapsed seconds."
},
- "window_rows_per_second": {
+ "window_records_per_second": {
"type": "number",
"description": "Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval."
},
@@ -1665,25 +1655,43 @@
"minimum": -9007199254740991,
"maximum": 9007199254740991,
"description": "Total source_state messages observed so far in this sync run."
+ },
+ "request_count": {
+ "description": "Total API requests made by the source in this run.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_request_count": {
+ "description": "Total API requests across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_elapsed_ms": {
+ "description": "Total wall-clock time across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
}
},
"required": [
"elapsed_ms",
"run_record_count",
- "rows_per_second",
- "window_rows_per_second",
+ "records_per_second",
+ "window_records_per_second",
"state_checkpoint_count"
],
- "description": "Periodic global sync progress emitted by the engine. Aggregate stats only — per-stream detail is in stream_status messages. Each emission is a full replacement."
+ "description": "Global sync progress emitted by the engine, co-emitted with every stream_status trace. Aggregate stats only — per-stream detail is in stream_status messages. Each emission is a full replacement."
}
},
"required": [
"trace_type",
- "progress"
+ "global_progress"
]
}
],
- "description": "Diagnostic/status payload with subtypes for error, stream status, estimates, and progress.",
+ "description": "Diagnostic/status payload with subtypes for error, stream status, estimates, and global progress.",
"type": "object",
"discriminator": {
"propertyName": "trace_type"
@@ -1914,7 +1922,7 @@
"$ref": "#/components/schemas/SyncState"
},
"global_progress": {
- "description": "Final global aggregates. Same shape as trace/progress.",
+ "description": "Final global aggregates. Same shape as trace/global_progress.",
"type": "object",
"properties": {
"elapsed_ms": {
@@ -1929,11 +1937,17 @@
"maximum": 9007199254740991,
"description": "Total records synced across all streams in this run."
},
- "rows_per_second": {
+ "cumulative_record_count": {
+ "description": "Total records synced across all streams across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "records_per_second": {
"type": "number",
"description": "Overall throughput for the entire run: run_record_count / elapsed seconds."
},
- "window_rows_per_second": {
+ "window_records_per_second": {
"type": "number",
"description": "Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval."
},
@@ -1942,13 +1956,31 @@
"minimum": -9007199254740991,
"maximum": 9007199254740991,
"description": "Total source_state messages observed so far in this sync run."
+ },
+ "request_count": {
+ "description": "Total API requests made by the source in this run.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_request_count": {
+ "description": "Total API requests across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_elapsed_ms": {
+ "description": "Total wall-clock time across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
}
},
"required": [
"elapsed_ms",
"run_record_count",
- "rows_per_second",
- "window_rows_per_second",
+ "records_per_second",
+ "window_records_per_second",
"state_checkpoint_count"
]
},
@@ -1964,12 +1996,10 @@
"status": {
"type": "string",
"enum": [
- "start",
- "running",
- "complete",
- "range_complete"
+ "started",
+ "complete"
],
- "description": "Final stream status."
+ "description": "Lifecycle status. Errors are orthogonal — a stream can be complete with errors."
},
"cumulative_record_count": {
"type": "integer",
@@ -1983,14 +2013,6 @@
"maximum": 9007199254740991,
"description": "Records synced in this run."
},
- "records_per_second": {
- "description": "Average records/sec for this stream over the run.",
- "type": "number"
- },
- "requests_per_second": {
- "description": "Average requests/sec for this stream over the run.",
- "type": "number"
- },
"errors": {
"description": "All accumulated errors for this stream during this run.",
"type": "array",
@@ -2392,6 +2414,9 @@
{
"$ref": "#/components/schemas/SourceStateMessage"
},
+ {
+ "$ref": "#/components/schemas/CatalogMessage"
+ },
{
"$ref": "#/components/schemas/TraceMessage"
},
@@ -2410,6 +2435,7 @@
"propertyName": "type",
"mapping": {
"source_state": "#/components/schemas/SourceStateMessage",
+ "catalog": "#/components/schemas/CatalogMessage",
"trace": "#/components/schemas/TraceMessage",
"log": "#/components/schemas/LogMessage",
"eof": "#/components/schemas/EofMessage",
diff --git a/apps/engine/src/api/app.ts b/apps/engine/src/api/app.ts
index f817284e..cb58a991 100644
--- a/apps/engine/src/api/app.ts
+++ b/apps/engine/src/api/app.ts
@@ -10,6 +10,7 @@ import { HTTPException } from 'hono/http-exception'
import pg from 'pg'
import type { Message, ConnectorResolver, TraceMessage } from '../lib/index.js'
import type { EofPayload } from '@stripe/sync-protocol'
+import { renderSyncProgress } from '../lib/sync-progress-state.js'
import {
createEngine,
createConnectorSchemas,
@@ -123,94 +124,8 @@ async function* logApiStream(
const dangerouslyVerbose = process.env.DANGEROUSLY_VERBOSE_LOGGING === 'true'
-const REASON_EMOJI: Record<string, string> = {
- complete: '✅',
- time_limit: '⏱️',
- state_limit: '📦',
- error: '❌',
- aborted: '🛑',
-}
-
-const STATUS_EMOJI: Record<string, string> = {
- complete: '✅',
- start: '🔄',
- running: '🔄',
- range_complete: '🔄',
-}
-
function formatEof(eof: EofPayload): string {
- const emoji = REASON_EMOJI[eof.reason] ?? '❓'
- const elapsed = eof.global_progress?.elapsed_ms
- ? `${(eof.global_progress.elapsed_ms / 1000).toFixed(1)}s`
- : ''
- const totalRows = eof.global_progress?.run_record_count ?? 0
- const rps = eof.global_progress?.rows_per_second?.toFixed(1) ?? '0'
- const checkpoints = eof.global_progress?.state_checkpoint_count ?? 0
-
- const lines: string[] = []
- lines.push(
- `${emoji} Sync ${eof.reason}${elapsed ? ` (${elapsed}` : ''}${totalRows ? ` | ${totalRows} rows, ${rps} rows/s` : ''}${checkpoints ? `, ${checkpoints} checkpoints` : ''}${elapsed ? ')' : ''}`
- )
-
- const sp = eof.stream_progress
- if (sp) {
- let complete = 0
- let inProgress = 0
- let errored = 0
- let pending = 0
- const errorStreams: string[] = []
- const activeStreams: { name: string; rows: number; rps: string }[] = []
-
- for (const [name, s] of Object.entries(sp)) {
- if (s.errors?.length) {
- errored++
- const errMsg = s.errors[0]?.message ?? 'unknown error'
- errorStreams.push(`❌ ${name}: ${errMsg}`)
- }
- if (s.status === 'complete') {
- complete++
- if (s.run_record_count > 0) {
- activeStreams.push({
- name,
- rows: s.run_record_count,
- rps: s.records_per_second?.toFixed(1) ?? '0',
- })
- }
- } else if (s.status === 'start' || s.status === 'running') {
- inProgress++
- if (s.run_record_count > 0) {
- activeStreams.push({
- name,
- rows: s.run_record_count,
- rps: s.records_per_second?.toFixed(1) ?? '0',
- })
- }
- } else {
- pending++
- }
- }
-
- // Show streams that synced rows this run
- for (const s of activeStreams.sort((a, b) => b.rows - a.rows)) {
- lines.push(` ✅ ${s.name}: ${s.rows} rows @ ${s.rps} rows/s`)
- }
-
- // Show errored streams
- for (const e of errorStreams) {
- lines.push(` ${e}`)
- }
-
- // Summary line
- const parts: string[] = []
- if (complete) parts.push(`${complete} complete`)
- if (inProgress) parts.push(`${inProgress} in progress`)
- if (errored) parts.push(`${errored} errored`)
- if (pending) parts.push(`${pending} pending`)
- parts.push(`${totalRows} total rows this run`)
- lines.push(` 📊 ${parts.join(', ')}`)
- }
-
- return lines.join('\n')
+ return renderSyncProgress(eof, [], true).join('\n')
}
/**
diff --git a/apps/engine/src/cli/sync-ui.tsx b/apps/engine/src/cli/sync-ui.tsx
new file mode 100644
index 00000000..d10072b9
--- /dev/null
+++ b/apps/engine/src/cli/sync-ui.tsx
@@ -0,0 +1,306 @@
+import React from 'react'
+import { Box, Text } from 'ink'
+import type { EofPayload, EofStreamProgress } from '@stripe/sync-protocol'
+
+// ── Formatting helpers ────────────────────────────────────────────
+
+function fmt(n: number): string {
+ return n.toLocaleString('en-US')
+}
+
+function fmtDuration(ms: number): string {
+ if (ms < 1000) return `${ms}ms`
+ if (ms < 60_000) return `${(ms / 1000).toFixed(1)}s`
+ const mins = Math.floor(ms / 60_000)
+ const secs = Math.round((ms % 60_000) / 1000)
+ if (mins < 60) return secs > 0 ? `${mins}m ${secs}s` : `${mins}m`
+ const hrs = Math.floor(mins / 60)
+ const rm = mins % 60
+ return rm > 0 ? `${hrs}h ${rm}m` : `${hrs}h`
+}
+
+function fmtRate(rps: number): string {
+ return rps >= 1000 ? `${(rps / 1000).toFixed(1)}k/s` : `${rps.toFixed(1)}/s`
+}
+
+// ── Constants ─────────────────────────────────────────────────────
+
+const REASON_COLOR: Record<string, string> = {
+ complete: 'green',
+ time_limit: 'yellow',
+ state_limit: 'blue',
+ error: 'red',
+ aborted: 'red',
+}
+
+const REASON_LABEL: Record<string, string> = {
+ complete: 'Sync complete',
+ time_limit: 'Time limit reached',
+ state_limit: 'State limit reached',
+ error: 'Sync failed',
+ aborted: 'Sync aborted',
+}
+
+const ERROR_COLOR: Record<string, string> = {
+ transient_error: 'yellow',
+ system_error: 'red',
+ config_error: 'magenta',
+ auth_error: 'red',
+}
+
+const ERROR_LABEL: Record<string, string> = {
+ transient_error: 'transient',
+ system_error: 'system',
+ config_error: 'config',
+ auth_error: 'auth',
+}
+
+// ── Sub-components ────────────────────────────────────────────────
+
+function Divider({ width = 60 }: { width?: number }) {
+ return {'─'.repeat(width)}
+}
+
+function StatRow({
+ label,
+ value,
+ dimLabel = false,
+}: {
+ label: string
+ value: string
+ dimLabel?: boolean
+}) {
+ return (
+
+ {label}
+ {value}
+
+ )
+}
+
+function StreamRow({
+ name,
+ info,
+ nameWidth,
+ running,
+}: {
+ name: string
+ info: EofStreamProgress
+ nameWidth: number
+ running: boolean
+}) {
+ const cum = info.cumulative_record_count
+ const run = info.run_record_count
+ const isComplete = info.status === 'complete'
+ const hasErrors = (info.errors?.length ?? 0) > 0
+
+ return (
+
+
+ {/* Status dot */}
+
+ {isComplete ? (
+ {'✓ '}
+ ) : running ? (
+ {'▶ '}
+ ) : (
+ {'· '}
+ )}
+
+ {/* Stream name */}
+
+
+ {name.padEnd(nameWidth)}
+
+
+ {/* Cumulative count */}
+
+ {cum > 0 ? fmt(cum) : '—'}
+
+ {/* Run delta */}
+
+ {run > 0 ? +{fmt(run)} : null}
+
+
+
+ {/* Per-stream errors */}
+ {(info.errors ?? []).map((err, i) => (
+
+
+ [{ERROR_LABEL[err.failure_type ?? 'system_error'] ?? 'error'}] {err.message}
+
+
+ ))}
+
+ )
+}
+
+// ── Main component ────────────────────────────────────────────────
+
+export interface SyncProgressProps {
+ eof: EofPayload
+ catalog: string[]
+ final: boolean
+ /** Number of pipeline_sync calls made so far (backfill mode) */
+ attempt?: number
+}
+
+export function SyncProgressUI({ eof, catalog, final, attempt }: SyncProgressProps) {
+ const gp = eof.global_progress
+ const sp = eof.stream_progress ?? {}
+
+ // Partition streams
+ const complete: [string, EofStreamProgress][] = []
+ const started: [string, EofStreamProgress][] = []
+ const pending: string[] = []
+
+ const known = new Set(Object.keys(sp))
+ for (const [name, info] of Object.entries(sp)) {
+ if (info.status === 'complete') complete.push([name, info])
+ else started.push([name, info])
+ }
+ for (const name of catalog) {
+ if (!known.has(name)) pending.push(name)
+ }
+
+ complete.sort((a, b) => b[1].cumulative_record_count - a[1].cumulative_record_count)
+ started.sort((a, b) => b[1].cumulative_record_count - a[1].cumulative_record_count)
+
+ const allStreamNames = [...complete.map((c) => c[0]), ...started.map((s) => s[0]), ...pending]
+ const nameWidth = Math.max(...allStreamNames.map((n) => n.length), 12)
+
+ const errCount = Object.values(sp).filter((i) => (i.errors?.length ?? 0) > 0).length
+ const reasonColor = REASON_COLOR[eof.reason] ?? 'white'
+ const reasonLabel = REASON_LABEL[eof.reason] ?? eof.reason
+
+ return (
+
+ {/* ── Header ── */}
+
+ {final ? (
+
+
+ {reasonLabel}
+
+ {attempt != null && attempt > 1 && ({attempt} attempts)}
+
+ ) : (
+
+
+ Syncing
+
+ {attempt != null && attempt > 1 && · attempt {attempt}}
+ {gp && (
+
+ {' '}
+ · {fmtDuration(gp.elapsed_ms)}
+ {gp.window_records_per_second != null && gp.window_records_per_second > 0
+ ? ` · ${fmtRate(gp.window_records_per_second)}`
+ : ''}
+
+ )}
+
+ )}
+
+
+ {/* ── Global stats ── */}
+ {gp && (
+
+
+
+ 0 && (gp.cumulative_record_count ?? 0) > gp.run_record_count
+ ? ` (+${fmt(gp.run_record_count)} this run)`
+ : ''
+ }`}
+ />
+
+
+ {final && gp.records_per_second > 0 && (
+
+ )}
+
+
+
+ )}
+
+ {/* ── Stream table header ── */}
+ {allStreamNames.length > 0 && (
+
+
+
+
+ {'stream'.padEnd(nameWidth)}
+
+
+
+
+ total
+
+
+
+
+ this run
+
+
+
+ )}
+
+ {/* ── Complete streams ── */}
+ {complete.length > 0 && (
+ 0 || pending.length > 0 ? 1 : 0}>
+ {complete.map(([name, info]) => (
+
+ ))}
+
+ )}
+
+ {/* ── In-progress streams ── */}
+ {started.length > 0 && (
+ 0 ? 1 : 0}>
+ {started.map(([name, info]) => (
+
+ ))}
+
+ )}
+
+ {/* ── Pending streams (collapsed) ── */}
+ {pending.length > 0 && (
+
+
+ {'· '}
+
+
+ {pending.length} pending: {pending.slice(0, 5).join(', ')}
+ {pending.length > 5 ? ` +${pending.length - 5} more` : ''}
+
+
+ )}
+
+ {/* ── Footer summary ── */}
+ {complete.length + started.length + pending.length > 0 && (
+
+
+
+ )}
+
+ {complete.length > 0 && {complete.length} complete}
+ {started.length > 0 && {started.length} in progress}
+ {pending.length > 0 && {pending.length} pending}
+ {errCount > 0 && {errCount} with errors}
+ {gp && gp.run_record_count > 0 && (
+ +{fmt(gp.run_record_count)} records this run
+ )}
+ {eof.cutoff && cutoff: {eof.cutoff}}
+
+
+ )
+}
diff --git a/apps/engine/src/cli/sync.ts b/apps/engine/src/cli/sync.ts
index 5dbd9674..f31256c9 100644
--- a/apps/engine/src/cli/sync.ts
+++ b/apps/engine/src/cli/sync.ts
@@ -1,8 +1,12 @@
+import React from 'react'
+import { render } from 'ink'
import { defineCommand } from 'citty'
import type { Engine } from '../lib/engine.js'
import type { ConnectorResolver } from '../lib/index.js'
import { readonlyStateStore, type StateStore } from '../lib/state-store.js'
import { type PipelineConfig, type SyncState, emptySyncState } from '@stripe/sync-protocol'
+import { createSyncDisplayState } from '../lib/sync-progress-state.js'
+import { SyncProgressUI } from './sync-ui.js'
export function createSyncCmd(engine: Engine, _resolver: ConnectorResolver) {
return defineCommand({
@@ -46,6 +50,16 @@ export function createSyncCmd(engine: Engine, _resolver: ConnectorResolver) {
type: 'string',
description: 'Stop after N seconds',
},
+ baseUrl: {
+ type: 'string',
+ description:
+ 'Stripe API base URL (or STRIPE_API_BASE env, default: https://api.stripe.com)',
+ },
+ progress: {
+ type: 'boolean',
+ default: false,
+ description: 'Force progress display (auto-enabled when stderr is a TTY)',
+ },
live: {
type: 'boolean',
default: false,
@@ -84,6 +98,10 @@ export function createSyncCmd(engine: Engine, _resolver: ConnectorResolver) {
// Inject optional source config overrides
const stripeConfig = pipeline.source.stripe as Record<string, unknown>
+ const baseUrl = args.baseUrl || process.env.STRIPE_API_BASE
+ if (baseUrl) {
+ stripeConfig.base_url = baseUrl
+ }
if (backfillLimit) {
stripeConfig.backfill_limit = backfillLimit
}
@@ -101,7 +119,21 @@ export function createSyncCmd(engine: Engine, _resolver: ConnectorResolver) {
: undefined
const output = engine.pipeline_sync(pipeline, { state: syncState, time_limit: timeLimit })
- // Persist state checkpoints and stream NDJSON to stdout
+ const showProgress = args.progress || process.stderr.isTTY
+ const display = showProgress ? createSyncDisplayState() : null
+
+ // Mount Ink UI on stderr if showing progress
+ const inkInstance = display
+ ? render(
+ React.createElement(SyncProgressUI, {
+ eof: display.state.eof,
+ catalog: display.state.catalog,
+ final: false,
+ }),
+ { stdout: process.stderr }
+ )
+ : null
+
for await (const msg of output) {
if (msg.type === 'source_state') {
if (msg.source_state.state_type === 'global') {
@@ -110,9 +142,26 @@ export function createSyncCmd(engine: Engine, _resolver: ConnectorResolver) {
await store.set(msg.source_state.stream, msg.source_state.data)
}
}
- process.stdout.write(JSON.stringify(msg) + '\n')
+
+ if (display && inkInstance) {
+ const changed = display.update(msg)
+ if (changed) {
+ const final = msg.type === 'eof'
+ inkInstance.rerender(
+ React.createElement(SyncProgressUI, {
+ eof: display.state.eof,
+ catalog: display.state.catalog,
+ final,
+ })
+ )
+ }
+ } else if (!display) {
+ process.stdout.write(JSON.stringify(msg) + '\n')
+ }
}
+ inkInstance?.unmount()
+
if ('close' in store && typeof store.close === 'function') {
await store.close()
}
diff --git a/apps/engine/src/lib/engine.test.ts b/apps/engine/src/lib/engine.test.ts
index 6773ca77..48f90ea1 100644
--- a/apps/engine/src/lib/engine.test.ts
+++ b/apps/engine/src/lib/engine.test.ts
@@ -252,7 +252,7 @@ describe('protocol schemas', () => {
trace_type: 'stream_status',
stream_status: {
stream: 'customers',
- status: 'running',
+ status: 'started',
},
},
})
@@ -1072,7 +1072,7 @@ describe('engine.pipeline_sync() pipeline', () => {
trace_type: 'stream_status' as const,
stream_status: {
stream: 'customers',
- status: 'running' as const,
+ status: 'started' as const,
},
},
}
@@ -1296,13 +1296,24 @@ describe('engine cancellation integration', () => {
const engine = await createEngine(makeResolver(source, destination))
const iter = engine.pipeline_sync(defaultPipeline)[Symbol.asyncIterator]()
- expect(await iter.next()).toMatchObject({
- value: {
- type: 'source_state',
- source_state: { stream: 'customers', data: { cursor: 'cus_1' } },
- },
- done: false,
- })
+ // Consume messages until we find a source_state (engine now emits catalog + trace messages first)
+ let found = false
+ for (let i = 0; i < 20; i++) {
+ const result = await iter.next()
+ if (result.done) break
+ if (result.value?.type === 'source_state') {
+ expect(result).toMatchObject({
+ value: {
+ type: 'source_state',
+ source_state: { stream: 'customers', data: { cursor: 'cus_1' } },
+ },
+ done: false,
+ })
+ found = true
+ break
+ }
+ }
+ expect(found).toBe(true)
const blockedNext = iter.next()
void blockedNext.catch(() => undefined)
diff --git a/apps/engine/src/lib/engine.ts b/apps/engine/src/lib/engine.ts
index 4089684c..c5765e6a 100644
--- a/apps/engine/src/lib/engine.ts
+++ b/apps/engine/src/lib/engine.ts
@@ -15,6 +15,7 @@ import {
SectionState,
RecordMessage,
SourceStateMessage,
+ EofMessage,
coerceSyncState,
collectFirst,
split,
@@ -605,6 +606,7 @@ export async function createEngine(resolver: ConnectorResolver): Promise
yield* trackProgress({
initial_state: normalizedState,
+ catalog: filteredCatalog,
recordCounter,
})(limited)
})()
diff --git a/apps/engine/src/lib/pipeline.test.ts b/apps/engine/src/lib/pipeline.test.ts
index 752827e1..d5a43887 100644
--- a/apps/engine/src/lib/pipeline.test.ts
+++ b/apps/engine/src/lib/pipeline.test.ts
@@ -304,13 +304,13 @@ describe('log()', () => {
type: 'trace',
trace: {
trace_type: 'stream_status',
- stream_status: { stream: 'orders', status: 'running' },
+ stream_status: { stream: 'orders', status: 'started' },
},
},
]
await drain(log(toAsync(msgs)))
expect(logger.info).toHaveBeenCalledWith(
- { stream: 'orders', status: 'running' },
+ { stream: 'orders', status: 'started' },
'stream_status'
)
})
diff --git a/apps/engine/src/lib/progress.test.ts b/apps/engine/src/lib/progress.test.ts
index 9e9add2d..f4e12078 100644
--- a/apps/engine/src/lib/progress.test.ts
+++ b/apps/engine/src/lib/progress.test.ts
@@ -72,8 +72,14 @@ describe('trackProgress', () => {
const outputs = await collect(
trackProgress({
- interval_ms: 0,
- initial_cumulative_counts: { customers: 5 },
+ initial_state: {
+ source: { streams: {}, global: {} },
+ destination: { streams: {}, global: {} },
+ engine: {
+ streams: { customers: { cumulative_record_count: 5 } },
+ global: {},
+ },
+ },
recordCounter: counter,
})(
toAsync([
@@ -100,10 +106,10 @@ describe('trackProgress', () => {
)
)
- const progressTraces = outputs.filter(
- (m) => m.type === 'trace' && m.trace.trace_type === 'progress'
+ const globalProgressTraces = outputs.filter(
+ (m) => m.type === 'trace' && m.trace.trace_type === 'global_progress'
)
- expect(progressTraces.length).toBeGreaterThan(0)
+ expect(globalProgressTraces.length).toBeGreaterThan(0)
const eof = outputs.find((m) => m.type === 'eof')
expect(eof).toBeDefined()
@@ -118,7 +124,7 @@ describe('trackProgress', () => {
},
destination: { streams: {}, global: {} },
engine: {
- streams: { customers: { cumulative_record_count: 7 } },
+ streams: { customers: { cumulative_record_count: 7, status: 'complete' } },
global: {},
},
},
@@ -138,6 +144,137 @@ describe('trackProgress', () => {
})
})
+ it('emits stream_status only on transitions, not periodically', async () => {
+ const counter = createRecordCounter()
+ const outputs = await collect(
+ trackProgress({
+ recordCounter: counter,
+ })(
+ toAsync([
+ {
+ type: 'source_state',
+ source_state: { state_type: 'stream', stream: 'customers', data: { cursor: '1' } },
+ },
+ // Second source_state for same stream should NOT emit another stream_status
+ {
+ type: 'source_state',
+ source_state: { state_type: 'stream', stream: 'customers', data: { cursor: '2' } },
+ },
+ {
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'customers', status: 'complete' },
+ },
+ },
+ { type: 'eof', eof: { reason: 'complete' } },
+ ])
+ )
+ )
+
+ const streamStatusTraces = outputs.filter(
+ (m) =>
+ m.type === 'trace' && m.trace.trace_type === 'stream_status' && m._emitted_by === 'engine'
+ )
+ // First source_state → started transition + complete transition + final on EOF = 3
+ // The second source_state should NOT trigger another (already started)
+ const statusValues = streamStatusTraces.map((m) => (m as any).trace.stream_status.status)
+ // started (from first source_state), complete (from stream_status trace), complete (final on EOF)
+ expect(statusValues).toEqual(['started', 'complete', 'complete'])
+ })
+
+ it('co-emits global_progress with every stream_status', async () => {
+ const counter = createRecordCounter()
+ const outputs = await collect(
+ trackProgress({
+ recordCounter: counter,
+ })(
+ toAsync([
+ {
+ type: 'source_state',
+ source_state: { state_type: 'stream', stream: 'customers', data: { cursor: '1' } },
+ },
+ { type: 'eof', eof: { reason: 'complete' } },
+ ])
+ )
+ )
+
+ // Every stream_status from engine should be followed by a global_progress
+ const engineTraces = outputs.filter((m) => m.type === 'trace' && m._emitted_by === 'engine')
+ for (let i = 0; i < engineTraces.length - 1; i++) {
+ const current = engineTraces[i] as any
+ const next = engineTraces[i + 1] as any
+ if (current.trace.trace_type === 'stream_status') {
+ expect(next.trace.trace_type).toBe('global_progress')
+ }
+ }
+ })
+
+ it('emits catalog as first message when provided', async () => {
+ const counter = createRecordCounter()
+ const outputs = await collect(
+ trackProgress({
+ recordCounter: counter,
+ catalog: {
+ streams: [
+ {
+ stream: { name: 'customers', primary_key: [['id']] },
+ sync_mode: 'incremental',
+ destination_sync_mode: 'append',
+ },
+ ],
+ },
+ })(toAsync([{ type: 'eof', eof: { reason: 'complete' } }]))
+ )
+
+ expect(outputs[0]).toMatchObject({
+ type: 'catalog',
+ catalog: { streams: [{ name: 'customers', primary_key: [['id']] }] },
+ })
+ })
+
+ it('errors are orthogonal to lifecycle status', async () => {
+ const counter = createRecordCounter()
+ const outputs = await collect(
+ trackProgress({
+ recordCounter: counter,
+ })(
+ toAsync([
+ {
+ type: 'source_state',
+ source_state: { state_type: 'stream', stream: 'customers', data: { cursor: '1' } },
+ },
+ {
+ type: 'trace',
+ trace: {
+ trace_type: 'error',
+ error: {
+ message: 'rate limited',
+ failure_type: 'transient_error',
+ stream: 'customers',
+ },
+ },
+ },
+ {
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'customers', status: 'complete' },
+ },
+ },
+ { type: 'eof', eof: { reason: 'complete' } },
+ ])
+ )
+ )
+
+ const eof = outputs.find((m) => m.type === 'eof') as any
+ // Stream is complete AND has errors — they're orthogonal
+ expect(eof.eof.stream_progress.customers.status).toBe('complete')
+ expect(eof.eof.stream_progress.customers.errors).toEqual([
+ { message: 'rate limited', failure_type: 'transient_error' },
+ ])
+ })
+
it('aggregates multiple stream states and global state into EOF', async () => {
const counter = createRecordCounter()
await collect(
@@ -165,7 +302,6 @@ describe('trackProgress', () => {
const outputs = await collect(
trackProgress({
- interval_ms: 0,
recordCounter: counter,
})(
toAsync([
@@ -210,8 +346,8 @@ describe('trackProgress', () => {
destination: { streams: {}, global: {} },
engine: {
streams: {
- customers: { cumulative_record_count: 1 },
- invoices: { cumulative_record_count: 1 },
+ customers: { cumulative_record_count: 1, status: 'started' },
+ invoices: { cumulative_record_count: 1, status: 'started' },
},
global: {},
},
@@ -239,7 +375,6 @@ describe('trackProgress', () => {
const outputs = await collect(
trackProgress({
- interval_ms: 0,
initial_state: {
source: {
streams: {
@@ -294,10 +429,9 @@ describe('trackProgress', () => {
},
engine: {
streams: {
- customers: { cumulative_record_count: 6, note: 'keep-me' },
+ customers: { cumulative_record_count: 6, note: 'keep-me', status: 'started' },
invoices: { cumulative_record_count: 2, untouched: true },
},
- global: { sync_id: 'prev' },
},
},
},
@@ -322,37 +456,50 @@ describe('trackProgress', () => {
const outputs = await collect(
trackProgress({
- interval_ms: 0,
initial_state: initialState,
recordCounter: createRecordCounter(),
})(toAsync([{ type: 'eof', eof: { reason: 'complete' } }]))
)
const eof = outputs.find((m) => m.type === 'eof')
+ // Engine global is enriched with cumulative totals, so partial match
expect(eof).toMatchObject({
type: 'eof',
- eof: { state: initialState },
+ eof: {
+ state: {
+ source: initialState.source,
+ destination: initialState.destination,
+ engine: {
+ streams: initialState.engine.streams,
+ },
+ },
+ },
})
})
- it('omits state from EOF when no source_state messages were emitted', async () => {
+ it('includes engine global cumulative stats even when no source_state messages were emitted', async () => {
const counter = createRecordCounter()
const outputs = await collect(
trackProgress({
- interval_ms: 0,
recordCounter: counter,
})(toAsync([{ type: 'eof', eof: { reason: 'complete' } }]))
)
- const eof = outputs.find((m) => m.type === 'eof')
+ const eof = outputs.find((m) => m.type === 'eof') as any
expect(eof).toBeDefined()
- expect((eof as any).eof.state).toBeUndefined()
+ // Engine global always has cumulative stats (zeroed out for fresh runs)
+ expect(eof.eof.state.engine.global).toMatchObject({
+ cumulative_record_count: 0,
+ cumulative_request_count: 0,
+ })
+ // No source or destination state since no messages were emitted
+ expect(Object.keys(eof.eof.state.source.streams)).toHaveLength(0)
})
it('accumulates range_complete into completed_ranges in engine state', async () => {
const outputs = await collect(
trackProgress({
- interval_ms: 999_999,
+
recordCounter: createRecordCounter(),
})(
toAsync([
@@ -360,7 +507,7 @@ describe('trackProgress', () => {
type: 'trace',
trace: {
trace_type: 'stream_status',
- stream_status: { stream: 'customers', status: 'start' },
+ stream_status: { stream: 'customers', status: 'started' },
},
},
{
@@ -369,7 +516,7 @@ describe('trackProgress', () => {
trace_type: 'stream_status',
stream_status: {
stream: 'customers',
- status: 'range_complete',
+ status: 'started',
range_complete: { gte: '2024-01-01T00:00:00Z', lt: '2024-06-01T00:00:00Z' },
},
},
@@ -380,7 +527,7 @@ describe('trackProgress', () => {
trace_type: 'stream_status',
stream_status: {
stream: 'customers',
- status: 'range_complete',
+ status: 'started',
range_complete: { gte: '2024-06-01T00:00:00Z', lt: '2025-01-01T00:00:00Z' },
},
},
@@ -410,7 +557,6 @@ describe('trackProgress', () => {
it('range_complete does not overwrite stream status', async () => {
const outputs = await collect(
trackProgress({
- interval_ms: 999_999,
recordCounter: createRecordCounter(),
})(
toAsync([
@@ -418,7 +564,7 @@ describe('trackProgress', () => {
type: 'trace',
trace: {
trace_type: 'stream_status',
- stream_status: { stream: 'customers', status: 'running' },
+ stream_status: { stream: 'customers', status: 'started' },
},
},
{
@@ -427,7 +573,7 @@ describe('trackProgress', () => {
trace_type: 'stream_status',
stream_status: {
stream: 'customers',
- status: 'range_complete',
+ status: 'started',
range_complete: { gte: '2024-01-01T00:00:00Z', lt: '2024-06-01T00:00:00Z' },
},
},
@@ -442,7 +588,7 @@ describe('trackProgress', () => {
type: 'eof',
eof: {
stream_progress: {
- customers: { status: 'running' },
+ customers: { status: 'complete' },
},
},
})
@@ -451,7 +597,7 @@ describe('trackProgress', () => {
it('seeds completed_ranges from initial engine state', async () => {
const outputs = await collect(
trackProgress({
- interval_ms: 999_999,
+
initial_state: {
source: { streams: {}, global: {} },
destination: { streams: {}, global: {} },
@@ -473,7 +619,7 @@ describe('trackProgress', () => {
trace_type: 'stream_status',
stream_status: {
stream: 'customers',
- status: 'range_complete',
+ status: 'started',
range_complete: { gte: '2024-06-01T00:00:00Z', lt: '2025-01-01T00:00:00Z' },
},
},
diff --git a/apps/engine/src/lib/progress.ts b/apps/engine/src/lib/progress.ts
index 32d2e82a..f25fe028 100644
--- a/apps/engine/src/lib/progress.ts
+++ b/apps/engine/src/lib/progress.ts
@@ -4,9 +4,10 @@ import type {
SyncOutput,
TraceStreamStatus,
TraceError,
- TraceProgress,
+ TraceGlobalProgress,
EofPayload,
EofStreamProgress,
+ ConfiguredCatalog,
} from '@stripe/sync-protocol'
import { emptySyncState } from '@stripe/sync-protocol'
@@ -59,15 +60,14 @@ export function createRecordCounter() {
}
export function trackProgress(opts: {
- interval_ms?: number
initial_state?: SyncState
- initial_cumulative_counts?: Record
+ /** Configured catalog — emitted as the first message so the UI knows all streams upfront. */
+ catalog?: ConfiguredCatalog
/** Shared counter fed by createRecordCounter().tap() on the data path. */
  recordCounter?: ReturnType<typeof createRecordCounter>
}): (msgs: AsyncIterable) => AsyncIterable {
- const intervalMs = opts.interval_ms ?? 2000
-
return async function* (messages) {
+ // Initialize cumulative counts from engine state
const initialCumulativeCounts = opts.initial_state?.engine?.streams
? Object.fromEntries(
Object.entries(opts.initial_state.engine.streams)
@@ -77,18 +77,27 @@ export function trackProgress(opts: {
])
.filter(([, v]) => typeof v === 'number' && v >= 0)
)
- : (opts.initial_cumulative_counts ?? {})
+ : {}
const cumulativeRecordCount = new Map(Object.entries(initialCumulativeCounts))
- const prevSnapshotCounts = new Map()
+
+ // Initialize cumulative global stats from engine state
+  const engineGlobal = (opts.initial_state?.engine?.global ?? {}) as Record<string, unknown>
+ let cumulativeGlobalRecordCount = (engineGlobal.cumulative_record_count as number) ?? 0
+ let cumulativeRequestCount = (engineGlobal.cumulative_request_count as number) ?? 0
+ let cumulativeElapsedMs = (engineGlobal.cumulative_elapsed_ms as number) ?? 0
+
let stateCheckpointCount = 0
   const streamStatus = new Map<string, Status>()
   const completedRanges = new Map<string, Range[]>()
+  const lastEmittedStatus = new Map<string, Status>()
// Restore stream statuses and completed_ranges from engine state
if (opts.initial_state?.engine?.streams) {
for (const [stream, data] of Object.entries(opts.initial_state.engine.streams)) {
const d = data as { status?: Status; completed_ranges?: Range[] }
- if (d?.status) streamStatus.set(stream, d.status)
+ if (d?.status === 'started' || d?.status === 'complete') {
+ streamStatus.set(stream, d.status)
+ }
if (d?.completed_ranges && Array.isArray(d.completed_ranges)) {
completedRanges.set(stream, d.completed_ranges.slice())
}
@@ -96,17 +105,34 @@ export function trackProgress(opts: {
}
if (opts.initial_state?.source?.streams) {
for (const [stream, data] of Object.entries(opts.initial_state.source.streams)) {
- const status = (data as { status?: string })?.status
- if (status) streamStatus.set(stream, status as Status)
+ const srcStatus = (data as { status?: string })?.status
+ // Map source error statuses to lifecycle status for the engine
+ if (srcStatus === 'complete') {
+ streamStatus.set(stream, 'complete')
+ } else if (
+ srcStatus === 'pending' ||
+ srcStatus === 'transient_error' ||
+ srcStatus === 'system_error' ||
+ srcStatus === 'config_error' ||
+ srcStatus === 'auth_error'
+ ) {
+ // Source hasn't completed — keep as started (or don't set if not started yet)
+ if (streamStatus.has(stream)) {
+          // Engine state already recorded a lifecycle status for this stream;
+          // keep it as-is — engine status takes precedence over the source status.
+ } else if (srcStatus !== 'pending') {
+ streamStatus.set(stream, 'started')
+ }
+ }
}
}
+
const streamErrors = new Map()
const hadInitialState = opts.initial_state != null
const finalState: SyncState = structuredClone(opts.initial_state ?? emptySyncState())
const startedAt = Date.now()
let lastWindowAt = startedAt
- let lastEmitAt = startedAt
let prevWindowTotal = 0
function elapsedMs() {
@@ -128,10 +154,6 @@ export function trackProgress(opts: {
return sum
}
- function windowRecordCount(stream: string): number {
- return runRecordCount(stream) - (prevSnapshotCounts.get(stream) ?? 0)
- }
-
function totalWindowRecords(): number {
return totalRunRecords() - prevWindowTotal
}
@@ -148,12 +170,8 @@ export function trackProgress(opts: {
}
function snapshotWindow() {
- if (opts.recordCounter) {
- for (const [k, v] of opts.recordCounter.counts) prevSnapshotCounts.set(k, v)
- }
prevWindowTotal = totalRunRecords()
lastWindowAt = Date.now()
- lastEmitAt = Date.now()
}
function buildStreamStatus(stream: string): SyncOutput | undefined {
@@ -170,8 +188,6 @@ export function trackProgress(opts: {
status,
cumulative_record_count: cumulative,
run_record_count: run,
- window_record_count: windowRecordCount(stream),
- records_per_second: run / elapsedSec(),
},
},
_emitted_by: 'engine',
@@ -181,31 +197,49 @@ export function trackProgress(opts: {
function buildGlobalProgress(): SyncOutput {
const windowDuration = Math.max((Date.now() - lastWindowAt) / 1000, 0.001)
- const progress: TraceProgress = {
+ const runRecords = totalRunRecords()
+ const globalProgress: TraceGlobalProgress = {
elapsed_ms: elapsedMs(),
- run_record_count: totalRunRecords(),
- rows_per_second: totalRunRecords() / elapsedSec(),
- window_rows_per_second: totalWindowRecords() / windowDuration,
+ run_record_count: runRecords,
+ cumulative_record_count: cumulativeGlobalRecordCount + runRecords,
+ records_per_second: runRecords / elapsedSec(),
+ window_records_per_second: totalWindowRecords() / windowDuration,
state_checkpoint_count: stateCheckpointCount,
+ cumulative_request_count: cumulativeRequestCount,
+ cumulative_elapsed_ms: cumulativeElapsedMs + elapsedMs(),
}
return {
type: 'trace',
- trace: { trace_type: 'progress' as const, progress },
+ trace: { trace_type: 'global_progress' as const, global_progress: globalProgress },
_emitted_by: 'engine',
_ts: new Date().toISOString(),
} as SyncOutput
}
- function buildStreamProgress(stream: string): EofStreamProgress | undefined {
+ /** Emit stream_status + global_progress pair if status changed. */
+  function* emitIfStatusChanged(stream: string): Iterable<SyncOutput> {
+ const current = streamStatus.get(stream)
+ if (!current) return
+ if (lastEmittedStatus.get(stream) === current) return
+
+ lastEmittedStatus.set(stream, current)
+ const ss = buildStreamStatus(stream)
+ if (ss) yield ss
+ yield buildGlobalProgress()
+ snapshotWindow()
+ }
+
+ function buildStreamProgress(stream: string, finalEof = false): EofStreamProgress | undefined {
const status = streamStatus.get(stream)
if (!status) return undefined
const run = runRecordCount(stream)
const cumulative = (cumulativeRecordCount.get(stream) ?? 0) + run
+    // NOTE(review): promotes 'started' → 'complete' at EOF regardless of eof.reason — confirm interrupted runs (time_limit/aborted) should also report streams as complete
+ const resolvedStatus = finalEof && status === 'started' ? 'complete' : status
return {
- status,
+ status: resolvedStatus,
cumulative_record_count: cumulative,
run_record_count: run,
- records_per_second: run / elapsedSec(),
errors: streamErrors.has(stream) ? streamErrors.get(stream) : undefined,
}
}
@@ -226,6 +260,15 @@ export function trackProgress(opts: {
}
}
+ // Update engine global state with cumulative totals
+ const runRecords = totalRunRecords()
+ finalState.engine.global = {
+ ...finalState.engine.global,
+ cumulative_record_count: cumulativeGlobalRecordCount + runRecords,
+ cumulative_request_count: cumulativeRequestCount,
+ cumulative_elapsed_ms: cumulativeElapsedMs + elapsedMs(),
+ }
+
const hasAnyState =
Object.keys(finalState.source.streams).length > 0 ||
Object.keys(finalState.source.global).length > 0 ||
@@ -242,18 +285,22 @@ export function trackProgress(opts: {
const streams = allStreams()
      const streamProgressMap: Record<string, EofStreamProgress> = {}
for (const s of streams) {
- const sp = buildStreamProgress(s)
+ const sp = buildStreamProgress(s, true)
if (sp) streamProgressMap[s] = sp
}
+ const runRecords = totalRunRecords()
const eof: EofPayload = {
reason,
state: buildAccumulatedState(),
global_progress: {
elapsed_ms: elapsedMs(),
- run_record_count: totalRunRecords(),
- rows_per_second: totalRunRecords() / elapsedSec(),
- window_rows_per_second: totalWindowRecords() / windowDuration,
+ run_record_count: runRecords,
+ cumulative_record_count: cumulativeGlobalRecordCount + runRecords,
+ records_per_second: runRecords / elapsedSec(),
+ window_records_per_second: totalWindowRecords() / windowDuration,
state_checkpoint_count: stateCheckpointCount,
+ cumulative_request_count: cumulativeRequestCount,
+ cumulative_elapsed_ms: cumulativeElapsedMs + elapsedMs(),
},
stream_progress: Object.keys(streamProgressMap).length > 0 ? streamProgressMap : undefined,
}
@@ -265,16 +312,14 @@ export function trackProgress(opts: {
} as SyncOutput
}
- function* maybeEmitProgress(): Iterable {
- const now = Date.now()
- if (now - lastEmitAt < intervalMs) return
-
- for (const stream of allStreams()) {
- const ss = buildStreamStatus(stream)
- if (ss) yield ss
- }
- yield buildGlobalProgress()
- snapshotWindow()
+ // Emit catalog as first message so the UI knows all streams upfront
+ if (opts.catalog) {
+ yield {
+ type: 'catalog',
+ catalog: { streams: opts.catalog.streams.map((cs) => cs.stream) },
+ _emitted_by: 'engine',
+ _ts: new Date().toISOString(),
+ } as SyncOutput
}
for await (const msg of messages) {
@@ -283,19 +328,25 @@ export function trackProgress(opts: {
if (msg.source_state.state_type === 'stream') {
const stream = msg.source_state.stream
finalState.source.streams[stream] = msg.source_state.data
- if (!streamStatus.has(stream)) streamStatus.set(stream, 'start')
+ if (!streamStatus.has(stream)) {
+ streamStatus.set(stream, 'started')
+ yield* emitIfStatusChanged(stream)
+ }
} else if (msg.source_state.state_type === 'global') {
finalState.source.global = msg.source_state.data as Record
}
} else if (msg.type === 'trace') {
if (msg.trace.trace_type === 'stream_status') {
const ss = msg.trace.stream_status
- if (ss.status === 'range_complete' && ss.range_complete) {
+ if (ss.range_complete) {
const existing = completedRanges.get(ss.stream) ?? []
existing.push({ gte: ss.range_complete.gte, lt: ss.range_complete.lt })
completedRanges.set(ss.stream, mergeRanges(existing))
- } else {
- streamStatus.set(ss.stream, ss.status)
+ }
+ const newStatus = ss.status as Status
+ if (newStatus === 'started' || newStatus === 'complete') {
+ streamStatus.set(ss.stream, newStatus)
+ yield* emitIfStatusChanged(ss.stream)
}
} else if (msg.trace.trace_type === 'error') {
const err = msg.trace.error
@@ -308,6 +359,7 @@ export function trackProgress(opts: {
}
if (msg.type === 'eof') {
+ // Emit final stream_status + global_progress for all streams
for (const stream of allStreams()) {
const ss = buildStreamStatus(stream)
if (ss) yield ss
@@ -317,8 +369,12 @@ export function trackProgress(opts: {
return
}
+ // Suppress upstream stream_status traces — the engine re-emits enriched versions
+ if (msg.type === 'trace' && msg.trace.trace_type === 'stream_status') {
+ continue
+ }
+
yield msg
- yield* maybeEmitProgress()
}
}
}
diff --git a/apps/engine/src/lib/sync-progress-state.ts b/apps/engine/src/lib/sync-progress-state.ts
new file mode 100644
index 00000000..8d35b8aa
--- /dev/null
+++ b/apps/engine/src/lib/sync-progress-state.ts
@@ -0,0 +1,226 @@
+import type {
+ SyncOutput,
+ EofPayload,
+ EofStreamProgress,
+ TraceGlobalProgress,
+} from '@stripe/sync-protocol'
+
+// ── Reducer: SyncOutput messages → EofPayload ────────────────────
+//
+// At any point during a sync, the accumulated state is a valid EofPayload.
+// The final EOF message from the engine replaces it wholesale.
+// Display is purely a function of (EofPayload, catalog).
+
+export interface SyncDisplayState {
+ catalog: string[]
+ eof: EofPayload
+}
+
+export function createSyncDisplayState(): {
+ state: SyncDisplayState
+ /** Returns true if the message changed the display state. */
+ update: (msg: SyncOutput) => boolean
+} {
+ const state: SyncDisplayState = {
+ catalog: [],
+ eof: { reason: 'complete' },
+ }
+
+ function ensureStream(name: string): EofStreamProgress {
+ if (!state.eof.stream_progress) state.eof.stream_progress = {}
+ if (!state.eof.stream_progress[name]) {
+ state.eof.stream_progress[name] = {
+ status: 'started',
+ cumulative_record_count: 0,
+ run_record_count: 0,
+ }
+ }
+ return state.eof.stream_progress[name]
+ }
+
+ function update(msg: SyncOutput): boolean {
+ if (msg.type === 'catalog') {
+ state.catalog = (msg.catalog as { streams: Array<{ name: string }> }).streams.map(
+ (s) => s.name
+ )
+ return true
+ }
+
+ if (msg.type === 'trace') {
+ const t = msg.trace
+ if (t.trace_type === 'stream_status') {
+ const ss = t.stream_status
+ const sp = ensureStream(ss.stream)
+ sp.status = ss.status as 'started' | 'complete'
+ if (ss.cumulative_record_count != null)
+ sp.cumulative_record_count = ss.cumulative_record_count
+ if (ss.run_record_count != null) sp.run_record_count = ss.run_record_count
+ return true
+ }
+ if (t.trace_type === 'global_progress') {
+ state.eof.global_progress = (
+ t as { trace_type: 'global_progress'; global_progress: TraceGlobalProgress }
+ ).global_progress
+        return false // NOTE(review): arrives after its paired stream_status — that render showed the prior global_progress; confirm one-message staleness is acceptable
+ }
+ if (t.trace_type === 'error') {
+ const err = (
+ t as {
+ trace_type: 'error'
+ error: { message: string; failure_type?: string; stream?: string }
+ }
+ ).error
+ if (err.stream) {
+ const sp = ensureStream(err.stream)
+ if (!sp.errors) sp.errors = []
+ sp.errors.push({
+ message: err.message,
+ failure_type: err.failure_type as
+ | 'config_error'
+ | 'system_error'
+ | 'transient_error'
+ | 'auth_error'
+ | undefined,
+ })
+ }
+ return false
+ }
+ }
+
+ if (msg.type === 'eof') {
+ // The engine's EOF is authoritative — replace everything
+ state.eof = msg.eof
+ return true
+ }
+
+ return false
+ }
+
+ return { state, update }
+}
+
+// ── Renderer: (EofPayload, catalog) → string[] ──────────────────
+
+const ERROR_EMOJI: Record<string, string> = {
+ transient_error: '⚠️',
+ system_error: '❌',
+ config_error: '⚙️',
+ auth_error: '🔒',
+}
+
+const REASON_EMOJI: Record<string, string> = {
+ complete: '✅',
+ time_limit: '⏱️',
+ state_limit: '📦',
+ error: '❌',
+ aborted: '🛑',
+}
+
+function fmt(n: number): string {
+ return n.toLocaleString('en-US')
+}
+
+function fmtDuration(ms: number): string {
+ if (ms < 1000) return `${ms}ms`
+ if (ms < 60_000) return `${(ms / 1000).toFixed(1)}s`
+ const mins = Math.floor(ms / 60_000)
+ const secs = Math.round((ms % 60_000) / 1000)
+ if (mins < 60) return secs > 0 ? `${mins}m ${secs}s` : `${mins}m`
+ const hrs = Math.floor(mins / 60)
+ const rm = mins % 60
+ return rm > 0 ? `${hrs}h ${rm}m` : `${hrs}h`
+}
+
+/**
+ * Render a sync progress table from an EofPayload and optional catalog.
+ * Pure function — no side effects. Returns an array of lines.
+ *
+ * @param eof - The current (or final) EOF payload
+ * @param catalog - Stream names from the catalog (to derive pending streams)
+ * @param final - True when rendering after the actual EOF message (changes header)
+ */
+export function renderSyncProgress(
+ eof: EofPayload,
+ catalog: string[] = [],
+ final = false
+): string[] {
+ const lines: string[] = []
+ const gp = eof.global_progress
+
+ // Header
+ if (final) {
+ lines.push(`${REASON_EMOJI[eof.reason] ?? '❓'} Sync ${eof.reason}`)
+ } else {
+ lines.push('🔄 Syncing...')
+ }
+
+ if (gp) {
+ const cumRecords = gp.cumulative_record_count ?? gp.run_record_count
+ const cumElapsed = gp.cumulative_elapsed_ms ?? gp.elapsed_ms
+ lines.push(
+ ` Total: ${fmt(cumRecords)} records | ${fmt(gp.cumulative_request_count ?? 0)} requests | ${fmtDuration(cumElapsed)}`
+ )
+ lines.push(
+ ` This run: +${fmt(gp.run_record_count)} records | ${fmt(gp.request_count ?? 0)} requests | ${fmtDuration(gp.elapsed_ms)} | ${gp.records_per_second.toFixed(1)} records/s`
+ )
+ }
+
+ // Group streams by status
+ const sp = eof.stream_progress ?? {}
+ const complete: [string, EofStreamProgress][] = []
+ const started: [string, EofStreamProgress][] = []
+ const pending: string[] = []
+
+ const known = new Set(Object.keys(sp))
+ for (const [name, info] of Object.entries(sp)) {
+ if (info.status === 'complete') complete.push([name, info])
+ else started.push([name, info])
+ }
+ for (const name of catalog) {
+ if (!known.has(name)) pending.push(name)
+ }
+
+ complete.sort((a, b) => b[1].cumulative_record_count - a[1].cumulative_record_count)
+ started.sort((a, b) => b[1].cumulative_record_count - a[1].cumulative_record_count)
+
+ const allNames = [...complete.map((c) => c[0]), ...started.map((s) => s[0]), ...pending]
+ const maxName = Math.max(...allNames.map((n) => n.length), 10)
+
+ function streamLine(name: string, info: EofStreamProgress) {
+ const cum = info.cumulative_record_count
+ const run = info.run_record_count
+ const countStr = cum > 0 ? `${fmt(cum).padStart(10)}${run > 0 ? ` (+${fmt(run)})` : ''}` : ''
+ lines.push(` ${name.padEnd(maxName)} ${countStr}`)
+ for (const err of info.errors ?? []) {
+ const emoji = ERROR_EMOJI[err.failure_type ?? 'system_error'] ?? '❌'
+ lines.push(` ${emoji} ${err.message}${err.failure_type ? ` (${err.failure_type})` : ''}`)
+ }
+ }
+
+ lines.push('')
+ if (complete.length > 0) {
+ lines.push(` ✅ Complete (${complete.length}):`)
+ for (const [name, info] of complete) streamLine(name, info)
+ }
+ if (started.length > 0) {
+ lines.push(` 🔄 Started (${started.length}):`)
+ for (const [name, info] of started) streamLine(name, info)
+ }
+ if (pending.length > 0) {
+ lines.push(` ⏳ Pending (${pending.length}):`)
+ lines.push(` ${pending.join(', ')}`)
+ }
+
+ // Summary
+ const errCount = Object.values(sp).filter((i) => (i.errors?.length ?? 0) > 0).length
+ lines.push('')
+ const parts: string[] = []
+ if (complete.length) parts.push(`${complete.length} complete`)
+ if (started.length) parts.push(`${started.length} started`)
+ if (pending.length) parts.push(`${pending.length} pending`)
+ if (errCount) parts.push(`${errCount} with errors`)
+ parts.push(`+${fmt(gp?.run_record_count ?? 0)} records this run`)
+ lines.push(` 📊 ${parts.join(' | ')}`)
+
+ return lines
+}
diff --git a/apps/engine/tsconfig.json b/apps/engine/tsconfig.json
index 2481fe54..a7aaf861 100644
--- a/apps/engine/tsconfig.json
+++ b/apps/engine/tsconfig.json
@@ -2,7 +2,8 @@
"extends": "../../tsconfig.base.json",
"compilerOptions": {
"outDir": "dist",
- "rootDir": "src"
+ "rootDir": "src",
+ "jsx": "react-jsx"
},
"include": ["src/**/*"],
"exclude": ["src/**/*.test.ts", "src/**/__tests__/**"]
diff --git a/apps/service/src/__generated__/openapi.d.ts b/apps/service/src/__generated__/openapi.d.ts
index a75f2801..ba1b4d55 100644
--- a/apps/service/src/__generated__/openapi.d.ts
+++ b/apps/service/src/__generated__/openapi.d.ts
@@ -319,35 +319,39 @@ export interface operations {
elapsed_ms?: number;
/** @description Full sync state at the end of the run. source: accumulated from source_state messages; engine: updated cumulative record counts; destination: reserved. Consumers can persist this directly and pass it back on resume. */
state?: components["schemas"]["SyncState"];
- /** @description Final global aggregates. Same shape as trace/progress. */
+ /** @description Final global aggregates. Same shape as trace/global_progress. */
global_progress?: {
/** @description Wall-clock milliseconds since the sync run started. */
elapsed_ms: number;
/** @description Total records synced across all streams in this run. */
run_record_count: number;
+ /** @description Total records synced across all streams across all runs. */
+ cumulative_record_count?: number;
/** @description Overall throughput for the entire run: run_record_count / elapsed seconds. */
- rows_per_second: number;
+ records_per_second: number;
/** @description Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval. */
- window_rows_per_second: number;
+ window_records_per_second: number;
/** @description Total source_state messages observed so far in this sync run. */
state_checkpoint_count: number;
+ /** @description Total API requests made by the source in this run. */
+ request_count?: number;
+ /** @description Total API requests across all runs. */
+ cumulative_request_count?: number;
+ /** @description Total wall-clock time across all runs. */
+ cumulative_elapsed_ms?: number;
};
/** @description Per-stream end-of-sync summary. Errors only appear here, not in stream_status messages. */
stream_progress?: {
[key: string]: {
/**
- * @description Final stream status.
+ * @description Lifecycle status. Errors are orthogonal — a stream can be complete with errors.
* @enum {string}
*/
- status: "start" | "running" | "complete" | "range_complete";
+ status: "started" | "complete";
/** @description Cumulative records synced for this stream across all runs. */
cumulative_record_count: number;
/** @description Records synced in this run. */
run_record_count: number;
- /** @description Average records/sec for this stream over the run. */
- records_per_second?: number;
- /** @description Average requests/sec for this stream over the run. */
- requests_per_second?: number;
/** @description All accumulated errors for this stream during this run. */
errors?: {
/** @description Human-readable error description. */
@@ -447,35 +451,39 @@ export interface operations {
elapsed_ms?: number;
/** @description Full sync state at the end of the run. source: accumulated from source_state messages; engine: updated cumulative record counts; destination: reserved. Consumers can persist this directly and pass it back on resume. */
state?: components["schemas"]["SyncState"];
- /** @description Final global aggregates. Same shape as trace/progress. */
+ /** @description Final global aggregates. Same shape as trace/global_progress. */
global_progress?: {
/** @description Wall-clock milliseconds since the sync run started. */
elapsed_ms: number;
/** @description Total records synced across all streams in this run. */
run_record_count: number;
+ /** @description Total records synced across all streams across all runs. */
+ cumulative_record_count?: number;
/** @description Overall throughput for the entire run: run_record_count / elapsed seconds. */
- rows_per_second: number;
+ records_per_second: number;
/** @description Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval. */
- window_rows_per_second: number;
+ window_records_per_second: number;
/** @description Total source_state messages observed so far in this sync run. */
state_checkpoint_count: number;
+ /** @description Total API requests made by the source in this run. */
+ request_count?: number;
+ /** @description Total API requests across all runs. */
+ cumulative_request_count?: number;
+ /** @description Total wall-clock time across all runs. */
+ cumulative_elapsed_ms?: number;
};
/** @description Per-stream end-of-sync summary. Errors only appear here, not in stream_status messages. */
stream_progress?: {
[key: string]: {
/**
- * @description Final stream status.
+ * @description Lifecycle status. Errors are orthogonal — a stream can be complete with errors.
* @enum {string}
*/
- status: "start" | "running" | "complete" | "range_complete";
+ status: "started" | "complete";
/** @description Cumulative records synced for this stream across all runs. */
cumulative_record_count: number;
/** @description Records synced in this run. */
run_record_count: number;
- /** @description Average records/sec for this stream over the run. */
- records_per_second?: number;
- /** @description Average requests/sec for this stream over the run. */
- requests_per_second?: number;
/** @description All accumulated errors for this stream during this run. */
errors?: {
/** @description Human-readable error description. */
@@ -567,35 +575,39 @@ export interface operations {
elapsed_ms?: number;
/** @description Full sync state at the end of the run. source: accumulated from source_state messages; engine: updated cumulative record counts; destination: reserved. Consumers can persist this directly and pass it back on resume. */
state?: components["schemas"]["SyncState"];
- /** @description Final global aggregates. Same shape as trace/progress. */
+ /** @description Final global aggregates. Same shape as trace/global_progress. */
global_progress?: {
/** @description Wall-clock milliseconds since the sync run started. */
elapsed_ms: number;
/** @description Total records synced across all streams in this run. */
run_record_count: number;
+ /** @description Total records synced across all streams across all runs. */
+ cumulative_record_count?: number;
/** @description Overall throughput for the entire run: run_record_count / elapsed seconds. */
- rows_per_second: number;
+ records_per_second: number;
/** @description Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval. */
- window_rows_per_second: number;
+ window_records_per_second: number;
/** @description Total source_state messages observed so far in this sync run. */
state_checkpoint_count: number;
+ /** @description Total API requests made by the source in this run. */
+ request_count?: number;
+ /** @description Total API requests across all runs. */
+ cumulative_request_count?: number;
+ /** @description Total wall-clock time across all runs. */
+ cumulative_elapsed_ms?: number;
};
/** @description Per-stream end-of-sync summary. Errors only appear here, not in stream_status messages. */
stream_progress?: {
[key: string]: {
/**
- * @description Final stream status.
+ * @description Lifecycle status. Errors are orthogonal — a stream can be complete with errors.
* @enum {string}
*/
- status: "start" | "running" | "complete" | "range_complete";
+ status: "started" | "complete";
/** @description Cumulative records synced for this stream across all runs. */
cumulative_record_count: number;
/** @description Records synced in this run. */
run_record_count: number;
- /** @description Average records/sec for this stream over the run. */
- records_per_second?: number;
- /** @description Average requests/sec for this stream over the run. */
- requests_per_second?: number;
/** @description All accumulated errors for this stream during this run. */
errors?: {
/** @description Human-readable error description. */
@@ -748,35 +760,39 @@ export interface operations {
elapsed_ms?: number;
/** @description Full sync state at the end of the run. source: accumulated from source_state messages; engine: updated cumulative record counts; destination: reserved. Consumers can persist this directly and pass it back on resume. */
state?: components["schemas"]["SyncState"];
- /** @description Final global aggregates. Same shape as trace/progress. */
+ /** @description Final global aggregates. Same shape as trace/global_progress. */
global_progress?: {
/** @description Wall-clock milliseconds since the sync run started. */
elapsed_ms: number;
/** @description Total records synced across all streams in this run. */
run_record_count: number;
+ /** @description Total records synced across all streams across all runs. */
+ cumulative_record_count?: number;
/** @description Overall throughput for the entire run: run_record_count / elapsed seconds. */
- rows_per_second: number;
+ records_per_second: number;
/** @description Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval. */
- window_rows_per_second: number;
+ window_records_per_second: number;
/** @description Total source_state messages observed so far in this sync run. */
state_checkpoint_count: number;
+ /** @description Total API requests made by the source in this run. */
+ request_count?: number;
+ /** @description Total API requests across all runs. */
+ cumulative_request_count?: number;
+ /** @description Total wall-clock time across all runs. */
+ cumulative_elapsed_ms?: number;
};
/** @description Per-stream end-of-sync summary. Errors only appear here, not in stream_status messages. */
stream_progress?: {
[key: string]: {
/**
- * @description Final stream status.
+ * @description Lifecycle status. Errors are orthogonal — a stream can be complete with errors.
* @enum {string}
*/
- status: "start" | "running" | "complete" | "range_complete";
+ status: "started" | "complete";
/** @description Cumulative records synced for this stream across all runs. */
cumulative_record_count: number;
/** @description Records synced in this run. */
run_record_count: number;
- /** @description Average records/sec for this stream over the run. */
- records_per_second?: number;
- /** @description Average requests/sec for this stream over the run. */
- requests_per_second?: number;
/** @description All accumulated errors for this stream during this run. */
errors?: {
/** @description Human-readable error description. */
diff --git a/apps/service/src/__generated__/openapi.json b/apps/service/src/__generated__/openapi.json
index c2599c25..ea23d1d8 100644
--- a/apps/service/src/__generated__/openapi.json
+++ b/apps/service/src/__generated__/openapi.json
@@ -157,7 +157,7 @@
"$ref": "#/components/schemas/SyncState"
},
"global_progress": {
- "description": "Final global aggregates. Same shape as trace/progress.",
+ "description": "Final global aggregates. Same shape as trace/global_progress.",
"type": "object",
"properties": {
"elapsed_ms": {
@@ -172,11 +172,17 @@
"maximum": 9007199254740991,
"description": "Total records synced across all streams in this run."
},
- "rows_per_second": {
+ "cumulative_record_count": {
+ "description": "Total records synced across all streams across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "records_per_second": {
"type": "number",
"description": "Overall throughput for the entire run: run_record_count / elapsed seconds."
},
- "window_rows_per_second": {
+ "window_records_per_second": {
"type": "number",
"description": "Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval."
},
@@ -185,13 +191,31 @@
"minimum": -9007199254740991,
"maximum": 9007199254740991,
"description": "Total source_state messages observed so far in this sync run."
+ },
+ "request_count": {
+ "description": "Total API requests made by the source in this run.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_request_count": {
+ "description": "Total API requests across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_elapsed_ms": {
+ "description": "Total wall-clock time across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
}
},
"required": [
"elapsed_ms",
"run_record_count",
- "rows_per_second",
- "window_rows_per_second",
+ "records_per_second",
+ "window_records_per_second",
"state_checkpoint_count"
],
"additionalProperties": false
@@ -208,12 +232,10 @@
"status": {
"type": "string",
"enum": [
- "start",
- "running",
- "complete",
- "range_complete"
+ "started",
+ "complete"
],
- "description": "Final stream status."
+ "description": "Lifecycle status. Errors are orthogonal — a stream can be complete with errors."
},
"cumulative_record_count": {
"type": "integer",
@@ -227,14 +249,6 @@
"maximum": 9007199254740991,
"description": "Records synced in this run."
},
- "records_per_second": {
- "description": "Average records/sec for this stream over the run.",
- "type": "number"
- },
- "requests_per_second": {
- "description": "Average requests/sec for this stream over the run.",
- "type": "number"
- },
"errors": {
"description": "All accumulated errors for this stream during this run.",
"type": "array",
@@ -465,7 +479,7 @@
"$ref": "#/components/schemas/SyncState"
},
"global_progress": {
- "description": "Final global aggregates. Same shape as trace/progress.",
+ "description": "Final global aggregates. Same shape as trace/global_progress.",
"type": "object",
"properties": {
"elapsed_ms": {
@@ -480,11 +494,17 @@
"maximum": 9007199254740991,
"description": "Total records synced across all streams in this run."
},
- "rows_per_second": {
+ "cumulative_record_count": {
+ "description": "Total records synced across all streams across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "records_per_second": {
"type": "number",
"description": "Overall throughput for the entire run: run_record_count / elapsed seconds."
},
- "window_rows_per_second": {
+ "window_records_per_second": {
"type": "number",
"description": "Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval."
},
@@ -493,13 +513,31 @@
"minimum": -9007199254740991,
"maximum": 9007199254740991,
"description": "Total source_state messages observed so far in this sync run."
+ },
+ "request_count": {
+ "description": "Total API requests made by the source in this run.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_request_count": {
+ "description": "Total API requests across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_elapsed_ms": {
+ "description": "Total wall-clock time across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
}
},
"required": [
"elapsed_ms",
"run_record_count",
- "rows_per_second",
- "window_rows_per_second",
+ "records_per_second",
+ "window_records_per_second",
"state_checkpoint_count"
],
"additionalProperties": false
@@ -516,12 +554,10 @@
"status": {
"type": "string",
"enum": [
- "start",
- "running",
- "complete",
- "range_complete"
+ "started",
+ "complete"
],
- "description": "Final stream status."
+ "description": "Lifecycle status. Errors are orthogonal — a stream can be complete with errors."
},
"cumulative_record_count": {
"type": "integer",
@@ -535,14 +571,6 @@
"maximum": 9007199254740991,
"description": "Records synced in this run."
},
- "records_per_second": {
- "description": "Average records/sec for this stream over the run.",
- "type": "number"
- },
- "requests_per_second": {
- "description": "Average requests/sec for this stream over the run.",
- "type": "number"
- },
"errors": {
"description": "All accumulated errors for this stream during this run.",
"type": "array",
@@ -741,7 +769,7 @@
"$ref": "#/components/schemas/SyncState"
},
"global_progress": {
- "description": "Final global aggregates. Same shape as trace/progress.",
+ "description": "Final global aggregates. Same shape as trace/global_progress.",
"type": "object",
"properties": {
"elapsed_ms": {
@@ -756,11 +784,17 @@
"maximum": 9007199254740991,
"description": "Total records synced across all streams in this run."
},
- "rows_per_second": {
+ "cumulative_record_count": {
+ "description": "Total records synced across all streams across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "records_per_second": {
"type": "number",
"description": "Overall throughput for the entire run: run_record_count / elapsed seconds."
},
- "window_rows_per_second": {
+ "window_records_per_second": {
"type": "number",
"description": "Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval."
},
@@ -769,13 +803,31 @@
"minimum": -9007199254740991,
"maximum": 9007199254740991,
"description": "Total source_state messages observed so far in this sync run."
+ },
+ "request_count": {
+ "description": "Total API requests made by the source in this run.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_request_count": {
+ "description": "Total API requests across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_elapsed_ms": {
+ "description": "Total wall-clock time across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
}
},
"required": [
"elapsed_ms",
"run_record_count",
- "rows_per_second",
- "window_rows_per_second",
+ "records_per_second",
+ "window_records_per_second",
"state_checkpoint_count"
],
"additionalProperties": false
@@ -792,12 +844,10 @@
"status": {
"type": "string",
"enum": [
- "start",
- "running",
- "complete",
- "range_complete"
+ "started",
+ "complete"
],
- "description": "Final stream status."
+ "description": "Lifecycle status. Errors are orthogonal — a stream can be complete with errors."
},
"cumulative_record_count": {
"type": "integer",
@@ -811,14 +861,6 @@
"maximum": 9007199254740991,
"description": "Records synced in this run."
},
- "records_per_second": {
- "description": "Average records/sec for this stream over the run.",
- "type": "number"
- },
- "requests_per_second": {
- "description": "Average requests/sec for this stream over the run.",
- "type": "number"
- },
"errors": {
"description": "All accumulated errors for this stream during this run.",
"type": "array",
@@ -1071,7 +1113,7 @@
"$ref": "#/components/schemas/SyncState"
},
"global_progress": {
- "description": "Final global aggregates. Same shape as trace/progress.",
+ "description": "Final global aggregates. Same shape as trace/global_progress.",
"type": "object",
"properties": {
"elapsed_ms": {
@@ -1086,11 +1128,17 @@
"maximum": 9007199254740991,
"description": "Total records synced across all streams in this run."
},
- "rows_per_second": {
+ "cumulative_record_count": {
+ "description": "Total records synced across all streams across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "records_per_second": {
"type": "number",
"description": "Overall throughput for the entire run: run_record_count / elapsed seconds."
},
- "window_rows_per_second": {
+ "window_records_per_second": {
"type": "number",
"description": "Instantaneous throughput: total records in last window / window duration. Measures only the most recent reporting interval."
},
@@ -1099,13 +1147,31 @@
"minimum": -9007199254740991,
"maximum": 9007199254740991,
"description": "Total source_state messages observed so far in this sync run."
+ },
+ "request_count": {
+ "description": "Total API requests made by the source in this run.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_request_count": {
+ "description": "Total API requests across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
+ },
+ "cumulative_elapsed_ms": {
+ "description": "Total wall-clock time across all runs.",
+ "type": "integer",
+ "minimum": -9007199254740991,
+ "maximum": 9007199254740991
}
},
"required": [
"elapsed_ms",
"run_record_count",
- "rows_per_second",
- "window_rows_per_second",
+ "records_per_second",
+ "window_records_per_second",
"state_checkpoint_count"
],
"additionalProperties": false
@@ -1122,12 +1188,10 @@
"status": {
"type": "string",
"enum": [
- "start",
- "running",
- "complete",
- "range_complete"
+ "started",
+ "complete"
],
- "description": "Final stream status."
+ "description": "Lifecycle status. Errors are orthogonal — a stream can be complete with errors."
},
"cumulative_record_count": {
"type": "integer",
@@ -1141,14 +1205,6 @@
"maximum": 9007199254740991,
"description": "Records synced in this run."
},
- "records_per_second": {
- "description": "Average records/sec for this stream over the run.",
- "type": "number"
- },
- "requests_per_second": {
- "description": "Average requests/sec for this stream over the run.",
- "type": "number"
- },
"errors": {
"description": "All accumulated errors for this stream during this run.",
"type": "array",
diff --git a/apps/service/src/__tests__/workflow.test.ts b/apps/service/src/__tests__/workflow.test.ts
index c3cd8016..42bbec5d 100644
--- a/apps/service/src/__tests__/workflow.test.ts
+++ b/apps/service/src/__tests__/workflow.test.ts
@@ -17,8 +17,12 @@ const emptyState = {
engine: { streams: {}, global: {} },
}
const noErrors: RunResult = { errors: [], state: emptyState }
-const permanentSyncError: RunResult = {
- errors: [{ message: 'permanent sync failure', failure_type: 'auth_error', stream: 'customers' }],
+const globalPermanentSyncError: RunResult = {
+ errors: [{ message: 'bad API key', failure_type: 'auth_error' }],
+ state: emptyState,
+}
+const streamPermanentSyncError: RunResult = {
+ errors: [{ message: 'stream auth failure', failure_type: 'auth_error', stream: 'customers' }],
state: emptyState,
}
@@ -341,7 +345,7 @@ describe('pipelineWorkflow (unit — stubbed activities)', () => {
})
})
- it('transitions to error instead of ready when reconcile returns permanent sync errors', async () => {
+ it('transitions to error when reconcile returns global permanent sync errors', async () => {
const statusWrites: string[] = []
const worker = await Worker.create({
@@ -354,7 +358,7 @@ describe('pipelineWorkflow (unit — stubbed activities)', () => {
},
pipelineSync: async (_pipelineId: string, opts?) => {
if (opts?.input) return noErrors
- return { ...permanentSyncError, eof: { reason: 'complete' as const } }
+ return { ...globalPermanentSyncError, eof: { reason: 'complete' as const } }
},
}),
})
@@ -375,6 +379,41 @@ describe('pipelineWorkflow (unit — stubbed activities)', () => {
})
})
+ it('does not park workflow when reconcile returns stream-scoped permanent errors', async () => {
+ const statusWrites: string[] = []
+
+ const worker = await Worker.create({
+ connection: testEnv.nativeConnection,
+ taskQueue: 'test-queue-3b-stream-error',
+ workflowsPath,
+ activities: stubActivities({
+ updatePipelineStatus: async (_id: string, status: string) => {
+ statusWrites.push(status)
+ },
+ pipelineSync: async (_pipelineId: string, opts?) => {
+ if (opts?.input) return noErrors
+ return { ...streamPermanentSyncError, eof: { reason: 'complete' as const } }
+ },
+ }),
+ })
+
+ await worker.runUntil(async () => {
+ const handle = await testEnv.client.workflow.start('pipelineWorkflow', {
+ args: [testPipelineId],
+ workflowId: 'test-sync-3b-stream-error',
+ taskQueue: 'test-queue-3b-stream-error',
+ })
+
+ await new Promise((r) => setTimeout(r, 500))
+ await signalDelete(handle)
+ await handle.result()
+
+ // Stream-scoped permanent errors don't park the workflow
+ expect(statusWrites).toContain('ready')
+ expect(statusWrites).not.toContain('error')
+ })
+ })
+
it('retries transient sync activity failures and still reaches ready', async () => {
const statusWrites: string[] = []
let reconcileCalls = 0
diff --git a/apps/service/src/temporal/activities/_shared.ts b/apps/service/src/temporal/activities/_shared.ts
index f00e4b11..20587659 100644
--- a/apps/service/src/temporal/activities/_shared.ts
+++ b/apps/service/src/temporal/activities/_shared.ts
@@ -86,12 +86,9 @@ export async function drainMessages(
count++
if (message.type === 'eof') {
eof = message.eof
- if (eof.stream_progress) {
- const engineStreams: Record = { ...state.engine.streams }
- for (const [name, sp] of Object.entries(eof.stream_progress)) {
- engineStreams[name] = { cumulative_record_count: sp.cumulative_record_count }
- }
- state = { ...state, engine: { ...state.engine, streams: engineStreams } }
+ // eof.state is authoritative — built by trackProgress with full accumulated state
+ if (eof.state) {
+ state = eof.state
}
} else if (message.type === 'control') {
if (message.control.control_type === 'source_config') {
diff --git a/apps/service/src/temporal/sync-errors.test.ts b/apps/service/src/temporal/sync-errors.test.ts
new file mode 100644
index 00000000..d52ba1c7
--- /dev/null
+++ b/apps/service/src/temporal/sync-errors.test.ts
@@ -0,0 +1,71 @@
+import { describe, it, expect } from 'vitest'
+import { classifySyncErrors } from './sync-errors.js'
+
+describe('classifySyncErrors', () => {
+ it('classifies global permanent errors (no stream field)', () => {
+ const result = classifySyncErrors([{ message: 'bad key', failure_type: 'auth_error' }])
+
+ expect(result.globalPermanent).toHaveLength(1)
+ expect(result.streamPermanent).toHaveLength(0)
+ expect(result.permanent).toHaveLength(1)
+ expect(result.transient).toHaveLength(0)
+ })
+
+ it('classifies stream-scoped permanent errors', () => {
+ const result = classifySyncErrors([
+ { message: 'stream auth fail', failure_type: 'auth_error', stream: 'treasury' },
+ ])
+
+ expect(result.globalPermanent).toHaveLength(0)
+ expect(result.streamPermanent).toHaveLength(1)
+ expect(result.permanent).toHaveLength(1)
+ expect(result.transient).toHaveLength(0)
+ })
+
+ it('classifies transient errors regardless of stream scope', () => {
+ const result = classifySyncErrors([
+ { message: 'rate limit', failure_type: 'transient_error', stream: 'customers' },
+ { message: 'timeout', failure_type: 'transient_error' },
+ ])
+
+ expect(result.transient).toHaveLength(2)
+ expect(result.permanent).toHaveLength(0)
+ expect(result.globalPermanent).toHaveLength(0)
+ expect(result.streamPermanent).toHaveLength(0)
+ })
+
+ it('classifies system_error as permanent', () => {
+ const result = classifySyncErrors([
+ { message: 'deterministic failure', failure_type: 'system_error', stream: 'treasury' },
+ ])
+
+ expect(result.permanent).toHaveLength(1)
+ expect(result.streamPermanent).toHaveLength(1)
+ expect(result.transient).toHaveLength(0)
+ })
+
+ it('separates mixed errors into correct buckets', () => {
+ const result = classifySyncErrors([
+ { message: 'bad key', failure_type: 'auth_error' },
+ { message: 'feature gate', failure_type: 'config_error', stream: 'treasury' },
+ { message: 'rate limit', failure_type: 'transient_error' },
+ ])
+
+ expect(result.globalPermanent).toHaveLength(1)
+ expect(result.globalPermanent[0].message).toBe('bad key')
+
+ expect(result.streamPermanent).toHaveLength(1)
+ expect(result.streamPermanent[0].message).toBe('feature gate')
+
+ expect(result.permanent).toHaveLength(2)
+ expect(result.transient).toHaveLength(1)
+ expect(result.transient[0].message).toBe('rate limit')
+ })
+
+ it('treats unknown failure_type as transient', () => {
+ const result = classifySyncErrors([{ message: 'unknown' }])
+
+ expect(result.transient).toHaveLength(1)
+ expect(result.permanent).toHaveLength(0)
+ })
+})
diff --git a/apps/service/src/temporal/sync-errors.ts b/apps/service/src/temporal/sync-errors.ts
index 7f9a74fe..c9a1baef 100644
--- a/apps/service/src/temporal/sync-errors.ts
+++ b/apps/service/src/temporal/sync-errors.ts
@@ -7,23 +7,34 @@ export type SyncRunError = {
export type ClassifiedSyncErrors = {
transient: SyncRunError[]
permanent: SyncRunError[]
+ /** Permanent errors without a stream scope — bad API key, invalid config. Parks the workflow. */
+ globalPermanent: SyncRunError[]
+ /** Permanent errors scoped to a single stream — feature gate, per-stream auth. Stream is skipped on resume. */
+ streamPermanent: SyncRunError[]
}
-const PERMANENT_FAILURE_TYPES = new Set(['config_error', 'auth_error'])
+const PERMANENT_FAILURE_TYPES = new Set(['config_error', 'auth_error', 'system_error'])
export function classifySyncErrors(errors: SyncRunError[]): ClassifiedSyncErrors {
const transient: SyncRunError[] = []
const permanent: SyncRunError[] = []
+ const globalPermanent: SyncRunError[] = []
+ const streamPermanent: SyncRunError[] = []
for (const error of errors) {
if (PERMANENT_FAILURE_TYPES.has(error.failure_type ?? '')) {
permanent.push(error)
+ if (error.stream) {
+ streamPermanent.push(error)
+ } else {
+ globalPermanent.push(error)
+ }
} else {
transient.push(error)
}
}
- return { transient, permanent }
+ return { transient, permanent, globalPermanent, streamPermanent }
}
export function summarizeSyncErrors(errors: SyncRunError[]): string {
diff --git a/apps/service/src/temporal/workflows/pipeline-workflow.ts b/apps/service/src/temporal/workflows/pipeline-workflow.ts
index 07851631..721d09f5 100644
--- a/apps/service/src/temporal/workflows/pipeline-workflow.ts
+++ b/apps/service/src/temporal/workflows/pipeline-workflow.ts
@@ -130,9 +130,10 @@ export async function pipelineWorkflow(
const events = await waitForLiveEvents()
if (!events) return
- const result = await pipelineSync(pipelineId, { input: events })
+ const result = await pipelineSync(pipelineId, { state: syncState, input: events })
operationCount++
- if (classifySyncErrors(result.errors).permanent.length > 0) {
+ syncState = result.state
+ if (classifySyncErrors(result.errors).globalPermanent.length > 0) {
await markPermanentError()
return
}
@@ -166,7 +167,7 @@ export async function pipelineWorkflow(
})
operationCount++
syncState = result.state
- if (classifySyncErrors(result.errors).permanent.length > 0) {
+ if (classifySyncErrors(result.errors).globalPermanent.length > 0) {
await markPermanentError()
return
}
diff --git a/docs/engine/sync-lifecycle-source-stripe.md b/docs/engine/sync-lifecycle-source-stripe.md
index 4f9e65cf..e8b75f50 100644
--- a/docs/engine/sync-lifecycle-source-stripe.md
+++ b/docs/engine/sync-lifecycle-source-stripe.md
@@ -2,7 +2,9 @@
How the Stripe source manages pagination within a `time_range` assigned by the
engine. For the overall sync lifecycle and protocol, see
-[sync-lifecycle.md](./sync-lifecycle.md).
+[sync-lifecycle.md](./sync-lifecycle.md). Request-to-request continuation is
+driven by the engine's terminal `eof { reason, has_more }` message in this
+phase.
## Overview
@@ -149,7 +151,7 @@ checkpoint.
```
Source initializes: remaining: [{ gte: "2018", lt: "2024", cursor: null }]
-← trace { stream_status: { stream: "customers", status: "start" } }
+← trace { stream_status: { stream: "customers", status: "started" } }
← record { stream: "customers", data: { id: "cus_001", ... } }
... 100 records (page 1) ...
← state { stream: "customers", data: { remaining: [{ gte: "2018", lt: "2024", cursor: "cus_100" }] } }
@@ -160,7 +162,7 @@ Source initializes: remaining: [{ gte: "2018", lt: "2024", cursor: null }]
← state { stream: "customers", data: { remaining: [{ gte: "2018", lt: "2024", cursor: "cus_5000" }] } }
... source cut off (time limit / state limit) ...
-← end { has_more: true }
+← eof { reason: "time_limit", has_more: true }
```
Range didn't complete in one request → source will subdivide on next request.
@@ -193,7 +195,7 @@ Last record had created=2019-03. Range didn't complete → subdivide:
] } }
... cut off ...
-← end { has_more: true }
+← eof { reason: "time_limit", has_more: true }
```
### Request 3 — finishes remaining ranges
@@ -214,7 +216,7 @@ These ranges made progress last request — no further subdivision, resume.
← state { stream: "customers", data: { remaining: [] } }
← trace { stream_status: { stream: "customers", status: "complete" } }
-← end { has_more: false }
+← eof { reason: "complete", has_more: false }
```
Engine's `completed_ranges` for customers after merging all `range_complete` messages:
@@ -223,7 +225,9 @@ Engine's `completed_ranges` for customers after merging all `range_complete` mes
## State on the Wire
Source state is opaque to the engine. The engine learns about range completion
-via `stream_status: range_complete` messages, not by inspecting source state:
+via `stream_status: range_complete` messages, not by inspecting source state.
+The configured `time_range` lives on the stream in the catalog, not inside the
+`source_state` envelope:
```ts
{
@@ -231,7 +235,6 @@ via `stream_status: range_complete` messages, not by inspecting source state:
source_state: {
state_type: 'stream',
stream: 'customers',
- time_range: { gte: '2018-01-01T00:00:00Z', lt: '2024-04-17T00:00:00Z' },
data: {
remaining: [
{ gte: '2022-05-16T00:00:00Z', lt: '2024-04-17T00:00:00Z', cursor: 'cus_xyz' }
diff --git a/docs/engine/sync-lifecycle.md b/docs/engine/sync-lifecycle.md
index 670d5076..f80517eb 100644
--- a/docs/engine/sync-lifecycle.md
+++ b/docs/engine/sync-lifecycle.md
@@ -1,8 +1,14 @@
# Sync Lifecycle
-How finite sync runs work: run identity, opaque state, optional time ranges, and
-terminal stream status. For message types and connector interfaces, see
-[protocol.md](./protocol.md).
+How finite sync requests work today: opaque source state, optional
+engine-assigned `time_range`, explicit stream lifecycle signals, and
+request-level continuation via `eof`. For exact wire types and connector
+interfaces, see [protocol.md](./protocol.md).
+
+This phase intentionally keeps the existing terminal `eof` message for backward
+compatibility. Treat `eof` as the current alias of a future `end` message. The
+explicit client/engine `start` / `end` envelope is deferred to
+[`docs/plans/2026-04-17-start-end-envelope-migration.md`](../plans/2026-04-17-start-end-envelope-migration.md).
## Scope
@@ -13,59 +19,61 @@ This design is intentionally narrow:
- `full_refresh` is out of scope.
- Live `/events` polling is out of scope.
- Generic stall detection is out of scope.
+- Client/engine envelope renames are out of scope for this phase.
-## Removed From This Protocol
+## Removed From This Phase
-To keep lifecycle semantics tight, this protocol explicitly removes these ideas:
+To keep lifecycle semantics tight, this phase explicitly removes these ideas:
- **No `full_refresh` lifecycle.** `sync_mode: 'full_refresh'` and
- `destination_sync_mode: 'overwrite'` are not part of this protocol. They need
- separate semantics because "done for this run" and "historical coverage" mean
- different things for a full reread.
+ `destination_sync_mode: 'overwrite'` need separate semantics because "done for
+ this request sequence" and "historical coverage" mean different things for a
+ full reread.
- **No `range_complete`-driven terminality.** `range_complete` remains optional
progress telemetry only. It does not drive `has_more`.
-- **No cross-request range subdivision in the protocol.** The protocol does not
- assume that a partially paginated time range can be split into smaller ranges
- between requests.
+- **No cross-request range subdivision in the generic protocol.** The generic
+ lifecycle does not assume that a partially paginated time range can be split
+ into smaller ranges between requests.
+- **No `start` / `end` envelope migration in this phase.** Existing request
+ entrypoints stay as-is. The terminal message remains `eof`.
## Motivation
-The base protocol treats each `read()` call as independent. The caller manages
+The base protocol treats each request as independent. The caller manages
pagination, upper bounds, and continuation externally. That creates three
problems:
-1. **Backfill bounds shift between calls.** A stream that derives its own upper
- bound from `now()` can chase a moving target forever.
-2. **No run identity.** Multiple requests that belong to one logical backfill
- have no shared context.
-3. **Completion is ambiguous.** If the engine inspects source-specific state to
+1. **Backfill bounds shift between requests.** A stream that derives its own
+ upper bound from `now()` can chase a moving target forever.
+2. **Completion is ambiguous.** If the engine inspects source-specific state to
guess whether a stream is done, protocol behavior depends on connector
internals instead of explicit source signals.
+3. **`eof.reason` is not enough on its own.** It explains why the request
+ stopped, but it does not tell the caller whether it should continue.
-This design introduces **sync runs** as a first-class concept. The engine owns
-run identity and optional outer time bounds. The source owns pagination and
-emits explicit lifecycle signals.
+This phase keeps the request shape stable and adds `has_more` to `eof` so
+callers can continue without interpreting opaque source state.
---
## Layers
```
-CLIENT ←—start/end—→ ENGINE ←—iterator—→ SOURCE
+CLIENT ←—existing sync API + eof—→ ENGINE ←—iterator—→ SOURCE
```
-| Concern | Client | Engine | Source |
-| ------------------- | ------------------------------------- | --------------------------------------------------------- | ------------------------------------------------------ |
-| What to sync | Provides catalog | Passes catalog through, may inject `time_range` | Syncs what it's given |
-| When to sync | Decides | — | — |
-| Run identity | Generates `sync_run_id` | Tracks run continuity | Unaware |
-| Time range bounds | — | Freezes `started_at`, injects `time_range` when supported | Respects `time_range` if present |
-| Internal pagination | — | — | Manages `starting_after` / equivalent |
-| Stream lifecycle | Consumes | Tracks terminal streams | Emits `started`, optional `range_complete`, `complete` |
-| Progress reporting | Consumes | Emits run-level snapshots | Emits records, checkpoints, traces |
-| Error reporting | Decides retry policy above the engine | Passes through, stops on `global` | Classifies and emits trace errors |
-| State | Opaque round-trip | Manages engine section | Manages source section |
-| `has_more` | Reads, acts | Derives from explicit terminal stream state | — |
+| Concern | Client | Engine | Source |
+| ------------------- | --------------------------------- | ---------------------------------------------------- | ------------------------------------------------------ |
+| What to sync | Provides catalog | Passes catalog through, may inject `time_range` | Syncs what it's given |
+| When to sync | Decides | — | — |
+| Time range bounds | — | May inject `time_range`, may preserve bound metadata | Respects `time_range` if present |
+| Internal pagination | — | — | Manages `starting_after` / equivalent |
+| Stream lifecycle | Consumes | Tracks terminal streams | Emits `started`, optional `range_complete`, `complete` |
+| Progress reporting | Consumes | Emits request-level snapshots and terminal state | Emits records, checkpoints, traces |
+| Error reporting | Decides retry policy above engine | Passes through / aggregates | Classifies and emits trace errors |
+| State | Opaque round-trip | Manages engine section | Manages source section |
+| `has_more` | Reads, acts | Derives from explicit terminal stream state | — |
+| Terminal message | Receives `eof` | Emits `eof` with `reason` + `has_more` | — |
---
@@ -76,95 +84,58 @@ The engine trusts only explicit stream status messages for lifecycle:
- `started` means the stream is active for this request.
- `range_complete` is progress telemetry only.
- `complete` is the only terminal signal.
+- There is no `running` status.
The engine does **not** inspect source state to infer completion. Source state is
opaque cursor data.
---
-## Messages
-
-### `start` — client → engine
+## Request + Terminal Message
-Begins or continues a sync run. See [Types](#types) for `StartPayload`.
+### Client → engine
-### `end` — engine → client
+The client/engine request envelope stays unchanged in this phase. A later plan
+can introduce an explicit `start` message and any run-identity cleanup after
+the `eof.has_more` flow is stable.
-The request is done. See [Types](#types) for `EndPayload`.
+### `eof` — engine → client
-`has_more: true` means at least one configured stream has not emitted
-`stream_status: complete` for this run yet. Continue by sending another `start`
-with the same `sync_run_id` and the previous `ending_state` as the next
-`starting_state`.
+Every request ends with `eof`. This phase keeps `eof.reason` and adds
+`has_more`.
-`has_more: false` means every configured stream is terminal for this run. The
-next sync should use a new `sync_run_id`.
-
-### Source → engine
+`has_more: true` means at least one configured stream has not yet emitted
+`stream_status: complete` for this request sequence. Continue by calling the
+same sync entrypoint again with the returned `eof.state`.
-Sources are iterators that yield these message types:
-
-```ts
-// Data record
-{ type: 'record', record: { stream: string, data: Record, emitted_at: string } }
-
-// Checkpoint. Data is opaque to the engine.
-{ type: 'source_state', source_state: { state_type: 'stream', stream: string, data: unknown } }
-
-// Global checkpoint for source-wide state.
-{ type: 'source_state', source_state: { state_type: 'global', data: unknown } }
-
-// Stream status
-{ type: 'trace', trace: { trace_type: 'stream_status', stream_status: StreamStatus } }
-
-// Error
-{ type: 'trace', trace: { trace_type: 'error', error: SyncError & { stack_trace?: string } } }
-
-// Diagnostic log
-{ type: 'log', log: { level: 'debug' | 'info' | 'warn' | 'error', message: string } }
-```
-
-### Engine → client
-
-The engine emits four message types: `progress`, `record`, `log`, and `end`.
+`has_more: false` means every configured stream is terminal. The next sync
+should begin from a fresh caller-controlled starting point.
```ts
{
- type: 'progress',
- progress: {
- elapsed_ms: number,
- global_state_count: number,
- derived: {
- records_per_second: number,
- states_per_second: number,
- },
- streams: Record,
- errors: SyncError[]
- }
+ type: 'eof',
+ eof: {
+ reason: 'time_limit',
+ has_more: true,
+ state: SyncState,
+ },
}
+```
-{ type: 'record', record: { stream: string, data: Record, emitted_at: string } }
-
-{ type: 'log', log: { level: 'info' | 'warn' | 'error', message: string } }
+`eof` may also carry the terminal progress snapshot for the request. The exact
+wire shape continues to live in `protocol.ts`.
-{
- type: 'end',
- end: {
- has_more: boolean,
- ending_state: SyncState,
- request_progress: ProgressPayload,
- }
-}
-```
+### Source → engine
-The engine does not pass trace messages through to the client. It folds them
-into `progress` and `log`.
+Sources remain iterators that emit `record`, `source_state`, `trace`, and `log`
+messages. This phase does not introduce new top-level `progress` or `end`
+message kinds.
---
## Stream Status
-`stream_status` is a discriminated union on `status`:
+`stream_status` remains a discriminated union on `status`:
```ts
type StreamStatus =
@@ -179,8 +150,8 @@ type StreamStatus =
| `range_complete` | A time range finished | Update progress only |
| `complete` | Stream is terminal for this run | Mark stream terminal |
-`range_complete` is optional and only meaningful for streams that support
-engine-assigned `time_range`. It is not used to derive `has_more`.
+`range_complete` is only meaningful for streams that support engine-assigned
+`time_range`. It is not used to derive `has_more`.
A source that decides to stop a stream after a stream-level error should still
emit `complete` for that stream. That keeps lifecycle semantics explicit:
@@ -188,164 +159,17 @@ errors explain _why_ the stream stopped; `complete` says it is terminal.
---
-## Types
-
-### Configured catalog (client → engine → source)
-
-The client provides the catalog. The engine may inject `time_range` into
-streams that support it.
-
-```ts
-type ConfiguredStream = {
- name: string
- primary_key: string[][]
- json_schema?: Record
- sync_mode: 'incremental'
- destination_sync_mode: 'append' | 'append_dedup'
- cursor_field?: string[]
- backfill_limit?: number
-
- // Source capability from discover/spec.
- supports_time_range?: boolean
-
- // Set by engine only when supports_time_range is true.
- time_range?: {
- gte?: string
- lt: string
- }
-}
-
-type ConfiguredCatalog = {
- streams: ConfiguredStream[]
-}
-```
+## State and Continuation
-### Start message (client → engine)
+`SyncState` is still round-tripped opaquely by the caller:
-```ts
-type StartPayload = {
- sync_run_id: string
- source_config: Record
- destination_config: Record
- configured_catalog: ConfiguredCatalog
- starting_state?: SyncState
-}
-```
+- The **source section** stores cursor data the engine does not interpret.
+- The **engine section** may store terminal streams, `completed_ranges`,
+ progress snapshots, and any bound metadata needed to continue safely.
-### End message (engine → client)
-
-```ts
-type EndPayload = {
- has_more: boolean
- ending_state: SyncState
- request_progress: ProgressPayload
-}
-```
-
-### Progress message (engine → client)
-
-```ts
-type SyncError =
- | { error_level: 'global'; message: string }
- | { error_level: 'stream'; message: string; stream: string }
- | { error_level: 'transient'; message: string; stream?: string }
-
-type StreamProgress = {
- state_count: number
- record_count: number
- completed_ranges?: Array<{ gte: string; lt: string }>
- terminal: boolean
-}
-
-type ProgressPayload = {
- elapsed_ms: number
- global_state_count: number
- derived: {
- records_per_second: number
- states_per_second: number
- }
- streams: Record
- errors: SyncError[]
-}
-```
-
-`completed_ranges` is progress data only. It does not determine completion.
-
-### SyncState (round-tripped between start and end)
-
-```ts
-type SyncState = {
- source: SourceState
- engine: EngineState
-}
-
-type SourceState = {
- streams: Record
- global: Record
-}
-
-type EngineState = {
- sync_run_id: string
- started_at: string
- terminal_streams: string[]
- run_progress: ProgressPayload
-}
-```
-
-### Source state — Stripe example
-
-Source state is opaque to the engine. For Stripe list endpoints, the source can
-store the last emitted object ID as `starting_after`:
-
-```ts
-type StripeStreamState = {
- starting_after: string | null
-}
-```
-
-For time-range streams, the assigned `time_range` lives in the catalog, not in
-source state.
-
----
-
-## Sync Runs
-
-A sync run is identified by `sync_run_id`. Within a run, `started_at` is frozen.
-
-### New run
-
-1. Client sends `start` with a new `sync_run_id`.
-2. Engine freezes `started_at = now()` and stores it in engine state.
-3. For each configured stream where `supports_time_range` is true, the engine
- injects `time_range.lt = started_at`.
-4. Source runs, emits records, checkpoints, and explicit stream statuses.
-5. Engine emits progress, forwards records to the destination, and returns
- `end`.
-
-### Continuation
-
-1. Client sends `start` with the same `sync_run_id` and previous `ending_state`.
-2. Engine preserves `started_at` from engine state.
-3. The engine re-injects the same `time_range` into streams that support it.
-4. Source resumes from its opaque cursor state.
-
-### Completion
-
-When `has_more: false`:
-
-- Every configured stream is present in `engine.terminal_streams`.
-- The client should start the next sync with a new `sync_run_id`.
-
-### Example
-
-```
-sync_run_id: "sr_1"
- request 1: customers [2018, 2024) → timed out → end { has_more: true }
- request 2: customers [2018, 2024) → complete → end { has_more: false }
-```
-
-The range is stable across requests. The source resumes within that range using
-its own cursor state.
+Callers continue by round-tripping `eof.state`. This phase does **not** require
+an explicit `sync_run_id`; if we add one later, that belongs to the follow-up
+`start` / `end` envelope plan.
---
@@ -355,8 +179,8 @@ Time range support is optional per stream.
### Streams with `supports_time_range: true`
-- The engine injects `time_range`.
-- `time_range.lt` is frozen to `started_at` for the duration of the run.
+- The engine may inject `time_range`.
+- The engine may preserve any needed upper-bound metadata in engine-owned state.
- The source resumes within that range using opaque source state.
- The source may emit `range_complete` for progress reporting.
@@ -368,7 +192,7 @@ Time range support is optional per stream.
### Why this matters
-- Frozen upper bounds prevent moving-target backfills for eligible streams.
+- Stable upper bounds prevent moving-target backfills for eligible streams.
- Streams without time filtering still fit the same continuation contract.
- The engine never needs to understand source-specific pagination tokens.
@@ -384,91 +208,25 @@ has_more = configured_catalog.streams.some(
)
```
-`completed_ranges` and source-state shape do not participate in this decision.
+`completed_ranges`, source-state shape, and transient errors do not participate
+in this decision.
---
-## Error Handling
+## Errors
-### Error levels
+Lifecycle and errors remain orthogonal:
-| `error_level` | Blast radius | Engine action | Example |
-| ------------- | ------------------- | ------------------------------------ | --------------------- |
-| `global` | Entire sync | Abort all streams, `has_more: false` | Invalid API key |
-| `stream` | One stream | Keep processing other streams | Resource unavailable |
-| `transient` | One request or page | Informational | Rate limited, retried |
-
-### Source → engine error flow
-
-```ts
-{ type: 'trace', trace: { trace_type: 'error', error: SyncError } }
-```
-
-### Engine behavior
-
-- `global`: stop immediately and emit `end { has_more: false }`
-- `stream`: record the error and continue with other streams
-- `transient`: record the error only
-
-Errors are not stored in source state. They are separate from lifecycle.
-
----
-
-## Engine Logs
-
-The engine emits `log` messages for anomalies and failures only.
-
-### warn
-
-| Message | When |
-| -------------------------------- | ------------------------------------------------------------- |
-| `state before started: {stream}` | Source emitted `source_state` before `stream_status: started` |
-| `state after complete: {stream}` | Source emitted `source_state` after `stream_status: complete` |
-| `duplicate started: {stream}` | Source emitted `stream_status: started` twice |
-| `unknown stream: {stream}` | Source emitted a message for a stream not in the catalog |
-
-### error
-
-| Message | When |
-| ----------------------------------- | ------------------------------------ |
-| `global error: {message}` | Source emitted `error_level: global` |
-| `stream error: {stream}: {message}` | Source emitted `error_level: stream` |
-| `source crashed: {message}` | Source iterator threw |
-
----
-
-## Wire Format
-
-NDJSON. One message per line.
-
-```json
-{"type":"start","sync_run_id":"sr_abc","source_config":{},"configured_catalog":{"streams":[{"name":"customers","sync_mode":"incremental","supports_time_range":true}]}}
-{"type":"progress","progress":{"elapsed_ms":100,"global_state_count":0,"derived":{"records_per_second":0,"states_per_second":0},"streams":{"customers":{"state_count":0,"record_count":0,"completed_ranges":[],"terminal":false}},"errors":[]}}
-{"type":"record","record":{"stream":"customers","data":{"id":"cus_123"}}}
-{"type":"progress","progress":{"elapsed_ms":1600,"global_state_count":1,"derived":{"records_per_second":1562,"states_per_second":0.6},"streams":{"customers":{"state_count":1,"record_count":2500,"completed_ranges":[],"terminal":false}},"errors":[]}}
-{"type":"progress","progress":{"elapsed_ms":3200,"global_state_count":2,"derived":{"records_per_second":1562,"states_per_second":0.6},"streams":{"customers":{"state_count":2,"record_count":5000,"completed_ranges":[{"gte":"2018-01-01T00:00:00Z","lt":"2024-04-17T00:00:00Z"}],"terminal":true}},"errors":[]}}
-{"type":"end","end":{"has_more":false,"ending_state":{"source":{"streams":{"customers":{"starting_after":null}},"global":{}},"engine":{"sync_run_id":"sr_abc","started_at":"2024-04-17T00:00:00Z","terminal_streams":["customers"],"run_progress":{"elapsed_ms":3200,"global_state_count":2,"derived":{"records_per_second":1562,"states_per_second":0.6},"streams":{"customers":{"state_count":2,"record_count":5000,"completed_ranges":[{"gte":"2018-01-01T00:00:00Z","lt":"2024-04-17T00:00:00Z"}],"terminal":true}},"errors":[]}}},"request_progress":{"elapsed_ms":3200,"global_state_count":2,"derived":{"records_per_second":1562,"states_per_second":0.6},"streams":{"customers":{"state_count":2,"record_count":5000,"completed_ranges":[{"gte":"2018-01-01T00:00:00Z","lt":"2024-04-17T00:00:00Z"}],"terminal":true}},"errors":[]}}}
-```
+- `eof.reason` explains why the request stopped (`complete`, `state_limit`,
+ `time_limit`, `error`, `aborted`).
+- `has_more` explains whether the caller should continue.
+- Stream-level errors do not replace explicit `complete`.
+- `range_complete` never implies terminality.
---
-## Client Loop
-
-```ts
-let state = undefined
-const syncRunId = crypto.randomUUID()
-
-do {
- const { end } = await engine.sync({
- sync_run_id: syncRunId,
- source_config,
- destination_config,
- configured_catalog,
- starting_state: state,
- })
- state = end.ending_state
-} while (end.has_more)
-```
+## Future Follow-Up
-The client does not need to interpret source state. It only needs to round-trip
-`ending_state` and continue until `has_more` is false.
+Explicit `start` input, explicit `end` output, `sync_run_id`, and any eventual
+rename of `eof` are intentionally split into a later plan:
+[`docs/plans/2026-04-17-start-end-envelope-migration.md`](../plans/2026-04-17-start-end-envelope-migration.md).
diff --git a/docs/plans/2026-04-17-start-end-envelope-migration.md b/docs/plans/2026-04-17-start-end-envelope-migration.md
new file mode 100644
index 00000000..30e6562d
--- /dev/null
+++ b/docs/plans/2026-04-17-start-end-envelope-migration.md
@@ -0,0 +1,109 @@
+# Start/End Envelope Migration Implementation Plan
+
+> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.
+
+**Goal:** Introduce explicit client/engine `start` and `end` messages as a
+follow-up phase without blocking the current `eof`-based lifecycle rollout.
+
+**Architecture:** Phase 1 keeps the existing sync entrypoints and terminal
+`eof` message, preserving `eof.reason` and adding `has_more`. Phase 2 layers an
+explicit `start` request and `end` terminal message on top of the same
+lifecycle semantics and engine state, then migrates callers incrementally
+before considering any rename or deprecation of `eof`.
+
+**Tech Stack:** TypeScript, Zod, `packages/protocol`, `apps/engine`,
+`apps/service`, docs, tests.
+
+---
+
+### Task 1: Freeze The Current Phase Boundary
+
+**Files:**
+
+- Modify: `docs/engine/sync-lifecycle.md`
+- Modify: `docs/engine/sync-lifecycle-source-stripe.md`
+- Create: `docs/plans/2026-04-17-start-end-envelope-migration.md`
+
+**Step 1: Document phase 1**
+
+State that phase 1 keeps the existing sync request entrypoints and terminal
+`eof`.
+
+**Step 2: Preserve terminal semantics**
+
+Document that `eof.reason` remains authoritative for why a request stopped, and
+`has_more` is the continuation signal.
+
+**Step 3: Defer envelope cleanup**
+
+Move `start`, `end`, and `sync_run_id` out of the phase-1 lifecycle doc so the
+current plan stays focused on source/engine lifecycle semantics.
+
+### Task 2: Add Explicit Protocol Aliases
+
+**Files:**
+
+- Modify: `packages/protocol/src/protocol.ts`
+- Modify: `apps/engine/src/lib/pipeline.ts`
+- Modify: `apps/engine/src/lib/backfill.ts`
+- Test: `apps/engine/src/lib/progress.test.ts`
+- Test: `apps/engine/src/lib/engine.test.ts`
+
+**Step 1: Add `StartPayload`**
+
+Define a client → engine `start` payload that aliases the existing request
+shape instead of replacing it.
+
+**Step 2: Add `EndPayload`**
+
+Define an engine → client `end` payload as an alias of terminal `eof`,
+preserving `reason`, `has_more`, and continuation state.
+
+**Step 3: Keep dual compatibility**
+
+Ensure the engine can emit and consume the explicit envelope without removing
+legacy `eof`.
+
+### Task 3: Migrate Call Sites
+
+**Files:**
+
+- Modify: `apps/service/src/api/app.ts`
+- Modify: `apps/engine/src/api/app.ts`
+- Modify: client call sites that consume sync output
+- Test: relevant API and integration tests
+
+**Step 1: Update internal callers**
+
+Teach internal callers to understand the explicit `start` / `end` envelope or
+the legacy `eof` alias.
+
+**Step 2: Preserve compatibility at boundaries**
+
+Do not break existing clients while the migration is in progress.
+
+**Step 3: Add integration coverage**
+
+Add tests that prove both forms behave identically for continuation and
+terminality.
+
+### Task 4: Decide The Long-Term `eof` Story
+
+**Files:**
+
+- Modify: `docs/engine/sync-lifecycle.md`
+- Modify: `packages/protocol/src/protocol.ts`
+- Modify: migration / compatibility docs as needed
+
+**Step 1: Choose the permanent contract**
+
+Decide whether `eof` remains a permanent alias of `end` or becomes legacy-only.
+
+**Step 2: Measure adoption**
+
+If deprecating `eof`, add enough logging or metrics to confirm no callers still
+depend on the old name.
+
+**Step 3: Remove only after migration**
+
+Do not remove `eof` until every supported caller has switched.
diff --git a/packages/protocol/src/helpers.ts b/packages/protocol/src/helpers.ts
index 6fad2240..1ccaa8dc 100644
--- a/packages/protocol/src/helpers.ts
+++ b/packages/protocol/src/helpers.ts
@@ -108,13 +108,16 @@ export function isTraceStreamStatus(
return msg.type === 'trace' && msg.trace.trace_type === 'stream_status'
}
-/** Type guard for trace progress messages. */
-export function isTraceProgress(
+/** Type guard for trace global_progress messages. */
+export function isTraceGlobalProgress(
msg: Message
-): msg is TraceMessage & { trace: { trace_type: 'progress' } } {
- return msg.type === 'trace' && msg.trace.trace_type === 'progress'
+): msg is TraceMessage & { trace: { trace_type: 'global_progress' } } {
+ return msg.type === 'trace' && msg.trace.trace_type === 'global_progress'
}
+/** @deprecated Use isTraceGlobalProgress. */
+export const isTraceProgress = isTraceGlobalProgress
+
export function emptySectionState(): SectionState {
return { streams: {}, global: {} }
}
diff --git a/packages/protocol/src/index.ts b/packages/protocol/src/index.ts
index 87f84a3d..f82718f9 100644
--- a/packages/protocol/src/index.ts
+++ b/packages/protocol/src/index.ts
@@ -23,6 +23,7 @@ export {
isDataMessage,
isTraceError,
isTraceStreamStatus,
+ isTraceGlobalProgress,
isTraceProgress,
// State constructors
coerceSyncState,
diff --git a/packages/protocol/src/protocol.ts b/packages/protocol/src/protocol.ts
index 646dd364..efb96722 100644
--- a/packages/protocol/src/protocol.ts
+++ b/packages/protocol/src/protocol.ts
@@ -271,8 +271,11 @@ export const TraceStreamStatus = z
.object({
stream: z.string().describe('Stream being reported on.'),
status: z
- .enum(['start', 'running', 'complete', 'range_complete'])
- .describe('Current phase of the stream within this sync run.'),
+ .enum(['started', 'complete'])
+ .describe(
+ 'Lifecycle status. Errors are orthogonal — a stream can be complete with errors. ' +
+ 'Sources may store richer error statuses internally for retry logic.'
+ ),
range_complete: z
.object({
gte: z.string().describe('Inclusive lower bound (ISO 8601).'),
@@ -294,32 +297,10 @@ export const TraceStreamStatus = z
.int()
.optional()
.describe('Records synced for this stream in the current sync run. Set by the engine.'),
- window_record_count: z
- .number()
- .int()
- .optional()
- .describe(
- 'Records synced since the last stream_status emission for this stream. ' +
- 'Set by the engine. Used for instantaneous per-stream throughput.'
- ),
- records_per_second: z
- .number()
- .optional()
- .describe(
- 'Average records per second for this stream over the entire run: ' +
- 'run_record_count / elapsed seconds. Set by the engine.'
- ),
- requests_per_second: z
- .number()
- .optional()
- .describe(
- 'Average API requests per second for this stream over the entire run. ' +
- 'Set by the engine from source-reported request counts.'
- ),
})
.describe(
'Per-stream status update. Sources emit the minimal form (stream + status). ' +
- 'The engine emits enriched versions with record counts and throughput rates.'
+ 'The engine enriches with record counts. Only emitted on status transitions.'
)
export type TraceStreamStatus = z.infer
@@ -332,17 +313,22 @@ export const TraceEstimate = z
.describe('Sync progress estimate for a stream.')
export type TraceEstimate = z.infer
-export const TraceProgress = z
+export const TraceGlobalProgress = z
.object({
elapsed_ms: z.number().int().describe('Wall-clock milliseconds since the sync run started.'),
run_record_count: z
.number()
.int()
.describe('Total records synced across all streams in this run.'),
- rows_per_second: z
+ cumulative_record_count: z
+ .number()
+ .int()
+ .optional()
+ .describe('Total records synced across all streams across all runs.'),
+ records_per_second: z
.number()
.describe('Overall throughput for the entire run: run_record_count / elapsed seconds.'),
- window_rows_per_second: z
+ window_records_per_second: z
.number()
.describe(
'Instantaneous throughput: total records in last window / window duration. ' +
@@ -352,13 +338,33 @@ export const TraceProgress = z
.number()
.int()
.describe('Total source_state messages observed so far in this sync run.'),
+ request_count: z
+ .number()
+ .int()
+ .optional()
+ .describe('Total API requests made by the source in this run.'),
+ cumulative_request_count: z
+ .number()
+ .int()
+ .optional()
+ .describe('Total API requests across all runs.'),
+ cumulative_elapsed_ms: z
+ .number()
+ .int()
+ .optional()
+ .describe('Total wall-clock time across all runs.'),
})
.describe(
- 'Periodic global sync progress emitted by the engine. ' +
+ 'Global sync progress emitted by the engine, co-emitted with every stream_status trace. ' +
'Aggregate stats only — per-stream detail is in stream_status messages. ' +
'Each emission is a full replacement.'
)
-export type TraceProgress = z.infer
+export type TraceGlobalProgress = z.infer
+
+/** @deprecated Use TraceGlobalProgress. */
+export const TraceProgress = TraceGlobalProgress
+/** @deprecated Use TraceGlobalProgress. */
+export type TraceProgress = TraceGlobalProgress
export const TracePayload = z
.discriminatedUnion('trace_type', [
@@ -375,35 +381,27 @@ export const TracePayload = z
estimate: TraceEstimate,
}),
z.object({
- trace_type: z.literal('progress'),
- progress: TraceProgress,
+ trace_type: z.literal('global_progress'),
+ global_progress: TraceGlobalProgress,
}),
])
.describe(
- 'Diagnostic/status payload with subtypes for error, stream status, estimates, and progress.'
+ 'Diagnostic/status payload with subtypes for error, stream status, estimates, and global progress.'
)
export type TracePayload = z.infer
-// MARK: - EOF payload (depends on TraceProgress)
+// MARK: - EOF payload (depends on TraceGlobalProgress)
export const EofStreamProgress = z
.object({
status: z
- .enum(['start', 'running', 'complete', 'range_complete'])
- .describe('Final stream status.'),
+ .enum(['started', 'complete'])
+ .describe('Lifecycle status. Errors are orthogonal — a stream can be complete with errors.'),
cumulative_record_count: z
.number()
.int()
.describe('Cumulative records synced for this stream across all runs.'),
run_record_count: z.number().int().describe('Records synced in this run.'),
- records_per_second: z
- .number()
- .optional()
- .describe('Average records/sec for this stream over the run.'),
- requests_per_second: z
- .number()
- .optional()
- .describe('Average requests/sec for this stream over the run.'),
errors: z
.array(
z.object({
@@ -442,8 +440,8 @@ export const EofPayload = z
'engine: updated cumulative record counts; destination: reserved. ' +
'Consumers can persist this directly and pass it back on resume.'
),
- global_progress: TraceProgress.optional().describe(
- 'Final global aggregates. Same shape as trace/progress.'
+ global_progress: TraceGlobalProgress.optional().describe(
+ 'Final global aggregates. Same shape as trace/global_progress.'
),
stream_progress: z
.record(z.string(), EofStreamProgress)
@@ -579,6 +577,7 @@ export type DestinationOutput = z.infer
export const SyncOutput = z
.discriminatedUnion('type', [
SourceStateMessage,
+ CatalogMessage,
TraceMessage,
LogMessage,
EofMessage,
diff --git a/packages/source-stripe/src/client.ts b/packages/source-stripe/src/client.ts
index c1f99e54..81464fba 100644
--- a/packages/source-stripe/src/client.ts
+++ b/packages/source-stripe/src/client.ts
@@ -98,10 +98,7 @@ export function makeClient(
params?: Record
): Promise {
if (method === 'GET') {
- return withHttpRetry(() => request(method, path, params), {
- label: `${method} ${path}`,
- signal: pipelineSignal,
- })
+ return withHttpRetry(() => request(method, path, params), { signal: pipelineSignal })
}
return request(method, path, params)
}
diff --git a/packages/source-stripe/src/index.test.ts b/packages/source-stripe/src/index.test.ts
index c050d175..a555e802 100644
--- a/packages/source-stripe/src/index.test.ts
+++ b/packages/source-stripe/src/index.test.ts
@@ -247,7 +247,7 @@ describe('StripeSource', () => {
m.type === 'trace' &&
m.trace.trace_type === 'stream_status' &&
(m.trace as { stream_status: { status: string; stream?: string } }).stream_status
- .status === 'start' &&
+ .status === 'started' &&
(m.trace as { stream_status: { stream?: string } }).stream_status.stream === 'customers'
)
).toBe(true)
@@ -266,8 +266,8 @@ describe('StripeSource', () => {
(m) =>
m.type === 'trace' &&
m.trace.trace_type === 'stream_status' &&
- (m.trace as { stream_status: { status: string; stream?: string } }).stream_status
- .status === 'range_complete' &&
+ (m.trace as { stream_status: { range_complete?: unknown; stream?: string } })
+ .stream_status.range_complete != null &&
(m.trace as { stream_status: { stream?: string } }).stream_status.stream === 'customers'
)
).toBe(true)
@@ -339,8 +339,8 @@ describe('StripeSource', () => {
})
)
- // Streams run in parallel — order is not fixed; each stream emits start, records,
- // checkpoints, range_complete, final state, and complete (counts vary with ranges).
+ // Streams run in parallel — order is not fixed; each stream emits started, records,
+ // checkpoints, segment range_complete, final state, and complete (counts vary with ranges).
const custRecords = messages.filter(
(m): m is RecordMessage => m.type === 'record' && m.record.stream === 'customers'
)
@@ -354,7 +354,10 @@ describe('StripeSource', () => {
(m) =>
m.type === 'trace' &&
m.trace.trace_type === 'stream_status' &&
- (m.trace as { stream_status: { status: string } }).stream_status.status === 'start'
+ (m.trace as { stream_status: { status: string; range_complete?: unknown } }).stream_status
+ .status === 'started' &&
+ (m.trace as { stream_status: { range_complete?: unknown } }).stream_status
+ .range_complete == null
)
expect(starts).toHaveLength(2)
@@ -438,7 +441,7 @@ describe('StripeSource', () => {
m.trace.trace_type === 'stream_status' &&
(m.trace as { stream_status: { status: string; stream?: string } }).stream_status
.stream === 'customers' &&
- (m.trace as { stream_status: { status: string } }).stream_status.status === 'start'
+ (m.trace as { stream_status: { status: string } }).stream_status.status === 'started'
)
).toBe(true)
expect(
@@ -455,10 +458,10 @@ describe('StripeSource', () => {
(m) =>
m.type === 'trace' &&
m.trace.trace_type === 'stream_status' &&
- (m.trace as { stream_status: { status: string; stream?: string } }).stream_status
- .stream === 'customers' &&
- (m.trace as { stream_status: { status: string } }).stream_status.status ===
- 'range_complete'
+ (m.trace as { stream_status: { range_complete?: unknown; stream?: string } })
+ .stream_status.stream === 'customers' &&
+ (m.trace as { stream_status: { range_complete?: unknown } }).stream_status
+ .range_complete != null
)
).toBe(true)
expect(
@@ -614,12 +617,13 @@ describe('StripeSource', () => {
source.read({ config, catalog: catalog({ name: 'customers', primary_key: [['id']] }) })
)
- expect(messages).toHaveLength(2)
+ // started + error + complete
+ expect(messages).toHaveLength(3)
expect(messages[0]).toMatchObject({
type: 'trace',
trace: {
trace_type: 'stream_status',
- stream_status: { stream: 'customers', status: 'start' },
+ stream_status: { stream: 'customers', status: 'started' },
},
})
@@ -636,6 +640,13 @@ describe('StripeSource', () => {
expect(traceError.message).toContain('Rate limit')
expect(traceError.stream).toBe('customers')
expect(traceError.stack_trace).toBeDefined()
+ expect(messages[2]).toMatchObject({
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'customers', status: 'complete' },
+ },
+ })
})
it('emits TraceMessage error with failure_type config_error for unknown stream', async () => {
@@ -679,7 +690,7 @@ describe('StripeSource', () => {
source.read({ config, catalog: catalog({ name: 'customers', primary_key: [['id']] }) })
)
- expect(messages).toHaveLength(2)
+ expect(messages).toHaveLength(3)
const errorMsg = messages[1] as TraceMessage
expect(errorMsg.type).toBe('trace')
expect(errorMsg.trace.trace_type).toBe('error')
@@ -688,6 +699,13 @@ describe('StripeSource', () => {
).error
expect(traceError.failure_type).toBe('system_error')
expect(traceError.message).toContain('Connection refused')
+ expect(messages[2]).toMatchObject({
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'customers', status: 'complete' },
+ },
+ })
})
it('proceeds with backfill using fallback timestamp when getAccount fails (fault-tolerant)', async () => {
@@ -773,6 +791,7 @@ describe('StripeSource', () => {
source.read({ config, catalog: catalog({ name: 'tax_ids', primary_key: [['id']] }) })
)
+ // started + error + log (global auth errors do not emit stream complete)
expect(messages).toHaveLength(3)
const errorMsg = messages[1] as TraceMessage
expect(errorMsg.trace.trace_type).toBe('error')
@@ -810,7 +829,7 @@ describe('StripeSource', () => {
source.read({ config, catalog: catalog({ name: 'customers', primary_key: [['id']] }) })
)
- expect(messages).toHaveLength(2)
+ expect(messages).toHaveLength(3)
expect(messages[1]).toMatchObject({
type: 'trace',
trace: {
@@ -821,6 +840,13 @@ describe('StripeSource', () => {
},
},
})
+ expect(messages[2]).toMatchObject({
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'customers', status: 'complete' },
+ },
+ })
})
it('marks known skippable Stripe list errors as complete without emitting error traces', async () => {
@@ -846,7 +872,7 @@ describe('StripeSource', () => {
type: 'trace',
trace: {
trace_type: 'stream_status',
- stream_status: { stream: 'invoices', status: 'start' },
+ stream_status: { stream: 'invoices', status: 'started' },
},
})
expect(messages[1]).toMatchObject({
@@ -858,6 +884,52 @@ describe('StripeSource', () => {
})
})
+ it('treats Unrecognized request URL as skippable (feature not available)', async () => {
+ const listFn = vi
+ .fn()
+ .mockRejectedValueOnce(
+ new Error(
+ 'Unrecognized request URL (GET: /v1/treasury/financial_accounts). Please see https://stripe.com/docs'
+ )
+ )
+
+ const registry: Record = {
+ treasury_financial_accounts: makeConfig({
+ order: 1,
+ tableName: 'treasury_financial_accounts',
+ listFn: listFn as ResourceConfig['listFn'],
+ }),
+ }
+
+ vi.mocked(buildResourceRegistry).mockReturnValue(registry as any)
+ const messages = await collect(
+ source.read({
+ config,
+ catalog: catalog({
+ name: 'treasury_financial_accounts',
+ primary_key: [['id']],
+ }),
+ })
+ )
+
+ // Skippable: started + complete, no error trace
+ expect(messages).toHaveLength(2)
+ expect(messages[0]).toMatchObject({
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'treasury_financial_accounts', status: 'started' },
+ },
+ })
+ expect(messages[1]).toMatchObject({
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: 'treasury_financial_accounts', status: 'complete' },
+ },
+ })
+ })
+
it('continues to next stream after error on previous stream', async () => {
const failingListFn = vi.fn().mockRejectedValueOnce(new Error('Connection refused'))
const successListFn = vi.fn().mockResolvedValueOnce({
@@ -914,7 +986,7 @@ describe('StripeSource', () => {
.stream === 'customers' &&
(m.trace as { stream_status: { status: string } }).stream_status.status === 'complete'
)
- expect(custComplete).toHaveLength(0)
+ expect(custComplete).toHaveLength(1)
const invComplete = messages.filter(
(m) =>
@@ -971,7 +1043,7 @@ describe('StripeSource', () => {
(m) =>
m.type === 'trace' &&
m.trace.trace_type === 'stream_status' &&
- (m.trace as { stream_status: { status: string } }).stream_status.status === 'start'
+ (m.trace as { stream_status: { status: string } }).stream_status.status === 'started'
)
).toBe(true)
})
@@ -1003,7 +1075,7 @@ describe('StripeSource', () => {
(m) =>
m.type === 'trace' &&
m.trace.trace_type === 'stream_status' &&
- (m.trace as { stream_status: { status: string } }).stream_status.status === 'start'
+ (m.trace as { stream_status: { status: string } }).stream_status.status === 'started'
)
).toBe(true)
})
@@ -1035,7 +1107,7 @@ describe('StripeSource', () => {
(m) =>
m.type === 'trace' &&
m.trace.trace_type === 'stream_status' &&
- (m.trace as { stream_status: { status: string } }).stream_status.status === 'start'
+ (m.trace as { stream_status: { status: string } }).stream_status.status === 'started'
)
).toBe(true)
})
@@ -1153,7 +1225,7 @@ describe('StripeSource', () => {
m.trace.trace_type === 'stream_status' &&
(m.trace as { stream_status: { status: string; stream?: string } }).stream_status
.stream === 'customers' &&
- (m.trace as { stream_status: { status: string } }).stream_status.status === 'start'
+ (m.trace as { stream_status: { status: string } }).stream_status.status === 'started'
)
).toBe(true)
expect(
@@ -1178,10 +1250,10 @@ describe('StripeSource', () => {
(m) =>
m.type === 'trace' &&
m.trace.trace_type === 'stream_status' &&
- (m.trace as { stream_status: { status: string; stream?: string } }).stream_status
- .stream === 'customers' &&
- (m.trace as { stream_status: { status: string } }).stream_status.status ===
- 'range_complete'
+ (m.trace as { stream_status: { range_complete?: unknown; stream?: string } })
+ .stream_status.stream === 'customers' &&
+ (m.trace as { stream_status: { range_complete?: unknown } }).stream_status
+ .range_complete != null
)
).toBe(true)
expect(
@@ -1760,7 +1832,7 @@ describe('StripeSource', () => {
const m1 = await iter.next()
expect(m1.value).toMatchObject({
type: 'trace',
- trace: { trace_type: 'stream_status', stream_status: { status: 'start' } },
+ trace: { trace_type: 'stream_status', stream_status: { status: 'started' } },
})
await drainUntilStreamBackfillComplete(iter, 'customers')
@@ -1827,7 +1899,7 @@ describe('StripeSource', () => {
const m1 = await iter.next()
expect(m1.value).toMatchObject({
type: 'trace',
- trace: { trace_type: 'stream_status', stream_status: { status: 'start' } },
+ trace: { trace_type: 'stream_status', stream_status: { status: 'started' } },
})
// Queue an event AFTER stream_status started — capturedOnEvent is now set.
@@ -1891,8 +1963,8 @@ describe('StripeSource', () => {
(m) =>
m.type === 'trace' &&
m.trace.trace_type === 'stream_status' &&
- (m.trace as { stream_status: { status: string } }).stream_status.status ===
- 'range_complete'
+ (m.trace as { stream_status: { range_complete?: unknown } }).stream_status
+ .range_complete != null
)
).toBe(true)
expect(
@@ -2100,15 +2172,15 @@ describe('StripeSource', () => {
expect(messages[0]).toMatchObject({
type: 'trace',
- trace: { trace_type: 'stream_status', stream_status: { status: 'start' } },
+ trace: { trace_type: 'stream_status', stream_status: { status: 'started' } },
})
expect(
messages.some(
(m) =>
m.type === 'trace' &&
m.trace.trace_type === 'stream_status' &&
- (m.trace as { stream_status: { status: string } }).stream_status.status ===
- 'range_complete'
+ (m.trace as { stream_status: { range_complete?: unknown } }).stream_status
+ .range_complete != null
)
).toBe(true)
expect(messages.some((m) => m.type === 'source_state')).toBe(true)
@@ -2145,7 +2217,7 @@ describe('StripeSource', () => {
(m): m is TraceMessage =>
m.type === 'trace' &&
m.trace.trace_type === 'stream_status' &&
- (m.trace as { stream_status: { status: string } }).stream_status.status === 'start'
+ (m.trace as { stream_status: { status: string } }).stream_status.status === 'started'
)
expect(started).toHaveLength(0)
@@ -2624,7 +2696,7 @@ describe('StripeSource', () => {
type: 'trace',
trace: {
trace_type: 'stream_status',
- stream_status: { stream: 'customers', status: 'start' },
+ stream_status: { stream: 'customers', status: 'started' },
},
})
diff --git a/packages/source-stripe/src/resourceRegistry.ts b/packages/source-stripe/src/resourceRegistry.ts
index ce2e5004..f3d55db7 100644
--- a/packages/source-stripe/src/resourceRegistry.ts
+++ b/packages/source-stripe/src/resourceRegistry.ts
@@ -119,23 +119,14 @@ export function buildResourceRegistry(
supportsForwardPagination: isV2 || endpoint.supportsStartingAfter,
sync: true,
dependencies: [],
- listFn: buildSpecAwareListFn(
- (params) =>
- withHttpRetry(() => rawListFn(params), {
- label: `LIST ${endpoint.apiPath} (${tableName})`,
- }),
- {
- isV2,
- supportsLimit: endpoint.supportsLimit,
- supportsStartingAfter: endpoint.supportsStartingAfter,
- supportsEndingBefore: endpoint.supportsEndingBefore,
- supportsCreatedFilter: endpoint.supportsCreatedFilter,
- }
- ),
- retrieveFn: (id) =>
- withHttpRetry(() => rawRetrieveFn(id), {
- label: `GET ${endpoint.apiPath}/${id} (${tableName})`,
- }),
+ listFn: buildSpecAwareListFn((params) => withHttpRetry(() => rawListFn(params)), {
+ isV2,
+ supportsLimit: endpoint.supportsLimit,
+ supportsStartingAfter: endpoint.supportsStartingAfter,
+ supportsEndingBefore: endpoint.supportsEndingBefore,
+ supportsCreatedFilter: endpoint.supportsCreatedFilter,
+ }),
+ retrieveFn: (id) => withHttpRetry(() => rawRetrieveFn(id)),
nestedResources: children.length > 0 ? children : undefined,
}
registry[tableName] = config
diff --git a/packages/source-stripe/src/retry.ts b/packages/source-stripe/src/retry.ts
index d33fafd5..e15589e8 100644
--- a/packages/source-stripe/src/retry.ts
+++ b/packages/source-stripe/src/retry.ts
@@ -15,8 +15,6 @@ const RETRYABLE_NETWORK_CODES = new Set([
])
export type HttpRetryOptions = {
- /** Human-readable label for log messages (e.g. "GET /v1/customers") */
- label?: string
maxRetries?: number
baseDelayMs?: number
maxDelayMs?: number
@@ -129,9 +127,8 @@ export async function withHttpRetry(
const status = getHttpErrorStatus(err)
const errName = err instanceof Error ? err.name : 'UnknownError'
const errMsg = err instanceof Error ? err.message : String(err)
- const labelPart = opts.label ? ` ${opts.label}` : ''
console.error(
- `[source-stripe] retry${labelPart} attempt=${attempt + 1}/${maxRetries} delay=${delayMs}ms status=${status ?? 'n/a'} error=${errName}: ${errMsg}`
+ `[source-stripe] retry attempt=${attempt + 1}/${maxRetries} delay=${delayMs}ms status=${status ?? 'n/a'} error=${errName}: ${errMsg}`
)
await sleep(delayMs, opts.signal)
diff --git a/packages/source-stripe/src/src-list-api.ts b/packages/source-stripe/src/src-list-api.ts
index 375ba436..e2ebe640 100644
--- a/packages/source-stripe/src/src-list-api.ts
+++ b/packages/source-stripe/src/src-list-api.ts
@@ -5,6 +5,7 @@ import type { ResourceConfig } from './types.js'
import type { RemainingRange, StripeStreamState } from './index.js'
import type { RateLimiter } from './rate-limiter.js'
import { StripeApiRequestError } from '@stripe/sync-openapi'
+import { isRetryableHttpError } from './retry.js'
import type { StripeClient } from './client.js'
// MARK: - Rate-limit wrapper
@@ -41,9 +42,15 @@ function withRateLimit(listFn: ListFn, rateLimiter: RateLimiter, signal?: AbortS
// MARK: - Error classification
export function getFailureType(err: unknown): 'transient_error' | 'system_error' | 'auth_error' {
- const isRateLimit = err instanceof Error && err.message.includes('Rate limit')
- const isAuth = err instanceof StripeApiRequestError && (err.status === 401 || err.status === 403)
- return isRateLimit ? 'transient_error' : isAuth ? 'auth_error' : 'system_error'
+ if (err instanceof StripeApiRequestError && (err.status === 401 || err.status === 403)) {
+ return 'auth_error'
+ }
+ // Rate limit message check (belt + suspenders alongside HTTP status check)
+ if (err instanceof Error && err.message.includes('Rate limit')) {
+ return 'transient_error'
+ }
+ // 429, 5xx, network errors, timeouts → retriable; everything else → permanent
+ return isRetryableHttpError(err) ? 'transient_error' : 'system_error'
}
export function errorToTrace(err: unknown, stream: string): TraceMessage {
@@ -75,6 +82,7 @@ const SKIPPABLE_ERROR_PATTERNS = [
'Must provide customer',
'Must provide ',
'not set up to use',
+ 'Unrecognized request URL',
]
function isSkippableError(err: unknown): boolean {
@@ -306,7 +314,7 @@ async function* paginateRange(opts: {
trace_type: 'stream_status',
stream_status: {
stream: streamName,
- status: 'range_complete',
+ status: 'started',
range_complete: { gte: range.gte, lt: range.lt },
},
},
@@ -359,7 +367,7 @@ async function* backfillStream(opts: {
type: 'trace',
trace: {
trace_type: 'stream_status',
- stream_status: { stream: streamName, status: 'start' },
+ stream_status: { stream: streamName, status: 'started' },
},
} satisfies TraceMessage
@@ -543,6 +551,13 @@ export async function* listApiBackfill(opts: {
)
return
}
+ yield {
+ type: 'trace',
+ trace: {
+ trace_type: 'stream_status',
+ stream_status: { stream: stream.name, status: 'complete' },
+ },
+ } satisfies TraceMessage
}
})()
)
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 2ed8e41d..358149a4 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -118,7 +118,7 @@ importers:
version: link:../engine
'@stripe/sync-service':
specifier: workspace:*
- version: file:apps/service(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(tslib@2.8.1)
+ version: file:apps/service(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)(tslib@2.8.1)
'@stripe/sync-source-stripe':
specifier: workspace:*
version: link:../../packages/source-stripe
@@ -194,6 +194,9 @@ importers:
hono:
specifier: ^4
version: 4.12.8
+ ink:
+ specifier: ^7.0.0
+ version: 7.0.0(@types/react@19.2.14)(react@19.2.4)
openapi-fetch:
specifier: ^0.17.0
version: 0.17.0
@@ -206,6 +209,9 @@ importers:
pino-pretty:
specifier: ^13
version: 13.1.3
+ react:
+ specifier: ^19.2.4
+ version: 19.2.4
ws:
specifier: ^8.18.0
version: 8.18.3
@@ -222,6 +228,9 @@ importers:
'@types/pg':
specifier: ^8.15.4
version: 8.15.6
+ '@types/react':
+ specifier: ^19.2.14
+ version: 19.2.14
openapi-typescript:
specifier: ^7.13.0
version: 7.13.0(typescript@5.9.3)
@@ -680,6 +689,10 @@ importers:
packages:
+ '@alcalzone/ansi-tokenize@0.3.0':
+ resolution: {integrity: sha512-p+CMKJ93HFmLkjXKlXiVGlMQEuRb6H0MokBSwUsX+S6BRX8eV5naFZpQJFfJHjRZY0Hmnqy1/r6UWl3x+19zYA==}
+ engines: {node: '>=18'}
+
'@alloc/quick-lru@5.2.0':
resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==}
engines: {node: '>=10'}
@@ -2824,6 +2837,10 @@ packages:
resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==}
engines: {node: '>=6'}
+ ansi-escapes@7.3.0:
+ resolution: {integrity: sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg==}
+ engines: {node: '>=18'}
+
ansi-regex@5.0.1:
resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==}
engines: {node: '>=8'}
@@ -2861,6 +2878,10 @@ packages:
resolution: {integrity: sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==}
engines: {node: '>=8.0.0'}
+ auto-bind@5.0.1:
+ resolution: {integrity: sha512-ooviqdwwgfIfNmDwo94wlshcdzfO64XV0Cg6oDsDYBJfITDz1EngD2z7DkbvCWn+XIMsIqW27sEVF6qcpJrRcg==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
autoprefixer@10.4.27:
resolution: {integrity: sha512-NP9APE+tO+LuJGn7/9+cohklunJsXWiaWEfV3si4Gi/XHDwVNgkwr1J3RQYFIvPy76GmJ9/bW8vyoU1LcxwKHA==}
engines: {node: ^10 || ^12 || >=14}
@@ -2949,6 +2970,10 @@ packages:
resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
engines: {node: '>=10'}
+ chalk@5.6.2:
+ resolution: {integrity: sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==}
+ engines: {node: ^12.17.0 || ^14.13 || >=16.0.0}
+
change-case@5.4.4:
resolution: {integrity: sha512-HRQyTk2/YPEkt9TnUPbOpr64Uw3KOicFWPVBb+xiHvd6eBx/qPr9xqfBFDT8P2vWsvvz4jbEkfDe71W3VyNu2w==}
@@ -2966,6 +2991,18 @@ packages:
class-variance-authority@0.7.1:
resolution: {integrity: sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==}
+ cli-boxes@4.0.1:
+ resolution: {integrity: sha512-5IOn+jcCEHEraYolBPs/sT4BxYCe2nHg374OPiItB1O96KZFseS2gthU4twyYzeDcFew4DaUM/xwc5BQf08JJw==}
+ engines: {node: '>=18.20 <19 || >=20.10'}
+
+ cli-cursor@4.0.0:
+ resolution: {integrity: sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
+ cli-truncate@6.0.0:
+ resolution: {integrity: sha512-3+YKIUFsohD9MIoOFPFBldjAlnfCmCDcqe6aYGFqlDTRKg80p4wg35L+j83QQ63iOlKRccEkbn8IuM++HsgEjA==}
+ engines: {node: '>=22'}
+
client-only@0.0.1:
resolution: {integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==}
@@ -2977,6 +3014,10 @@ packages:
resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==}
engines: {node: '>=6'}
+ code-excerpt@4.0.0:
+ resolution: {integrity: sha512-xxodCmBen3iy2i0WtAK8FlFNrRzjUqjRsMfho58xT/wvZU1YTM3fCnRjcy1gJPMepaRlgm/0e6w8SpWHpn3/cA==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
codemirror@6.0.2:
resolution: {integrity: sha512-VhydHotNW5w1UGK0Qj96BwSk/Zqbp9WbnyK2W/eVMv4QyF41INRGpjUhFJY7/uDNuudSc33a/PKr4iDqRduvHw==}
@@ -3032,6 +3073,10 @@ packages:
convert-source-map@2.0.0:
resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==}
+ convert-to-spaces@2.0.1:
+ resolution: {integrity: sha512-rcQ1bsQO9799wq24uE5AM2tAILy4gXGIK/njFWcVQkGNZ96edlpY+A7bjwvzjYvLDyzmG1MmMLZhpcsb+klNMQ==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
crelt@1.0.6:
resolution: {integrity: sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==}
@@ -3129,6 +3174,10 @@ packages:
resolution: {integrity: sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA==}
engines: {node: '>=10.13.0'}
+ environment@1.1.0:
+ resolution: {integrity: sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==}
+ engines: {node: '>=18'}
+
es-define-property@1.0.1:
resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==}
engines: {node: '>= 0.4'}
@@ -3151,6 +3200,9 @@ packages:
resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==}
engines: {node: '>= 0.4'}
+ es-toolkit@1.45.1:
+ resolution: {integrity: sha512-/jhoOj/Fx+A+IIyDNOvO3TItGmlMKhtX8ISAHKE90c4b/k1tqaqEZ+uUqfpU8DMnW5cgNJv606zS55jGvza0Xw==}
+
esbuild@0.28.0:
resolution: {integrity: sha512-sNR9MHpXSUV/XB4zmsFKN+QgVG82Cc7+/aaxJ8Adi8hyOac+EXptIp45QBPaVyX3N70664wRbTcLTOemCAnyqw==}
engines: {node: '>=18'}
@@ -3160,6 +3212,10 @@ packages:
resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==}
engines: {node: '>=6'}
+ escape-string-regexp@2.0.0:
+ resolution: {integrity: sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==}
+ engines: {node: '>=8'}
+
escape-string-regexp@4.0.0:
resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==}
engines: {node: '>=10'}
@@ -3379,6 +3435,10 @@ packages:
resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==}
engines: {node: 6.* || 8.* || >= 10.*}
+ get-east-asian-width@1.5.0:
+ resolution: {integrity: sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA==}
+ engines: {node: '>=18'}
+
get-intrinsic@1.3.0:
resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==}
engines: {node: '>= 0.4'}
@@ -3525,6 +3585,10 @@ packages:
resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
engines: {node: '>=0.8.19'}
+ indent-string@5.0.0:
+ resolution: {integrity: sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==}
+ engines: {node: '>=12'}
+
index-to-position@1.2.0:
resolution: {integrity: sha512-Yg7+ztRkqslMAS2iFaU+Oa4KTSidr63OsFGlOrJoW981kIYO3CGCS3wA95P1mUi/IVSJkn0D479KTJpVpvFNuw==}
engines: {node: '>=18'}
@@ -3532,6 +3596,19 @@ packages:
indexof@0.0.1:
resolution: {integrity: sha512-i0G7hLJ1z0DE8dsqJa2rycj9dBmNKgXBvotXtZYXakU9oivfB9Uj2ZBC27qqef2U58/ZLwalxa1X/RDCdkHtVg==}
+ ink@7.0.0:
+ resolution: {integrity: sha512-fMie5/VwIYXofMyND0s+fOVhwVBBPYx+uuqJ6V6rUBGjui+2UYp+0fWtvhSeKT4z+X1uH98a4ge5Vj3aTlL6mg==}
+ engines: {node: '>=22'}
+ peerDependencies:
+ '@types/react': '>=19.2.0'
+ react: '>=19.2.0'
+ react-devtools-core: '>=6.1.2'
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ react-devtools-core:
+ optional: true
+
is-extglob@2.1.1:
resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==}
engines: {node: '>=0.10.0'}
@@ -3540,10 +3617,19 @@ packages:
resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==}
engines: {node: '>=8'}
+ is-fullwidth-code-point@5.1.0:
+ resolution: {integrity: sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==}
+ engines: {node: '>=18'}
+
is-glob@4.0.3:
resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
engines: {node: '>=0.10.0'}
+ is-in-ci@2.0.0:
+ resolution: {integrity: sha512-cFeerHriAnhrQSbpAxL37W1wcJKUUX07HyLWZCW1URJT/ra3GyUTzBgUnh24TMVfNTV2Hij2HLxkPHFZfOZy5w==}
+ engines: {node: '>=20'}
+ hasBin: true
+
is-number@7.0.0:
resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==}
engines: {node: '>=0.12.0'}
@@ -3777,6 +3863,10 @@ packages:
resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==}
engines: {node: '>= 0.6'}
+ mimic-fn@2.1.0:
+ resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==}
+ engines: {node: '>=6'}
+
minimatch@10.1.1:
resolution: {integrity: sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==}
engines: {node: 20 || >=22}
@@ -3877,6 +3967,10 @@ packages:
once@1.4.0:
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
+ onetime@5.1.2:
+ resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==}
+ engines: {node: '>=6'}
+
openapi-fetch@0.13.8:
resolution: {integrity: sha512-yJ4QKRyNxE44baQ9mY5+r/kAzZ8yXMemtNAOFwOzRXJscdjSxxzWSNlyBAr+o5JjkUw9Lc3W7OIoca0cY3PYnQ==}
@@ -3924,6 +4018,10 @@ packages:
parseuri@0.0.6:
resolution: {integrity: sha512-AUjen8sAkGgao7UyCX6Ahv0gIK2fABKmYjvP4xmy5JaKvcbTRueIqIPHLAfq30xJddqSE033IOMUSOMCcK3Sow==}
+ patch-console@2.0.0:
+ resolution: {integrity: sha512-0YNdUceMdaQwoKce1gatDScmMo5pu/tfABfnzEqeG0gtTmd7mh/WcwgUjtAeOU7N8nFFlbQBnFK2gXW5fGvmMA==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
path-exists@4.0.0:
resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==}
engines: {node: '>=8'}
@@ -4116,6 +4214,12 @@ packages:
peerDependencies:
react: ^19.2.4
+ react-reconciler@0.33.0:
+ resolution: {integrity: sha512-KetWRytFv1epdpJc3J4G75I4WrplZE5jOL7Yq0p34+OVOKF4Se7WrdIdVC45XsSSmUTlht2FM/fM1FZb1mfQeA==}
+ engines: {node: '>=0.10.0'}
+ peerDependencies:
+ react: ^19.2.0
+
react-refresh@0.17.0:
resolution: {integrity: sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==}
engines: {node: '>=0.10.0'}
@@ -4177,6 +4281,10 @@ packages:
resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
+ restore-cursor@4.0.0:
+ resolution: {integrity: sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
reusify@1.1.0:
resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==}
engines: {iojs: '>=1.0.0', node: '>=0.10.0'}
@@ -4261,10 +4369,17 @@ packages:
siginfo@2.0.0:
resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==}
+ signal-exit@3.0.7:
+ resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==}
+
signal-exit@4.1.0:
resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==}
engines: {node: '>=14'}
+ slice-ansi@9.0.0:
+ resolution: {integrity: sha512-SO/3iYL5S3W57LLEniscOGPZgOqZUPCx6d3dB+52B80yJ0XstzsC/eV8gnA4tM3MHDrKz+OCFSLNjswdSC+/bA==}
+ engines: {node: '>=22'}
+
socket.io-client@2.5.0:
resolution: {integrity: sha512-lOO9clmdgssDykiOmVQQitwBAF3I6mYcQAo7hQ7AM6Ny5X7fp8hIJ3HcQs3Rjz4SoggoxA1OgrQyY8EgTbcPYw==}
@@ -4302,6 +4417,10 @@ packages:
resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==}
engines: {node: '>= 10.x'}
+ stack-utils@2.0.6:
+ resolution: {integrity: sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==}
+ engines: {node: '>=10'}
+
stackback@0.0.2:
resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==}
@@ -4316,6 +4435,10 @@ packages:
resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==}
engines: {node: '>=12'}
+ string-width@8.2.0:
+ resolution: {integrity: sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw==}
+ engines: {node: '>=20'}
+
string_decoder@1.3.0:
resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==}
@@ -4405,6 +4528,10 @@ packages:
resolution: {integrity: sha512-MeQTA1r0litLUf0Rp/iisCaL8761lKAZHaimlbGK4j0HysC4PLfqygQj9srcs0m2RdtDYnF8UuYyKpbjHYp7Jw==}
engines: {node: ^14.18.0 || >=16.0.0}
+ tagged-tag@1.0.0:
+ resolution: {integrity: sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng==}
+ engines: {node: '>=20'}
+
tailwind-merge@3.5.0:
resolution: {integrity: sha512-I8K9wewnVDkL1NTGoqWmVEIlUcB9gFriAEkXkfCjX5ib8ezGxtR3xD7iZIxrfArjEsH7F1CHD4RFUtxefdqV/A==}
@@ -4415,6 +4542,10 @@ packages:
resolution: {integrity: sha512-1MOpMXuhGzGL5TTCZFItxCc0AARf1EZFQkGqMm7ERKj8+Hgr5oLvJOVFcC+lRmR8hCe2S3jC4T5D7Vg/d7/fhA==}
engines: {node: '>=6'}
+ terminal-size@4.0.1:
+ resolution: {integrity: sha512-avMLDQpUI9I5XFrklECw1ZEUPJhqzcwSWsyyI8blhRLT+8N1jLJWLWWYQpB2q2xthq8xDvjZPISVh53T/+CLYQ==}
+ engines: {node: '>=18'}
+
terser-webpack-plugin@5.4.0:
resolution: {integrity: sha512-Bn5vxm48flOIfkdl5CaD2+1CiUVbonWQ3KQPyP7/EuIl9Gbzq/gQFOzaMFUEgVjB1396tcK0SG8XcNJ/2kDH8g==}
engines: {node: '>= 10.13.0'}
@@ -4512,6 +4643,10 @@ packages:
resolution: {integrity: sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==}
engines: {node: '>=16'}
+ type-fest@5.5.0:
+ resolution: {integrity: sha512-PlBfpQwiUvGViBNX84Yxwjsdhd1TUlXr6zjX7eoirtCPIr08NAmxwa+fcYBTeRQxHo9YC9wwF3m9i700sHma8g==}
+ engines: {node: '>=20'}
+
typescript@5.9.3:
resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
engines: {node: '>=14.17'}
@@ -4730,10 +4865,18 @@ packages:
engines: {node: '>=8'}
hasBin: true
+ widest-line@6.0.0:
+ resolution: {integrity: sha512-U89AsyEeAsyoF0zVJBkG9zBgekjgjK7yk9sje3F4IQpXBJ10TF6ByLlIfjMhcmHMJgHZI4KHt4rdNfktzxIAMA==}
+ engines: {node: '>=20'}
+
word-wrap@1.2.5:
resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==}
engines: {node: '>=0.10.0'}
+ wrap-ansi@10.0.0:
+ resolution: {integrity: sha512-SGcvg80f0wUy2/fXES19feHMz8E0JoXv2uNgHOu4Dgi2OrCy1lqwFYEJz1BLbDI0exjPMe/ZdzZ/YpGECBG/aQ==}
+ engines: {node: '>=20'}
+
wrap-ansi@7.0.0:
resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==}
engines: {node: '>=10'}
@@ -4769,6 +4912,18 @@ packages:
utf-8-validate:
optional: true
+ ws@8.20.0:
+ resolution: {integrity: sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==}
+ engines: {node: '>=10.0.0'}
+ peerDependencies:
+ bufferutil: ^4.0.1
+ utf-8-validate: '>=5.0.2'
+ peerDependenciesMeta:
+ bufferutil:
+ optional: true
+ utf-8-validate:
+ optional: true
+
xmlhttprequest-ssl@1.6.3:
resolution: {integrity: sha512-3XfeQE/wNkvrIktn2Kf0869fC0BN6UpydVasGIeSm2B1Llihf7/0UfZM+eCkOw3P7bP4+qPgqhm7ZoxuJtFU0Q==}
engines: {node: '>=0.4.0'}
@@ -4807,6 +4962,9 @@ packages:
resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==}
engines: {node: '>=10'}
+ yoga-layout@3.2.1:
+ resolution: {integrity: sha512-0LPOt3AxKqMdFBZA3HBAt/t/8vIKq7VaQYbuA8WxCgung+p9TVyKRYdpvCb80HcdTN2NkbIKbhNwKUfm3tQywQ==}
+
zhead@2.2.4:
resolution: {integrity: sha512-8F0OI5dpWIA5IGG5NHUg9staDwz/ZPxZtvGVf01j7vHqSyZ0raHY+78atOVxRqb73AotX22uV1pXt3gYSstGag==}
@@ -4824,6 +4982,11 @@ packages:
snapshots:
+ '@alcalzone/ansi-tokenize@0.3.0':
+ dependencies:
+ ansi-styles: 6.2.3
+ is-fullwidth-code-point: 5.1.0
+
'@alloc/quick-lru@5.2.0': {}
'@aws-crypto/sha256-browser@5.2.0':
@@ -6738,14 +6901,14 @@ snapshots:
transitivePeerDependencies:
- pg-native
- '@stripe/sync-engine@file:apps/engine(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)':
+ '@stripe/sync-engine@file:apps/engine(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)':
dependencies:
'@hono/node-server': 1.19.11(hono@4.12.8)
'@scalar/hono-api-reference': 0.6.0(hono@4.12.8)
'@stripe/sync-destination-google-sheets': file:packages/destination-google-sheets
'@stripe/sync-destination-postgres': file:packages/destination-postgres(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)
'@stripe/sync-hono-zod-openapi': file:packages/hono-zod-openapi
- '@stripe/sync-integration-supabase': file:apps/supabase(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)
+ '@stripe/sync-integration-supabase': file:apps/supabase(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)
'@stripe/sync-protocol': file:packages/protocol
'@stripe/sync-source-stripe': file:packages/source-stripe
'@stripe/sync-state-postgres': file:packages/state-postgres
@@ -6755,19 +6918,23 @@ snapshots:
dotenv: 16.6.1
googleapis: 148.0.0
hono: 4.12.8
+ ink: 7.0.0(@types/react@19.2.14)(react@19.2.4)
openapi-fetch: 0.17.0
pg: 8.16.3
pino: 10.1.0
pino-pretty: 13.1.3
+ react: 19.2.4
ws: 8.18.3
zod: 4.3.6
transitivePeerDependencies:
- '@aws-sdk/client-sts'
- '@aws-sdk/rds-signer'
+ - '@types/react'
- bufferutil
- debug
- encoding
- pg-native
+ - react-devtools-core
- supports-color
- utf-8-validate
@@ -6778,10 +6945,10 @@ snapshots:
zod: 4.3.6
zod-openapi: 5.4.6(zod@4.3.6)
- '@stripe/sync-integration-supabase@file:apps/supabase(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)':
+ '@stripe/sync-integration-supabase@file:apps/supabase(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)':
dependencies:
'@stripe/sync-destination-postgres': file:packages/destination-postgres(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)
- '@stripe/sync-engine': file:apps/engine(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)
+ '@stripe/sync-engine': file:apps/engine(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)
'@stripe/sync-protocol': file:packages/protocol
'@stripe/sync-source-stripe': file:packages/source-stripe
'@stripe/sync-state-postgres': file:packages/state-postgres
@@ -6789,10 +6956,12 @@ snapshots:
transitivePeerDependencies:
- '@aws-sdk/client-sts'
- '@aws-sdk/rds-signer'
+ - '@types/react'
- bufferutil
- debug
- encoding
- pg-native
+ - react-devtools-core
- supports-color
- utf-8-validate
@@ -6805,13 +6974,13 @@ snapshots:
citty: 0.1.6
zod: 4.3.6
- '@stripe/sync-service@file:apps/service(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(tslib@2.8.1)':
+ '@stripe/sync-service@file:apps/service(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)(tslib@2.8.1)':
dependencies:
'@hono/node-server': 1.19.11(hono@4.12.8)
'@scalar/hono-api-reference': 0.6.0(hono@4.12.8)
'@stripe/sync-destination-google-sheets': file:packages/destination-google-sheets
'@stripe/sync-destination-postgres': file:packages/destination-postgres(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)
- '@stripe/sync-engine': file:apps/engine(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)
+ '@stripe/sync-engine': file:apps/engine(@aws-sdk/client-sts@3.1013.0)(@aws-sdk/rds-signer@3.1013.0)(@types/react@19.2.14)
'@stripe/sync-hono-zod-openapi': file:packages/hono-zod-openapi
'@stripe/sync-protocol': file:packages/protocol
'@stripe/sync-source-stripe': file:packages/source-stripe
@@ -6831,10 +7000,12 @@ snapshots:
- '@aws-sdk/client-sts'
- '@aws-sdk/rds-signer'
- '@swc/helpers'
+ - '@types/react'
- bufferutil
- encoding
- esbuild
- pg-native
+ - react-devtools-core
- supports-color
- tslib
- uglify-js
@@ -7479,6 +7650,10 @@ snapshots:
ansi-colors@4.1.3: {}
+ ansi-escapes@7.3.0:
+ dependencies:
+ environment: 1.1.0
+
ansi-regex@5.0.1: {}
ansi-regex@6.2.2: {}
@@ -7503,6 +7678,8 @@ snapshots:
atomic-sleep@1.0.0: {}
+ auto-bind@5.0.1: {}
+
autoprefixer@10.4.27(postcss@8.5.8):
dependencies:
browserslist: 4.28.1
@@ -7595,6 +7772,8 @@ snapshots:
ansi-styles: 4.3.0
supports-color: 7.2.0
+ chalk@5.6.2: {}
+
change-case@5.4.4: {}
check-error@2.1.1: {}
@@ -7609,6 +7788,17 @@ snapshots:
dependencies:
clsx: 2.1.1
+ cli-boxes@4.0.1: {}
+
+ cli-cursor@4.0.0:
+ dependencies:
+ restore-cursor: 4.0.0
+
+ cli-truncate@6.0.0:
+ dependencies:
+ slice-ansi: 9.0.0
+ string-width: 8.2.0
+
client-only@0.0.1: {}
cliui@8.0.1:
@@ -7619,6 +7809,10 @@ snapshots:
clsx@2.1.1: {}
+ code-excerpt@4.0.0:
+ dependencies:
+ convert-to-spaces: 2.0.1
+
codemirror@6.0.2:
dependencies:
'@codemirror/autocomplete': 6.20.1
@@ -7670,6 +7864,8 @@ snapshots:
convert-source-map@2.0.0: {}
+ convert-to-spaces@2.0.1: {}
+
crelt@1.0.6: {}
cross-spawn@7.0.6:
@@ -7763,6 +7959,8 @@ snapshots:
graceful-fs: 4.2.11
tapable: 2.3.2
+ environment@1.1.0: {}
+
es-define-property@1.0.1: {}
es-errors@1.3.0: {}
@@ -7782,6 +7980,8 @@ snapshots:
has-tostringtag: 1.0.2
hasown: 2.0.2
+ es-toolkit@1.45.1: {}
+
esbuild@0.28.0:
optionalDependencies:
'@esbuild/aix-ppc64': 0.28.0
@@ -7813,6 +8013,8 @@ snapshots:
escalade@3.2.0: {}
+ escape-string-regexp@2.0.0: {}
+
escape-string-regexp@4.0.0: {}
eslint-config-prettier@10.1.8(eslint@9.39.1(jiti@2.6.1)):
@@ -8038,6 +8240,8 @@ snapshots:
get-caller-file@2.0.5: {}
+ get-east-asian-width@1.5.0: {}
+
get-intrinsic@1.3.0:
dependencies:
call-bind-apply-helpers: 1.0.2
@@ -8199,18 +8403,60 @@ snapshots:
imurmurhash@0.1.4: {}
+ indent-string@5.0.0: {}
+
index-to-position@1.2.0: {}
indexof@0.0.1: {}
+ ink@7.0.0(@types/react@19.2.14)(react@19.2.4):
+ dependencies:
+ '@alcalzone/ansi-tokenize': 0.3.0
+ ansi-escapes: 7.3.0
+ ansi-styles: 6.2.3
+ auto-bind: 5.0.1
+ chalk: 5.6.2
+ cli-boxes: 4.0.1
+ cli-cursor: 4.0.0
+ cli-truncate: 6.0.0
+ code-excerpt: 4.0.0
+ es-toolkit: 1.45.1
+ indent-string: 5.0.0
+ is-in-ci: 2.0.0
+ patch-console: 2.0.0
+ react: 19.2.4
+ react-reconciler: 0.33.0(react@19.2.4)
+ scheduler: 0.27.0
+ signal-exit: 3.0.7
+ slice-ansi: 9.0.0
+ stack-utils: 2.0.6
+ string-width: 8.2.0
+ terminal-size: 4.0.1
+ type-fest: 5.5.0
+ widest-line: 6.0.0
+ wrap-ansi: 10.0.0
+ ws: 8.20.0
+ yoga-layout: 3.2.1
+ optionalDependencies:
+ '@types/react': 19.2.14
+ transitivePeerDependencies:
+ - bufferutil
+ - utf-8-validate
+
is-extglob@2.1.1: {}
is-fullwidth-code-point@3.0.0: {}
+ is-fullwidth-code-point@5.1.0:
+ dependencies:
+ get-east-asian-width: 1.5.0
+
is-glob@4.0.3:
dependencies:
is-extglob: 2.1.1
+ is-in-ci@2.0.0: {}
+
is-number@7.0.0: {}
is-stream@2.0.1: {}
@@ -8402,6 +8648,8 @@ snapshots:
dependencies:
mime-db: 1.52.0
+ mimic-fn@2.1.0: {}
+
minimatch@10.1.1:
dependencies:
'@isaacs/brace-expansion': 5.0.0
@@ -8482,6 +8730,10 @@ snapshots:
dependencies:
wrappy: 1.0.2
+ onetime@5.1.2:
+ dependencies:
+ mimic-fn: 2.1.0
+
openapi-fetch@0.13.8:
dependencies:
openapi-typescript-helpers: 0.0.15
@@ -8537,6 +8789,8 @@ snapshots:
parseuri@0.0.6: {}
+ patch-console@2.0.0: {}
+
path-exists@4.0.0: {}
path-expression-matcher@1.1.3: {}
@@ -8743,6 +8997,11 @@ snapshots:
react: 19.2.4
scheduler: 0.27.0
+ react-reconciler@0.33.0(react@19.2.4):
+ dependencies:
+ react: 19.2.4
+ scheduler: 0.27.0
+
react-refresh@0.17.0: {}
react-remove-scroll-bar@2.3.8(@types/react@19.2.14)(react@19.2.4):
@@ -8792,6 +9051,11 @@ snapshots:
resolve-pkg-maps@1.0.0: {}
+ restore-cursor@4.0.0:
+ dependencies:
+ onetime: 5.1.2
+ signal-exit: 3.0.7
+
reusify@1.1.0: {}
rimraf@6.1.0:
@@ -8926,8 +9190,15 @@ snapshots:
siginfo@2.0.0: {}
+ signal-exit@3.0.7: {}
+
signal-exit@4.1.0: {}
+ slice-ansi@9.0.0:
+ dependencies:
+ ansi-styles: 6.2.3
+ is-fullwidth-code-point: 5.1.0
+
socket.io-client@2.5.0:
dependencies:
backo2: 1.0.2
@@ -8981,6 +9252,10 @@ snapshots:
split2@4.2.0: {}
+ stack-utils@2.0.6:
+ dependencies:
+ escape-string-regexp: 2.0.0
+
stackback@0.0.2: {}
std-env@3.9.0: {}
@@ -8997,6 +9272,11 @@ snapshots:
emoji-regex: 9.2.2
strip-ansi: 7.1.2
+ string-width@8.2.0:
+ dependencies:
+ get-east-asian-width: 1.5.0
+ strip-ansi: 7.1.2
+
string_decoder@1.3.0:
dependencies:
safe-buffer: 5.2.1
@@ -9068,12 +9348,16 @@ snapshots:
dependencies:
'@pkgr/core': 0.2.9
+ tagged-tag@1.0.0: {}
+
tailwind-merge@3.5.0: {}
tailwindcss@4.2.2: {}
tapable@2.3.2: {}
+ terminal-size@4.0.1: {}
+
terser-webpack-plugin@5.4.0(@swc/core@1.15.21)(webpack@5.105.4(@swc/core@1.15.21)):
dependencies:
'@jridgewell/trace-mapping': 0.3.31
@@ -9151,6 +9435,10 @@ snapshots:
type-fest@4.41.0: {}
+ type-fest@5.5.0:
+ dependencies:
+ tagged-tag: 1.0.0
+
typescript@5.9.3: {}
undici-types@6.21.0: {}
@@ -9432,8 +9720,18 @@ snapshots:
siginfo: 2.0.0
stackback: 0.0.2
+ widest-line@6.0.0:
+ dependencies:
+ string-width: 8.2.0
+
word-wrap@1.2.5: {}
+ wrap-ansi@10.0.0:
+ dependencies:
+ ansi-styles: 6.2.3
+ string-width: 8.2.0
+ strip-ansi: 7.1.2
+
wrap-ansi@7.0.0:
dependencies:
ansi-styles: 4.3.0
@@ -9452,6 +9750,8 @@ snapshots:
ws@8.18.3: {}
+ ws@8.20.0: {}
+
xmlhttprequest-ssl@1.6.3: {}
xtend@4.0.2: {}
@@ -9481,6 +9781,8 @@ snapshots:
yocto-queue@0.1.0: {}
+ yoga-layout@3.2.1: {}
+
zhead@2.2.4: {}
zod-openapi@5.4.6(zod@4.3.6):