diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4494a26d..df156c1c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -155,6 +155,7 @@ jobs: filters: | backend: - '**/*.go' + - 'magefile.go' - 'go.mod' - 'go.sum' - 'Makefile' @@ -198,6 +199,11 @@ jobs: - name: Setup git-cliff for release tests uses: ./.github/actions/setup-git-cliff + - name: Install Mage + run: | + echo "Installing Mage..." + go install github.com/magefile/mage@latest + - name: Run linting run: | echo "Running Go code quality checks..." @@ -222,6 +228,23 @@ jobs: make test continue-on-error: false + - name: Run SDK benchmarks + run: | + echo "Running SDK benchmarks with memory stats..." + go test -run=^$ -bench=. -benchmem github.com/compozy/compozy/sdk/agent \ + github.com/compozy/compozy/sdk/project \ + github.com/compozy/compozy/sdk/workflow \ + github.com/compozy/compozy/sdk/task \ + github.com/compozy/compozy/sdk/knowledge \ + github.com/compozy/compozy/sdk/memory \ + github.com/compozy/compozy/sdk/compozy | tee bench.out + continue-on-error: false + + - name: Check benchmark regressions + run: | + go run ./tools/benchcheck --baseline sdk/docs/performance-benchmarks.json --results bench.out + continue-on-error: false + - name: Upload test results uses: actions/upload-artifact@v4 if: always() diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 99aa78b5..5937ead2 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -47,10 +47,30 @@ jobs: run: | make test-coverage + - name: Generate SDK coverage report + run: | + go work sync + cd sdk + packages=$(go list ./... 
2>/dev/null | grep -v '/examples') + if [ -z "$packages" ]; then + echo "no SDK packages discovered" + exit 1 + fi + go test -covermode=atomic -coverprofile=coverage.out $packages + + - name: Enforce SDK coverage + run: | + cd sdk + go tool cover -func=coverage.out | tee coverage.txt + awk '/^total:/ {gsub(/%/, "", $3); cov=$3+0; if (cov < 100) {printf "Coverage is %.2f%%, must be 100%%\n", cov; exit 1}} END {if (NR == 0) {print "No coverage summary found"; exit 1}}' coverage.txt + echo "SDK coverage requirement satisfied" + - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v5 with: - files: ./coverage.out + files: | + ./coverage.out + ./sdk/coverage.out flags: unittests name: codecov-umbrella fail_ci_if_error: true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 481ad0cf..047b13ae 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -39,7 +39,6 @@ env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} NPM_TOKEN: ${{ secrets.NPM_TOKEN }} PR_RELEASE_MODULE: github.com/compozy/releasepr@v0.0.15 - GOTMPDIR: ${{ runner.temp }}/go-tmp permissions: contents: write @@ -61,8 +60,6 @@ jobs: github.event.head_commit.author.name != 'github-actions[bot]') || (github.event_name == 'workflow_dispatch') runs-on: ubuntu-latest - env: - GOTMPDIR: ${{ env.GOTMPDIR }} steps: - uses: actions/checkout@v4 with: @@ -70,6 +67,8 @@ jobs: fetch-tags: true - name: Prepare Go temporary directory + env: + GOTMPDIR: ${{ runner.temp }}/go-tmp run: mkdir -p "$GOTMPDIR" - uses: ./.github/actions/setup-go @@ -86,6 +85,7 @@ jobs: GITHUB_REPOSITORY: ${{ github.repository }} GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }} INITIAL_VERSION: ${{ env.INITIAL_VERSION }} + GOTMPDIR: ${{ runner.temp }}/go-tmp run: | go run "${{ env.PR_RELEASE_MODULE }}" pr-release --force --enable-rollback --ci-output @@ -97,8 +97,6 @@ jobs: (startsWith(github.event.pull_request.title, 'ci(release): Release ') || startsWith(github.base_ref, 
'release/v')) runs-on: ubuntu-latest - env: - GOTMPDIR: ${{ env.GOTMPDIR }} steps: - uses: actions/checkout@v4 with: @@ -106,6 +104,8 @@ jobs: fetch-tags: true - name: Prepare Go temporary directory + env: + GOTMPDIR: ${{ runner.temp }}/go-tmp run: mkdir -p "$GOTMPDIR" - uses: ./.github/actions/setup-go @@ -130,6 +130,7 @@ jobs: GITHUB_HEAD_REF: ${{ github.head_ref }} GITHUB_ISSUE_NUMBER: ${{ github.event.pull_request.number }} GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }} + GOTMPDIR: ${{ runner.temp }}/go-tmp run: go run "${{ env.PR_RELEASE_MODULE }}" dry-run --ci-output # Job 3: Production release on merge @@ -143,7 +144,6 @@ jobs: timeout-minutes: 120 env: DOCKER_CLI_EXPERIMENTAL: enabled - GOTMPDIR: ${{ env.GOTMPDIR }} steps: - uses: actions/checkout@v4 with: @@ -151,6 +151,8 @@ jobs: fetch-tags: true - name: Prepare Go temporary directory + env: + GOTMPDIR: ${{ runner.temp }}/go-tmp run: mkdir -p "$GOTMPDIR" - name: Set up Go with caching @@ -212,6 +214,7 @@ jobs: AUR_KEY: ${{ secrets.AUR_KEY }} COSIGN_EXPERIMENTAL: 1 GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }} + GOTMPDIR: ${{ runner.temp }}/go-tmp - name: Publish NPM Packages env: diff --git a/.github/workflows/test-sdk.yml b/.github/workflows/test-sdk.yml new file mode 100644 index 00000000..0cd94406 --- /dev/null +++ b/.github/workflows/test-sdk.yml @@ -0,0 +1,179 @@ +name: SDK Tests + +on: + push: + branches: + - main + paths: + - "sdk/**" + - "go.work" + - ".github/workflows/test-sdk.yml" + pull_request: + branches: + - main + paths: + - "sdk/**" + - "go.work" + - ".github/workflows/test-sdk.yml" + workflow_dispatch: + +env: + GO_VERSION: "1.25.2" + +jobs: + unit: + name: SDK Unit Tests + runs-on: ubuntu-latest + timeout-minutes: 20 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go with caching + uses: ./.github/actions/setup-go + with: + go-version: ${{ env.GO_VERSION }} + install-tools: "false" + + - name: Initialize Go workspace + run: | + rm -f go.work + go work init 
. ./sdk + go work sync + + - name: Download SDK dependencies + run: | + cd sdk + go mod download + go mod verify + + - name: Run unit tests with race detector and coverage + run: | + cd sdk + packages=$(go list ./... 2>/dev/null | grep -v '/examples') + if [ -z "$packages" ]; then + echo "no SDK packages discovered" + exit 1 + fi + go test -race -covermode=atomic -coverprofile=coverage.out $packages + + - name: Enforce 100% coverage + run: | + cd sdk + go tool cover -func=coverage.out | tee coverage.txt + awk '/^total:/ {gsub(/%/, "", $3); cov=$3+0; if (cov < 100) {printf "Coverage is %.2f%%, must be 100%%\n", cov; exit 1}} END {if (NR == 0) {print "No coverage summary found"; exit 1}}' coverage.txt + echo "Coverage requirement satisfied" + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v5 + with: + files: sdk/coverage.out + flags: sdk-unit + name: sdk-unit + fail_ci_if_error: true + token: ${{ secrets.CODECOV_TOKEN }} + override_branch: ${{ github.head_ref || github.ref_name }} + override_commit: ${{ github.event.pull_request.head.sha || github.sha }} + + integration: + name: SDK Integration Tests + runs-on: ubuntu-latest + timeout-minutes: 40 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go with caching + uses: ./.github/actions/setup-go + with: + go-version: ${{ env.GO_VERSION }} + install-tools: "false" + + - name: Initialize Go workspace + run: | + rm -f go.work + go work init . ./sdk + go work sync + + - name: Run integration tests + run: | + cd sdk + packages=$(go list ./... 
2>/dev/null | grep -v '/examples') + if [ -z "$packages" ]; then + echo "no SDK packages discovered" + exit 0 + fi + go test -count=1 -tags=integration $packages + env: + TESTCONTAINERS_RYUK_DISABLED: "false" + TESTCONTAINERS_CLEANUP_TIMEOUT: "120s" + + benchmarks: + name: SDK Benchmarks + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Go with caching + uses: ./.github/actions/setup-go + with: + go-version: ${{ env.GO_VERSION }} + install-tools: "false" + + - name: Initialize Go workspace + run: | + rm -f go.work + go work init . ./sdk + go work sync + + - name: Run benchmarks + run: | + cd sdk + packages=$(go list ./... 2>/dev/null | grep -v '/examples') + if [ -z "$packages" ]; then + echo "no SDK packages discovered" + exit 0 + fi + go test -run=^$ -bench=. -benchmem $packages | tee bench.out + + - name: Detect benchmark regressions + run: | + go run ./tools/benchcheck --baseline sdk/docs/performance-benchmarks.json --results sdk/bench.out + + - name: Upload benchmark results + uses: actions/upload-artifact@v4 + with: + name: sdk-benchmarks + path: sdk/bench.out + retention-days: 7 + + lint: + name: SDK Lint + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go with caching + uses: ./.github/actions/setup-go + with: + go-version: ${{ env.GO_VERSION }} + install-tools: "false" + + - name: Initialize Go workspace + run: | + rm -f go.work + go work init . 
./sdk + go work sync + + - name: Run golangci-lint + uses: golangci/golangci-lint-action@v8 + with: + version: latest + working-directory: sdk + args: --timeout=5m diff --git a/.golangci.yml b/.golangci.yml index fa4477bc..51dd184d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -80,6 +80,9 @@ linters: - lll - unparam path: _test\.go + - linters: + - funlen + path: sdk/examples - linters: - dupl path: pkg/pb diff --git a/AGENTS.md b/AGENTS.md index 3210727c..67453b1d 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -55,6 +55,10 @@ cat .cursor/rules/{go-coding-standards,architecture,test-standards,backwards-com - **Linting:** `golangci-lint run --fix --allow-parallel-runners ` (e.g., `./engine/agent/...`) - **IF YOUR SCOPE** is `.../.` then you need to run `make test` and `make lint` +### When writing code + +- **YOU MUST STRICTLY** follow the .cursor/rules/no-linebreaks.mdc or your code will be invalidated + **Enforcement:** Violating these standards results in immediate task rejection. diff --git a/CLAUDE.md b/CLAUDE.md index 3210727c..67453b1d 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -55,6 +55,10 @@ cat .cursor/rules/{go-coding-standards,architecture,test-standards,backwards-com - **Linting:** `golangci-lint run --fix --allow-parallel-runners ` (e.g., `./engine/agent/...`) - **IF YOUR SCOPE** is `.../.` then you need to run `make test` and `make lint` +### When writing code + +- **YOU MUST STRICTLY** follow the .cursor/rules/no-linebreaks.mdc or your code will be invalidated + **Enforcement:** Violating these standards results in immediate task rejection. 
diff --git a/GEMINI.md b/GEMINI.md index 3210727c..67453b1d 100644 --- a/GEMINI.md +++ b/GEMINI.md @@ -55,6 +55,10 @@ cat .cursor/rules/{go-coding-standards,architecture,test-standards,backwards-com - **Linting:** `golangci-lint run --fix --allow-parallel-runners ` (e.g., `./engine/agent/...`) - **IF YOUR SCOPE** is `.../.` then you need to run `make test` and `make lint` +### When writing code + +- **YOU MUST STRICTLY** follow the .cursor/rules/no-linebreaks.mdc or your code will be invalidated + **Enforcement:** Violating these standards results in immediate task rejection. diff --git a/Makefile b/Makefile index a38bd117..e8db9c5d 100644 --- a/Makefile +++ b/Makefile @@ -1,19 +1,12 @@ -include .env # Makefile for Compozy Go Project +# This Makefile delegates to Mage for parallel execution and smart caching +# Install mage: go install github.com/magefile/mage@latest # ----------------------------------------------------------------------------- -# Go Parameters & Setup +# Configuration # ----------------------------------------------------------------------------- -GOCMD=$(shell which go) -GOVERSION ?= $(shell awk '/^go /{print $$2}' go.mod 2>/dev/null || echo "1.25") -GOBUILD=$(GOCMD) build -GOTEST=$(GOCMD) test -GOFMT=gofmt -s -w -BINARY_NAME=compozy -BINARY_DIR=bin -SRC_DIRS=./... 
-LINTCMD=golangci-lint -BUNCMD=bun +MAGE=$(shell which mage 2>/dev/null) # Colors for output RED := \033[0;31m @@ -21,264 +14,253 @@ GREEN := \033[0;32m YELLOW := \033[0;33m NC := \033[0m # No Color +.PHONY: all test lint fmt modernize clean build dev deps schemagen schemagen-watch help integration-test typecheck +.PHONY: tidy start-docker stop-docker clean-docker reset-docker check-mage +.PHONY: swagger swagger-validate setup +.PHONY: lint-main lint-sdk typecheck-main typecheck-sdk fmt-main fmt-sdk modernize-main modernize-sdk +.PHONY: test-main test-sdk test-coverage test-coverage-main test-coverage-sdk test-nocache test-nocache-main test-nocache-sdk + # ----------------------------------------------------------------------------- -# Build Variables +# Mage Check # ----------------------------------------------------------------------------- -GIT_COMMIT := $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") -VERSION := $(shell git describe --tags --match="v*" --always 2>/dev/null || echo "unknown") - -# Build flags for injecting version info (aligned with GoReleaser format) -BUILD_DATE := $(shell date -u +'%Y-%m-%dT%H:%M:%SZ') -LDFLAGS := -X github.com/compozy/compozy/pkg/version.Version=$(VERSION) -X github.com/compozy/compozy/pkg/version.CommitHash=$(GIT_COMMIT) -X github.com/compozy/compozy/pkg/version.BuildDate=$(BUILD_DATE) +check-mage: + @if [ -z "$(MAGE)" ]; then \ + echo "$(RED)Error: mage is not installed$(NC)"; \ + echo "Install with: go install github.com/magefile/mage@latest"; \ + exit 1; \ + fi # ----------------------------------------------------------------------------- -# Swagger/OpenAPI +# Main Targets (Mage Delegates - Parallel Execution) # ----------------------------------------------------------------------------- -SWAGGER_DIR=./docs -SWAGGER_OUTPUT=$(SWAGGER_DIR)/swagger.json +all: check-mage + @$(MAGE) all -.PHONY: all test lint fmt modernize clean build dev deps schemagen schemagen-watch help integration-test -.PHONY: tidy 
test-go start-docker stop-docker clean-docker reset-docker -.PHONY: swagger swagger-deps swagger-gen swagger-serve check-go-version setup clean-go-cache -.PHONY: check-func-length create-func-issues solve-func-length +setup: check-mage + @$(MAGE) setup + +clean: check-mage + @$(MAGE) clean + +build: check-mage + @$(MAGE) build # ----------------------------------------------------------------------------- -# Setup & Version Checks +# Code Quality & Formatting (Mage Delegates - Parallel Execution) # ----------------------------------------------------------------------------- -check-go-version: - @echo "Checking Go version..." - @GO_VERSION=$$($(GOCMD) version 2>/dev/null | awk '{print $$3}' | sed 's/go//'); \ - REQUIRED_VERSION=$(GOVERSION); \ - if [ -z "$$GO_VERSION" ]; then \ - echo "$(RED)Error: Go is not available$(NC)"; \ - echo "Please ensure Go $(GOVERSION) is installed via mise"; \ - exit 1; \ - elif [ "$$(printf '%s\n' "$$REQUIRED_VERSION" "$$GO_VERSION" | sort -V | head -n1)" != "$$REQUIRED_VERSION" ]; then \ - echo "$(YELLOW)Warning: Go version $$GO_VERSION found, but $(GOVERSION) is required$(NC)"; \ - echo "Please update Go to version $(GOVERSION) with: mise use go@$(GOVERSION)"; \ - exit 1; \ - else \ - echo "$(GREEN)✓ Go version $$GO_VERSION is compatible$(NC)"; \ - fi +lint: check-mage + @$(MAGE) quality:lint + +lint-main: check-mage + @$(MAGE) lintMain + +lint-sdk: check-mage + @$(MAGE) lintSDK + +typecheck: check-mage + @$(MAGE) quality:typecheck + +typecheck-main: check-mage + @$(MAGE) typecheckMain + +typecheck-sdk: check-mage + @$(MAGE) typecheckSDK + +fmt: check-mage + @$(MAGE) quality:fmt + +fmt-main: check-mage + @$(MAGE) fmtMain -setup: check-go-version deps - @echo "$(GREEN)✓ Setup complete! 
You can now run 'make build' or 'make dev'$(NC)" +fmt-sdk: check-mage + @$(MAGE) fmtSDK + +modernize: check-mage + @$(MAGE) quality:modernize + +modernize-main: check-mage + @$(MAGE) modernizeMain + +modernize-sdk: check-mage + @$(MAGE) modernizeSDK # ----------------------------------------------------------------------------- -# Main Targets +# Development & Dependencies (Mage Delegates) # ----------------------------------------------------------------------------- -all: swagger test lint fmt +dev: check-mage + @$(MAGE) dev -clean: - rm -rf $(BINARY_DIR)/ - rm -rf $(SWAGGER_DIR)/ - $(GOCMD) clean +tidy: check-mage + @$(MAGE) tidy -build: check-go-version swagger - mkdir -p $(BINARY_DIR) - $(GOBUILD) -ldflags "$(LDFLAGS)" -o $(BINARY_DIR)/$(BINARY_NAME) ./ - chmod +x $(BINARY_DIR)/$(BINARY_NAME) +deps: check-mage + @$(MAGE) deps # ----------------------------------------------------------------------------- -# Code Quality & Formatting +# Swagger/OpenAPI Generation (Mage Delegates) # ----------------------------------------------------------------------------- -lint: - $(BUNCMD) run lint - $(LINTCMD) run --fix --allow-parallel-runners - @echo "Running static driver import guard..." - @./scripts/check-driver-imports.sh - @echo "Running modernize analyzer for min/max suggestions..." - @echo "Linting completed successfully" - -fmt: - @echo "Formatting code..." - $(BUNCMD) run format - $(LINTCMD) fmt - @echo "Formatting completed successfully" - -modernize: - $(GOCMD) run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix ./... +swagger: check-mage + @$(MAGE) swagger + +swagger-validate: check-mage + @$(MAGE) swaggerValidate # ----------------------------------------------------------------------------- -# Development & Dependencies +# Schema Generation (Mage Delegates) # ----------------------------------------------------------------------------- +schemagen: check-mage + @$(MAGE) schema:generate -dev: EXAMPLE=weather -dev: - gow run . 
dev --cwd examples/$(EXAMPLE) --env-file .env --debug --watch - -tidy: - @echo "Tidying modules..." - $(GOCMD) mod tidy - -deps: check-go-version clean-go-cache swagger-deps - @echo "Installing Go dependencies..." - @echo "Installing gotestsum..." - @$(GOCMD) install gotest.tools/gotestsum@latest - @echo "Installing gow for hot reload..." - @$(GOCMD) install github.com/mitranim/gow@latest - @echo "Installing goose for migrations..." - @$(GOCMD) install github.com/pressly/goose/v3/cmd/goose@latest - @echo "Installing golangci-lint v2..." - @curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $$($(GOCMD) env GOPATH)/bin v2.4.0 - @echo "$(GREEN)✓ All dependencies installed successfully$(NC)" - -clean-go-cache: - @echo "Cleaning Go build cache for fresh setup..." - @$(GOCMD) clean -cache -testcache -modcache 2>/dev/null || true - @echo "$(GREEN)✓ Go cache cleaned$(NC)" - -swagger-deps: - @echo "Installing Swagger dependencies..." - $(GOCMD) install github.com/swaggo/swag/cmd/swag@latest - @echo "Swagger dependencies installation complete." +schemagen-watch: check-mage + @$(MAGE) schema:watch # ----------------------------------------------------------------------------- -# Swagger/OpenAPI Generation +# Testing (Mage Delegates - Parallel Execution) # ----------------------------------------------------------------------------- -swagger: swagger-gen +test: check-mage + @$(MAGE) test -swagger-gen: - @echo "Generating Swagger documentation..." - @mkdir -p $(SWAGGER_DIR) - @swag init --dir ./ --generalInfo main.go --output $(SWAGGER_DIR) --parseDependency --parseInternal 2>&1 | grep -v "warning: failed to evaluate const" | grep -v "reflect: call of reflect.Value" | grep -v "strconv.ParseUint: parsing" || true - @echo "Running pre-commit on generated swagger files..." 
- @pre-commit run --files $(SWAGGER_DIR)/docs.go $(SWAGGER_DIR)/swagger.json $(SWAGGER_DIR)/swagger.yaml || true - @echo "Swagger documentation generated at $(SWAGGER_DIR)" +test-main: check-mage + @$(MAGE) testMain -swagger-validate: - @echo "Validating Swagger documentation..." - @swag init --dir ./ --generalInfo main.go --output $(SWAGGER_DIR) --parseDependency --parseInternal --quiet - @echo "Swagger documentation is valid" +test-sdk: check-mage + @$(MAGE) testSDK -# ----------------------------------------------------------------------------- -# Schema Generation -# ----------------------------------------------------------------------------- -schemagen: - $(GOCMD) run ./pkg/schemagen -out=./schemas - @cp -Rf ./schemas ../compozy-ui/backend/ +test-coverage: check-mage + @$(MAGE) testCoverage -schemagen-watch: - $(GOCMD) run ./pkg/schemagen -out=./schemas -watch +test-coverage-main: check-mage + @$(MAGE) testCoverageMain -# ----------------------------------------------------------------------------- -# Testing -# ----------------------------------------------------------------------------- +test-coverage-sdk: check-mage + @$(MAGE) testCoverageSDK -test: - @bun run test - @gotestsum --format pkgname -- -race -parallel=4 ./... +test-nocache: check-mage + @$(MAGE) testNoCache -test-coverage: - @bun run test - @gotestsum --format pkgname -- -race -parallel=4 -coverprofile=coverage.out -covermode=atomic ./... +test-nocache-main: check-mage + @$(MAGE) testNoCacheMain -test-nocache: - @bun run test - @gotestsum --format pkgname -- -race -count=1 -parallel=4 ./... 
+test-nocache-sdk: check-mage + @$(MAGE) testNoCacheSDK + +integration-sdk-compozy: check-mage + @$(MAGE) integration:sdkCompozy # ----------------------------------------------------------------------------- -# Docker & Database Management +# Docker & Database Management (Mage Delegates) # ----------------------------------------------------------------------------- -start-docker: - docker compose -f ./cluster/docker-compose.yml up -d +start-docker: check-mage + @$(MAGE) docker:start -stop-docker: - docker compose -f ./cluster/docker-compose.yml down +stop-docker: check-mage + @$(MAGE) docker:stop -clean-docker: - docker compose -f ./cluster/docker-compose.yml down --volumes +clean-docker: check-mage + @$(MAGE) docker:clean -reset-docker: - make clean-docker - make start-docker +reset-docker: check-mage + @$(MAGE) docker:reset # ----------------------------------------------------------------------------- -# Database +# Database (Mage Delegates) # ----------------------------------------------------------------------------- -DB_USER ?= postgres -DB_PASSWORD ?= postgres -DB_HOST ?= localhost -DB_PORT ?= 5432 -DB_NAME ?= compozy - -GOOSE_DBSTRING=postgres://${DB_USER}:${DB_PASSWORD}@${DB_HOST}:${DB_PORT}/${DB_NAME}?sslmode=disable -GOOSE_COMMAND = GOOSE_DRIVER=postgres GOOSE_DBSTRING=${GOOSE_DBSTRING} goose -dir ./engine/infra/postgres/migrations +migrate-status: check-mage + @$(MAGE) database:status -migrate-status: - $(GOOSE_COMMAND) status +migrate-up: check-mage + @$(MAGE) database:up -migrate-up: - $(GOOSE_COMMAND) up +migrate-down: check-mage + @$(MAGE) database:down -migrate-down: - $(GOOSE_COMMAND) down +migrate-create: check-mage + @$(MAGE) database:create -migrate-create: - $(GOOSE_COMMAND) create $(name) sql +migrate-validate: check-mage + @$(MAGE) database:validate -migrate-validate: - $(GOOSE_COMMAND) validate +migrate-reset: check-mage + @$(MAGE) database:reset -migrate-reset: - $(GOOSE_COMMAND) reset - -reset-db: - @make reset-docker +reset-db: 
check-mage + @$(MAGE) docker:reset # ----------------------------------------------------------------------------- -# Redis +# Redis (Mage Delegates) # ----------------------------------------------------------------------------- -REDIS_PASSWORD ?= redis_secret -REDIS_HOST ?= localhost -REDIS_PORT ?= 6379 - -redis-cli: - docker exec -it redis redis-cli -a ${REDIS_PASSWORD} +redis-cli: check-mage + @$(MAGE) redis:cli -redis-info: - docker exec redis redis-cli -a ${REDIS_PASSWORD} info +redis-info: check-mage + @$(MAGE) redis:info -redis-monitor: - docker exec -it redis redis-cli -a ${REDIS_PASSWORD} monitor +redis-monitor: check-mage + @$(MAGE) redis:monitor -redis-flush: - docker exec redis redis-cli -a ${REDIS_PASSWORD} flushall +redis-flush: check-mage + @$(MAGE) redis:flush -test-redis: - @echo "Testing Redis connection..." - @docker exec redis redis-cli -a ${REDIS_PASSWORD} ping +test-redis: check-mage + @$(MAGE) redis:testConnection # ----------------------------------------------------------------------------- # Help # ----------------------------------------------------------------------------- help: - @echo "$(GREEN)Compozy Makefile Commands$(NC)" + @echo "$(GREEN)Compozy Makefile Commands (Powered by Mage)$(NC)" + @echo "" + @echo "$(YELLOW)⚡ Performance: Tests run ~2-3x faster with parallel execution!$(NC)" @echo "" @echo "$(YELLOW)Setup & Build:$(NC)" @echo " make setup - Complete setup with Go version check and dependencies" @echo " make deps - Install all required dependencies" - @echo " make build - Build the compozy binary" + @echo " make build - Build the compozy binary (with smart caching)" @echo " make clean - Clean build artifacts" @echo "" @echo "$(YELLOW)Development:$(NC)" @echo " make dev - Run in development mode with hot reload" - @echo " make test - Run all tests" - @echo " make lint - Run linters and fix issues" - @echo " make fmt - Format code" + @echo " make test - Run all tests (main + sdk + bun) in parallel" + @echo " make 
test-coverage - Run all tests with coverage reports" + @echo " make test-nocache - Run all tests without cache" + @echo " make lint - Run linters in parallel (main + sdk + bun)" + @echo " make fmt - Format code in parallel (main + sdk + bun)" + @echo " make typecheck - Type check all modules in parallel" + @echo " make modernize - Modernize code patterns in parallel" @echo "" @echo "$(YELLOW)Docker & Database:$(NC)" @echo " make start-docker - Start Docker services" @echo " make stop-docker - Stop Docker services" + @echo " make reset-docker - Reset Docker environment" @echo " make migrate-up - Run database migrations" @echo " make migrate-down - Rollback last migration" + @echo " make migrate-create - Create new migration (use: make migrate-create name=my_migration)" + @echo "" + @echo "$(YELLOW)Redis:$(NC)" + @echo " make redis-cli - Open Redis CLI" + @echo " make redis-info - Show Redis info" + @echo " make redis-monitor - Monitor Redis commands" + @echo " make redis-flush - Flush all Redis data" + @echo " make test-redis - Test Redis connection" + @echo "" + @echo "$(YELLOW)Other:$(NC)" + @echo " make swagger - Generate Swagger documentation (with caching)" + @echo " make schemagen - Generate JSON schemas" + @echo " make all - Run all checks (tests + lint + format)" + @echo "" + @echo "$(YELLOW)Advanced:$(NC)" + @echo " mage -l - List all available Mage targets" + @echo " mage help - Show detailed Mage help" + @echo " mage - Run Mage target directly" @echo "" @echo "$(YELLOW)Requirements:$(NC)" - @echo " Go $(GOVERSION) or later (via mise)" - @echo " Bun (see https://bun.sh for install instructions or use Homebrew: brew install oven-sh/bun/bun)" + @echo " Go 1.25 or later (via mise)" + @echo " Mage (install: go install github.com/magefile/mage@latest)" + @echo " Bun (see https://bun.sh)" @echo " Docker & Docker Compose" @echo "" @echo "$(GREEN)Quick Start:$(NC)" - @echo " 1. make setup # Install dependencies" - @echo " 2. 
make start-docker # Start services" - @echo " 3. make migrate-up # Setup database" - @echo " 4. make dev # Start development server" + @echo " 1. go install github.com/magefile/mage@latest # Install mage" + @echo " 2. make setup # Install dependencies" + @echo " 3. make start-docker # Start services" + @echo " 4. make migrate-up # Setup database" + @echo " 5. make dev # Start development server" diff --git a/docs/docs.go b/docs/docs.go index eeed695c..02f35912 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -1867,7 +1867,7 @@ const docTemplate = `{ } }, "503": { - "description": "Streaming infrastructure unavailable", + "description": "Worker unavailable", "schema": { "allOf": [ { @@ -1967,7 +1967,7 @@ const docTemplate = `{ }, "/executions/agents/{exec_id}/stream": { "get": { - "description": "Streams agent execution updates over Server-Sent Events, emitting structured JSON or llm_chunk text depending on the output schema.", + "description": "Streams agent execution updates over Server-Sent Events, emitting structured JSON or llm_chunk text depending on the output schema. Served under routes.Base() (e.g., /api/v0/executions/agents/{exec_id}/stream).", "consumes": [ "*/*" ], @@ -2171,7 +2171,7 @@ const docTemplate = `{ }, "/executions/tasks/{exec_id}/stream": { "get": { - "description": "Streams task execution updates over Server-Sent Events, emitting structured JSON or llm_chunk text depending on the task output schema.", + "description": "Streams task execution updates over Server-Sent Events, emitting structured JSON or llm_chunk text depending on the task output schema. 
Served under routes.Base() (e.g., /api/v0/executions/tasks/{exec_id}/stream).", "consumes": [ "*/*" ], @@ -2354,7 +2354,7 @@ const docTemplate = `{ } }, "503": { - "description": "Streaming infrastructure unavailable", + "description": "Worker unavailable", "schema": { "allOf": [ { @@ -2471,7 +2471,7 @@ const docTemplate = `{ } }, "503": { - "description": "Streaming infrastructure unavailable", + "description": "Worker unavailable", "schema": { "allOf": [ { @@ -2570,7 +2570,7 @@ const docTemplate = `{ } }, "503": { - "description": "Streaming infrastructure unavailable", + "description": "Worker unavailable", "schema": { "allOf": [ { @@ -2669,7 +2669,7 @@ const docTemplate = `{ } }, "503": { - "description": "Streaming infrastructure unavailable", + "description": "Worker unavailable", "schema": { "allOf": [ { @@ -2768,7 +2768,7 @@ const docTemplate = `{ } }, "503": { - "description": "Streaming infrastructure unavailable", + "description": "Worker unavailable", "schema": { "allOf": [ { @@ -2894,7 +2894,7 @@ const docTemplate = `{ } }, "503": { - "description": "Streaming infrastructure unavailable", + "description": "Worker unavailable", "schema": { "allOf": [ { @@ -10653,7 +10653,7 @@ const docTemplate = `{ } }, "503": { - "description": "Streaming infrastructure unavailable", + "description": "Worker unavailable", "schema": { "allOf": [ { @@ -10789,7 +10789,7 @@ const docTemplate = `{ } }, "503": { - "description": "Streaming infrastructure unavailable", + "description": "Worker unavailable", "schema": { "allOf": [ { @@ -10957,7 +10957,7 @@ const docTemplate = `{ } }, "503": { - "description": "Streaming infrastructure unavailable", + "description": "Worker unavailable", "schema": { "allOf": [ { @@ -11395,6 +11395,22 @@ const docTemplate = `{ } ] }, + "on_error": { + "description": "OnError defines the transition executed when the action encounters an error.", + "allOf": [ + { + "$ref": "#/definitions/core.ErrorTransition" + } + ] + }, + "on_success": { + 
"description": "OnSuccess defines the transition executed when the action completes successfully.", + "allOf": [ + { + "$ref": "#/definitions/core.SuccessTransition" + } + ] + }, "output": { "description": "JSON Schema defining the expected output format from this action.\nUsed for validating agent responses and ensuring consistent output structure.\n\nIf ` + "`" + `nil` + "`" + `, no output validation is performed.\n\n**Schema format:** JSON Schema Draft 7", "allOf": [ @@ -11407,6 +11423,25 @@ const docTemplate = `{ "description": "Detailed instructions for the agent when executing this action.\nShould clearly define the expected behavior, output format, and any constraints.\n\n**Best practices:**\n- Be specific about the desired outcome\n- Include examples if complex formatting is required\n- Define clear success criteria\n- Specify any limitations or boundaries", "type": "string" }, + "retry_policy": { + "description": "RetryPolicy configures automatic retries for the action when execution fails.", + "allOf": [ + { + "$ref": "#/definitions/core.RetryPolicyConfig" + } + ] + }, + "timeout": { + "description": "Timeout specifies the maximum duration allowed for the action execution.", + "type": "string" + }, + "tools": { + "description": "Tools scoped to this action; override agent-level tool availability when provided.", + "type": "array", + "items": { + "$ref": "#/definitions/tool.Config" + } + }, "with": { "description": "Default parameters to provide to the action.\nThese are merged with runtime parameters, with runtime values taking precedence.\n\n**Use cases:**\n- Setting default configuration options\n- Providing constant context values\n- Pre-filling common parameters", "allOf": [ @@ -12974,7 +13009,7 @@ const docTemplate = `{ "type": "string" }, "max_sessions": { - "description": "MaxSessions defines the **maximum number of concurrent sessions** allowed.\n\nHelps manage resource usage and prevent server overload.\nEach agent connection typically creates 
one session.\n\n**Values**:\n- ` + "`" + `0` + "`" + ` or negative: Unlimited sessions (default)\n- Positive number: Maximum concurrent sessions\n\n- **Examples**:\n` + "`" + `` + "`" + `` + "`" + `yaml\nmax_sessions: 10 # Allow up to 10 concurrent connections\nmax_sessions: 1 # Single session only (useful for stateful servers)\nmax_sessions: 0 # Unlimited sessions\n` + "`" + `` + "`" + `` + "`" + `", + "description": "MaxSessions defines the **maximum number of concurrent sessions** allowed.\n\nHelps manage resource usage and prevent server overload.\nEach agent connection typically creates one session.\n\n**Values**:\n- ` + "`" + `0` + "`" + `: Unlimited sessions (default)\n- Positive number: Maximum concurrent sessions\n\n- **Examples**:\n` + "`" + `` + "`" + `` + "`" + `yaml\nmax_sessions: 10 # Allow up to 10 concurrent connections\nmax_sessions: 1 # Single session only (useful for stateful servers)\nmax_sessions: 0 # Unlimited sessions\n` + "`" + `` + "`" + `` + "`" + `", "type": "integer" }, "proto": { @@ -13364,6 +13399,19 @@ const docTemplate = `{ } } }, + "memory.PrivacyScope": { + "type": "string", + "enum": [ + "global", + "user", + "session" + ], + "x-enum-varnames": [ + "PrivacyGlobalScope", + "PrivacyUserScope", + "PrivacySessionScope" + ] + }, "memory.SystemHealth": { "type": "object", "properties": { @@ -13436,6 +13484,9 @@ const docTemplate = `{ "description": { "type": "string" }, + "expiration": { + "type": "string" + }, "flushing": { "$ref": "#/definitions/core.FlushingStrategyConfig" }, @@ -13460,6 +13511,9 @@ const docTemplate = `{ "privacy_policy": { "$ref": "#/definitions/core.PrivacyPolicyConfig" }, + "privacy_scope": { + "$ref": "#/definitions/memory.PrivacyScope" + }, "resource": { "type": "string" }, @@ -13490,6 +13544,9 @@ const docTemplate = `{ "type": "string", "example": "abc123" }, + "expiration": { + "type": "string" + }, "flushing": { "$ref": "#/definitions/core.FlushingStrategyConfig" }, @@ -13514,6 +13571,9 @@ const docTemplate 
= `{ "privacy_policy": { "$ref": "#/definitions/core.PrivacyPolicyConfig" }, + "privacy_scope": { + "$ref": "#/definitions/memory.PrivacyScope" + }, "resource": { "type": "string" }, @@ -15245,6 +15305,10 @@ const docTemplate = `{ "tool.Config": { "type": "object", "properties": { + "code": { + "description": "Code contains inline source executed by the selected runtime when the tool runs.\nBuilders may supply either inline JavaScript/TypeScript code or references resolved at runtime.", + "type": "string" + }, "config": { "description": "Configuration parameters passed to the tool separately from input data.\nProvides static configuration that tools can use for initialization and behavior control.\nUnlike input parameters, config is not meant to change between tool invocations.\n\n- **Use cases:** API base URLs, retry policies, timeout settings, feature flags\n- **Separation:** Keeps configuration separate from runtime input data\n- **Override:** Can be overridden at workflow or agent level\n- **Example:**\n ` + "`" + `` + "`" + `` + "`" + `yaml\n config:\n base_url: \"https://api.example.com\"\n timeout: 30\n retry_count: 3\n headers:\n User-Agent: \"Compozy/1.0\"\n ` + "`" + `` + "`" + `` + "`" + `", "allOf": [ @@ -15285,6 +15349,10 @@ const docTemplate = `{ } ] }, + "name": { + "description": "Name provides a concise, human-readable label for the tool shown in UIs and logs.\nUnlike the identifier, the name may include spaces and capitalization to improve readability.\nWhen omitted, UIs should fall back to using the identifier.", + "type": "string" + }, "output": { "description": "JSON schema defining the expected output format from the tool.\nUsed for validation after execution and documentation purposes.\nMust follow JSON Schema Draft 7 specification for compatibility.\n\n- **When nil:** No output validation is performed\n- **Use cases:** Response validation, type safety, workflow data flow verification\n- **Best practice:** Define output schema for tools used 
in critical workflows", "allOf": [ @@ -15297,6 +15365,10 @@ const docTemplate = `{ "description": "Resource identifier for the autoloader system (must be ` + "`" + `\"tool\"` + "`" + `).\nThis field enables automatic discovery and registration of tool configurations.", "type": "string" }, + "runtime": { + "description": "Runtime selects the execution environment for custom tool implementations.\nSupported runtimes include ` + "`" + `\"bun\"` + "`" + `, ` + "`" + `\"node\"` + "`" + `, and ` + "`" + `\"deno\"` + "`" + ` for JavaScript/TypeScript execution.\nWhen empty, the project runtime defaults are applied.", + "type": "string" + }, "timeout": { "description": "Maximum execution time for the tool in Go duration format.\nIf not specified, uses the global tool timeout from project configuration.\nThis timeout applies to the entire tool execution lifecycle.\n\n- **Examples:** ` + "`" + `\"30s\"` + "`" + `, ` + "`" + `\"5m\"` + "`" + `, ` + "`" + `\"1h\"` + "`" + `, ` + "`" + `\"500ms\"` + "`" + `\n- **Constraints:** Must be positive; zero or negative values cause validation errors\n- **Default fallback:** Uses project-level tool timeout when empty", "type": "string" @@ -15314,6 +15386,9 @@ const docTemplate = `{ "toolrouter.ToolDTO": { "type": "object", "properties": { + "code": { + "type": "string" + }, "config": { "$ref": "#/definitions/core.Input" }, @@ -15332,12 +15407,18 @@ const docTemplate = `{ "input": { "$ref": "#/definitions/schema.Schema" }, + "name": { + "type": "string" + }, "output": { "$ref": "#/definitions/schema.Schema" }, "resource": { "type": "string" }, + "runtime": { + "type": "string" + }, "timeout": { "type": "string" }, @@ -15349,6 +15430,9 @@ const docTemplate = `{ "toolrouter.ToolListItem": { "type": "object", "properties": { + "code": { + "type": "string" + }, "config": { "$ref": "#/definitions/core.Input" }, @@ -15371,12 +15455,18 @@ const docTemplate = `{ "input": { "$ref": "#/definitions/schema.Schema" }, + "name": { + "type": "string" + 
}, "output": { "$ref": "#/definitions/schema.Schema" }, "resource": { "type": "string" }, + "runtime": { + "type": "string" + }, "timeout": { "type": "string" }, diff --git a/docs/swagger.json b/docs/swagger.json index 09e0a06c..00ecc4f2 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -1960,7 +1960,7 @@ }, "/executions/agents/{exec_id}/stream": { "get": { - "description": "Streams agent execution updates over Server-Sent Events, emitting structured JSON or llm_chunk text depending on the output schema.", + "description": "Streams agent execution updates over Server-Sent Events, emitting structured JSON or llm_chunk text depending on the output schema. Served under routes.Base() (e.g., /api/v0/executions/agents/{exec_id}/stream).", "consumes": [ "*/*" ], @@ -2064,7 +2064,7 @@ } }, "503": { - "description": "Pub/Sub provider unavailable", + "description": "Streaming infrastructure unavailable", "schema": { "allOf": [ { @@ -2164,7 +2164,7 @@ }, "/executions/tasks/{exec_id}/stream": { "get": { - "description": "Streams task execution updates over Server-Sent Events, emitting structured JSON or llm_chunk text depending on the task output schema.", + "description": "Streams task execution updates over Server-Sent Events, emitting structured JSON or llm_chunk text depending on the task output schema. 
Served under routes.Base() (e.g., /api/v0/executions/tasks/{exec_id}/stream).", "consumes": [ "*/*" ], @@ -2268,7 +2268,7 @@ } }, "503": { - "description": "Pub/Sub provider unavailable", + "description": "Streaming infrastructure unavailable", "schema": { "allOf": [ { @@ -3013,7 +3013,7 @@ } }, "503": { - "description": "Worker unavailable", + "description": "Streaming infrastructure unavailable", "schema": { "allOf": [ { @@ -11388,6 +11388,22 @@ } ] }, + "on_error": { + "description": "OnError defines the transition executed when the action encounters an error.", + "allOf": [ + { + "$ref": "#/definitions/core.ErrorTransition" + } + ] + }, + "on_success": { + "description": "OnSuccess defines the transition executed when the action completes successfully.", + "allOf": [ + { + "$ref": "#/definitions/core.SuccessTransition" + } + ] + }, "output": { "description": "JSON Schema defining the expected output format from this action.\nUsed for validating agent responses and ensuring consistent output structure.\n\nIf `nil`, no output validation is performed.\n\n**Schema format:** JSON Schema Draft 7", "allOf": [ @@ -11400,6 +11416,25 @@ "description": "Detailed instructions for the agent when executing this action.\nShould clearly define the expected behavior, output format, and any constraints.\n\n**Best practices:**\n- Be specific about the desired outcome\n- Include examples if complex formatting is required\n- Define clear success criteria\n- Specify any limitations or boundaries", "type": "string" }, + "retry_policy": { + "description": "RetryPolicy configures automatic retries for the action when execution fails.", + "allOf": [ + { + "$ref": "#/definitions/core.RetryPolicyConfig" + } + ] + }, + "timeout": { + "description": "Timeout specifies the maximum duration allowed for the action execution.", + "type": "string" + }, + "tools": { + "description": "Tools scoped to this action; override agent-level tool availability when provided.", + "type": "array", + "items": 
{ + "$ref": "#/definitions/tool.Config" + } + }, "with": { "description": "Default parameters to provide to the action.\nThese are merged with runtime parameters, with runtime values taking precedence.\n\n**Use cases:**\n- Setting default configuration options\n- Providing constant context values\n- Pre-filling common parameters", "allOf": [ @@ -12967,7 +13002,7 @@ "type": "string" }, "max_sessions": { - "description": "MaxSessions defines the **maximum number of concurrent sessions** allowed.\n\nHelps manage resource usage and prevent server overload.\nEach agent connection typically creates one session.\n\n**Values**:\n- `0` or negative: Unlimited sessions (default)\n- Positive number: Maximum concurrent sessions\n\n- **Examples**:\n```yaml\nmax_sessions: 10 # Allow up to 10 concurrent connections\nmax_sessions: 1 # Single session only (useful for stateful servers)\nmax_sessions: 0 # Unlimited sessions\n```", + "description": "MaxSessions defines the **maximum number of concurrent sessions** allowed.\n\nHelps manage resource usage and prevent server overload.\nEach agent connection typically creates one session.\n\n**Values**:\n- `0`: Unlimited sessions (default)\n- Positive number: Maximum concurrent sessions\n\n- **Examples**:\n```yaml\nmax_sessions: 10 # Allow up to 10 concurrent connections\nmax_sessions: 1 # Single session only (useful for stateful servers)\nmax_sessions: 0 # Unlimited sessions\n```", "type": "integer" }, "proto": { @@ -13357,6 +13392,22 @@ } } }, + "memory.PrivacyScope": { + "type": "string", + "enum": [ + "global", + "user", + "session", + "global", + "user", + "session" + ], + "x-enum-varnames": [ + "PrivacyGlobalScope", + "PrivacyUserScope", + "PrivacySessionScope" + ] + }, "memory.SystemHealth": { "type": "object", "properties": { @@ -13429,6 +13480,9 @@ "description": { "type": "string" }, + "expiration": { + "type": "string" + }, "flushing": { "$ref": "#/definitions/core.FlushingStrategyConfig" }, @@ -13453,6 +13507,9 @@ 
"privacy_policy": { "$ref": "#/definitions/core.PrivacyPolicyConfig" }, + "privacy_scope": { + "$ref": "#/definitions/memory.PrivacyScope" + }, "resource": { "type": "string" }, @@ -13483,6 +13540,9 @@ "type": "string", "example": "abc123" }, + "expiration": { + "type": "string" + }, "flushing": { "$ref": "#/definitions/core.FlushingStrategyConfig" }, @@ -13507,6 +13567,9 @@ "privacy_policy": { "$ref": "#/definitions/core.PrivacyPolicyConfig" }, + "privacy_scope": { + "$ref": "#/definitions/memory.PrivacyScope" + }, "resource": { "type": "string" }, @@ -15238,6 +15301,10 @@ "tool.Config": { "type": "object", "properties": { + "code": { + "description": "Code contains inline source executed by the selected runtime when the tool runs.\nBuilders may supply either inline JavaScript/TypeScript code or references resolved at runtime.", + "type": "string" + }, "config": { "description": "Configuration parameters passed to the tool separately from input data.\nProvides static configuration that tools can use for initialization and behavior control.\nUnlike input parameters, config is not meant to change between tool invocations.\n\n- **Use cases:** API base URLs, retry policies, timeout settings, feature flags\n- **Separation:** Keeps configuration separate from runtime input data\n- **Override:** Can be overridden at workflow or agent level\n- **Example:**\n ```yaml\n config:\n base_url: \"https://api.example.com\"\n timeout: 30\n retry_count: 3\n headers:\n User-Agent: \"Compozy/1.0\"\n ```", "allOf": [ @@ -15278,6 +15345,10 @@ } ] }, + "name": { + "description": "Name provides a concise, human-readable label for the tool shown in UIs and logs.\nUnlike the identifier, the name may include spaces and capitalization to improve readability.\nWhen omitted, UIs should fall back to using the identifier.", + "type": "string" + }, "output": { "description": "JSON schema defining the expected output format from the tool.\nUsed for validation after execution and documentation 
purposes.\nMust follow JSON Schema Draft 7 specification for compatibility.\n\n- **When nil:** No output validation is performed\n- **Use cases:** Response validation, type safety, workflow data flow verification\n- **Best practice:** Define output schema for tools used in critical workflows", "allOf": [ @@ -15290,6 +15361,10 @@ "description": "Resource identifier for the autoloader system (must be `\"tool\"`).\nThis field enables automatic discovery and registration of tool configurations.", "type": "string" }, + "runtime": { + "description": "Runtime selects the execution environment for custom tool implementations.\nSupported runtimes include `\"bun\"`, `\"node\"`, and `\"deno\"` for JavaScript/TypeScript execution.\nWhen empty, the project runtime defaults are applied.", + "type": "string" + }, "timeout": { "description": "Maximum execution time for the tool in Go duration format.\nIf not specified, uses the global tool timeout from project configuration.\nThis timeout applies to the entire tool execution lifecycle.\n\n- **Examples:** `\"30s\"`, `\"5m\"`, `\"1h\"`, `\"500ms\"`\n- **Constraints:** Must be positive; zero or negative values cause validation errors\n- **Default fallback:** Uses project-level tool timeout when empty", "type": "string" @@ -15307,6 +15382,9 @@ "toolrouter.ToolDTO": { "type": "object", "properties": { + "code": { + "type": "string" + }, "config": { "$ref": "#/definitions/core.Input" }, @@ -15325,12 +15403,18 @@ "input": { "$ref": "#/definitions/schema.Schema" }, + "name": { + "type": "string" + }, "output": { "$ref": "#/definitions/schema.Schema" }, "resource": { "type": "string" }, + "runtime": { + "type": "string" + }, "timeout": { "type": "string" }, @@ -15342,6 +15426,9 @@ "toolrouter.ToolListItem": { "type": "object", "properties": { + "code": { + "type": "string" + }, "config": { "$ref": "#/definitions/core.Input" }, @@ -15364,12 +15451,18 @@ "input": { "$ref": "#/definitions/schema.Schema" }, + "name": { + "type": "string" + }, 
"output": { "$ref": "#/definitions/schema.Schema" }, "resource": { "type": "string" }, + "runtime": { + "type": "string" + }, "timeout": { "type": "string" }, diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 67d28eb6..60359fc9 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -26,6 +26,14 @@ definitions: If `nil`, the action accepts any input format without validation. **Schema format:** JSON Schema Draft 7 + on_error: + allOf: + - $ref: '#/definitions/core.ErrorTransition' + description: OnError defines the transition executed when the action encounters an error. + on_success: + allOf: + - $ref: '#/definitions/core.SuccessTransition' + description: OnSuccess defines the transition executed when the action completes successfully. output: allOf: - $ref: '#/definitions/schema.Schema' @@ -47,6 +55,18 @@ definitions: - Define clear success criteria - Specify any limitations or boundaries type: string + retry_policy: + allOf: + - $ref: '#/definitions/core.RetryPolicyConfig' + description: RetryPolicy configures automatic retries for the action when execution fails. + timeout: + description: Timeout specifies the maximum duration allowed for the action execution. + type: string + tools: + description: Tools scoped to this action; override agent-level tool availability when provided. + items: + $ref: '#/definitions/tool.Config' + type: array with: allOf: - $ref: '#/definitions/core.Input' @@ -1514,7 +1534,7 @@ definitions: Each agent connection typically creates one session. 
**Values**: - - `0` or negative: Unlimited sessions (default) + - `0`: Unlimited sessions (default) - Positive number: Maximum concurrent sessions - **Examples**: @@ -1856,6 +1876,16 @@ definitions: token_usage: $ref: '#/definitions/memory.TokenUsageHealth' type: object + memory.PrivacyScope: + enum: + - global + - user + - session + type: string + x-enum-varnames: + - PrivacyGlobalScope + - PrivacyUserScope + - PrivacySessionScope memory.SystemHealth: properties: healthy: @@ -1903,6 +1936,8 @@ definitions: type: string description: type: string + expiration: + type: string flushing: $ref: '#/definitions/core.FlushingStrategyConfig' id: @@ -1919,6 +1954,8 @@ definitions: $ref: '#/definitions/core.PersistenceConfig' privacy_policy: $ref: '#/definitions/core.PrivacyPolicyConfig' + privacy_scope: + $ref: '#/definitions/memory.PrivacyScope' resource: type: string token_allocation: @@ -1939,6 +1976,8 @@ definitions: etag: example: abc123 type: string + expiration: + type: string flushing: $ref: '#/definitions/core.FlushingStrategyConfig' id: @@ -1955,6 +1994,8 @@ definitions: $ref: '#/definitions/core.PersistenceConfig' privacy_policy: $ref: '#/definitions/core.PrivacyPolicyConfig' + privacy_scope: + $ref: '#/definitions/memory.PrivacyScope' resource: type: string token_allocation: @@ -3334,6 +3375,11 @@ definitions: type: object tool.Config: properties: + code: + description: |- + Code contains inline source executed by the selected runtime when the tool runs. + Builders may supply either inline JavaScript/TypeScript code or references resolved at runtime. 
+ type: string config: allOf: - $ref: '#/definitions/core.Input' @@ -3406,6 +3452,12 @@ definitions: - **When nil:** Tool accepts any input format (no validation performed) - **Use cases:** Parameter validation, type safety, auto-generated documentation - **Integration:** Automatically converts to LLM function parameters + name: + description: |- + Name provides a concise, human-readable label for the tool shown in UIs and logs. + Unlike the identifier, the name may include spaces and capitalization to improve readability. + When omitted, UIs should fall back to using the identifier. + type: string output: allOf: - $ref: '#/definitions/schema.Schema' @@ -3422,6 +3474,12 @@ definitions: Resource identifier for the autoloader system (must be `"tool"`). This field enables automatic discovery and registration of tool configurations. type: string + runtime: + description: |- + Runtime selects the execution environment for custom tool implementations. + Supported runtimes include `"bun"`, `"node"`, and `"deno"` for JavaScript/TypeScript execution. + When empty, the project runtime defaults are applied. + type: string timeout: description: |- Maximum execution time for the tool in Go duration format. 
@@ -3445,6 +3503,8 @@ definitions: type: object toolrouter.ToolDTO: properties: + code: + type: string config: $ref: '#/definitions/core.Input' cwd: @@ -3457,10 +3517,14 @@ definitions: type: string input: $ref: '#/definitions/schema.Schema' + name: + type: string output: $ref: '#/definitions/schema.Schema' resource: type: string + runtime: + type: string timeout: type: string with: @@ -3468,6 +3532,8 @@ definitions: type: object toolrouter.ToolListItem: properties: + code: + type: string config: $ref: '#/definitions/core.Input' cwd: @@ -3483,10 +3549,14 @@ definitions: type: string input: $ref: '#/definitions/schema.Schema' + name: + type: string output: $ref: '#/definitions/schema.Schema' resource: type: string + runtime: + type: string timeout: type: string with: @@ -5384,7 +5454,7 @@ paths: consumes: - '*/*' description: Streams agent execution updates over Server-Sent Events, emitting structured JSON or - llm_chunk text depending on the output schema. + llm_chunk text depending on the output schema. Served under routes.Base() (e.g., /api/v0/executions/agents/{exec_id}/stream). parameters: - description: Agent execution ID example: '"2Z4PVTL6K27XVT4A3NPKMDD5BG"' @@ -5442,7 +5512,7 @@ paths: $ref: '#/definitions/router.ErrorInfo' type: object "503": - description: Pub/Sub provider unavailable + description: Streaming infrastructure unavailable schema: allOf: - $ref: '#/definitions/router.Response' @@ -5504,7 +5574,7 @@ paths: consumes: - '*/*' description: Streams task execution updates over Server-Sent Events, emitting structured JSON or - llm_chunk text depending on the task output schema. + llm_chunk text depending on the task output schema. Served under routes.Base() (e.g., /api/v0/executions/tasks/{exec_id}/stream). 
parameters: - description: Task execution ID example: '"2Z4PVTL6K27XVT4A3NPKMDD5BG"' @@ -5562,7 +5632,7 @@ paths: $ref: '#/definitions/router.ErrorInfo' type: object "503": - description: Pub/Sub provider unavailable + description: Streaming infrastructure unavailable schema: allOf: - $ref: '#/definitions/router.Response' @@ -5979,7 +6049,7 @@ paths: $ref: '#/definitions/router.ErrorInfo' type: object "503": - description: Worker unavailable + description: Streaming infrastructure unavailable schema: allOf: - $ref: '#/definitions/router.Response' diff --git a/engine/agent/action_config.go b/engine/agent/action_config.go index e6c256af..193e4cb8 100644 --- a/engine/agent/action_config.go +++ b/engine/agent/action_config.go @@ -8,6 +8,7 @@ import ( "github.com/compozy/compozy/engine/attachment" "github.com/compozy/compozy/engine/core" "github.com/compozy/compozy/engine/schema" + "github.com/compozy/compozy/engine/tool" ) // ActionConfig defines a structured action that an agent can perform. @@ -101,7 +102,17 @@ type ActionConfig struct { CWD *core.PathCWD // Attachments at action scope - Attachments attachment.Attachments `json:"attachments,omitempty" yaml:"attachments,omitempty" mapstructure:"attachments,omitempty"` + Attachments attachment.Attachments `json:"attachments,omitempty" yaml:"attachments,omitempty" mapstructure:"attachments,omitempty"` + // Tools scoped to this action; override agent-level tool availability when provided. + Tools []tool.Config `json:"tools,omitempty" yaml:"tools,omitempty" mapstructure:"tools,omitempty"` + // OnSuccess defines the transition executed when the action completes successfully. + OnSuccess *core.SuccessTransition `json:"on_success,omitempty" yaml:"on_success,omitempty" mapstructure:"on_success,omitempty"` + // OnError defines the transition executed when the action encounters an error. 
+ OnError *core.ErrorTransition `json:"on_error,omitempty" yaml:"on_error,omitempty" mapstructure:"on_error,omitempty"` + // RetryPolicy configures automatic retries for the action when execution fails. + RetryPolicy *core.RetryPolicyConfig `json:"retry_policy,omitempty" yaml:"retry_policy,omitempty" mapstructure:"retry_policy,omitempty"` + // Timeout specifies the maximum duration allowed for the action execution. + Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` } func (a *ActionConfig) SetCWD(path string) error { @@ -129,7 +140,19 @@ func (a *ActionConfig) Validate(ctx context.Context) error { schema.NewCWDValidator(a.CWD, a.ID), schema.NewStructValidator(a), ) - return v.Validate(ctx) + if err := v.Validate(ctx); err != nil { + return err + } + if a.Timeout != "" { + duration, err := core.ParseHumanDuration(a.Timeout) + if err != nil { + return fmt.Errorf("invalid action timeout '%s': %w", a.Timeout, err) + } + if duration <= 0 { + return fmt.Errorf("action timeout must be positive, got: %s", a.Timeout) + } + } + return nil } func (a *ActionConfig) ValidateInput(ctx context.Context, input *core.Input) error { diff --git a/engine/llm/service.go b/engine/llm/service.go index 104e0dea..c8eb5f33 100644 --- a/engine/llm/service.go +++ b/engine/llm/service.go @@ -81,37 +81,40 @@ func registerNativeBuiltins( return builtin.RegisterBuiltins(ctx, registerFn, builtin.Options{Definitions: definitions}) } -func logNativeTools(ctx context.Context, cfg *appconfig.NativeToolsConfig, result *builtin.Result) { +func logNativeTools( + ctx context.Context, + cfg *appconfig.NativeToolsConfig, + result *builtin.Result, + userNative []string, +) { log := logger.FromContext(ctx) execAllowlistCount := 0 - ids := []string{} + builtinIDs := []string{} if result != nil { execAllowlistCount = len(result.ExecCommands) - ids = append(ids, result.RegisteredIDs...) 
- } - if cfg != nil && cfg.Enabled && len(ids) > 0 { - log.Info( - "Native builtin tools registered", - "count", - len(ids), - "ids", - ids, - "exec_allowlist_count", - execAllowlistCount, - "root_dir", - cfg.RootDir, - "fetch_timeout_ms", - cfg.Fetch.Timeout.Milliseconds(), - "fetch_max_body_bytes", - cfg.Fetch.MaxBodyBytes, - ) + builtinIDs = append(builtinIDs, result.RegisteredIDs...) + } + enabled := cfg != nil && cfg.Enabled + if enabled || len(builtinIDs) > 0 || len(userNative) > 0 { + fields := []any{ + "enabled", enabled, + "builtin_count", len(builtinIDs), + "builtin_ids", builtinIDs, + "user_native_count", len(userNative), + "user_native_ids", userNative, + "exec_allowlist_count", execAllowlistCount, + } + if cfg != nil { + fields = append(fields, + "root_dir", cfg.RootDir, + "fetch_timeout_ms", cfg.Fetch.Timeout.Milliseconds(), + "fetch_max_body_bytes", cfg.Fetch.MaxBodyBytes, + ) + } + log.Info("Native tools registered", fields...) return } - enabled := false - if cfg != nil { - enabled = cfg.Enabled - } - log.Info("Native builtin tools disabled", "enabled", enabled, "exec_allowlist_count", execAllowlistCount) + log.Info("Native tools disabled", "enabled", enabled, "exec_allowlist_count", execAllowlistCount) } func configureToolRegistry( @@ -148,8 +151,8 @@ func configureToolRegistry( if appCfg := appconfig.FromContext(ctx); appCfg != nil { nativeCfg = appCfg.Runtime.NativeTools } - logNativeTools(ctx, &nativeCfg, result) - registerRuntimeTools(ctx, registry, runtime, tools) + userNative := registerRuntimeTools(ctx, registry, runtime, tools) + logNativeTools(ctx, &nativeCfg, result, userNative) return nil } @@ -168,14 +171,25 @@ func registerRuntimeTools( registry ToolRegistry, runtime runtime.Runtime, configs []tool.Config, -) { +) []string { log := logger.FromContext(ctx) + userNativeIDs := make([]string, 0) for i := range configs { - localTool := NewLocalToolAdapter(&configs[i], &runtimeAdapter{manager: runtime}) + cfg := &configs[i] + if 
cfg.IsNative() { + userNativeIDs = append(userNativeIDs, cfg.ID) + nativeTool := NewNativeToolAdapter(cfg) + if err := registry.Register(ctx, nativeTool); err != nil { + log.Warn("Failed to register native tool", "tool", cfg.ID, "error", err) + } + continue + } + localTool := NewLocalToolAdapter(cfg, &runtimeAdapter{manager: runtime}) if err := registry.Register(ctx, localTool); err != nil { - log.Warn("Failed to register local tool", "tool", configs[i].ID, "error", err) + log.Warn("Failed to register local tool", "tool", cfg.ID, "error", err) } } + return userNativeIDs } func assembleOrchestratorConfig( diff --git a/engine/llm/tool_registry.go b/engine/llm/tool_registry.go index fa6bab0d..e23cf430 100644 --- a/engine/llm/tool_registry.go +++ b/engine/llm/tool_registry.go @@ -15,6 +15,7 @@ import ( mcpmetrics "github.com/compozy/compozy/engine/mcp/metrics" "github.com/compozy/compozy/engine/schema" "github.com/compozy/compozy/engine/tool" + nativeuser "github.com/compozy/compozy/engine/tool/nativeuser" "github.com/compozy/compozy/pkg/logger" "github.com/tmc/langchaingo/tools" "golang.org/x/sync/singleflight" @@ -587,6 +588,83 @@ func (a *localToolAdapter) Call(ctx context.Context, input string) (string, erro return string(result), nil } +type nativeToolAdapter struct { + config *tool.Config +} + +func NewNativeToolAdapter(config *tool.Config) Tool { + return &nativeToolAdapter{config: config} +} + +func (a *nativeToolAdapter) Name() string { + return a.config.ID +} + +func (a *nativeToolAdapter) Description() string { + return a.config.Description +} + +func (a *nativeToolAdapter) ParameterSchema() map[string]any { + if a.config == nil || a.config.InputSchema == nil { + return nil + } + source := map[string]any(*a.config.InputSchema) + copied, err := core.DeepCopy(source) + if err != nil { + return core.CloneMap(source) + } + return copied +} + +func (a *nativeToolAdapter) Call(ctx context.Context, input string) (string, error) { + log := logger.FromContext(ctx) + 
definition, ok := nativeuser.Lookup(a.config.ID) + if !ok { + err := fmt.Errorf("native handler missing for tool %s", a.config.ID) + log.Error("Native tool handler not registered", "tool", a.config.ID) + return "", core.NewError(err, "TOOL_EXECUTION_ERROR", map[string]any{"tool": a.config.ID}) + } + var inputMap map[string]any + if err := json.Unmarshal([]byte(input), &inputMap); err != nil { + return "", core.NewError(err, "INVALID_TOOL_INPUT", map[string]any{"tool": a.config.ID}) + } + coreInput := core.NewInput(inputMap) + if err := a.config.ValidateInput(ctx, &coreInput); err != nil { + return "", core.NewError(err, "INVALID_TOOL_INPUT", map[string]any{"tool": a.config.ID}) + } + configMap := a.config.GetConfig().AsMap() + inputCopy := coreInput.AsMap() + var ( + outputMap map[string]any + execErr error + ) + func() { + defer func() { + if r := recover(); r != nil { + execErr = fmt.Errorf("native tool handler panic: %v", r) + log.Error("Native tool handler panicked", "tool", a.config.ID, "panic", r) + } + }() + log.Debug("Executing native tool", "tool", a.config.ID) + outputMap, execErr = definition.Handler(ctx, inputCopy, configMap) + }() + if execErr != nil { + return "", core.NewError(execErr, "TOOL_EXECUTION_ERROR", map[string]any{"tool": a.config.ID}) + } + if outputMap == nil { + return "", core.NewError(fmt.Errorf("nil output"), "TOOL_EMPTY_OUTPUT", map[string]any{"tool": a.config.ID}) + } + output := core.Output(outputMap) + if err := a.config.ValidateOutput(ctx, &output); err != nil { + return "", core.NewError(err, "TOOL_INVALID_OUTPUT", map[string]any{"tool": a.config.ID}) + } + encoded, err := json.Marshal(output) + if err != nil { + return "", fmt.Errorf("failed to marshal output: %w", err) + } + return string(encoded), nil +} + // fetchFreshMCPTools retrieves the latest MCP tool list using singleflight. 
func (r *toolRegistry) fetchFreshMCPTools(ctx context.Context) ([]tools.Tool, error) { v, err, _ := r.sfGroup.Do("refresh-mcp-tools", func() (any, error) { diff --git a/engine/llm/tool_registry_test.go b/engine/llm/tool_registry_test.go index 587dbd90..13ff8bbd 100644 --- a/engine/llm/tool_registry_test.go +++ b/engine/llm/tool_registry_test.go @@ -9,7 +9,11 @@ import ( "testing" "time" + "github.com/compozy/compozy/engine/core" "github.com/compozy/compozy/engine/mcp" + "github.com/compozy/compozy/engine/schema" + "github.com/compozy/compozy/engine/tool" + nativeuser "github.com/compozy/compozy/engine/tool/nativeuser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -215,6 +219,106 @@ func TestToolRegistry_FindRefreshesOnStaleMiss(t *testing.T) { require.GreaterOrEqual(t, dynamic.Hits(), 2) } +func buildNativeToolConfig() *tool.Config { + return &tool.Config{ + ID: "native-tool", + Description: "Native tool", + Implementation: tool.ImplementationNative, + InputSchema: &schema.Schema{ + "type": "object", + "properties": map[string]any{ + "name": map[string]any{"type": "string"}, + }, + "required": []string{"name"}, + }, + OutputSchema: &schema.Schema{ + "type": "object", + "properties": map[string]any{ + "result": map[string]any{"type": "string"}, + }, + "required": []string{"result"}, + }, + Config: &core.Input{"sample": true}, + } +} + +func TestNativeToolAdapter(t *testing.T) { + t.Run("Should execute native handler successfully", func(t *testing.T) { + nativeuser.Reset() + t.Cleanup(nativeuser.Reset) + handler := func(_ context.Context, input map[string]any, cfg map[string]any) (map[string]any, error) { + assert.Equal(t, map[string]any{"sample": true}, cfg) + assert.Equal(t, "alice", input["name"]) + return map[string]any{"result": "ok"}, nil + } + require.NoError(t, nativeuser.Register("native-tool", handler)) + adapter := NewNativeToolAdapter(buildNativeToolConfig()) + output, err := adapter.Call(t.Context(), `{"name":"alice"}`) 
+ require.NoError(t, err) + assert.Contains(t, output, "\"result\":\"ok\"") + }) + + t.Run("Should validate input schema", func(t *testing.T) { + nativeuser.Reset() + t.Cleanup(nativeuser.Reset) + require.NoError( + t, + nativeuser.Register( + "native-tool", + func(context.Context, map[string]any, map[string]any) (map[string]any, error) { + return map[string]any{"result": "ok"}, nil + }, + ), + ) + adapter := NewNativeToolAdapter(buildNativeToolConfig()) + _, err := adapter.Call(t.Context(), `{"unexpected":true}`) + require.Error(t, err) + coreErr, ok := err.(*core.Error) + require.True(t, ok) + assert.Equal(t, "INVALID_TOOL_INPUT", coreErr.Code) + }) + + t.Run("Should recover from panic", func(t *testing.T) { + nativeuser.Reset() + t.Cleanup(nativeuser.Reset) + require.NoError( + t, + nativeuser.Register( + "native-tool", + func(context.Context, map[string]any, map[string]any) (map[string]any, error) { + panic("boom") + }, + ), + ) + adapter := NewNativeToolAdapter(buildNativeToolConfig()) + _, err := adapter.Call(t.Context(), `{"name":"alice"}`) + require.Error(t, err) + coreErr, ok := err.(*core.Error) + require.True(t, ok) + assert.Equal(t, "TOOL_EXECUTION_ERROR", coreErr.Code) + }) + + t.Run("Should validate output schema", func(t *testing.T) { + nativeuser.Reset() + t.Cleanup(nativeuser.Reset) + require.NoError( + t, + nativeuser.Register( + "native-tool", + func(context.Context, map[string]any, map[string]any) (map[string]any, error) { + return map[string]any{"unexpected": true}, nil + }, + ), + ) + adapter := NewNativeToolAdapter(buildNativeToolConfig()) + _, err := adapter.Call(t.Context(), `{"name":"alice"}`) + require.Error(t, err) + coreErr, ok := err.(*core.Error) + require.True(t, ok) + assert.Equal(t, "TOOL_INVALID_OUTPUT", coreErr.Code) + }) +} + func TestToolRegistry_InvalidateCacheClearsIndex(t *testing.T) { dynamic := newDynamicToolsServer(t, []mcp.ToolDefinition{ {Name: "alpha", Description: "A", MCPName: "mcp-one"}, diff --git 
a/engine/mcp/config.go b/engine/mcp/config.go index 117a966a..4fbe39cb 100644 --- a/engine/mcp/config.go +++ b/engine/mcp/config.go @@ -317,7 +317,7 @@ type Config struct { // Each agent connection typically creates one session. // // **Values**: - // - `0` or negative: Unlimited sessions (default) + // - `0`: Unlimited sessions (default) // - Positive number: Maximum concurrent sessions // // - **Examples**: @@ -582,6 +582,9 @@ func (c *Config) validateLimits(_ context.Context) error { if c.StartTimeout < 0 { return errors.New("start_timeout cannot be negative") } + if c.MaxSessions < 0 { + return errors.New("max_sessions cannot be negative") + } return nil } diff --git a/engine/mcp/config_test.go b/engine/mcp/config_test.go index 4781fdd8..061f96a2 100644 --- a/engine/mcp/config_test.go +++ b/engine/mcp/config_test.go @@ -430,7 +430,7 @@ func TestConfig_Validate_HeadersAndOrder(t *testing.T) { }) } func TestConfig_validateLimits(t *testing.T) { - t.Run("Should accept negative MaxSessions", func(t *testing.T) { + t.Run("Should accept zero MaxSessions", func(t *testing.T) { prev, had := os.LookupEnv("MCP_PROXY_URL") _ = os.Setenv("MCP_PROXY_URL", "http://localhost:6001") t.Cleanup(func() { @@ -444,13 +444,13 @@ func TestConfig_validateLimits(t *testing.T) { ID: "test-mcp", URL: "http://localhost:3000", Transport: mcpproxy.TransportSSE, - MaxSessions: -1, + MaxSessions: 0, } err := config.Validate(t.Context()) - assert.NoError(t, err, "Negative MaxSessions should pass validation (unlimited)") + assert.NoError(t, err, "Zero MaxSessions should pass validation (unlimited)") }) - t.Run("Should accept zero MaxSessions", func(t *testing.T) { + t.Run("Should reject negative MaxSessions", func(t *testing.T) { prev, had := os.LookupEnv("MCP_PROXY_URL") _ = os.Setenv("MCP_PROXY_URL", "http://localhost:6001") t.Cleanup(func() { @@ -464,10 +464,11 @@ func TestConfig_validateLimits(t *testing.T) { ID: "test-mcp", URL: "http://localhost:3000", Transport: mcpproxy.TransportSSE, - 
MaxSessions: 0, + MaxSessions: -1, } err := config.Validate(t.Context()) - assert.NoError(t, err, "Zero MaxSessions should pass validation (unlimited)") + require.Error(t, err) + assert.Contains(t, err.Error(), "max_sessions cannot be negative") }) t.Run("Should reject negative StartTimeout", func(t *testing.T) { diff --git a/engine/memory/config.go b/engine/memory/config.go index 9e9e557c..7bf558df 100644 --- a/engine/memory/config.go +++ b/engine/memory/config.go @@ -192,6 +192,10 @@ type Config struct { // default_redaction_string: "[REDACTED]" // ``` PrivacyPolicy *memcore.PrivacyPolicyConfig `json:"privacy_policy,omitempty" yaml:"privacy_policy,omitempty" mapstructure:"privacy_policy,omitempty"` + // PrivacyScope controls how memory is shared across tenants/users/sessions. + PrivacyScope PrivacyScope `json:"privacy_scope,omitempty" yaml:"privacy_scope,omitempty" mapstructure:"privacy_scope,omitempty"` + // Expiration defines how long memory data is retained before cleanup. + Expiration string `json:"expiration,omitempty" yaml:"expiration,omitempty" mapstructure:"expiration,omitempty"` // Locking configures **distributed lock timeouts** for concurrent memory operations. // **Critical for preventing race conditions** when multiple agents access the same memory. @@ -222,6 +226,8 @@ type Config struct { ttlManager *TTLManager `json:"-" yaml:"-"` // ttlManagerOnce ensures thread-safe initialization of ttlManager ttlManagerOnce sync.Once `json:"-" yaml:"-"` + // parsedExpiration caches the parsed expiration duration for runtime use. 
+ parsedExpiration time.Duration } // --- Implementation for core.Configurable pattern --- @@ -292,6 +298,12 @@ func (c *Config) Validate(ctx context.Context) error { if err := c.validateLocking(ctx); err != nil { return err } + if err := c.validatePrivacyScope(); err != nil { + return err + } + if err := c.validateExpiration(ctx); err != nil { + return err + } return c.validateTokenBased(ctx) } @@ -416,6 +428,46 @@ func (c *Config) validateLocking(_ context.Context) error { return nil } +func (c *Config) validatePrivacyScope() error { + if c.PrivacyScope == "" { + c.PrivacyScope = PrivacyGlobalScope + return nil + } + if c.PrivacyScope.IsValid() { + return nil + } + return fmt.Errorf( + "memory config ID '%s': privacy scope '%s' is invalid", + c.ID, + c.PrivacyScope, + ) +} + +func (c *Config) validateExpiration(_ context.Context) error { + if c.Expiration == "" { + c.parsedExpiration = 0 + return nil + } + duration, err := core.ParseHumanDuration(c.Expiration) + if err != nil { + return fmt.Errorf( + "memory config ID '%s': invalid expiration duration '%s': %w", + c.ID, + c.Expiration, + err, + ) + } + if duration < 0 { + return fmt.Errorf( + "memory config ID '%s': expiration duration must be non-negative, got '%s'", + c.ID, + c.Expiration, + ) + } + c.parsedExpiration = duration + return nil +} + func (c *Config) validateTokenBased(_ context.Context) error { if c.Type == memcore.TokenBasedMemory { if c.MaxTokens <= 0 && c.MaxContextRatio <= 0 && c.MaxMessages <= 0 { @@ -588,6 +640,9 @@ func (c *Config) copyConfigFields(from *Config) { c.MaxTokens = from.MaxTokens c.MaxMessages = from.MaxMessages c.MaxContextRatio = from.MaxContextRatio + c.PrivacyScope = from.PrivacyScope + c.Expiration = from.Expiration + c.parsedExpiration = from.parsedExpiration if from.TokenAllocation != nil { if v, err := core.DeepCopy(from.TokenAllocation); err == nil { c.TokenAllocation = v @@ -624,6 +679,7 @@ func (c *Config) copyConfigFields(from *Config) { } else { c.TokenProvider = 
nil } + c.DefaultKeyTemplate = from.DefaultKeyTemplate c.filePath = from.filePath c.CWD = from.CWD } diff --git a/engine/memory/config/router/dto.go b/engine/memory/config/router/dto.go index 9f78f7d1..891b97e7 100644 --- a/engine/memory/config/router/dto.go +++ b/engine/memory/config/router/dto.go @@ -40,6 +40,8 @@ type MemoryCoreDTO struct { Flushing *memcore.FlushingStrategyConfig `json:"flushing,omitempty"` Persistence memcore.PersistenceConfig `json:"persistence"` PrivacyPolicy *memcore.PrivacyPolicyConfig `json:"privacy_policy,omitempty"` + PrivacyScope memory.PrivacyScope `json:"privacy_scope,omitempty"` + Expiration string `json:"expiration,omitempty"` Locking *memcore.LockConfig `json:"locking,omitempty"` TokenProvider *memcore.TokenProviderConfig `json:"token_provider,omitempty"` DefaultKeyTemplate string `json:"default_key_template,omitempty"` @@ -95,6 +97,8 @@ func convertMemoryConfigToDTO(cfg *memory.Config) (MemoryCoreDTO, error) { Flushing: cfg.Flushing, Persistence: cfg.Persistence, PrivacyPolicy: cfg.PrivacyPolicy, + PrivacyScope: cfg.PrivacyScope, + Expiration: cfg.Expiration, Locking: cfg.Locking, TokenProvider: cfg.TokenProvider, DefaultKeyTemplate: cfg.DefaultKeyTemplate, diff --git a/engine/memory/config_resolver.go b/engine/memory/config_resolver.go index 3e06cb1f..bbc138e0 100644 --- a/engine/memory/config_resolver.go +++ b/engine/memory/config_resolver.go @@ -69,6 +69,9 @@ func (rb *ResourceBuilder) Build(ctx context.Context) (*memcore.Resource, error) Persistence: rb.config.Persistence, TokenCounter: "", // Token counter determined at runtime TokenProvider: rb.config.TokenProvider, + PrivacyScope: rb.config.PrivacyScope, + Expiration: rb.config.Expiration, + ParsedExpiration: rb.config.parsedExpiration, Metadata: nil, // Metadata not stored in config DisableFlush: false, // Flush enabled by default } @@ -426,6 +429,9 @@ func cloneConfigForValidation(cfg *Config) *Config { DefaultKeyTemplate: cfg.DefaultKeyTemplate, filePath: cfg.filePath, 
ttlManager: cfg.ttlManager, + PrivacyScope: cfg.PrivacyScope, + Expiration: cfg.Expiration, + parsedExpiration: cfg.parsedExpiration, } cloned.TokenAllocation = cloneTokenAllocation(cfg.TokenAllocation) cloned.Flushing = cloneFlushingConfig(cfg.Flushing) diff --git a/engine/memory/core/types.go b/engine/memory/core/types.go index b602970f..240210af 100644 --- a/engine/memory/core/types.go +++ b/engine/memory/core/types.go @@ -4,6 +4,8 @@ import ( "fmt" "math" "time" + + "github.com/compozy/compozy/engine/core" ) // Type defines the type of memory strategy being used. @@ -41,6 +43,28 @@ const ( // Note: PriorityBasedFlushing removed - use EvictionPolicyConfig with PriorityEviction instead ) +// PrivacyScope defines the visibility boundary for a memory resource. +type PrivacyScope string + +const ( + // PrivacyGlobalScope shares memory data across all tenants. + PrivacyGlobalScope PrivacyScope = "global" + // PrivacyUserScope restricts memory data to a single user. + PrivacyUserScope PrivacyScope = "user" + // PrivacySessionScope restricts memory data to a single session. + PrivacySessionScope PrivacyScope = "session" +) + +// IsValid reports whether the privacy scope is supported. Empty indicates unset. +func (p PrivacyScope) IsValid() bool { + switch p { + case PrivacyGlobalScope, PrivacyUserScope, PrivacySessionScope, "": + return true + default: + return false + } +} + // Resource holds the static configuration for a memory resource, // typically loaded from a project's configuration files. type Resource struct { @@ -90,6 +114,12 @@ type Resource struct { // PrivacyPolicy defines rules for handling sensitive data in memory. PrivacyPolicy *PrivacyPolicyConfig `yaml:"privacy_policy,omitempty" json:"privacy_policy,omitempty"` + // PrivacyScope controls how memory data is shared across tenants/users/sessions. 
+ PrivacyScope PrivacyScope `yaml:"privacy_scope,omitempty" json:"privacy_scope,omitempty"` + // Expiration specifies how long memory data should live before automatic cleanup. + Expiration string `yaml:"expiration,omitempty" json:"expiration,omitempty"` + // ParsedExpiration caches the parsed expiration duration for runtime use. + ParsedExpiration time.Duration `yaml:"-" json:"-"` // Advanced configuration // TokenCounter specifies a custom token counting implementation. @@ -170,6 +200,12 @@ func (r *Resource) Validate() error { if err := r.validateEvictionPolicy(); err != nil { return err } + if err := r.validatePrivacyScope(); err != nil { + return err + } + if err := r.validateExpiration(); err != nil { + return err + } return r.validateTTLFormats() } @@ -202,6 +238,29 @@ func (r *Resource) validateMemoryTypeConstraints() error { return nil } +func (r *Resource) validatePrivacyScope() error { + if r.PrivacyScope.IsValid() { + return nil + } + return fmt.Errorf("invalid privacy scope: %s", r.PrivacyScope) +} + +func (r *Resource) validateExpiration() error { + if r.Expiration == "" { + r.ParsedExpiration = 0 + return nil + } + duration, err := core.ParseHumanDuration(r.Expiration) + if err != nil { + return fmt.Errorf("invalid expiration duration '%s': %w", r.Expiration, err) + } + if duration < 0 { + return fmt.Errorf("expiration duration must be non-negative, got %s", duration) + } + r.ParsedExpiration = duration + return nil +} + // validateContextRatio validates max_context_ratio value func (r *Resource) validateContextRatio() error { if r.MaxContextRatio > 1 { diff --git a/engine/memory/types.go b/engine/memory/types.go new file mode 100644 index 00000000..73fb2d1e --- /dev/null +++ b/engine/memory/types.go @@ -0,0 +1,15 @@ +package memory + +import memcore "github.com/compozy/compozy/engine/memory/core" + +// PrivacyScope re-exports the engine memory privacy scope enumeration for SDKs. 
+type PrivacyScope = memcore.PrivacyScope + +const ( + // PrivacyGlobalScope shares memory data across all tenants. + PrivacyGlobalScope = memcore.PrivacyGlobalScope + // PrivacyUserScope restricts memory data to a single user. + PrivacyUserScope = memcore.PrivacyUserScope + // PrivacySessionScope restricts memory data to a single session instance. + PrivacySessionScope = memcore.PrivacySessionScope +) diff --git a/engine/project/config.go b/engine/project/config.go index 6f3fcadb..0d836c1b 100644 --- a/engine/project/config.go +++ b/engine/project/config.go @@ -17,7 +17,9 @@ import ( "github.com/compozy/compozy/engine/core" "github.com/compozy/compozy/engine/infra/monitoring" "github.com/compozy/compozy/engine/knowledge" + "github.com/compozy/compozy/engine/mcp" "github.com/compozy/compozy/engine/memory" + projectschedule "github.com/compozy/compozy/engine/project/schedule" "github.com/compozy/compozy/engine/schema" "github.com/compozy/compozy/engine/tool" ) @@ -310,6 +312,10 @@ type Config struct { // Workflows defines the list of workflow files that compose this project's AI capabilities. Workflows []*WorkflowSourceConfig `json:"workflows" yaml:"workflows" mapstructure:"workflows"` + // Schedules defines automated workflow executions managed by the SDK. + // Each schedule references a workflow by identifier and applies cron-based execution semantics. + Schedules []*projectschedule.Config `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` + // Models configures the LLM providers and model settings available to this project. // // $ref: schema://provider @@ -448,6 +454,9 @@ type Config struct { // Knowledge defines the default binding for tasks or agents within the project scope (MVP single binding). Knowledge []core.KnowledgeBinding `json:"knowledge,omitempty" yaml:"knowledge,omitempty" mapstructure:"knowledge,omitempty"` + // MCPs declares project-scoped MCP server definitions accessible to workflows and agents. 
+ MCPs []mcp.Config `json:"mcps,omitempty" yaml:"mcps,omitempty" mapstructure:"mcps,omitempty"` + // Memories declares project-scoped memory resources that agents and tasks can reference // by ID. These are indexed into the ResourceStore under the current project and can be // used across workflows for conversation and state sharing. @@ -462,7 +471,7 @@ type Config struct { // // The Resource field on memory.Config is optional in project-level definitions and will // default to "memory" during validation. - Memories []memory.Config `json:"memories,omitempty" yaml:"memories,omitempty" mapstructure:"memories,omitempty"` + Memories []*memory.Config `json:"memories,omitempty" yaml:"memories,omitempty" mapstructure:"memories,omitempty"` // MonitoringConfig enables observability and metrics collection for performance tracking. // @@ -662,20 +671,23 @@ func (p *Config) validateMemories(ctx context.Context) error { return nil } ids := make(map[string]struct{}, len(p.Memories)) - for i := range p.Memories { - if strings.TrimSpace(p.Memories[i].Resource) == "" { - p.Memories[i].Resource = string(core.ConfigMemory) + for i, mem := range p.Memories { + if mem == nil { + return fmt.Errorf("memory[%d] cannot be nil", i) + } + if strings.TrimSpace(mem.Resource) == "" { + mem.Resource = string(core.ConfigMemory) } - if p.Memories[i].ID == "" { + if mem.ID == "" { return fmt.Errorf("memory[%d] missing required ID field", i) } - if _, ok := ids[p.Memories[i].ID]; ok { - return fmt.Errorf("duplicate memory ID '%s' found in project memories", p.Memories[i].ID) + if _, ok := ids[mem.ID]; ok { + return fmt.Errorf("duplicate memory ID '%s' found in project memories", mem.ID) } - if err := p.Memories[i].Validate(ctx); err != nil { + if err := mem.Validate(ctx); err != nil { return fmt.Errorf("memory[%d] validation failed: %w", i, err) } - ids[p.Memories[i].ID] = struct{}{} + ids[mem.ID] = struct{}{} } return nil } diff --git a/engine/project/indexer.go b/engine/project/indexer.go index 
07acde0a..bfc0f88e 100644 --- a/engine/project/indexer.go +++ b/engine/project/indexer.go @@ -7,9 +7,11 @@ import ( "runtime" "sync" + "dario.cat/mergo" "golang.org/x/sync/errgroup" "github.com/compozy/compozy/engine/knowledge" + enginememory "github.com/compozy/compozy/engine/memory" "github.com/compozy/compozy/engine/resources" "github.com/compozy/compozy/engine/schema" "github.com/compozy/compozy/pkg/logger" @@ -239,7 +241,7 @@ func schemaID(s *schema.Schema) string { return schema.GetID(s) } // indexProjectTools publishes project-level tools to the store. func (p *Config) indexProjectTools( - ctx context.Context, + groupCtx context.Context, group *errgroup.Group, store resources.ResourceStore, metaSources *metaCache, @@ -250,9 +252,10 @@ func (p *Config) indexProjectTools( return fmt.Errorf("project tool at index %d missing id", i) } key := resources.ResourceKey{Project: p.Name, Type: resources.ResourceTool, ID: tool.ID} - value := tool + keyCopy := key + toolCopy := tool group.Go(func() error { - return p.putResourceWithMeta(ctx, store, metaSources, key, value) + return p.putResourceWithMeta(groupCtx, store, metaSources, keyCopy, toolCopy) }) } return nil @@ -260,26 +263,32 @@ func (p *Config) indexProjectTools( // indexProjectMemories publishes project-level memory resources to the store. 
func (p *Config) indexProjectMemories( - ctx context.Context, + groupCtx context.Context, group *errgroup.Group, store resources.ResourceStore, metaSources *metaCache, ) error { - for i := range p.Memories { - memory := &p.Memories[i] + for i, memory := range p.Memories { + if memory == nil { + return fmt.Errorf("project memory at index %d cannot be nil", i) + } if memory.ID == "" { return fmt.Errorf("project memory at index %d missing id", i) } - if memory.Resource == "" { - memory.Resource = string(resources.ResourceMemory) + memClone := new(enginememory.Config) + if err := mergo.Merge(memClone, memory, mergo.WithOverride); err != nil { + return fmt.Errorf("memory '%s' clone failed: %w", memory.ID, err) } - if err := memory.Validate(ctx); err != nil { + if memClone.Resource == "" { + memClone.Resource = string(resources.ResourceMemory) + } + if err := memClone.Validate(groupCtx); err != nil { return fmt.Errorf("memory '%s' validation failed: %w", memory.ID, err) } key := resources.ResourceKey{Project: p.Name, Type: resources.ResourceMemory, ID: memory.ID} - value := memory + keyCopy := key group.Go(func() error { - return p.putResourceWithMeta(ctx, store, metaSources, key, value) + return p.putResourceWithMeta(groupCtx, store, metaSources, keyCopy, memClone) }) } return nil @@ -287,7 +296,7 @@ func (p *Config) indexProjectMemories( // indexProjectSchemas publishes project-level schemas to the store. 
func (p *Config) indexProjectSchemas( - ctx context.Context, + groupCtx context.Context, group *errgroup.Group, store resources.ResourceStore, metaSources *metaCache, @@ -299,9 +308,10 @@ func (p *Config) indexProjectSchemas( continue } key := resources.ResourceKey{Project: p.Name, Type: resources.ResourceSchema, ID: sid} - value := schemaValue + keyCopy := key + schemaCopy := schemaValue group.Go(func() error { - return p.putResourceWithMeta(ctx, store, metaSources, key, value) + return p.putResourceWithMeta(groupCtx, store, metaSources, keyCopy, schemaCopy) }) } return nil @@ -309,7 +319,7 @@ func (p *Config) indexProjectSchemas( // indexProjectModels publishes project-level models to the store. func (p *Config) indexProjectModels( - ctx context.Context, + groupCtx context.Context, group *errgroup.Group, store resources.ResourceStore, metaSources *metaCache, @@ -321,16 +331,17 @@ func (p *Config) indexProjectModels( } id := fmt.Sprintf("%s:%s", string(model.Provider), model.Model) key := resources.ResourceKey{Project: p.Name, Type: resources.ResourceModel, ID: id} - value := model + keyCopy := key + modelCopy := model group.Go(func() error { - return p.putResourceWithMeta(ctx, store, metaSources, key, value) + return p.putResourceWithMeta(groupCtx, store, metaSources, keyCopy, modelCopy) }) } return nil } func (p *Config) indexProjectEmbedders( - ctx context.Context, + groupCtx context.Context, group *errgroup.Group, store resources.ResourceStore, metaSources *metaCache, @@ -341,16 +352,17 @@ func (p *Config) indexProjectEmbedders( return fmt.Errorf("project embedder at index %d missing id", i) } key := resources.ResourceKey{Project: p.Name, Type: resources.ResourceEmbedder, ID: embedder.ID} - value := embedder + keyCopy := key + embedderCopy := embedder group.Go(func() error { - return p.putResourceWithMeta(ctx, store, metaSources, key, value) + return p.putResourceWithMeta(groupCtx, store, metaSources, keyCopy, embedderCopy) }) } return nil } func (p *Config) 
indexProjectVectorDBs( - ctx context.Context, + groupCtx context.Context, group *errgroup.Group, store resources.ResourceStore, metaSources *metaCache, @@ -361,16 +373,17 @@ func (p *Config) indexProjectVectorDBs( return fmt.Errorf("project vector_db at index %d missing id", i) } key := resources.ResourceKey{Project: p.Name, Type: resources.ResourceVectorDB, ID: vectorDB.ID} - value := vectorDB + keyCopy := key + vectorDBCopy := vectorDB group.Go(func() error { - return p.putResourceWithMeta(ctx, store, metaSources, key, value) + return p.putResourceWithMeta(groupCtx, store, metaSources, keyCopy, vectorDBCopy) }) } return nil } func (p *Config) indexProjectKnowledgeBases( - ctx context.Context, + groupCtx context.Context, group *errgroup.Group, store resources.ResourceStore, metaSources *metaCache, @@ -380,13 +393,15 @@ func (p *Config) indexProjectKnowledgeBases( if knowledgeBase.ID == "" { return fmt.Errorf("project knowledge_base at index %d missing id", i) } - if knowledgeBase.Ingest == "" { - knowledgeBase.Ingest = knowledge.IngestManual - } key := resources.ResourceKey{Project: p.Name, Type: resources.ResourceKnowledgeBase, ID: knowledgeBase.ID} - value := knowledgeBase + keyCopy := key + knowledgeBaseCopy := *knowledgeBase + if knowledgeBaseCopy.Ingest == "" { + knowledgeBaseCopy.Ingest = knowledge.IngestManual + } + copyValue := knowledgeBaseCopy group.Go(func() error { - return p.putResourceWithMeta(ctx, store, metaSources, key, value) + return p.putResourceWithMeta(groupCtx, store, metaSources, keyCopy, &copyValue) }) } return nil diff --git a/engine/project/memories_test.go b/engine/project/memories_test.go index 2ab59974..83a9203e 100644 --- a/engine/project/memories_test.go +++ b/engine/project/memories_test.go @@ -17,7 +17,7 @@ func TestProject_IndexMemoriesToResourceStore(t *testing.T) { p := &Config{ Name: "demo-mem", - Memories: []memory.Config{ + Memories: []*memory.Config{ { ID: "conversation", Type: memcore.BufferMemory, @@ -48,7 +48,7 @@ func 
TestProject_ValidateMemories(t *testing.T) { p := &Config{ Name: "demo-validate", CWD: cwd, - Memories: []memory.Config{ + Memories: []*memory.Config{ { // Resource is intentionally omitted to verify defaulting ID: "conv", diff --git a/engine/project/router/dto_test.go b/engine/project/router/dto_test.go index a01b7c5d..00281817 100644 --- a/engine/project/router/dto_test.go +++ b/engine/project/router/dto_test.go @@ -51,7 +51,7 @@ func TestToProjectDTO(t *testing.T) { ID: "tool-1", Description: "Test tool", }}, - Memories: []memory.Config{{ + Memories: []*memory.Config{{ ID: "memory-1", }}, MonitoringConfig: &monitoring.Config{ diff --git a/engine/project/schedule/config.go b/engine/project/schedule/config.go new file mode 100644 index 00000000..a39357d6 --- /dev/null +++ b/engine/project/schedule/config.go @@ -0,0 +1,132 @@ +package schedule + +import ( + "context" + "fmt" + "regexp" + "strings" + "time" + + "github.com/robfig/cron/v3" +) + +// Config describes a workflow schedule registration that runs a workflow on a cron cadence. +// +// Schedules are project-scoped resources that reference a workflow by identifier and define +// the cron expression, timezone, and optional retry policy used when launching executions. +type Config struct { + // ID uniquely identifies the schedule within the project. + ID string `json:"id" yaml:"id" mapstructure:"id"` + // WorkflowID references the workflow that should be executed when the schedule fires. + WorkflowID string `json:"workflow_id" yaml:"workflow_id" mapstructure:"workflow_id"` + // Cron is the cron expression that determines when the schedule triggers. + Cron string `json:"cron" yaml:"cron" mapstructure:"cron"` + // Timezone provides the IANA timezone name used when evaluating the cron expression. + Timezone string `json:"timezone,omitempty" yaml:"timezone,omitempty" mapstructure:"timezone,omitempty"` + // Input contains default input values that are supplied to the workflow when triggered. 
+ Input map[string]any `json:"input,omitempty" yaml:"input,omitempty" mapstructure:"input,omitempty"` + // Retry configures retry behavior for failed scheduled executions. + Retry *RetryPolicy `json:"retry,omitempty" yaml:"retry,omitempty" mapstructure:"retry,omitempty"` + // Enabled toggles whether the schedule is active. + Enabled *bool `json:"enabled,omitempty" yaml:"enabled,omitempty" mapstructure:"enabled,omitempty"` + // Description explains the schedule purpose for operators. + Description string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description,omitempty"` +} + +// RetryPolicy defines retry behavior for scheduled workflow executions. +type RetryPolicy struct { + // MaxAttempts is the number of retry attempts after the initial run fails. + MaxAttempts int `json:"max_attempts" yaml:"max_attempts" mapstructure:"max_attempts"` + // Backoff is the delay between retry attempts. + Backoff time.Duration `json:"backoff" yaml:"backoff" mapstructure:"backoff"` +} + +// Validate normalizes the schedule configuration and validates identifiers, cron expression, +// optional timezone, and retry settings before the schedule is registered. 
+func (c *Config) Validate(ctx context.Context) error { + if err := ensureContext(ctx); err != nil { + return err + } + c.ID = strings.TrimSpace(c.ID) + if err := validateIdentifier(c.ID); err != nil { + return fmt.Errorf("schedule id: %w", err) + } + c.WorkflowID = strings.TrimSpace(c.WorkflowID) + if err := validateIdentifier(c.WorkflowID); err != nil { + return fmt.Errorf("workflow_id: %w", err) + } + c.Cron = strings.TrimSpace(c.Cron) + if err := validateCronExpression(c.Cron); err != nil { + return fmt.Errorf("cron: %w", err) + } + if tz := strings.TrimSpace(c.Timezone); tz != "" { + c.Timezone = tz + if err := validateTimezone(tz); err != nil { + return fmt.Errorf("timezone: %w", err) + } + } + if c.Retry != nil { + if err := c.Retry.Validate(ctx); err != nil { + return fmt.Errorf("retry: %w", err) + } + } + return nil +} + +// Validate checks that the retry policy uses a valid context and ensures +// positive attempt and backoff values before activation. +func (r *RetryPolicy) Validate(ctx context.Context) error { + if err := ensureContext(ctx); err != nil { + return err + } + if r.MaxAttempts <= 0 { + return fmt.Errorf("max_attempts must be positive: got %d", r.MaxAttempts) + } + if err := validateDuration(r.Backoff); err != nil { + return fmt.Errorf("backoff: %w", err) + } + return nil +} + +var scheduleIDPattern = regexp.MustCompile(`^[A-Za-z0-9-]+$`) + +func ensureContext(ctx context.Context) error { + if ctx == nil { + return fmt.Errorf("context is required") + } + return nil +} + +func validateIdentifier(id string) error { + if id == "" { + return fmt.Errorf("id is required") + } + if !scheduleIDPattern.MatchString(id) { + return fmt.Errorf("id must contain only letters, numbers, or hyphens") + } + return nil +} + +func validateCronExpression(expr string) error { + if expr == "" { + return fmt.Errorf("cron expression is required") + } + if _, err := cron.ParseStandard(expr); err != nil { + return fmt.Errorf("cron expression is invalid: %w", err) + } + 
return nil +} + +func validateTimezone(name string) error { + if _, err := time.LoadLocation(name); err != nil { + return fmt.Errorf("timezone is invalid: %w", err) + } + return nil +} + +func validateDuration(d time.Duration) error { + if d <= 0 { + return fmt.Errorf("duration must be positive: got %s", d) + } + return nil +} diff --git a/engine/resources/store.go b/engine/resources/store.go index 26331611..7aa5c3e9 100644 --- a/engine/resources/store.go +++ b/engine/resources/store.go @@ -26,8 +26,10 @@ const ( ResourceEmbedder ResourceType = core.ConfigEmbedder ResourceVectorDB ResourceType = core.ConfigVectorDB // Resource-specific extensions not yet in core: - ResourceSchema ResourceType = "schema" - ResourceModel ResourceType = "model" + ResourceSchema ResourceType = "schema" + ResourceModel ResourceType = "model" + ResourceSchedule ResourceType = "schedule" + ResourceWebhook ResourceType = "webhook" // ResourceMeta stores provenance or auxiliary metadata for resources. // Not exposed via public HTTP router; used by importers/admin tooling. 
ResourceMeta ResourceType = "meta" diff --git a/engine/runtime/config.go b/engine/runtime/config.go index e5a5c9bc..4f31a5fa 100644 --- a/engine/runtime/config.go +++ b/engine/runtime/config.go @@ -19,6 +19,7 @@ type Config struct { EntrypointPath string // Path to entrypoint file BunPermissions []string // Bun-specific permissions NodeOptions []string // Node.js-specific options + NativeTools *NativeToolsConfig // Application config integration fields Environment string // Deployment environment (development, staging, production) // Memory management @@ -45,6 +46,7 @@ func DefaultConfig() *Config { BunPermissions: []string{ "--allow-read", // Minimal permissions - allow read only by default }, + NativeTools: &NativeToolsConfig{}, Environment: "development", // Default environment MaxMemoryMB: 2048, // Default 2GB memory limit MaxStderrCaptureSize: 1 * 1024 * 1024, // Default 1MB stderr buffer @@ -73,6 +75,7 @@ func TestConfig() *Config { BunPermissions: []string{ "--allow-read", }, + NativeTools: &NativeToolsConfig{}, Environment: "testing", // Test environment MaxMemoryMB: 512, // Lower memory limit for tests } diff --git a/engine/runtime/types.go b/engine/runtime/types.go index 81656022..0dea02d2 100644 --- a/engine/runtime/types.go +++ b/engine/runtime/types.go @@ -21,6 +21,12 @@ var SupportedRuntimeTypes = []string{ RuntimeTypeNode, } +// NativeToolsConfig controls enablement of builtin runtime-native tools provided by the engine. 
+type NativeToolsConfig struct { + CallAgents bool + CallWorkflows bool +} + // IsValidRuntimeType checks if the given runtime type is valid func IsValidRuntimeType(runtimeType string) bool { return slices.Contains(SupportedRuntimeTypes, runtimeType) diff --git a/engine/schema/property.go b/engine/schema/property.go new file mode 100644 index 00000000..7217af0c --- /dev/null +++ b/engine/schema/property.go @@ -0,0 +1,26 @@ +package schema + +import ( + "fmt" + + "github.com/compozy/compozy/engine/core" +) + +// Property represents a named JSON Schema property with optional required state. +type Property struct { + Name string `json:"name"` + Schema *Schema `json:"schema"` + Required bool `json:"required"` +} + +// Clone creates a deep copy of the property to avoid shared state between callers. +func (p *Property) Clone() (*Property, error) { + if p == nil { + return nil, nil + } + copied, err := core.DeepCopy(*p) + if err != nil { + return nil, fmt.Errorf("failed to clone property %q: %w", p.Name, err) + } + return &copied, nil +} diff --git a/engine/tool/config.go b/engine/tool/config.go index 19dd0bd9..1c88b452 100644 --- a/engine/tool/config.go +++ b/engine/tool/config.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "strings" "time" "dario.cat/mergo" @@ -18,6 +19,12 @@ import ( "gopkg.in/yaml.v3" ) +const ( + ImplementationRuntime = "runtime" + ImplementationNative = "native" + RuntimeGo = "go" +) + // Config represents a tool configuration in Compozy. // // Tools are **executable components** that extend AI agent capabilities by providing @@ -97,21 +104,38 @@ import ( type Config struct { // Resource identifier for the autoloader system (must be `"tool"`). // This field enables automatic discovery and registration of tool configurations. 
- Resource string `json:"resource,omitempty" yaml:"resource,omitempty" mapstructure:"resource,omitempty"` + Resource string `json:"resource,omitempty" yaml:"resource,omitempty" mapstructure:"resource,omitempty"` // Unique identifier for the tool within the project scope. // Used for referencing the tool in agent configurations, workflows, and function calls. // Must be unique across all tools in the project. // // - **Examples:** `"file-reader"`, `"api-client"`, `"data-processor"` // - **Naming:** Use kebab-case for consistency with other Compozy identifiers - ID string `json:"id,omitempty" yaml:"id,omitempty" mapstructure:"id,omitempty"` + ID string `json:"id,omitempty" yaml:"id,omitempty" mapstructure:"id,omitempty"` + // Name provides a concise, human-readable label for the tool shown in UIs and logs. + // Unlike the identifier, the name may include spaces and capitalization to improve readability. + // When omitted, UIs should fall back to using the identifier. + Name string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` // Human-readable description of the tool's functionality and purpose. // This description is used by AI agents to understand when and how to use the tool. // Should clearly explain capabilities, limitations, and expected use cases. // // - **Best practices:** Be specific about what the tool does and its constraints // - **Example:** `"Read and parse various file formats including JSON, YAML, and CSV with size limits"` - Description string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description,omitempty"` + // Runtime selects the execution environment for custom tool implementations. + // Supported runtimes include `"bun"`, `"node"`, and `"deno"` for JavaScript/TypeScript execution. + // When empty, the project runtime defaults are applied. 
+ Runtime string `json:"runtime,omitempty" yaml:"runtime,omitempty" mapstructure:"runtime,omitempty"` + // Implementation defines how the tool executes within the Compozy engine. + // Supported values are: + // - `"runtime"`: executes via an external runtime such as Bun (default) + // - `"native"`: executes via an in-process Go handler registered at runtime + // When unset, the implementation defaults to `"runtime"` unless the runtime is explicitly `"go"`. + Implementation string `json:"implementation,omitempty" yaml:"implementation,omitempty" mapstructure:"implementation,omitempty"` + // Code contains inline source executed by the selected runtime when the tool runs. + // Builders may supply either inline JavaScript/TypeScript code or references resolved at runtime. + Code string `json:"code,omitempty" yaml:"code,omitempty" mapstructure:"code,omitempty"` // Maximum execution time for the tool in Go duration format. // If not specified, uses the global tool timeout from project configuration. // This timeout applies to the entire tool execution lifecycle. @@ -119,7 +143,7 @@ type Config struct { // - **Examples:** `"30s"`, `"5m"`, `"1h"`, `"500ms"` // - **Constraints:** Must be positive; zero or negative values cause validation errors // - **Default fallback:** Uses project-level tool timeout when empty - Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` + Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` // JSON schema defining the expected input parameters for the tool. // Used for validation before execution and to generate LLM function call definitions. // Must follow JSON Schema Draft 7 specification for compatibility. 
@@ -127,7 +151,7 @@ type Config struct { // - **When nil:** Tool accepts any input format (no validation performed) // - **Use cases:** Parameter validation, type safety, auto-generated documentation // - **Integration:** Automatically converts to LLM function parameters - InputSchema *schema.Schema `json:"input,omitempty" yaml:"input,omitempty" mapstructure:"input,omitempty"` + InputSchema *schema.Schema `json:"input,omitempty" yaml:"input,omitempty" mapstructure:"input,omitempty"` // JSON schema defining the expected output format from the tool. // Used for validation after execution and documentation purposes. // Must follow JSON Schema Draft 7 specification for compatibility. @@ -135,14 +159,14 @@ type Config struct { // - **When nil:** No output validation is performed // - **Use cases:** Response validation, type safety, workflow data flow verification // - **Best practice:** Define output schema for tools used in critical workflows - OutputSchema *schema.Schema `json:"output,omitempty" yaml:"output,omitempty" mapstructure:"output,omitempty"` + OutputSchema *schema.Schema `json:"output,omitempty" yaml:"output,omitempty" mapstructure:"output,omitempty"` // Default input parameters merged with runtime parameters provided by agents. // Provides a way to set tool defaults while allowing runtime customization. // // - **Merge strategy:** Runtime parameters override defaults (shallow merge) // - **Use cases:** Default API URLs, fallback configurations, preset options // - **Security note:** Avoid storing secrets here; use environment variables instead - With *core.Input `json:"with,omitempty" yaml:"with,omitempty" mapstructure:"with,omitempty"` + With *core.Input `json:"with,omitempty" yaml:"with,omitempty" mapstructure:"with,omitempty"` // Configuration parameters passed to the tool separately from input data. // Provides static configuration that tools can use for initialization and behavior control. 
// Unlike input parameters, config is not meant to change between tool invocations. @@ -159,7 +183,7 @@ type Config struct { // headers: // User-Agent: "Compozy/1.0" // ``` - Config *core.Input `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` + Config *core.Input `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` // Environment variables available during tool execution. // Variables are isolated to the tool's execution context for security. // Used for configuration, API keys, and runtime settings. @@ -173,7 +197,7 @@ type Config struct { // BASE_URL: "https://api.example.com" // DEBUG: "{{ .project.debug | default(false) }}" // ``` - Env *core.EnvMap `json:"env,omitempty" yaml:"env,omitempty" mapstructure:"env,omitempty"` + Env *core.EnvMap `json:"env,omitempty" yaml:"env,omitempty" mapstructure:"env,omitempty"` // filePath stores the filesystem path where this configuration was loaded from. // Used internally for resolving relative paths and debugging. @@ -337,6 +361,39 @@ func (t *Config) HasSchema() bool { return t.InputSchema != nil || t.OutputSchema != nil } +// EffectiveImplementation returns the canonical implementation mode for the tool configuration. +// When the Implementation field is empty, it infers the value from the runtime, defaulting to "runtime". +func (t *Config) EffectiveImplementation() (string, error) { + impl := strings.TrimSpace(strings.ToLower(t.Implementation)) + if impl == "" { + runtime := strings.TrimSpace(strings.ToLower(t.Runtime)) + if runtime == RuntimeGo { + return ImplementationNative, nil + } + return ImplementationRuntime, nil + } + switch impl { + case ImplementationRuntime, ImplementationNative: + return impl, nil + default: + return "", fmt.Errorf("invalid implementation '%s'", t.Implementation) + } +} + +// SetImplementation stores the canonical implementation value using runtime inference rules. 
+func (t *Config) SetImplementation(implementation string) { + t.Implementation = strings.TrimSpace(strings.ToLower(implementation)) +} + +// IsNative reports whether the configuration represents a native Go tool. +func (t *Config) IsNative() bool { + impl, err := t.EffectiveImplementation() + if err != nil { + return false + } + return impl == ImplementationNative +} + // Validate ensures the tool configuration is valid and complete. // Performs comprehensive validation of all configuration fields including working directory // and timeout format. Should be called before using the tool in production workflows. @@ -347,6 +404,24 @@ func (t *Config) Validate(ctx context.Context) error { if err := v.Validate(ctx); err != nil { return err } + impl, err := t.EffectiveImplementation() + if err != nil { + return err + } + t.SetImplementation(impl) + if impl == ImplementationNative { + runtime := strings.TrimSpace(strings.ToLower(t.Runtime)) + switch runtime { + case "": + t.Runtime = RuntimeGo + case RuntimeGo: + t.Runtime = runtime + default: + return fmt.Errorf("native tools must use runtime '%s', got: %s", RuntimeGo, t.Runtime) + } + } else if t.Runtime != "" { + t.Runtime = strings.TrimSpace(strings.ToLower(t.Runtime)) + } if t.Timeout != "" { timeout, err := time.ParseDuration(t.Timeout) if err != nil { diff --git a/engine/tool/inline/manager.go b/engine/tool/inline/manager.go new file mode 100644 index 00000000..0cc3a80d --- /dev/null +++ b/engine/tool/inline/manager.go @@ -0,0 +1,476 @@ +package inline + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/engine/resources" + "github.com/compozy/compozy/engine/tool" + "github.com/compozy/compozy/pkg/logger" +) + +const ( + entrypointFileName = "__inline_entrypoint.ts" + modulePrefix = "inline" + defaultModuleExt = ".ts" + defaultDirPerm fs.FileMode = 
0o755 +) + +// Options configures the inline manager with project metadata and resource store dependencies. +// Fields are normalized when building the manager so runtime paths are deterministic. +type Options struct { + ProjectRoot string + ProjectName string + Store resources.ResourceStore + UserEntrypoint string + WorkerFilePerm fs.FileMode +} + +// Manager materializes inline tools on disk and watches project resources for live updates. +// It keeps generated modules and entrypoint files in sync so worker runtimes execute fresh code. +type Manager struct { + opts Options + inlineDir string + entrypointPath string + + mu sync.Mutex + modules map[string]moduleState + entrypointHash string + + startOnce sync.Once + closeOnce sync.Once + startErr error + + syncCh chan struct{} + cancel context.CancelFunc + wg sync.WaitGroup +} + +type moduleState struct { + fileName string + checksum string +} + +type moduleSpec struct { + id string + code string + fileName string + checksum string +} + +// NewManager constructs an inline Manager, validating options and preparing runtime directories +// without starting background synchronization loops. 
+func NewManager(ctx context.Context, opts Options) (*Manager, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + if opts.Store == nil { + return nil, fmt.Errorf("resource store is required") + } + project := strings.TrimSpace(opts.ProjectName) + if project == "" { + return nil, fmt.Errorf("project name is required") + } + root := strings.TrimSpace(opts.ProjectRoot) + if root == "" { + cwd, err := os.Getwd() + if err != nil { + return nil, fmt.Errorf("resolve project root: %w", err) + } + root = cwd + } + perm := opts.WorkerFilePerm + if perm == 0 { + perm = 0o600 + } + inlineDir := filepath.Join(core.GetStoreDir(root), "runtime", "inline") + manager := &Manager{ + opts: Options{ + ProjectRoot: root, + ProjectName: project, + Store: opts.Store, + UserEntrypoint: strings.TrimSpace(opts.UserEntrypoint), + WorkerFilePerm: perm, + }, + inlineDir: inlineDir, + entrypointPath: filepath.Join(inlineDir, entrypointFileName), + modules: make(map[string]moduleState), + } + return manager, nil +} + +// Start initializes synchronization goroutines and subscribes to tool changes in the resource store. +// It returns any initialization error and preserves the first failure for subsequent callers. 
+func (m *Manager) Start(ctx context.Context) error { + m.startOnce.Do(func() { + if ctx == nil { + m.startErr = fmt.Errorf("context is required") + return + } + if err := os.MkdirAll(m.inlineDir, defaultDirPerm); err != nil { + m.startErr = fmt.Errorf("ensure inline directory: %w", err) + return + } + syncCtx, cancel := context.WithCancel(context.WithoutCancel(ctx)) + m.cancel = cancel + m.syncCh = make(chan struct{}, 1) + m.wg.Add(1) + go m.runSyncLoop(syncCtx) + if err := m.Sync(ctx); err != nil { + m.startErr = err + cancel() + m.wg.Wait() + return + } + events, err := m.opts.Store.Watch(syncCtx, m.opts.ProjectName, resources.ResourceTool) + if err != nil { + m.startErr = fmt.Errorf("watch tool resources: %w", err) + cancel() + m.wg.Wait() + return + } + m.wg.Add(1) + go m.runWatcher(syncCtx, events) + m.startErr = nil + }) + return m.startErr +} + +// Close signals shutdown to background loops and waits for all goroutines to exit. +// It is safe to call multiple times and always returns nil. +func (m *Manager) Close(_ context.Context) error { + var err error + m.closeOnce.Do(func() { + if m.cancel != nil { + m.cancel() + } + m.wg.Wait() + }) + return err +} + +// Sync reconciles inline modules against the current tool registry and writes updated source files. +// It can be invoked manually and is also used internally by the background watcher. +func (m *Manager) Sync(ctx context.Context) error { + if err := os.MkdirAll(m.inlineDir, defaultDirPerm); err != nil { + return fmt.Errorf("ensure inline directory: %w", err) + } + modules, err := m.collectModules(ctx) + if err != nil { + return err + } + m.mu.Lock() + defer m.mu.Unlock() + if err := m.applyModuleDiff(ctx, modules); err != nil { + return err + } + return m.writeEntrypoint(ctx, modules) +} + +// EntrypointPath returns the absolute path to the generated inline entrypoint TypeScript file. 
+func (m *Manager) EntrypointPath() string { + return m.entrypointPath +} + +// ModulePath reports the absolute file path for a tool module and whether it exists in the local cache. +func (m *Manager) ModulePath(toolID string) (string, bool) { + m.mu.Lock() + defer m.mu.Unlock() + state, ok := m.modules[toolID] + if !ok { + return "", false + } + return filepath.Join(m.inlineDir, state.fileName), true +} + +func (m *Manager) enqueueSync() { + if m.syncCh == nil { + return + } + select { + case m.syncCh <- struct{}{}: + default: + } +} + +func (m *Manager) runSyncLoop(ctx context.Context) { + defer m.wg.Done() + for { + select { + case <-ctx.Done(): + return + case <-m.syncCh: + if err := m.Sync(ctx); err != nil { + log := logger.FromContext(ctx) + if log != nil { + log.Warn("inline manager sync failed", "error", err) + } + } + } + } +} + +func (m *Manager) runWatcher(ctx context.Context, events <-chan resources.Event) { + defer m.wg.Done() + for { + select { + case <-ctx.Done(): + return + case _, ok := <-events: + if !ok { + return + } + m.enqueueSync() + } + } +} + +func (m *Manager) collectModules(ctx context.Context) ([]moduleSpec, error) { + items, err := m.opts.Store.ListWithValues(ctx, m.opts.ProjectName, resources.ResourceTool) + if err != nil { + return nil, fmt.Errorf("list inline tools: %w", err) + } + modules := make([]moduleSpec, 0, len(items)) + for _, item := range items { + cfg, err := extractToolConfig(item.Value) + if err != nil { + log := logger.FromContext(ctx) + if log != nil { + log.Warn("skip invalid tool config for inline sync", "error", err, "tool_id", item.Key.ID) + } + continue + } + if !includeInlineTool(cfg) { + continue + } + code := ensureTrailingNewline(cfg.Code) + hash := contentHash(code) + fileName := fmt.Sprintf("%s_%s%s", sanitizeToolID(cfg.ID), hash[:12], defaultModuleExt) + modules = append(modules, moduleSpec{ + id: cfg.ID, + code: code, + fileName: fileName, + checksum: hash, + }) + } + sort.Slice(modules, func(i, j int) 
bool { + return modules[i].id < modules[j].id + }) + return modules, nil +} + +func (m *Manager) applyModuleDiff(_ context.Context, modules []moduleSpec) error { + next := make(map[string]moduleState, len(modules)) + for _, spec := range modules { + current, ok := m.modules[spec.id] + if ok && current.checksum == spec.checksum && current.fileName == spec.fileName { + next[spec.id] = current + continue + } + if err := writeFileAtomic(m.inlineDir, spec.fileName, []byte(spec.code), m.opts.WorkerFilePerm); err != nil { + return fmt.Errorf("write inline module %s: %w", spec.id, err) + } + if ok && current.fileName != spec.fileName { + _ = os.Remove(filepath.Join(m.inlineDir, current.fileName)) + } + next[spec.id] = moduleState{fileName: spec.fileName, checksum: spec.checksum} + } + for toolID, state := range m.modules { + if _, ok := next[toolID]; ok { + continue + } + path := filepath.Join(m.inlineDir, state.fileName) + if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("remove stale inline module %s: %w", toolID, err) + } + } + m.modules = next + return nil +} + +func (m *Manager) writeEntrypoint(_ context.Context, modules []moduleSpec) error { + content := m.buildEntrypoint(modules) + hash := contentHash(content) + if hash == m.entrypointHash { + return nil + } + dir := filepath.Dir(m.entrypointPath) + name := filepath.Base(m.entrypointPath) + if err := writeFileAtomic(dir, name, []byte(content), m.opts.WorkerFilePerm); err != nil { + return fmt.Errorf("write inline entrypoint: %w", err) + } + m.entrypointHash = hash + return nil +} + +func (m *Manager) buildEntrypoint(modules []moduleSpec) string { + var imports []string + var assignments []string + for idx, spec := range modules { + alias := fmt.Sprintf("%s%d", modulePrefix, idx) + imports = append(imports, fmt.Sprintf("import %s from \"./%s\";", alias, filepath.ToSlash(spec.fileName))) + assignments = append(assignments, fmt.Sprintf(" %q: %s,", spec.id, alias)) + } + 
userImport := m.resolveUserImport() + builder := strings.Builder{} + builder.WriteString("// Code generated by Compozy inline manager. DO NOT EDIT.\n") + if userImport != "" { + builder.WriteString(fmt.Sprintf("import * as userExports from %q;\n", userImport)) + } else { + builder.WriteString("const userExports = {};\n") + } + for _, line := range imports { + builder.WriteString(line) + builder.WriteByte('\n') + } + builder.WriteString("const baseExports = userExports?.default ?? userExports ?? {};\n") + builder.WriteString("const inlineExports = {\n") + for _, assign := range assignments { + builder.WriteString(assign) + builder.WriteByte('\n') + } + builder.WriteString("};\n") + builder.WriteString("export default {\n") + builder.WriteString(" ...baseExports,\n") + if len(assignments) > 0 { + builder.WriteString(" ...inlineExports,\n") + } + builder.WriteString("};\n") + return builder.String() +} + +func (m *Manager) resolveUserImport() string { + path := strings.TrimSpace(m.opts.UserEntrypoint) + if path == "" { + return "" + } + if bareModuleSpecifier(path) { + return path + } + target := path + if !filepath.IsAbs(target) { + target = filepath.Join(m.opts.ProjectRoot, target) + } + target = filepath.Clean(target) + rel, err := filepath.Rel(filepath.Dir(m.entrypointPath), target) + if err != nil { + return filepath.ToSlash(path) + } + rel = filepath.ToSlash(rel) + if strings.HasPrefix(rel, "../") || strings.HasPrefix(rel, "./") { + return rel + } + return "./" + rel +} + +func includeInlineTool(cfg *tool.Config) bool { + if cfg == nil { + return false + } + code := strings.TrimSpace(cfg.Code) + if code == "" { + return false + } + impl := strings.TrimSpace(cfg.Implementation) + if impl == "" || impl == tool.ImplementationRuntime { + return true + } + return false +} + +func extractToolConfig(value any) (*tool.Config, error) { + switch v := value.(type) { + case *tool.Config: + return v, nil + case tool.Config: + return &v, nil + default: + return nil, 
fmt.Errorf("unexpected tool config type %T", value) + } +} + +func sanitizeToolID(id string) string { + if strings.TrimSpace(id) == "" { + return "tool" + } + builder := strings.Builder{} + for _, r := range strings.ToLower(id) { + switch { + case r >= 'a' && r <= 'z': + builder.WriteRune(r) + case r >= '0' && r <= '9': + builder.WriteRune(r) + case r == '-' || r == '_': + builder.WriteRune(r) + default: + builder.WriteRune('-') + } + } + slug := strings.Trim(builder.String(), "-_") + if slug == "" { + return "tool" + } + return slug +} + +func contentHash(content string) string { + sum := sha256.Sum256([]byte(content)) + return hex.EncodeToString(sum[:]) +} + +func ensureTrailingNewline(code string) string { + if strings.HasSuffix(code, "\n") { + return code + } + return code + "\n" +} + +func writeFileAtomic(dir, name string, data []byte, perm fs.FileMode) error { + if perm == 0 { + perm = 0o600 + } + tmp, err := os.CreateTemp(dir, "inline-*") + if err != nil { + return err + } + tmpName := tmp.Name() + if _, err := tmp.Write(data); err != nil { + tmp.Close() + os.Remove(tmpName) + return err + } + if err := tmp.Close(); err != nil { + os.Remove(tmpName) + return err + } + if err := os.Chmod(tmpName, perm); err != nil { + os.Remove(tmpName) + return err + } + target := filepath.Join(dir, name) + if err := os.Rename(tmpName, target); err != nil { + os.Remove(tmpName) + return err + } + return nil +} + +func bareModuleSpecifier(path string) bool { + if path == "" { + return false + } + if strings.HasPrefix(path, ".") { + return false + } + return !strings.ContainsAny(path, `/\`) +} diff --git a/engine/tool/inline/manager_test.go b/engine/tool/inline/manager_test.go new file mode 100644 index 00000000..4ef1ca85 --- /dev/null +++ b/engine/tool/inline/manager_test.go @@ -0,0 +1,154 @@ +package inline_test + +import ( + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/compozy/compozy/engine/resources" + 
"github.com/compozy/compozy/engine/tool" + "github.com/compozy/compozy/engine/tool/inline" + testhelpers "github.com/compozy/compozy/test/helpers" +) + +func TestManagerSyncWritesModules(t *testing.T) { + t.Parallel() + ctx := testhelpers.NewTestContext(t) + tmp := t.TempDir() + store := resources.NewMemoryResourceStore() + manager, err := inline.NewManager(ctx, inline.Options{ + ProjectRoot: tmp, + ProjectName: "proj-inline", + Store: store, + }) + require.NoError(t, err) + toolCfg := &tool.Config{ + ID: "weather-brief", + Implementation: tool.ImplementationRuntime, + Code: "export default () => 'sunny';", + } + _, err = store.Put(ctx, resources.ResourceKey{ + Project: "proj-inline", + Type: resources.ResourceTool, + ID: "weather-brief", + }, toolCfg) + require.NoError(t, err) + require.NoError(t, manager.Start(ctx)) + t.Cleanup(func() { + require.NoError(t, manager.Close(ctx)) + }) + modulePath, ok := manager.ModulePath("weather-brief") + require.True(t, ok) + data, err := os.ReadFile(modulePath) + require.NoError(t, err) + require.Equal(t, "export default () => 'sunny';\n", string(data)) + entry, err := os.ReadFile(manager.EntrypointPath()) + require.NoError(t, err) + require.Contains(t, string(entry), `"weather-brief"`) + require.Contains(t, string(entry), "inline0") +} + +func TestManagerSyncIsConcurrencySafe(t *testing.T) { + t.Parallel() + ctx := testhelpers.NewTestContext(t) + tmp := t.TempDir() + store := resources.NewMemoryResourceStore() + manager, err := inline.NewManager(ctx, inline.Options{ + ProjectRoot: tmp, + ProjectName: "proj-concurrent", + Store: store, + }) + require.NoError(t, err) + toolCfg := &tool.Config{ + ID: "metrics-report", + Implementation: tool.ImplementationRuntime, + Code: "export default () => ({ status: 'ok' });", + } + _, err = store.Put(ctx, resources.ResourceKey{ + Project: "proj-concurrent", + Type: resources.ResourceTool, + ID: "metrics-report", + }, toolCfg) + require.NoError(t, err) + require.NoError(t, manager.Start(ctx)) 
+ t.Cleanup(func() { + require.NoError(t, manager.Close(ctx)) + }) + errCh := make(chan error, 8) + for i := 0; i < 8; i++ { + go func() { + errCh <- manager.Sync(ctx) + }() + } + for i := 0; i < 8; i++ { + require.NoError(t, <-errCh) + } + modulePath, ok := manager.ModulePath("metrics-report") + require.True(t, ok) + _, err = os.Stat(modulePath) + require.NoError(t, err) +} + +func TestManagerWatcherRegeneratesOnUpdates(t *testing.T) { + t.Parallel() + ctx := testhelpers.NewTestContext(t) + tmp := t.TempDir() + store := resources.NewMemoryResourceStore() + manager, err := inline.NewManager(ctx, inline.Options{ + ProjectRoot: tmp, + ProjectName: "proj-watch", + Store: store, + UserEntrypoint: filepath.Join("src", "entrypoint.ts"), + }) + require.NoError(t, err) + initial := &tool.Config{ + ID: "alpha", + Implementation: tool.ImplementationRuntime, + Code: "export default () => 'v1';", + } + _, err = store.Put(ctx, resources.ResourceKey{ + Project: "proj-watch", + Type: resources.ResourceTool, + ID: "alpha", + }, initial) + require.NoError(t, err) + require.NoError(t, manager.Start(ctx)) + t.Cleanup(func() { + require.NoError(t, manager.Close(ctx)) + }) + initialPath, ok := manager.ModulePath("alpha") + require.True(t, ok) + data, err := os.ReadFile(initialPath) + require.NoError(t, err) + require.Contains(t, string(data), "'v1'") + updated := &tool.Config{ + ID: "alpha", + Implementation: tool.ImplementationRuntime, + Code: "export default () => 'v2';", + } + _, err = store.Put(ctx, resources.ResourceKey{ + Project: "proj-watch", + Type: resources.ResourceTool, + ID: "alpha", + }, updated) + require.NoError(t, err) + require.Eventually(t, func() bool { + modulePath, ok := manager.ModulePath("alpha") + if !ok { + return false + } + content, readErr := os.ReadFile(modulePath) + if readErr != nil { + return false + } + return strings.Contains(string(content), "'v2'") + }, 2*time.Second, 50*time.Millisecond) + entryContent, err := os.ReadFile(manager.EntrypointPath()) 
+ require.NoError(t, err) + require.Contains(t, string(entryContent), "alpha") + require.Contains(t, string(entryContent), "inlineExports") +} diff --git a/engine/tool/nativeuser/registry.go b/engine/tool/nativeuser/registry.go new file mode 100644 index 00000000..490e9be7 --- /dev/null +++ b/engine/tool/nativeuser/registry.go @@ -0,0 +1,83 @@ +package nativeuser + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" +) + +// Handler defines the signature required for Go-native tool handlers. +// The input and config maps are copies derived from the tool configuration to +// prevent mutation of shared state. +type Handler func(ctx context.Context, input map[string]any, cfg map[string]any) (map[string]any, error) + +// Definition represents a registered native tool handler. +type Definition struct { + ID string + Handler Handler +} + +var ( + registry sync.Map + // ErrInvalidID indicates an empty tool identifier was provided. + ErrInvalidID = errors.New("native tool id is required") + // ErrNilHandler indicates a nil handler was provided during registration. + ErrNilHandler = errors.New("native tool handler is required") + // ErrAlreadyRegistered indicates a handler has already been registered for the given ID. + ErrAlreadyRegistered = errors.New("native tool handler already registered") +) + +// Register stores a native tool handler for the given ID. IDs are case-sensitive after trimming. +func Register(id string, handler Handler) error { + trimmed := strings.TrimSpace(id) + if trimmed == "" { + return ErrInvalidID + } + if handler == nil { + return ErrNilHandler + } + definition := Definition{ID: trimmed, Handler: handler} + if _, loaded := registry.LoadOrStore(trimmed, definition); loaded { + return fmt.Errorf("%w: %s", ErrAlreadyRegistered, trimmed) + } + return nil +} + +// Lookup retrieves a registered native handler definition by ID. 
+func Lookup(id string) (Definition, bool) { + trimmed := strings.TrimSpace(id) + if trimmed == "" { + return Definition{}, false + } + value, ok := registry.Load(trimmed) + if !ok { + return Definition{}, false + } + def, ok := value.(Definition) + if !ok { + return Definition{}, false + } + return def, true +} + +// IDs returns the set of registered native tool identifiers. +func IDs() []string { + ids := make([]string, 0) + registry.Range(func(key, _ any) bool { + if id, ok := key.(string); ok { + ids = append(ids, id) + } + return true + }) + return ids +} + +// Reset clears the registry. This helper is intended for tests. +func Reset() { + registry.Range(func(key, _ any) bool { + registry.Delete(key) + return true + }) +} diff --git a/engine/tool/nativeuser/registry_test.go b/engine/tool/nativeuser/registry_test.go new file mode 100644 index 00000000..455d14be --- /dev/null +++ b/engine/tool/nativeuser/registry_test.go @@ -0,0 +1,96 @@ +package nativeuser + +import ( + "context" + "fmt" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRegisterAndLookup(t *testing.T) { + t.Run("Should register tool and retrieve it via Lookup", func(t *testing.T) { + Reset() + t.Cleanup(Reset) + ctx := t.Context() + h := func(context.Context, map[string]any, map[string]any) (map[string]any, error) { + return map[string]any{"ok": true}, nil + } + require.NoError(t, Register("test-tool", h)) + def, ok := Lookup("test-tool") + require.True(t, ok) + assert.Equal(t, "test-tool", def.ID) + res, err := def.Handler(ctx, map[string]any{}, map[string]any{}) + require.NoError(t, err) + assert.Equal(t, map[string]any{"ok": true}, res) + }) +} + +func TestRegisterValidation(t *testing.T) { + t.Run("Should reject empty ID", func(t *testing.T) { + Reset() + t.Cleanup(Reset) + assert.Equal( + t, + ErrInvalidID, + Register("", func(context.Context, map[string]any, map[string]any) (map[string]any, error) { + return nil, nil + }), 
+ ) + }) + + t.Run("Should reject nil handler", func(t *testing.T) { + Reset() + t.Cleanup(Reset) + assert.Equal(t, ErrNilHandler, Register("tool", nil)) + }) +} + +func TestRegisterDuplicate(t *testing.T) { + t.Run("Should reject duplicate registration", func(t *testing.T) { + Reset() + t.Cleanup(Reset) + h := func(context.Context, map[string]any, map[string]any) (map[string]any, error) { + return nil, nil + } + require.NoError(t, Register("dup", h)) + err := Register("dup", h) + require.Error(t, err) + assert.ErrorIs(t, err, ErrAlreadyRegistered) + }) +} + +func TestRegisterConcurrent(t *testing.T) { + t.Run("Should register handlers concurrently without conflicts", func(t *testing.T) { + Reset() + t.Cleanup(Reset) + var wg sync.WaitGroup + ctx := t.Context() + errCh := make(chan error, 25) + for i := 0; i < 25; i++ { + toolID := fmt.Sprintf("tool-%d", i) + wg.Go(func() { + h := func(context.Context, map[string]any, map[string]any) (map[string]any, error) { + return map[string]any{"id": toolID}, nil + } + errCh <- Register(toolID, h) + }) + } + wg.Wait() + close(errCh) + for err := range errCh { + require.NoError(t, err) + } + ids := IDs() + assert.Len(t, ids, 25) + for _, id := range ids { + def, ok := Lookup(id) + require.True(t, ok) + res, err := def.Handler(ctx, map[string]any{}, map[string]any{}) + require.NoError(t, err) + assert.Equal(t, id, res["id"]) + } + }) +} diff --git a/engine/tool/router/dto.go b/engine/tool/router/dto.go index 8b546cc1..92390643 100644 --- a/engine/tool/router/dto.go +++ b/engine/tool/router/dto.go @@ -11,16 +11,20 @@ import ( // ToolDTO is the canonical typed representation for tools. 
type ToolDTO struct { - Resource string `json:"resource,omitempty"` - ID string `json:"id"` - Description string `json:"description,omitempty"` - Timeout string `json:"timeout,omitempty"` - InputSchema *schema.Schema `json:"input,omitempty"` - OutputSchema *schema.Schema `json:"output,omitempty"` - With *core.Input `json:"with,omitempty"` - Config *core.Input `json:"config,omitempty"` - Env *core.EnvMap `json:"env,omitempty"` - Cwd string `json:"cwd,omitempty"` + Resource string `json:"resource,omitempty"` + ID string `json:"id"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Runtime string `json:"runtime,omitempty"` + Implementation string `json:"implementation,omitempty"` + Code string `json:"code,omitempty"` + Timeout string `json:"timeout,omitempty"` + InputSchema *schema.Schema `json:"input,omitempty"` + OutputSchema *schema.Schema `json:"output,omitempty"` + With *core.Input `json:"with,omitempty"` + Config *core.Input `json:"config,omitempty"` + Env *core.EnvMap `json:"env,omitempty"` + Cwd string `json:"cwd,omitempty"` } // ToolListItem is the list representation and includes an optional ETag. 
@@ -50,16 +54,20 @@ func ConvertToolConfigToDTO(cfg *tool.Config) (ToolDTO, error) { return ToolDTO{}, fmt.Errorf("deep copy tool config: %w", err) } return ToolDTO{ - Resource: clone.Resource, - ID: clone.ID, - Description: clone.Description, - Timeout: clone.Timeout, - InputSchema: clone.InputSchema, - OutputSchema: clone.OutputSchema, - With: clone.With, - Config: clone.Config, - Env: clone.Env, - Cwd: cwdPath(clone.GetCWD()), + Resource: clone.Resource, + ID: clone.ID, + Name: clone.Name, + Description: clone.Description, + Runtime: clone.Runtime, + Implementation: clone.Implementation, + Code: clone.Code, + Timeout: clone.Timeout, + InputSchema: clone.InputSchema, + OutputSchema: clone.OutputSchema, + With: clone.With, + Config: clone.Config, + Env: clone.Env, + Cwd: cwdPath(clone.GetCWD()), }, nil } diff --git a/engine/tool/router/tools_top.go b/engine/tool/router/tools_top.go index 30612f91..52c84f65 100644 --- a/engine/tool/router/tools_top.go +++ b/engine/tool/router/tools_top.go @@ -244,7 +244,8 @@ func respondToolError(c *gin.Context, err error) { switch { case errors.Is(err, tooluc.ErrInvalidInput), errors.Is(err, tooluc.ErrProjectMissing), - errors.Is(err, tooluc.ErrIDMissing): + errors.Is(err, tooluc.ErrIDMissing), + errors.Is(err, tooluc.ErrNativeImplementation): router.RespondProblem(c, &core.Problem{Status: http.StatusBadRequest, Detail: err.Error()}) case errors.Is(err, tooluc.ErrNotFound): router.RespondProblem(c, &core.Problem{Status: http.StatusNotFound, Detail: err.Error()}) diff --git a/engine/tool/uc/store_decode.go b/engine/tool/uc/store_decode.go index 2dbd51e4..a4b01e49 100644 --- a/engine/tool/uc/store_decode.go +++ b/engine/tool/uc/store_decode.go @@ -15,6 +15,14 @@ func decodeToolBody(body map[string]any, pathID string) (*tool.Config, error) { if err := cfg.FromMap(body); err != nil { return nil, fmt.Errorf("decode tool config: %w", err) } + impl, err := cfg.EffectiveImplementation() + if err != nil { + return nil, err + } + if impl == 
tool.ImplementationNative { + return nil, ErrNativeImplementation + } + cfg.SetImplementation(impl) return normalizeToolID(cfg, pathID) } @@ -49,5 +57,10 @@ func normalizeToolID(cfg *tool.Config, pathID string) (*tool.Config, error) { return nil, fmt.Errorf("id mismatch: body=%s path=%s", bodyID, id) } cfg.ID = id + impl, err := cfg.EffectiveImplementation() + if err != nil { + return nil, err + } + cfg.SetImplementation(impl) return cfg, nil } diff --git a/engine/tool/uc/store_errors.go b/engine/tool/uc/store_errors.go index cb9c0b9f..5b1f82cd 100644 --- a/engine/tool/uc/store_errors.go +++ b/engine/tool/uc/store_errors.go @@ -3,12 +3,13 @@ package uc import "errors" var ( - ErrInvalidInput = errors.New("invalid input") - ErrProjectMissing = errors.New("project missing") - ErrIDMissing = errors.New("id missing") - ErrNotFound = errors.New("tool not found") - ErrETagMismatch = errors.New("etag mismatch") - ErrStaleIfMatch = errors.New("stale if-match") - ErrReferenced = errors.New("tool referenced") - ErrWorkflowNotFound = errors.New("workflow not found") + ErrInvalidInput = errors.New("invalid input") + ErrProjectMissing = errors.New("project missing") + ErrIDMissing = errors.New("id missing") + ErrNotFound = errors.New("tool not found") + ErrETagMismatch = errors.New("etag mismatch") + ErrStaleIfMatch = errors.New("stale if-match") + ErrReferenced = errors.New("tool referenced") + ErrWorkflowNotFound = errors.New("workflow not found") + ErrNativeImplementation = errors.New("native tool implementation is not supported via API") ) diff --git a/engine/tool/uc/store_test.go b/engine/tool/uc/store_test.go index 146d2b2e..28ae7ee1 100644 --- a/engine/tool/uc/store_test.go +++ b/engine/tool/uc/store_test.go @@ -62,3 +62,16 @@ func TestListTools_FilterByWorkflow(t *testing.T) { assert.Len(t, out.Items, 1) assert.Equal(t, "t1", out.Items[0]["id"]) } + +func TestUpsert_RejectsNativeImplementation(t *testing.T) { + store := resources.NewMemoryResourceStore() + ctx := 
t.Context() + body := map[string]any{ + "implementation": "native", + "name": "Native Tool", + "description": "Blocked", + } + _, err := NewUpsert(store).Execute(ctx, &UpsertInput{Project: "demo", ID: "native", Body: body}) + require.Error(t, err) + assert.ErrorIs(t, err, ErrNativeImplementation) +} diff --git a/engine/worker/embedded/server.go b/engine/worker/embedded/server.go index 9095eb62..5fc29d19 100644 --- a/engine/worker/embedded/server.go +++ b/engine/worker/embedded/server.go @@ -242,6 +242,15 @@ func ensurePortsAvailable(ctx context.Context, bindIP string, ports []int) error bindIP, ) } + var netErr net.Error + if errors.As(err, &netErr) && netErr.Timeout() { + return fmt.Errorf( + "timeout checking port %d on %s (may indicate port conflict or network issue): %w", + port, + bindIP, + err, + ) + } if !isConnRefused(err) { return fmt.Errorf("verify port %d on %s: %w", port, bindIP, err) } diff --git a/engine/workflow/schedule/config_definition.go b/engine/workflow/schedule/config_definition.go new file mode 100644 index 00000000..d353e438 --- /dev/null +++ b/engine/workflow/schedule/config_definition.go @@ -0,0 +1,10 @@ +package schedule + +import projectschedule "github.com/compozy/compozy/engine/project/schedule" + +// Config aliases the project-level schedule configuration so callers can continue importing +// definitions from the workflow schedule package without creating dependency cycles. +type Config = projectschedule.Config + +// RetryPolicy aliases the project-level retry configuration for scheduled workflows. 
+type RetryPolicy = projectschedule.RetryPolicy diff --git a/engine/workflow/schedule/manager.go b/engine/workflow/schedule/manager.go index fe0e0d03..a8f67a83 100644 --- a/engine/workflow/schedule/manager.go +++ b/engine/workflow/schedule/manager.go @@ -85,15 +85,15 @@ func NewOverrideCache() *OverrideCache { } } -// Config holds configuration options for the schedule manager -type Config struct { +// ManagerConfig holds configuration options for the schedule manager +type ManagerConfig struct { // PageSize for listing schedules from Temporal (default: 100) PageSize int } // DefaultConfig returns default configuration values -func DefaultConfig() *Config { - return &Config{ +func DefaultConfig() *ManagerConfig { + return &ManagerConfig{ PageSize: 100, } } @@ -103,7 +103,7 @@ type manager struct { client *worker.Client projectID string taskQueue string - config *Config + config *ManagerConfig mu sync.RWMutex // Track API overrides with persistence and timestamp tracking overrideCache *OverrideCache @@ -268,7 +268,7 @@ func (m *manager) getYAMLModTime(ctx context.Context, wf *workflow.Config) time. 
type Option func(*manager) // WithConfig sets a custom configuration for the manager -func WithConfig(cfg *Config) Option { +func WithConfig(cfg *ManagerConfig) Option { return func(m *manager) { if cfg != nil { m.config = cfg diff --git a/go.mod b/go.mod index c7acba19..7a997820 100644 --- a/go.mod +++ b/go.mod @@ -43,6 +43,7 @@ require ( github.com/knadh/koanf/providers/structs v1.0.0 github.com/knadh/koanf/v2 v2.3.0 github.com/looplab/fsm v1.0.3 + github.com/magefile/mage v1.15.0 github.com/mark3labs/mcp-go v0.41.1 github.com/mattn/go-isatty v0.0.20 github.com/mitchellh/mapstructure v1.5.0 @@ -97,6 +98,7 @@ require ( golang.org/x/time v0.14.0 google.golang.org/grpc v1.76.0 gopkg.in/yaml.v3 v3.0.1 + modernc.org/sqlite v1.39.1 ) require ( @@ -346,10 +348,9 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/validator.v2 v2.0.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - modernc.org/libc v1.66.3 // indirect + modernc.org/libc v1.66.10 // indirect modernc.org/mathutil v1.7.1 // indirect modernc.org/memory v1.11.0 // indirect - modernc.org/sqlite v1.38.2 // indirect ) replace github.com/compozy/compozy/test/fixtures => ./test/fixtures diff --git a/go.sum b/go.sum index ea40ab3e..466a6e98 100644 --- a/go.sum +++ b/go.sum @@ -495,6 +495,8 @@ github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69 github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 h1:PpXWgLPs+Fqr325bN2FD2ISlRRztXibcX6e8f5FR5Dc= github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= +github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= +github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= github.com/magiconair/properties v1.8.10/go.mod 
h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= @@ -1099,18 +1101,18 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= mellium.im/sasl v0.3.1 h1:wE0LW6g7U83vhvxjC1IY8DnXM+EU095yeo8XClvCdfo= mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw= -modernc.org/cc/v4 v4.26.2 h1:991HMkLjJzYBIfha6ECZdjrIYz2/1ayr+FL8GN+CNzM= -modernc.org/cc/v4 v4.26.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= -modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU= -modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE= -modernc.org/fileutil v1.3.8 h1:qtzNm7ED75pd1C7WgAGcK4edm4fvhtBsEiI/0NQ54YM= -modernc.org/fileutil v1.3.8/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= +modernc.org/cc/v4 v4.26.5 h1:xM3bX7Mve6G8K8b+T11ReenJOT+BmVqQj0FY5T4+5Y4= +modernc.org/cc/v4 v4.26.5/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.28.1 h1:wPKYn5EC/mYTqBO373jKjvX2n+3+aK7+sICCv4Fjy1A= +modernc.org/ccgo/v4 v4.28.1/go.mod h1:uD+4RnfrVgE6ec9NGguUNdhqzNIeeomeXf6CL0GTE5Q= +modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA= +modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks= modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI= -modernc.org/libc v1.66.3 h1:cfCbjTUcdsKyyZZfEUKfoHcP3S0Wkvz3jgSzByEWVCQ= -modernc.org/libc v1.66.3/go.mod h1:XD9zO8kt59cANKvHPXpx7yS2ELPheAey0vjIuZOhOU8= +modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A= +modernc.org/libc 
v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I= modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= @@ -1119,8 +1121,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= -modernc.org/sqlite v1.38.2 h1:Aclu7+tgjgcQVShZqim41Bbw9Cho0y/7WzYptXqkEek= -modernc.org/sqlite v1.38.2/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E= +modernc.org/sqlite v1.39.1 h1:H+/wGFzuSCIEVCvXYVHX5RQglwhMOvtHSv+VtidL2r4= +modernc.org/sqlite v1.39.1/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE= modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= diff --git a/go.work b/go.work new file mode 100644 index 00000000..432ed585 --- /dev/null +++ b/go.work @@ -0,0 +1,6 @@ +go 1.25.2 + +use ( + . 
+ ./sdk +) diff --git a/go.work.sum b/go.work.sum new file mode 100644 index 00000000..0662e6d8 --- /dev/null +++ b/go.work.sum @@ -0,0 +1,424 @@ +ariga.io/atlas v0.32.0/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.6-20250425153114-8976f5be98c1.1/go.mod h1:avRlCjnFzl98VPaeCtJ24RrV/wwHFzB8sWXhj26+n/U= +buf.build/go/protovalidate v0.12.0/go.mod h1:q3PFfbzI05LeqxSwq+begW2syjy2Z6hLxZSkP1OH/D0= +cloud.google.com/go/accessapproval v1.8.6/go.mod h1:FfmTs7Emex5UvfnnpMkhuNkRCP85URnBFt5ClLxhZaQ= +cloud.google.com/go/accesscontextmanager v1.9.6/go.mod h1:884XHwy1AQpCX5Cj2VqYse77gfLaq9f8emE2bYriilk= +cloud.google.com/go/alloydb v1.14.0/go.mod h1:OTBY1HoL0Z8PsHoMMVhkaUPKyY8oP7hzIAe/Dna6UHk= +cloud.google.com/go/alloydbconn v1.13.2/go.mod h1:0wlYQAOr2XuvxYsvNNVckmG2v17WVUKzMD+gmTOibSU= +cloud.google.com/go/analytics v0.28.0/go.mod h1:hNT09bdzGB3HsL7DBhZkoPi4t5yzZPZROoFv+JzGR7I= +cloud.google.com/go/apigateway v1.7.6/go.mod h1:SiBx36VPjShaOCk8Emf63M2t2c1yF+I7mYZaId7OHiA= +cloud.google.com/go/apigeeconnect v1.7.6/go.mod h1:zqDhHY99YSn2li6OeEjFpAlhXYnXKl6DFb/fGu0ye2w= +cloud.google.com/go/apigeeregistry v0.9.6/go.mod h1:AFEepJBKPtGDfgabG2HWaLH453VVWWFFs3P4W00jbPs= +cloud.google.com/go/appengine v1.9.6/go.mod h1:jPp9T7Opvzl97qytaRGPwoH7pFI3GAcLDaui1K8PNjY= +cloud.google.com/go/area120 v0.9.6/go.mod h1:qKSokqe0iTmwBDA3tbLWonMEnh0pMAH4YxiceiHUed4= +cloud.google.com/go/artifactregistry v1.17.1/go.mod h1:06gLv5QwQPWtaudI2fWO37gfwwRUHwxm3gA8Fe568Hc= +cloud.google.com/go/asset v1.21.0/go.mod h1:0lMJ0STdyImZDSCB8B3i/+lzIquLBpJ9KZ4pyRvzccM= +cloud.google.com/go/assuredworkloads v1.12.6/go.mod h1:QyZHd7nH08fmZ+G4ElihV1zoZ7H0FQCpgS0YWtwjCKo= +cloud.google.com/go/automl v1.14.7/go.mod h1:8a4XbIH5pdvrReOU72oB+H3pOw2JBxo9XTk39oljObE= +cloud.google.com/go/baremetalsolution v1.3.6/go.mod h1:7/CS0LzpLccRGO0HL3q2Rofxas2JwjREKut414sE9iM= +cloud.google.com/go/batch v1.12.2/go.mod h1:tbnuTN/Iw59/n1yjAYKV2aZUjvMM2VJqAgvUgft6UEU= 
+cloud.google.com/go/beyondcorp v1.1.6/go.mod h1:V1PigSWPGh5L/vRRmyutfnjAbkxLI2aWqJDdxKbwvsQ= +cloud.google.com/go/bigquery v1.67.0/go.mod h1:HQeP1AHFuAz0Y55heDSb0cjZIhnEkuwFRBGo6EEKHug= +cloud.google.com/go/bigtable v1.37.0/go.mod h1:HXqddP6hduwzrtiTCqZPpj9ij4hGZb4Zy1WF/dT+yaU= +cloud.google.com/go/billing v1.20.4/go.mod h1:hBm7iUmGKGCnBm6Wp439YgEdt+OnefEq/Ib9SlJYxIU= +cloud.google.com/go/binaryauthorization v1.9.5/go.mod h1:CV5GkS2eiY461Bzv+OH3r5/AsuB6zny+MruRju3ccB8= +cloud.google.com/go/certificatemanager v1.9.5/go.mod h1:kn7gxT/80oVGhjL8rurMUYD36AOimgtzSBPadtAeffs= +cloud.google.com/go/channel v1.19.5/go.mod h1:vevu+LK8Oy1Yuf7lcpDbkQQQm5I7oiY5fFTn3uwfQLY= +cloud.google.com/go/cloudbuild v1.22.2/go.mod h1:rPyXfINSgMqMZvuTk1DbZcbKYtvbYF/i9IXQ7eeEMIM= +cloud.google.com/go/clouddms v1.8.7/go.mod h1:DhWLd3nzHP8GoHkA6hOhso0R9Iou+IGggNqlVaq/KZ4= +cloud.google.com/go/cloudsqlconn v1.14.1/go.mod h1:pM5Xp20GsQosQ/cP9awtha5SMgmzbLubb/dbVsTg3Fo= +cloud.google.com/go/cloudtasks v1.13.6/go.mod h1:/IDaQqGKMixD+ayM43CfsvWF2k36GeomEuy9gL4gLmU= +cloud.google.com/go/compute v1.37.0 h1:XxtZlXYkZXub3LNaLu90TTemcFqIU1yZ4E4q9VlR39A= +cloud.google.com/go/compute v1.37.0/go.mod h1:AsK4VqrSyXBo4SMbRtfAO1VfaMjUEjEwv1UB/AwVp5Q= +cloud.google.com/go/contactcenterinsights v1.17.3/go.mod h1:7Uu2CpxS3f6XxhRdlEzYAkrChpR5P5QfcdGAFEdHOG8= +cloud.google.com/go/container v1.42.4/go.mod h1:wf9lKc3ayWVbbV/IxKIDzT7E+1KQgzkzdxEJpj1pebE= +cloud.google.com/go/containeranalysis v0.14.1/go.mod h1:28e+tlZgauWGHmEbnI5UfIsjMmrkoR1tFN0K2i71jBI= +cloud.google.com/go/datacatalog v1.26.0/go.mod h1:bLN2HLBAwB3kLTFT5ZKLHVPj/weNz6bR0c7nYp0LE14= +cloud.google.com/go/dataflow v0.10.6/go.mod h1:Vi0pTYCVGPnM2hWOQRyErovqTu2xt2sr8Rp4ECACwUI= +cloud.google.com/go/dataform v0.11.2/go.mod h1:IMmueJPEKpptT2ZLWlvIYjw6P/mYHHxA7/SUBiXqZUY= +cloud.google.com/go/datafusion v1.8.6/go.mod h1:fCyKJF2zUKC+O3hc2F9ja5EUCAbT4zcH692z8HiFZFw= +cloud.google.com/go/datalabeling v0.9.6/go.mod h1:n7o4x0vtPensZOoFwFa4UfZgkSZm8Qs0Pg/T3kQjXSM= 
+cloud.google.com/go/dataplex v1.25.2/go.mod h1:AH2/a7eCYvFP58scJGR7YlSY9qEhM8jq5IeOA/32IZ0= +cloud.google.com/go/dataproc/v2 v2.11.2/go.mod h1:xwukBjtfiO4vMEa1VdqyFLqJmcv7t3lo+PbLDcTEw+g= +cloud.google.com/go/dataqna v0.9.6/go.mod h1:rjnNwjh8l3ZsvrANy6pWseBJL2/tJpCcBwJV8XCx4kU= +cloud.google.com/go/datastore v1.20.0/go.mod h1:uFo3e+aEpRfHgtp5pp0+6M0o147KoPaYNaPAKpfh8Ew= +cloud.google.com/go/datastream v1.14.1/go.mod h1:JqMKXq/e0OMkEgfYe0nP+lDye5G2IhIlmencWxmesMo= +cloud.google.com/go/deploy v1.27.1/go.mod h1:il2gxiMgV3AMlySoQYe54/xpgVDoEh185nj4XjJ+GRk= +cloud.google.com/go/dialogflow v1.68.2/go.mod h1:E0Ocrhf5/nANZzBju8RX8rONf0PuIvz2fVj3XkbAhiY= +cloud.google.com/go/dlp v1.22.1/go.mod h1:Gc7tGo1UJJTBRt4OvNQhm8XEQ0i9VidAiGXBVtsftjM= +cloud.google.com/go/documentai v1.37.0/go.mod h1:qAf3ewuIUJgvSHQmmUWvM3Ogsr5A16U2WPHmiJldvLA= +cloud.google.com/go/domains v0.10.6/go.mod h1:3xzG+hASKsVBA8dOPc4cIaoV3OdBHl1qgUpAvXK7pGY= +cloud.google.com/go/edgecontainer v1.4.3/go.mod h1:q9Ojw2ox0uhAvFisnfPRAXFTB1nfRIOIXVWzdXMZLcE= +cloud.google.com/go/errorreporting v0.3.2/go.mod h1:s5kjs5r3l6A8UUyIsgvAhGq6tkqyBCUss0FRpsoVTww= +cloud.google.com/go/essentialcontacts v1.7.6/go.mod h1:/Ycn2egr4+XfmAfxpLYsJeJlVf9MVnq9V7OMQr9R4lA= +cloud.google.com/go/eventarc v1.15.5/go.mod h1:vDCqGqyY7SRiickhEGt1Zhuj81Ya4F/NtwwL3OZNskg= +cloud.google.com/go/filestore v1.10.2/go.mod h1:w0Pr8uQeSRQfCPRsL0sYKW6NKyooRgixCkV9yyLykR4= +cloud.google.com/go/firestore v1.18.0/go.mod h1:5ye0v48PhseZBdcl0qbl3uttu7FIEwEYVaWm0UIEOEU= +cloud.google.com/go/functions v1.19.6/go.mod h1:0G0RnIlbM4MJEycfbPZlCzSf2lPOjL7toLDwl+r0ZBw= +cloud.google.com/go/gkebackup v1.7.0/go.mod h1:oPHXUc6X6tg6Zf/7QmKOfXOFaVzBEgMWpLDb4LqngWA= +cloud.google.com/go/gkeconnect v0.12.4/go.mod h1:bvpU9EbBpZnXGo3nqJ1pzbHWIfA9fYqgBMJ1VjxaZdk= +cloud.google.com/go/gkehub v0.15.6/go.mod h1:sRT0cOPAgI1jUJrS3gzwdYCJ1NEzVVwmnMKEwrS2QaM= +cloud.google.com/go/gkemulticloud v1.5.3/go.mod h1:KPFf+/RcfvmuScqwS9/2MF5exZAmXSuoSLPuaQ98Xlk= 
+cloud.google.com/go/gsuiteaddons v1.7.7/go.mod h1:zTGmmKG/GEBCONsvMOY2ckDiEsq3FN+lzWGUiXccF9o= +cloud.google.com/go/iap v1.11.1/go.mod h1:qFipMJ4nOIv4yDHZxn31PiS8QxJJH2FlxgH9aFauejw= +cloud.google.com/go/ids v1.5.6/go.mod h1:y3SGLmEf9KiwKsH7OHvYYVNIJAtXybqsD2z8gppsziQ= +cloud.google.com/go/iot v1.8.6/go.mod h1:MThnkiihNkMysWNeNje2Hp0GSOpEq2Wkb/DkBCVYa0U= +cloud.google.com/go/kms v1.21.2/go.mod h1:8wkMtHV/9Z8mLXEXr1GK7xPSBdi6knuLXIhqjuWcI6w= +cloud.google.com/go/language v1.14.5/go.mod h1:nl2cyAVjcBct1Hk73tzxuKebk0t2eULFCaruhetdZIA= +cloud.google.com/go/lifesciences v0.10.6/go.mod h1:1nnZwaZcBThDujs9wXzECnd1S5d+UiDkPuJWAmhRi7Q= +cloud.google.com/go/managedidentities v1.7.6/go.mod h1:pYCWPaI1AvR8Q027Vtp+SFSM/VOVgbjBF4rxp1/z5p4= +cloud.google.com/go/maps v1.20.4/go.mod h1:Act0Ws4HffrECH+pL8YYy1scdSLegov7+0c6gvKqRzI= +cloud.google.com/go/mediatranslation v0.9.6/go.mod h1:WS3QmObhRtr2Xu5laJBQSsjnWFPPthsyetlOyT9fJvE= +cloud.google.com/go/memcache v1.11.6/go.mod h1:ZM6xr1mw3F8TWO+In7eq9rKlJc3jlX2MDt4+4H+/+cc= +cloud.google.com/go/metastore v1.14.6/go.mod h1:iDbuGwlDr552EkWA5E1Y/4hHme3cLv3ZxArKHXjS2OU= +cloud.google.com/go/networkconnectivity v1.17.1/go.mod h1:DTZCq8POTkHgAlOAAEDQF3cMEr/B9k1ZbpklqvHEBtg= +cloud.google.com/go/networkmanagement v1.19.1/go.mod h1:icgk265dNnilxQzpr6rO9WuAuuCmUOqq9H6WBeM2Af4= +cloud.google.com/go/networksecurity v0.10.6/go.mod h1:FTZvabFPvK2kR/MRIH3l/OoQ/i53eSix2KA1vhBMJec= +cloud.google.com/go/notebooks v1.12.6/go.mod h1:3Z4TMEqAKP3pu6DI/U+aEXrNJw9hGZIVbp+l3zw8EuA= +cloud.google.com/go/optimization v1.7.6/go.mod h1:4MeQslrSJGv+FY4rg0hnZBR/tBX2awJ1gXYp6jZpsYY= +cloud.google.com/go/orchestration v1.11.9/go.mod h1:KKXK67ROQaPt7AxUS1V/iK0Gs8yabn3bzJ1cLHw4XBg= +cloud.google.com/go/orgpolicy v1.15.0/go.mod h1:NTQLwgS8N5cJtdfK55tAnMGtvPSsy95JJhESwYHaJVs= +cloud.google.com/go/osconfig v1.14.5/go.mod h1:XH+NjBVat41I/+xgQzKOJEhuC4xI7lX2INE5SWnVr9U= +cloud.google.com/go/oslogin v1.14.6/go.mod h1:xEvcRZTkMXHfNSKdZ8adxD6wvRzeyAq3cQX3F3kbMRw= 
+cloud.google.com/go/phishingprotection v0.9.6/go.mod h1:VmuGg03DCI0wRp/FLSvNyjFj+J8V7+uITgHjCD/x4RQ= +cloud.google.com/go/policytroubleshooter v1.11.6/go.mod h1:jdjYGIveoYolk38Dm2JjS5mPkn8IjVqPsDHccTMu3mY= +cloud.google.com/go/privatecatalog v0.10.7/go.mod h1:Fo/PF/B6m4A9vUYt0nEF1xd0U6Kk19/Je3eZGrQ6l60= +cloud.google.com/go/pubsub v1.49.0/go.mod h1:K1FswTWP+C1tI/nfi3HQecoVeFvL4HUOB1tdaNXKhUY= +cloud.google.com/go/pubsublite v1.8.2/go.mod h1:4r8GSa9NznExjuLPEJlF1VjOPOpgf3IT6k8x/YgaOPI= +cloud.google.com/go/recaptchaenterprise/v2 v2.20.4/go.mod h1:3H8nb8j8N7Ss2eJ+zr+/H7gyorfzcxiDEtVBDvDjwDQ= +cloud.google.com/go/recommendationengine v0.9.6/go.mod h1:nZnjKJu1vvoxbmuRvLB5NwGuh6cDMMQdOLXTnkukUOE= +cloud.google.com/go/recommender v1.13.5/go.mod h1:v7x/fzk38oC62TsN5Qkdpn0eoMBh610UgArJtDIgH/E= +cloud.google.com/go/redis v1.18.2/go.mod h1:q6mPRhLiR2uLf584Lcl4tsiRn0xiFlu6fnJLwCORMtY= +cloud.google.com/go/resourcemanager v1.10.6/go.mod h1:VqMoDQ03W4yZmxzLPrB+RuAoVkHDS5tFUUQUhOtnRTg= +cloud.google.com/go/resourcesettings v1.8.3/go.mod h1:BzgfXFHIWOOmHe6ZV9+r3OWfpHJgnqXy8jqwx4zTMLw= +cloud.google.com/go/retail v1.20.0/go.mod h1:1CXWDZDJTOsK6lPjkv67gValP9+h1TMadTC9NpFFr9s= +cloud.google.com/go/run v1.9.3/go.mod h1:Si9yDIkUGr5vsXE2QVSWFmAjJkv/O8s3tJ1eTxw3p1o= +cloud.google.com/go/scheduler v1.11.7/go.mod h1:gqYs8ndLx2M5D0oMJh48aGS630YYvC432tHCnVWN13s= +cloud.google.com/go/secretmanager v1.14.7/go.mod h1:uRuB4F6NTFbg0vLQ6HsT7PSsfbY7FqHbtJP1J94qxGc= +cloud.google.com/go/security v1.18.5/go.mod h1:D1wuUkDwGqTKD0Nv7d4Fn2Dc53POJSmO4tlg1K1iS7s= +cloud.google.com/go/securitycenter v1.36.2/go.mod h1:80ocoXS4SNWxmpqeEPhttYrmlQzCPVGaPzL3wVcoJvE= +cloud.google.com/go/servicedirectory v1.12.6/go.mod h1:OojC1KhOMDYC45oyTn3Mup08FY/S0Kj7I58dxUMMTpg= +cloud.google.com/go/shell v1.8.6/go.mod h1:GNbTWf1QA/eEtYa+kWSr+ef/XTCDkUzRpV3JPw0LqSk= +cloud.google.com/go/spanner v1.80.0/go.mod h1:XQWUqx9r8Giw6gNh0Gu8xYfz7O+dAKouAkFCxG/mZC8= +cloud.google.com/go/speech v1.27.1/go.mod 
h1:efCfklHFL4Flxcdt9gpEMEJh9MupaBzw3QiSOVeJ6ck= +cloud.google.com/go/storagetransfer v1.12.4/go.mod h1:p1xLKvpt78aQFRJ8lZGYArgFuL4wljFzitPZoYjl/8A= +cloud.google.com/go/talent v1.8.3/go.mod h1:oD3/BilJpJX8/ad8ZUAxlXHCslTg2YBbafFH3ciZSLQ= +cloud.google.com/go/texttospeech v1.12.1/go.mod h1:f8vrD3OXAKTRr4eL0TPjZgYQhiN6ti/tKM3i1Uub5X0= +cloud.google.com/go/tpu v1.8.3/go.mod h1:Do6Gq+/Jx6Xs3LcY2WhHyGwKDKVw++9jIJp+X+0rxRE= +cloud.google.com/go/translate v1.12.5/go.mod h1:o/v+QG/bdtBV1d1edmtau0PwTfActvxPk/gtqdSDBi4= +cloud.google.com/go/video v1.23.5/go.mod h1:ZSpGFCpfTOTmb1IkmHNGC/9yI3TjIa/vkkOKBDo0Vpo= +cloud.google.com/go/videointelligence v1.12.6/go.mod h1:/l34WMndN5/bt04lHodxiYchLVuWPQjCU6SaiTswrIw= +cloud.google.com/go/vision/v2 v2.9.5/go.mod h1:1SiNZPpypqZDbOzU052ZYRiyKjwOcyqgGgqQCI/nlx8= +cloud.google.com/go/vmmigration v1.8.6/go.mod h1:uZ6/KXmekwK3JmC8PzBM/cKQmq404TTfWtThF6bbf0U= +cloud.google.com/go/vmwareengine v1.3.5/go.mod h1:QuVu2/b/eo8zcIkxBYY5QSwiyEcAy6dInI7N+keI+Jg= +cloud.google.com/go/vpcaccess v1.8.6/go.mod h1:61yymNplV1hAbo8+kBOFO7Vs+4ZHYI244rSFgmsHC6E= +cloud.google.com/go/webrisk v1.11.1/go.mod h1:+9SaepGg2lcp1p0pXuHyz3R2Yi2fHKKb4c1Q9y0qbtA= +cloud.google.com/go/websecurityscanner v1.7.6/go.mod h1:ucaaTO5JESFn5f2pjdX01wGbQ8D6h79KHrmO2uGZeiY= +cloud.google.com/go/workflows v1.14.2/go.mod h1:5nqKjMD+MsJs41sJhdVrETgvD5cOK3hUcAs8ygqYvXQ= +connectrpc.com/connect v1.11.1/go.mod h1:3AGaO6RRGMx5IKFfqbe3hvK1NqLosFNP2BxDYTPmNPo= +connectrpc.com/otelconnect v0.6.0/go.mod h1:jdcs0uiwXQVmSMgTJ2dAaWR5VbpNd7QKNkuoH7n86RA= +github.com/AssemblyAI/assemblyai-go-sdk v1.3.0/go.mod h1:H0naZbvpIW49cDA5ZZ/gggeXqi7ojSGB1mqshRk6kNE= +github.com/ClickHouse/ch-go v0.67.0/go.mod h1:2MSAeyVmgt+9a2k2SQPPG1b4qbTPzdGDpf1+bcHh+18= +github.com/ClickHouse/clickhouse-go/v2 v2.40.1/go.mod h1:GDzSBLVhladVm8V01aEB36IoBOVLLICfyeuiIp/8Ezc= +github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= +github.com/IBM/watsonx-go v1.0.0/go.mod 
h1:8lzvpe/158JkrzvcoIcIj6OdNty5iC9co5nQHfkhRtM= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= +github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Rhymond/go-money v1.0.15/go.mod h1:iHvCuIvitxu2JIlAlhF0g9jHqjRSr+rpdOs7Omqlupg= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/amikos-tech/chroma-go v0.1.2/go.mod h1:R/RUp0aaqCWdSXWyIUTfjuNymwqBGLYFgXNZEmisphY= +github.com/amikos-tech/chroma-go v0.1.4/go.mod h1:sT6uXOo/L5S/Q0v9jpYtoR1iOM68hUE2itWw8sOwLHY= +github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= +github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/ankane/disco-go v0.1.2/go.mod h1:nkR7DLW+KkXeRRAsWk6poMTpTOWp9/4iKYGDwg8dSS0= +github.com/antchfx/htmlquery v1.3.0/go.mod h1:zKPDVTMhfOmcwxheXUsx4rKJy8KEY/PU6eXr/2SebQ8= +github.com/antchfx/xmlquery v1.3.17/go.mod h1:Afkq4JIeXut75taLSuI31ISJ/zeq+3jG7TunF7noreA= +github.com/antchfx/xpath v1.2.4/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/anthropics/anthropic-sdk-go v1.2.1/go.mod h1:AapDW22irxK2PSumZiQXYUFvsdQgkwIWlpESweWZI/c= +github.com/antihax/optional 
v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= +github.com/aws/aws-sdk-go-v2/config v1.27.12/go.mod h1:IOrsf4IiN68+CgzyuyGUYTpCrtUQTbbMEAtR/MR/4ZU= +github.com/aws/aws-sdk-go-v2/config v1.29.4/go.mod h1:j2/AF7j/qxVmsNIChw1tWfsVKOayJoGRDjg1Tgq7NPk= +github.com/aws/aws-sdk-go-v2/credentials v1.17.12/go.mod h1:jlWtGFRtKsqc5zqerHZYmKmRkUXo3KPM14YJ13ZEjwE= +github.com/aws/aws-sdk-go-v2/credentials v1.17.57/go.mod h1:2kerxPUUbTagAr/kkaHiqvj/bcYHzi2qiJS/ZinllU0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/ini 
v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs= +github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.40.0/go.mod h1:WlMBqEPeaBywfaXoMAfpitHvwezq555o8waYL3cCPqo= +github.com/aws/aws-sdk-go-v2/service/bedrockagentruntime v1.41.0/go.mod h1:Kek1IWlEDT1bp8kO+soWZh37Cb13LppHUTbMiJunna0= +github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.8.1/go.mod h1:nZspkhg+9p8iApLFoyAqfyuMP0F38acy2Hm3r5r95Cg= +github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.24.3/go.mod h1:PKGlRhLmSZuA6iCbRD1oZKrTJHdm6NWwWBvHxfDNHTA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.6/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.5/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.7/go.mod 
h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.12/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= +github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/bufbuild/buf v1.27.2/go.mod h1:7RImDhFDqhEsdK5wbuMhoVSlnrMggGGcd3s9WozvHtM= +github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chewxy/hm v1.0.0/go.mod h1:qg9YI4q6Fkj/whwHR1D+bOGeF7SniIP40VweVepLjg0= +github.com/chewxy/math32 v1.11.0/go.mod h1:dOB2rcuFrCn6UHrze36WSLVPKtzPMRAQvBvUwkSsLqs= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= +github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= +github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= +github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/redact v1.1.3/go.mod 
h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= +github.com/cohere-ai/tokenizer v1.1.2/go.mod h1:9MNFPd9j1fuiEK3ua2HSCUxxcrfGMlSqpa93livg/C0= +github.com/cohesion-org/deepseek-go v1.3.2/go.mod h1:bOVyKj38r90UEYZFrmJOzJKPxuAh8sIzHOCnLOpiXeI= +github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd v1.7.15/go.mod h1:ISzRRTMF8EXNpJlTzyr2XMhN+j9K302C21/+cr3kUnY= +github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= +github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1/go.mod h1:uw2gLcxEuYUlAd/EXyjc/v55nd3+47YAgWbSXVxPrNI= +github.com/deepmap/oapi-codegen/v2 v2.1.0/go.mod h1:R1wL226vc5VmCNJUvMyYr3hJMm5reyv25j952zAVXZ8= +github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= +github.com/dromara/carbon/v2 v2.6.12/go.mod h1:NGo3reeV5vhWCYWcSqbJRZm46MEwyfYI5EJRdVFoLJo= +github.com/elastic/go-sysinfo v1.15.4/go.mod h1:ZBVXmqS368dOn/jvijV/zHLfakWTYHBZPk3G244lHrU= +github.com/elastic/go-windows v1.0.2/go.mod h1:bGcDpBzXgYSqM0Gx3DM4+UxFj300SZLixie9u9ixLM8= +github.com/emirpasic/gods/v2 v2.0.0-alpha/go.mod h1:W0y4M2dtBB9U5z3YlghmpuUhiaZT2h6yoeE+C1sCp6A= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.18.0/go.mod 
h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/gage-technologies/mistral-go v1.1.0/go.mod h1:tF++Xt7U975GcLlzhrjSQb8l/x+PrriO9QEdsgm9l28= +github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= +github.com/getsentry/sentry-go v0.30.0/go.mod h1:WU9B9/1/sHDqeV8T+3VwwbjeR5MSXs/6aqG3mqZrezA= +github.com/getzep/zep-go v1.0.4/go.mod h1:HC1Gz7oiyrzOTvzeKC4dQKUiUy87zpIJl0ZFXXdHuss= +github.com/gin-contrib/cors v1.7.2/go.mod h1:SUJVARKgQ40dmrzgXEVxj2m7Ig1v1qIboQkPDTQ9t2E= +github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk= +github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= +github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= +github.com/go-openapi/inflect v0.21.0/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw= +github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.24.2/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.23.0/go.mod 
h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gocolly/colly v1.2.0/go.mod h1:Hof5T3ZswNVsOHYmba1u03W65HDWgpV5HifSuueE0EA= +github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-pkcs11 v0.3.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= +github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= 
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8= +github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= +github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/jdx/go-netrc v1.0.0/go.mod h1:Gh9eFQJnoTNIRHXl2j5bJXA1u84hQWJWgGh569zF3v8= +github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jstemmer/go-junit-report/v2 v2.1.0/go.mod h1:mgHVr7VUo5Tn8OLVr1cKnLuEy0M92wdRntM99h7RkgQ= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= +github.com/maruel/panicparse/v2 v2.4.0/go.mod h1:nOY2OKe8csO3F3SA5+hsxot05JLgukrF54B9x88fVp4= +github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= 
+github.com/metaphorsystems/metaphor-go v0.0.0-20230816231421-43794c04824e/go.mod h1:mDz8kHE7x6Ja95drCQ2T1vLyPRc/t69Cf3wau91E3QU= +github.com/mfridman/xflag v0.1.0/go.mod h1:/483ywM5ZO5SuMVjrIGquYNE5CzLrj5Ux/LxWWnjRaE= +github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= +github.com/microsoft/go-mssqldb v1.9.2/go.mod h1:GBbW9ASTiDC+mpgWDGKdm3FnFLTUsLYN3iFL90lQ+PA= +github.com/milvus-io/milvus-proto/go-api/v2 v2.3.5/go.mod h1:1OIl0v5PQeNxIJhCvY+K55CBUOYDZevw9g9380u1Wek= +github.com/milvus-io/milvus-proto/go-api/v2 v2.6.1-0.20250819024338-07695f709619/go.mod h1:/6UT4zZl6awVeXLeE7UGDWZvXj3IWkRsh3mqsn0DiAs= +github.com/milvus-io/milvus-sdk-go/v2 v2.3.6/go.mod h1:bYFSXVxEj6A/T8BfiR+xkofKbAVZpWiDvKr3SzYUWiA= +github.com/milvus-io/milvus-sdk-go/v2 v2.4.0/go.mod h1:8IKyxVV+kd+RADMuMpo8GXnTDq5ZxrSSWpe9nJieboQ= +github.com/milvus-io/milvus/client/v2 v2.6.0/go.mod h1:5ppFKT61Fh5Z1MkAhK7+nLnlh9C+ENBe/dpgFBH0te0= +github.com/milvus-io/milvus/pkg/v2 v2.0.0-20250319085209-5a6b4e56d59e/go.mod h1:37AWzxVs2NS4QUJrkcbeLUwi+4Av0h5mEdjLI62EANU= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/moby/sys/mount v0.3.4/go.mod h1:KcQJMbQdJHPlq5lcYT+/CjatWM4PuxKe+XLSVS4J6Os= +github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= +github.com/moby/sys/reexec v0.1.0/go.mod h1:EqjBg8F3X7iZe5pU6nRZnYCMUTXoxsjiIfHup5wYIN8= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4= +github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= 
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opensearch-project/opensearch-go v1.1.0/go.mod h1:+6/XHCuTH+fwsMJikZEWsucZ4eZMma3zNSeLrTtVGbo= +github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek= +github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c/go.mod h1:PSojXDXF7TbgQiD6kkd98IHOS0QqTyUEaWRiS8+BLu8= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pinecone-io/go-pinecone v0.4.1/go.mod h1:KwWSueZFx9zccC+thBk13+LDiOgii8cff9bliUI4tQs= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= +github.com/redis/rueidis v1.0.34/go.mod h1:g8nPmgR4C68N3abFiOc/gUOSEKw3Tom6/teYMehg4RE= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= +github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU= +github.com/samber/lo v1.27.0/go.mod h1:it33p9UtPMS7z72fP4gw/EIfQB2eI8ke7GR2wc6+Rhg= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod 
h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/smartystreets/assertions v1.1.1/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= +github.com/smartystreets/gunit v1.4.2/go.mod h1:ZjM1ozSIMJlAz/ay4SG8PeKF00ckUp+zMHZXV9/bvak= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/temoto/robotstxt v1.1.2/go.mod h1:+1AmkuG3IYkh1kv0d2qEB9Le88ehNO0zwOr3ujewlOo= +github.com/temporalio/tctl-kit v0.0.0-20250107205014-58462b03dfb2/go.mod h1:hk/LJCKZNNmtVSWRKepbdUJme+k/4fb/hPkekXk40sk= +github.com/testcontainers/testcontainers-go/modules/chroma v0.31.0/go.mod h1:dYvKTWVnJ58YizDYX2txYwDG4FvudYUmx37tvbza90o= +github.com/testcontainers/testcontainers-go/modules/chroma v0.37.0/go.mod h1:IWJavzQy7rxM40OqOgSN5iyckgAw21wDyE+NhSctatk= +github.com/testcontainers/testcontainers-go/modules/mariadb v0.38.0/go.mod h1:26mrWngnaRhxmgy942aVfUihLnihbIGsuIds6gGBnIE= +github.com/testcontainers/testcontainers-go/modules/milvus v0.31.0/go.mod h1:ta9EDZd+lKBMU7enljbNu5H1G495fnT0dw7hmsCPWa0= +github.com/testcontainers/testcontainers-go/modules/milvus v0.37.0/go.mod h1:bCdLqxjPKax120BMl4aO/A0gs9+4FeJkLBVf9WpjFoQ= +github.com/testcontainers/testcontainers-go/modules/mongodb 
v0.31.0/go.mod h1:n5KbYAdzD8xJrNVGdPvSacJtwZ4D0Q/byTMI5vR/dk8= +github.com/testcontainers/testcontainers-go/modules/mongodb v0.37.0/go.mod h1:e9/4dGJfSZW59/kXGf/ksrEvA+BqP/daax0Usp2cpsM= +github.com/testcontainers/testcontainers-go/modules/mysql v0.31.0/go.mod h1:REFmO+lSG9S6uSBEwIMZCxeI36uhScjTwChYADeO3JA= +github.com/testcontainers/testcontainers-go/modules/mysql v0.37.0/go.mod h1:vHEEHx5Kf+uq5hveaVAMrTzPY8eeRZcKcl23MRw5Tkc= +github.com/testcontainers/testcontainers-go/modules/opensearch v0.31.0/go.mod h1:l4Z7QqGpdk4wTTQk8J8CZ75pfqAz1dizm+LECOLuNVw= +github.com/testcontainers/testcontainers-go/modules/opensearch v0.37.0/go.mod h1:2jEljlB96QHSHF7Vo9S8zEDisPPrfsddzSvsCR1ihNQ= +github.com/testcontainers/testcontainers-go/modules/qdrant v0.31.0/go.mod h1:/3GyFMTSiem1j5mfI/96MufdNvB3A8Xqa+xnV4CUR4A= +github.com/testcontainers/testcontainers-go/modules/redis v0.31.0/go.mod h1:dKi5xBwy1k4u8yb3saQHu7hMEJwewHXxzbcMAuLiA6o= +github.com/testcontainers/testcontainers-go/modules/redis v0.37.0/go.mod h1:Abu9g/25Qv+FkYVx3U4Voaynou1c+7D0HIhaQJXvk6E= +github.com/testcontainers/testcontainers-go/modules/weaviate v0.31.0/go.mod h1:WNc2XhLphiLdNJdjJZvUtRj08ThLY8FL60y7FQSJTPQ= +github.com/testcontainers/testcontainers-go/modules/weaviate v0.37.0/go.mod h1:VdjCqOCJGzlGLS2p4NdLjN5rqN3/53mle+Gb+irCbOE= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tursodatabase/libsql-client-go v0.0.0-20240902231107-85af5b9d094d/go.mod h1:l8xTsYB90uaVdMHXMCxKKLSgw5wLYBwBKKefNIUnm9s= +github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9PcvAkXO4nNMwg= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po= +github.com/urfave/cli/v2 v2.3.0/go.mod 
h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= +github.com/valyala/fasthttp v1.47.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA= +github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= +github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= +github.com/weaviate/weaviate v1.24.1/go.mod h1:wcg1vJgdIQL5MWBN+871DFJQa+nI2WzyXudmGjJ8cG4= +github.com/weaviate/weaviate v1.29.0/go.mod h1:UsnbM1Kmm5Om+UPU6DTo421SDeMD8SqCJqsBs/nwgcI= +github.com/weaviate/weaviate-go-client/v4 v4.13.1/go.mod h1:B2m6g77xWDskrCq1GlU6CdilS0RG2+YXEgzwXRADad0= +github.com/weaviate/weaviate-go-client/v5 v5.0.2/go.mod h1:CwZehIL4s3VfkzTu12Wy8VAUtELRtQFUt2ZniBF/lQM= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= +github.com/xtgo/set v1.0.0/go.mod h1:d3NHzGzSa0NmB2NhFyECA+QdRp29oEn2xbT+TpeFoM8= +github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20241112172322-ea1f63298f77/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= 
+github.com/ydb-platform/ydb-go-sdk/v3 v3.108.1/go.mod h1:l5sSv153E18VvYcsmr51hok9Sjc16tEC8AXGbwrk+ho= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= +github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= +go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= +go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= +go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4= +go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= +go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyPdaE= +go.etcd.io/etcd/raft/v3 v3.5.5/go.mod h1:76TA48q03g1y1VpTue92jZLr9lIHKUNcYdZOOGyx8rI= +go.etcd.io/etcd/server/v3 v3.5.5/go.mod h1:rZ95vDw/jrvsbj9XpTqPrTAB9/kzchVdhRirySPkUBc= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.mongodb.org/mongo-driver/v2 v2.0.0/go.mod h1:nSjmNq4JUstE8IRZKTktLgMHM4F1fccL6HGX1yh+8RA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +golang.org/x/image v0.22.0/go.mod h1:9hPFhljd4zZ1GNSIZJ49sqbp45GKK9t6w+iXvGqZUz4= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/net v0.44.0/go.mod 
h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053/go.mod h1:+nZKN+XVh4LCiA9DV3ywrzN4gumyCnKjau3NGb9SGoE= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250512202823-5a2f75b736a9/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= +google.golang.org/grpc/examples v0.0.0-20230224211313-3775f633ce20/go.mod h1:Nr5H8+MlGWr5+xX/STzdoEqJrO+YteqFbMyCsrb6mH0= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gorgonia.org/vecf32 v0.9.0/go.mod h1:NCc+5D2oxddRL11hd+pCB1PEyXWOyiQxfZ/1wwhOXCA= +gorgonia.org/vecf64 v0.9.0/go.mod h1:hp7IOWCnRiVQKON73kkC/AUMtEXyf9kGlVrtPQ9ccVA= +howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= +k8s.io/apimachinery v0.28.6/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y= +modernc.org/ccgo/v3 v3.17.0/go.mod h1:Sg3fwVpmLvCUTaqEUjiBDAvshIaKDB0RXaf+zgqFu8I= +modernc.org/ccorpus2 v1.5.4/go.mod h1:Wifvo4Q/qS/h1aRoC2TffcHsnxwTikmi1AuLANuucJQ= +modernc.org/gc/v3 v3.0.0-20250121204235-2db1fde51ea4/go.mod 
h1:LG5UO1Ran4OO0JRKz2oNiXhR5nNrgz0PzH7UKhz0aMU= +modernc.org/lex v1.1.1/go.mod h1:6r8o8DLJkAnOsQaGi8fMoi+Vt6LTbDaCrkUK729D8xM= +modernc.org/lexer v1.0.4/go.mod h1:tOajb8S4sdfOYitzCgXDFmbVJ/LE0v1fNJ7annTw36U= +modernc.org/scannertest v1.0.2/go.mod h1:RzTm5RwglF/6shsKoEivo8N91nQIoWtcWI7ns+zPyGA= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= diff --git a/magefile.go b/magefile.go new file mode 100644 index 00000000..ff9ee9d9 --- /dev/null +++ b/magefile.go @@ -0,0 +1,772 @@ +//go:build mage + +package main + +import ( + "bufio" + "bytes" + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/magefile/mage/mg" + "github.com/magefile/mage/sh" + "golang.org/x/sync/errgroup" +) + +// Default target runs when no target is specified +var Default = Dev + +const ( + binaryName = "compozy" + binaryDir = "bin" + swaggerDir = "docs" + goVersion = "1.25" + testParallelism = "4" + testParallelFlag = "-parallel=" + testParallelism + // minSDKCoverage enforces the minimum coverage percentage for sdk/compozy packages. 
+ minSDKCoverage = 0.85 +) + +var ( + mainPackages = []string{"cli", "engine", "pkg", "test"} + // Build variables + gitCommit = getGitCommit() + version = getVersion() + buildDate = time.Now().UTC().Format("2006-01-02T15:04:05Z") +) + +// Quality contains all code quality targets +type Quality mg.Namespace + +// Docker contains all docker-related targets +type Docker mg.Namespace + +// Database contains all database-related targets +type Database mg.Namespace + +// Redis contains all redis-related targets +type Redis mg.Namespace + +// Schema contains schema generation targets +type Schema mg.Namespace + +// Integration groups integration test targets +type Integration mg.Namespace + +// Dev runs the development server with hot reload +func Dev() error { + example := os.Getenv("EXAMPLE") + if example == "" { + example = "weather" + } + return sh.RunV("gow", "run", ".", "dev", + "--cwd", "examples/"+example, + "--env-file", ".env", + "--debug", + "--watch") +} + +// Build compiles the binary with version info +func Build(ctx context.Context) error { + mg.CtxDeps(ctx, checkGoVersion, Swagger) + if err := os.MkdirAll(binaryDir, 0755); err != nil { + return err + } + ldflags := fmt.Sprintf( + "-X github.com/compozy/compozy/pkg/version.Version=%s "+ + "-X github.com/compozy/compozy/pkg/version.CommitHash=%s "+ + "-X github.com/compozy/compozy/pkg/version.BuildDate=%s", + version, gitCommit, buildDate, + ) + if err := sh.RunV("go", "build", "-ldflags", ldflags, "-o", filepath.Join(binaryDir, binaryName), "./"); err != nil { + return err + } + return sh.RunV("chmod", "+x", filepath.Join(binaryDir, binaryName)) +} + +// Clean removes build artifacts +func Clean() error { + fmt.Println("Cleaning build artifacts...") + if err := sh.Rm(binaryDir); err != nil { + fmt.Printf("Warning: failed to remove %s: %v\n", binaryDir, err) + } + if err := sh.Rm(swaggerDir); err != nil { + fmt.Printf("Warning: failed to remove %s: %v\n", swaggerDir, err) + } + return sh.RunV("go", "clean") 
+} + +// Test runs all test suites in parallel +func Test(ctx context.Context) error { + fmt.Println("Running all tests in parallel...") + mg.CtxDeps(ctx, testMain, testSDK, testBun) + fmt.Println("✓ All tests passed") + return nil +} + +// TestCoverage runs all tests with coverage +func TestCoverage(ctx context.Context) error { + fmt.Println("Running tests with coverage...") + mg.CtxDeps(ctx, testCoverageMain, testCoverageSDK, testBun) + fmt.Println("✓ All tests with coverage completed") + return nil +} + +// TestNoCache runs all tests without cache +func TestNoCache(ctx context.Context) error { + fmt.Println("Running tests without cache...") + mg.CtxDeps(ctx, testNoCacheMain, testNoCacheSDK, testBun) + fmt.Println("✓ All tests (no cache) completed") + return nil +} + +// All runs swagger, tests, linting, and formatting in optimal order +func All(ctx context.Context) error { + fmt.Println("Running all checks...") + if err := Swagger(ctx); err != nil { + return err + } + mg.CtxDeps(ctx, Test, Quality.Lint, Quality.Fmt) + fmt.Println("✓ All checks passed") + return nil +} + +// Setup installs all dependencies and checks Go version +func Setup(ctx context.Context) error { + mg.CtxDeps(ctx, checkGoVersion, Deps) + fmt.Println("✓ Setup complete! 
You can now run 'mage build' or 'mage dev'") + return nil +} + +// Deps installs all required dependencies including mage +func Deps(ctx context.Context) error { + fmt.Println("Installing Go dependencies...") + mg.CtxDeps(ctx, cleanGoCache, swaggerDeps) + deps := []struct { + name string + pkg string + }{ + {"mage", "github.com/magefile/mage@latest"}, + {"gotestsum", "gotest.tools/gotestsum@latest"}, + {"gow", "github.com/mitranim/gow@latest"}, + {"goose", "github.com/pressly/goose/v3/cmd/goose@latest"}, + } + for _, dep := range deps { + fmt.Printf("Installing %s...\n", dep.name) + if err := sh.RunV("go", "install", dep.pkg); err != nil { + return fmt.Errorf("failed to install %s: %w", dep.name, err) + } + } + fmt.Println("Installing golangci-lint v2...") + installScript := "curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.4.0" + if err := sh.RunV("sh", "-c", installScript); err != nil { + return fmt.Errorf("failed to install golangci-lint: %w", err) + } + fmt.Println("✓ All dependencies installed successfully") + return nil +} + +// Tidy runs go mod tidy +func Tidy() error { + fmt.Println("Tidying modules...") + return sh.RunV("go", "mod", "tidy") +} + +// Swagger generates API documentation +func Swagger(ctx context.Context) error { + needsRebuild, err := swaggerNeedsRebuild() + if err != nil { + return err + } + if !needsRebuild { + fmt.Println("✓ Swagger documentation is up-to-date") + return nil + } + fmt.Println("Generating Swagger documentation...") + if err := os.MkdirAll(swaggerDir, 0755); err != nil { + return err + } + cmd := exec.Command("swag", "init", + "--dir", "./", + "--generalInfo", "main.go", + "--output", swaggerDir, + "--parseDependency", + "--parseInternal", + ) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("swagger generation failed: %w\n%s", err, output) + } + filtered := filterSwaggerWarnings(string(output)) + if filtered != "" { + 
fmt.Print(filtered) + } + fmt.Println("Running pre-commit on generated swagger files...") + preCommitFiles := []string{ + filepath.Join(swaggerDir, "docs.go"), + filepath.Join(swaggerDir, "swagger.json"), + filepath.Join(swaggerDir, "swagger.yaml"), + } + if err := sh.Run("pre-commit", append([]string{"run", "--files"}, preCommitFiles...)...); err != nil { + fmt.Println("Warning: pre-commit checks failed (continuing anyway)") + } + fmt.Printf("Swagger documentation generated at %s\n", swaggerDir) + return nil +} + +// SwaggerValidate validates Swagger documentation +func SwaggerValidate() error { + fmt.Println("Validating Swagger documentation...") + return sh.RunV("swag", "init", + "--dir", "./", + "--generalInfo", "main.go", + "--output", swaggerDir, + "--parseDependency", + "--parseInternal", + "--quiet", + ) +} + +// Lint runs all linters in parallel +func (Quality) Lint(ctx context.Context) error { + fmt.Println("Running linters in parallel...") + mg.CtxDeps(ctx, + lintMain, + lintSDK, + lintBun, + checkDriverImports, + ) + fmt.Println("✓ Linting completed successfully") + return nil +} + +// Fmt formats all code in parallel +func (Quality) Fmt(ctx context.Context) error { + fmt.Println("Formatting code in parallel...") + mg.CtxDeps(ctx, fmtMain, fmtSDK, fmtBun) + fmt.Println("✓ Formatting completed successfully") + return nil +} + +// Typecheck runs type checking on all modules +func (Quality) Typecheck(ctx context.Context) error { + fmt.Println("Type checking all modules...") + mg.CtxDeps(ctx, typecheckMain, typecheckSDK) + fmt.Println("✓ Type checking completed successfully") + return nil +} + +// Modernize modernizes code patterns in all modules +func (Quality) Modernize(ctx context.Context) error { + fmt.Println("Modernizing code patterns...") + mg.CtxDeps(ctx, modernizeMain, modernizeSDK) + fmt.Println("✓ Modernization completed successfully") + return nil +} + +// Start starts Docker services +func (Docker) Start() error { + fmt.Println("Starting Docker 
services...") + return sh.RunV("docker", "compose", "-f", "./cluster/docker-compose.yml", "up", "-d") +} + +// Stop stops Docker services +func (Docker) Stop() error { + fmt.Println("Stopping Docker services...") + return sh.RunV("docker", "compose", "-f", "./cluster/docker-compose.yml", "down") +} + +// Clean removes Docker volumes +func (Docker) Clean() error { + fmt.Println("Cleaning Docker volumes...") + return sh.RunV("docker", "compose", "-f", "./cluster/docker-compose.yml", "down", "--volumes") +} + +// Reset resets Docker environment +func (Docker) Reset(ctx context.Context) error { + mg.SerialCtxDeps(ctx, Docker.Clean, Docker.Start) + return nil +} + +// Status shows migration status +func (Database) Status() error { + return runGoose("status") +} + +// Up applies pending migrations +func (Database) Up() error { + return runGoose("up") +} + +// Down rolls back last migration +func (Database) Down() error { + return runGoose("down") +} + +// Create creates a new migration +func (Database) Create() error { + name := os.Getenv("name") + if name == "" { + return fmt.Errorf("migration name required: use 'name= mage database:create'") + } + return runGoose("create", name, "sql") +} + +// Validate validates migrations +func (Database) Validate() error { + return runGoose("validate") +} + +// Reset resets database +func (Database) Reset(ctx context.Context) error { + return runGoose("reset") +} + +// CLI opens Redis CLI +func (Redis) CLI() error { + password := getEnv("REDIS_PASSWORD", "redis_secret") + return sh.RunV("docker", "exec", "-it", "redis", "redis-cli", "-a", password) +} + +// Info shows Redis info +func (Redis) Info() error { + password := getEnv("REDIS_PASSWORD", "redis_secret") + return sh.RunV("docker", "exec", "redis", "redis-cli", "-a", password, "info") +} + +// Monitor monitors Redis commands +func (Redis) Monitor() error { + password := getEnv("REDIS_PASSWORD", "redis_secret") + return sh.RunV("docker", "exec", "-it", "redis", "redis-cli", 
"-a", password, "monitor") +} + +// Flush flushes all Redis data +func (Redis) Flush() error { + password := getEnv("REDIS_PASSWORD", "redis_secret") + return sh.RunV("docker", "exec", "redis", "redis-cli", "-a", password, "flushall") +} + +// TestConnection tests Redis connection +func (Redis) TestConnection() error { + fmt.Println("Testing Redis connection...") + password := getEnv("REDIS_PASSWORD", "redis_secret") + return sh.RunV("docker", "exec", "redis", "redis-cli", "-a", password, "ping") +} + +// SdkCompozy runs the sdk/compozy integration suite +func (Integration) SdkCompozy(ctx context.Context) error { + cmd := exec.CommandContext(ctx, "gotestsum", + "--format", "pkgname", + "--", + "-race", + "-parallel=4", + "./sdk/compozy/...", + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + +func getIntegrationTestDirs() ([]string, error) { + entries, err := os.ReadDir("./test/integration") + if err != nil { + return nil, fmt.Errorf("failed to read integration test directory: %w", err) + } + var dirs []string + for _, entry := range entries { + if entry.IsDir() { + dirs = append(dirs, filepath.Join("./test/integration", entry.Name())) + } + } + return dirs, nil +} + +// Generate generates JSON schemas +func (Schema) Generate() error { + fmt.Println("Generating schemas...") + if err := sh.RunV("go", "run", "./pkg/schemagen", "-out=./schemas"); err != nil { + return err + } + uiPath := "../compozy-ui/backend/" + if _, err := os.Stat(uiPath); err == nil { + fmt.Println("Copying schemas to compozy-ui...") + return sh.RunV("cp", "-Rf", "./schemas", uiPath) + } + return nil +} + +// Watch watches and regenerates schemas on changes +func (Schema) Watch() error { + fmt.Println("Watching schemas for changes...") + return sh.RunV("go", "run", "./pkg/schemagen", "-out=./schemas", "-watch") +} + +// Helper functions + +func checkGoVersion() error { + fmt.Println("Checking Go version...") + out, err := sh.Output("go", "version") + if err != nil { + 
fmt.Println("Error: Go is not available") + fmt.Printf("Please ensure Go %s is installed via mise\n", goVersion) + return fmt.Errorf("go not found") + } + parts := strings.Fields(out) + if len(parts) < 3 { + return fmt.Errorf("unexpected go version output: %s", out) + } + installedVersion := strings.TrimPrefix(parts[2], "go") + if !isVersionCompatible(installedVersion, goVersion) { + fmt.Printf("Warning: Go version %s found, but %s is required\n", installedVersion, goVersion) + fmt.Printf("Please update Go to version %s with: mise use go@%s\n", goVersion, goVersion) + return fmt.Errorf("incompatible Go version") + } + fmt.Printf("✓ Go version %s is compatible\n", installedVersion) + return nil +} + +func cleanGoCache() error { + fmt.Println("Cleaning Go build cache for fresh setup...") + _ = sh.Run("go", "clean", "-cache", "-testcache", "-modcache") + fmt.Println("✓ Go cache cleaned") + return nil +} + +func swaggerDeps() error { + fmt.Println("Installing Swagger dependencies...") + if err := sh.RunV("go", "install", "github.com/swaggo/swag/cmd/swag@latest"); err != nil { + return err + } + fmt.Println("Swagger dependencies installation complete.") + return nil +} + +func runTestsInParallel(ctx context.Context, dirs []string, cmdArgs ...string) error { + g, ctx := errgroup.WithContext(ctx) + for _, dir := range dirs { + testDir := dir + g.Go(func() error { + if err := ctx.Err(); err != nil { + return err + } + fmt.Printf("Testing %s...\n", testDir) + pkgPath := "./" + testDir + "/..." 
+			// Clone cmdArgs before appending: this closure runs concurrently for
+			// each directory, and append on a shared slice with spare capacity
+			// would let goroutines overwrite each other's package argument
+			// (latent data race / slice aliasing across workers).
+			args := append(append([]string(nil), cmdArgs...), pkgPath)
+			if err := sh.RunV(args[0], args[1:]...); err != nil {
+				return fmt.Errorf("%s: %w", testDir, err)
+			}
+			return nil
+		})
+	}
+	return g.Wait()
+}
+
+// testMain runs the main-module test suites in parallel and reports duration.
+func testMain(ctx context.Context) error {
+	start := time.Now()
+	fmt.Println("Testing main module...")
+	err := runTestsInParallel(ctx, mainPackages,
+		"gotestsum", "--format", "pkgname", "--", "-race", testParallelFlag)
+	duration := time.Since(start)
+	fmt.Printf("✓ Tests completed in %s\n", duration.Round(time.Second))
+	return err
+}
+
+// testSDK runs the sdk module tests (GO_WORK=off) and then enforces the
+// minimum sdk/compozy coverage threshold via a throwaway cover profile.
+func testSDK(ctx context.Context) error {
+	fmt.Println("Testing sdk module...")
+	if err := sh.RunWithV(map[string]string{"GO_WORK": "off"}, "sh", "-c",
+		"cd sdk && gotestsum --format pkgname -- -race "+testParallelFlag+" ./..."); err != nil {
+		return err
+	}
+	fmt.Printf("Enforcing sdk/compozy coverage >= %.0f%%...\n", minSDKCoverage*100)
+	tmpFile := filepath.Join(os.TempDir(), fmt.Sprintf("sdk-cover-%d.out", time.Now().UnixNano()))
+	defer os.Remove(tmpFile)
+	if err := sh.RunWithV(map[string]string{"GO_WORK": "off"}, "sh", "-c",
+		fmt.Sprintf("cd sdk && go test -coverprofile=%s ./compozy/... 
> /dev/null", tmpFile)); err != nil { + return err + } + coverage, err := readTotalCoverage(tmpFile) + if err != nil { + return err + } + fmt.Printf("sdk/compozy coverage: %.2f%%\n", coverage*100) + if coverage < minSDKCoverage { + return fmt.Errorf("coverage %.2f%% below required %.0f%%", coverage*100, minSDKCoverage*100) + } + return nil +} + +func testBun(ctx context.Context) error { + return sh.RunV("bun", "run", "test") +} + +func testCoverageMain(ctx context.Context) error { + start := time.Now() + fmt.Println("Testing main module with coverage...") + err := runTestsInParallel(ctx, mainPackages, + "gotestsum", "--format", "pkgname", "--", "-race", testParallelFlag, + "-coverprofile=coverage.out", "-covermode=atomic") + duration := time.Since(start) + fmt.Printf("✓ Tests with coverage completed in %s\n", duration.Round(time.Second)) + return err +} + +func testCoverageSDK(ctx context.Context) error { + fmt.Println("Testing sdk module with coverage...") + return sh.RunWithV( + map[string]string{"GO_WORK": "off"}, + "sh", + "-c", + "cd sdk && gotestsum --format pkgname -- -race "+testParallelFlag+" -coverprofile=coverage-sdk.out -covermode=atomic ./...", + ) +} + +func testNoCacheMain(ctx context.Context) error { + start := time.Now() + fmt.Println("Testing main module (no cache)...") + err := runTestsInParallel(ctx, mainPackages, + "gotestsum", "--format", "pkgname", "--", "-race", "-count=1", testParallelFlag) + duration := time.Since(start) + fmt.Printf("✓ Tests (no cache) completed in %s\n", duration.Round(time.Second)) + return err +} + +func testNoCacheSDK(ctx context.Context) error { + fmt.Println("Testing sdk module (no cache)...") + return sh.RunWithV(map[string]string{"GO_WORK": "off"}, "sh", "-c", + "cd sdk && gotestsum --format pkgname -- -race -count=1 "+testParallelFlag+" ./...") +} + +//mage:expose +func lintMain(ctx context.Context) error { + fmt.Println("Linting main module...") + return sh.RunV("golangci-lint", "run", "--fix", 
"--allow-parallel-runners", "./...") +} + +//mage:expose +func lintSDK(ctx context.Context) error { + fmt.Println("Linting sdk module...") + return sh.RunWithV(map[string]string{"GO_WORK": "off"}, "sh", "-c", + "cd sdk && golangci-lint run --fix --allow-parallel-runners ./...") +} + +//mage:expose +func lintBun(ctx context.Context) error { + return sh.RunV("bun", "run", "lint") +} + +func checkDriverImports() error { + fmt.Println("Running static driver import guard...") + return sh.RunV("./scripts/check-driver-imports.sh") +} + +//mage:expose +func fmtMain(ctx context.Context) error { + fmt.Println("Formatting main module...") + return sh.RunV("golangci-lint", "fmt") +} + +//mage:expose +func fmtSDK(ctx context.Context) error { + fmt.Println("Formatting sdk module...") + return sh.RunWithV(map[string]string{"GO_WORK": "off"}, "sh", "-c", "cd sdk && golangci-lint fmt") +} + +//mage:expose +func fmtBun(ctx context.Context) error { + return sh.RunV("bun", "run", "format") +} + +//mage:expose +func typecheckMain(ctx context.Context) error { + fmt.Println("Type checking main module...") + return sh.RunV("go", "vet", "./...") +} + +//mage:expose +func typecheckSDK(ctx context.Context) error { + fmt.Println("Type checking sdk module...") + return sh.RunWithV(map[string]string{"GO_WORK": "off"}, "sh", "-c", "cd sdk && go vet ./...") +} + +//mage:expose +func modernizeMain(ctx context.Context) error { + fmt.Println("Modernizing main module...") + return sh.RunV( + "go", + "run", + "golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest", + "-fix", + "./...", + ) +} + +//mage:expose +func modernizeSDK(ctx context.Context) error { + fmt.Println("Modernizing sdk module...") + return sh.RunWithV(map[string]string{"GO_WORK": "off"}, "sh", "-c", + "cd sdk && go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix ./...") +} + +func runGoose(args ...string) error { + dbUser := getEnv("DB_USER", "postgres") + dbPassword := 
getEnv("DB_PASSWORD", "postgres")
+	dbHost := getEnv("DB_HOST", "localhost")
+	dbPort := getEnv("DB_PORT", "5432")
+	dbName := getEnv("DB_NAME", "compozy")
+	dbString := fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=disable", dbUser, dbPassword, dbHost, dbPort, dbName)
+	env := map[string]string{
+		"GOOSE_DRIVER":   "postgres",
+		"GOOSE_DBSTRING": dbString,
+	}
+	fullArgs := append([]string{"-dir", "./engine/infra/postgres/migrations"}, args...)
+	return sh.RunWithV(env, "goose", fullArgs...)
+}
+
+// swaggerNeedsRebuild reports whether swagger docs must be regenerated.
+// NOTE(review): placeholder — it unconditionally returns true, so every
+// invocation regenerates the docs; presumably a source-vs-output mtime
+// comparison was intended here. TODO confirm this is deliberate.
+func swaggerNeedsRebuild() (bool, error) {
+	return true, nil
+}
+
+// filterSwaggerWarnings strips known-noisy swag generator lines (const
+// evaluation, reflect, ParseUint warnings) and blank lines from output,
+// returning the remaining lines joined with newlines.
+func filterSwaggerWarnings(output string) string {
+	lines := strings.Split(output, "\n")
+	var filtered []string
+	for _, line := range lines {
+		if strings.Contains(line, "warning: failed to evaluate const") ||
+			strings.Contains(line, "reflect: call of reflect.Value") ||
+			strings.Contains(line, "strconv.ParseUint: parsing") {
+			continue
+		}
+		if strings.TrimSpace(line) != "" {
+			filtered = append(filtered, line)
+		}
+	}
+	return strings.Join(filtered, "\n")
+}
+
+// readTotalCoverage runs `go tool cover -func` on the profile at path and
+// returns the final "total:" percentage as a fraction in [0, 1].
+func readTotalCoverage(path string) (float64, error) {
+	cmd := exec.Command("go", "tool", "cover", "-func="+path)
+	output, err := cmd.Output()
+	if err != nil {
+		return 0, err
+	}
+	scanner := bufio.NewScanner(bytes.NewReader(output))
+	var totalLine string
+	// Keep the last "total:" line seen; cover output places it at the end.
+	for scanner.Scan() {
+		line := scanner.Text()
+		if strings.HasPrefix(line, "total:") {
+			totalLine = line
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return 0, err
+	}
+	if totalLine == "" {
+		return 0, fmt.Errorf("total coverage not found in %s", path)
+	}
+	fields := strings.Fields(totalLine)
+	if len(fields) == 0 {
+		return 0, fmt.Errorf("invalid coverage summary: %s", totalLine)
+	}
+	// The percentage is the last whitespace-separated field, e.g. "87.5%".
+	valueStr := strings.TrimSuffix(fields[len(fields)-1], "%")
+	value, err := strconv.ParseFloat(valueStr, 64)
+	if err != nil {
+		return 0, fmt.Errorf("parse coverage value: %w", err)
+	}
+	return value / 100, nil
+}
+
+// getGitCommit returns the short HEAD hash, or "unknown" on failure.
+func getGitCommit() string {
+	out, err := sh.Output("git", "rev-parse", 
"--short", "HEAD") + if err != nil { + return "unknown" + } + return strings.TrimSpace(out) +} + +func getVersion() string { + out, err := sh.Output("git", "describe", "--tags", "--match=v*", "--always") + if err != nil { + return "unknown" + } + return strings.TrimSpace(out) +} + +func getEnv(key, defaultValue string) string { + if value := os.Getenv(key); value != "" { + return value + } + return defaultValue +} + +func isVersionCompatible(installed, required string) bool { + installedParts := strings.Split(installed, ".") + requiredParts := strings.Split(required, ".") + for i := 0; i < len(requiredParts) && i < len(installedParts); i++ { + installedNum, err1 := strconv.Atoi(installedParts[i]) + requiredNum, err2 := strconv.Atoi(requiredParts[i]) + if err1 != nil || err2 != nil { + continue + } + if installedNum < requiredNum { + return false + } + if installedNum > requiredNum { + return true + } + } + return true +} + +// Help displays available targets +func Help() { + fmt.Println("Compozy Mage Targets") + fmt.Println("") + fmt.Println("Setup & Build:") + fmt.Println(" mage setup - Complete setup with Go version check and dependencies") + fmt.Println(" mage deps - Install all required dependencies") + fmt.Println(" mage build - Build the compozy binary") + fmt.Println(" mage clean - Clean build artifacts") + fmt.Println("") + fmt.Println("Development:") + fmt.Println(" mage dev - Run in development mode with hot reload") + fmt.Println(" mage test - Run all tests (main + sdk + bun) in parallel") + fmt.Println(" mage testCoverage - Run all tests with coverage") + fmt.Println(" mage testNoCache - Run all tests without cache") + fmt.Println("") + fmt.Println("Code Quality (parallel execution):") + fmt.Println(" mage quality:lint - Run all linters in parallel") + fmt.Println(" mage quality:fmt - Format all code in parallel") + fmt.Println(" mage quality:typecheck - Type check all modules") + fmt.Println(" mage quality:modernize - Modernize code patterns") + 
fmt.Println("") + fmt.Println("Docker & Database:") + fmt.Println(" mage docker:start - Start Docker services") + fmt.Println(" mage docker:stop - Stop Docker services") + fmt.Println(" mage docker:reset - Reset Docker environment") + fmt.Println(" mage database:up - Run database migrations") + fmt.Println(" mage database:down - Rollback last migration") + fmt.Println("") + fmt.Println("Other:") + fmt.Println(" mage all - Run all checks (tests, lint, format)") + fmt.Println(" mage swagger - Generate Swagger documentation") + fmt.Println(" mage schema:generate - Generate JSON schemas") + fmt.Println(" mage -l - List all available targets") + fmt.Println("") + fmt.Println("Requirements:") + fmt.Printf(" Go %s or later (via mise)\n", goVersion) + fmt.Println(" Bun (see https://bun.sh)") + fmt.Println(" Docker & Docker Compose") + fmt.Println("") + fmt.Println("Performance:") + fmt.Println(" Tests run ~2-3x faster (parallel execution)") + fmt.Println(" Linting runs ~1.5-2x faster (parallel execution)") + fmt.Println(" Smart caching skips unnecessary rebuilds") + fmt.Println("") + fmt.Printf("Platform: %s/%s\n", runtime.GOOS, runtime.GOARCH) +} diff --git a/pkg/config/loader.go b/pkg/config/loader.go index 4eb1784f..ce3d37aa 100644 --- a/pkg/config/loader.go +++ b/pkg/config/loader.go @@ -507,7 +507,7 @@ func validateStandaloneStartTimeout(standalone *StandaloneConfig) error { func validateRedis(cfg *Config) error { // Validate component mode values via struct tags; add friendly errors for clarity. 
switch strings.TrimSpace(cfg.Redis.Mode) { - case "", mcpProxyModeStandalone, "distributed": + case "", mcpProxyModeStandalone, ModeDistributed: // ok default: return fmt.Errorf( diff --git a/scripts/markdown/check.go b/scripts/markdown/check.go index 229d195d..726c49ce 100644 --- a/scripts/markdown/check.go +++ b/scripts/markdown/check.go @@ -26,7 +26,7 @@ import ( tea "github.com/charmbracelet/bubbletea" "github.com/charmbracelet/huh" "github.com/charmbracelet/lipgloss" - "github.com/sethvargo/go-retry" + "github.com/looplab/fsm" "github.com/spf13/cobra" "github.com/tidwall/pretty" ) @@ -222,8 +222,8 @@ func setupFlags() { rootCmd.Flags().IntVar( &maxRetries, "max-retries", - 3, - "Maximum number of retry attempts on timeout (0 = no retry, default: 3)", + 50, + "Maximum number of retry attempts on timeout (0 = no retry, default: 50)", ) rootCmd.Flags().Float64Var( &retryBackoffMultiplier, @@ -660,7 +660,7 @@ func (c *cliArgs) validate() error { var errNoIssues = errors.New("no issues to process") func executeSolveIssues(ctx context.Context, args *cliArgs) error { - prepared, err := prepareSolveIssues(args) + prepared, err := prepareSolveIssues(ctx, args) if err != nil { if errors.Is(err, errNoIssues) { return nil @@ -706,7 +706,7 @@ func validateAndFilterEntries(entries []issueEntry, mode executionMode) ([]issue return entries, nil } -func prepareSolveIssues(args *cliArgs) (*solvePreparation, error) { +func prepareSolveIssues(ctx context.Context, args *cliArgs) (*solvePreparation, error) { prep := &solvePreparation{} var err error prep.resolvedPr, prep.issuesDir, prep.issuesDirPath, err = resolveInputs(args) @@ -725,20 +725,16 @@ func prepareSolveIssues(args *cliArgs) (*solvePreparation, error) { return nil, err } groups := groupIssues(entries) - if args.grouped { - if err := writeSummaries(prep.issuesDirPath, groups); err != nil { - return nil, err - } - prep.groupedSummarized = true - } promptRoot, err := initPromptRoot(prep.resolvedPr) if err != nil { return 
nil, err } - prep.jobs, err = prepareJobs( + prep.jobs, prep.groupedSummarized, err = prepareJobs( + ctx, prep.resolvedPr, groups, promptRoot, + prep.issuesDirPath, args.batchSize, args.grouped, executionMode(args.mode), @@ -767,6 +763,61 @@ type failInfo struct { err error } +type jobPhase string + +const ( + jobPhaseQueued jobPhase = "queued" + jobPhaseScheduled jobPhase = "scheduled" + jobPhaseRunning jobPhase = "running" + jobPhaseRetrying jobPhase = "retrying" + jobPhaseSucceeded jobPhase = "succeeded" + jobPhaseFailed jobPhase = "failed" + jobPhaseCanceled jobPhase = "canceled" +) + +type jobEvent string + +const ( + eventSchedule jobEvent = "schedule" + eventStart jobEvent = "start" + eventRetry jobEvent = "retry" + eventSuccess jobEvent = "success" + eventGiveUp jobEvent = "give_up" + eventCancel jobEvent = "cancel" +) + +type jobAttemptStatus string + +const ( + attemptStatusSuccess jobAttemptStatus = "success" + attemptStatusFailure jobAttemptStatus = "failure" + attemptStatusTimeout jobAttemptStatus = "timeout" + attemptStatusCanceled jobAttemptStatus = "canceled" + attemptStatusSetupFailed jobAttemptStatus = "setup_failed" +) + +type jobAttemptResult struct { + status jobAttemptStatus + exitCode int + failure *failInfo +} + +func (r jobAttemptResult) Successful() bool { + return r.status == attemptStatusSuccess +} + +func (r jobAttemptResult) NeedsRetry() bool { + return r.status == attemptStatusFailure || r.status == attemptStatusTimeout +} + +func (r jobAttemptResult) IsTimeout() bool { + return r.status == attemptStatusTimeout +} + +func (r jobAttemptResult) IsCanceled() bool { + return r.status == attemptStatusCanceled +} + func resolveInputs(args *cliArgs) (string, string, string, error) { pr := args.pr issuesDir := args.issuesDir @@ -832,32 +883,218 @@ func initPromptRoot(pr string) (string, error) { return promptRoot, nil } +type preparationState string + +const ( + prepStateCollect preparationState = "collect_entries" + prepStateGroup 
preparationState = "group_entries" + prepStateWriteGrouped preparationState = "write_grouped" + prepStateBatch preparationState = "batch_jobs" + prepStateFinalize preparationState = "finalize" + prepStateCompleted preparationState = "completed" + prepStateFailed preparationState = "failed" +) + +type preparationEvent string + +const ( + prepEventCollected preparationEvent = "collected" + prepEventGrouped preparationEvent = "grouped" + prepEventWriteSkipped preparationEvent = "write_skipped" + prepEventWritten preparationEvent = "write_done" + prepEventBatched preparationEvent = "batched" + prepEventFinalized preparationEvent = "finalized" + prepEventFailed preparationEvent = "failed" +) + +// promptPreparationConfig carries immutable parameters for the prompt FSM. +type promptPreparationConfig struct { + ctx context.Context + pr string + groups map[string][]issueEntry + promptRoot string + issuesDir string + batchSize int + grouped bool + mode executionMode +} + +// promptPreparationFSM orchestrates artifact preparation with explicit stages. 
+type promptPreparationFSM struct { + cfg promptPreparationConfig + fsm *fsm.FSM + collected []issueEntry + batches [][]issueEntry + jobs []job + groupedWritten bool + lastErr error +} + +func newPromptPreparationFSM(cfg *promptPreparationConfig) *promptPreparationFSM { + if cfg == nil { + cfg = &promptPreparationConfig{} + } + f := &promptPreparationFSM{cfg: *cfg} + if f.cfg.mode == ExecutionModePRDTasks { + f.cfg.batchSize = 1 + f.cfg.grouped = false + } + if f.cfg.batchSize <= 0 { + f.cfg.batchSize = 1 + } + f.fsm = fsm.NewFSM( + string(prepStateCollect), + fsm.Events{ + {Name: string(prepEventCollected), Src: []string{string(prepStateCollect)}, Dst: string(prepStateGroup)}, + {Name: string(prepEventGrouped), Src: []string{string(prepStateGroup)}, Dst: string(prepStateWriteGrouped)}, + { + Name: string(prepEventWriteSkipped), + Src: []string{string(prepStateWriteGrouped)}, + Dst: string(prepStateBatch), + }, + {Name: string(prepEventWritten), Src: []string{string(prepStateWriteGrouped)}, Dst: string(prepStateBatch)}, + {Name: string(prepEventBatched), Src: []string{string(prepStateBatch)}, Dst: string(prepStateFinalize)}, + { + Name: string(prepEventFinalized), + Src: []string{string(prepStateFinalize)}, + Dst: string(prepStateCompleted), + }, + { + Name: string(prepEventFailed), + Src: []string{ + string(prepStateCollect), + string(prepStateGroup), + string(prepStateWriteGrouped), + string(prepStateBatch), + string(prepStateFinalize), + }, + Dst: string(prepStateFailed), + }, + }, + fsm.Callbacks{ + "enter_" + string(prepStateFailed): f.onEnterFailed, + "enter_" + string(prepStateCompleted): f.onEnterCompleted, + }, + ) + return f +} + +func (p *promptPreparationFSM) Run() ([]job, bool, error) { + steps := []func() error{ + p.collectEntries, + p.groupEntries, + p.writeGroupedSummaries, + p.batchJobs, + p.finalize, + } + for _, step := range steps { + if err := step(); err != nil { + return nil, p.groupedWritten, err + } + if p.lastErr != nil { + return nil, 
p.groupedWritten, p.lastErr + } + } + return p.jobs, p.groupedWritten, nil +} + +func (p *promptPreparationFSM) collectEntries() error { + p.collected = flattenAndSortIssues(p.cfg.groups, p.cfg.mode) + return p.transition(prepEventCollected) +} + +func (p *promptPreparationFSM) groupEntries() error { + p.batches = createIssueBatches(p.collected, p.cfg.batchSize) + if len(p.batches) == 0 { + return p.fail(fmt.Errorf("no batches created for prompt preparation")) + } + return p.transition(prepEventGrouped) +} + +func (p *promptPreparationFSM) writeGroupedSummaries() error { + if !p.cfg.grouped { + return p.transition(prepEventWriteSkipped) + } + if err := writeSummaries(p.cfg.issuesDir, p.cfg.groups); err != nil { + return p.fail(fmt.Errorf("write grouped summaries: %w", err)) + } + p.groupedWritten = true + return p.transition(prepEventWritten) +} + +func (p *promptPreparationFSM) batchJobs() error { + jobs := make([]job, 0, len(p.batches)) + for idx, batchIssues := range p.batches { + jb, err := buildBatchJob(p.cfg.pr, p.cfg.promptRoot, p.cfg.grouped, idx, batchIssues, p.cfg.mode) + if err != nil { + return p.fail(err) + } + jobs = append(jobs, jb) + } + p.jobs = jobs + return p.transition(prepEventBatched) +} + +func (p *promptPreparationFSM) finalize() error { + if len(p.jobs) == 0 { + return p.fail(errors.New("no jobs finalized")) + } + return p.transition(prepEventFinalized) +} + +func (p *promptPreparationFSM) transition(evt preparationEvent) error { + if err := p.fsm.Event(p.cfg.ctx, string(evt)); err != nil { + var inTransitionErr fsm.InTransitionError + var noTransitionErr fsm.NoTransitionError + if errors.As(err, &inTransitionErr) || errors.As(err, &noTransitionErr) { + return nil + } + return fmt.Errorf("prompt preparation transition %s failed: %w", evt, err) + } + return nil +} + +func (p *promptPreparationFSM) fail(err error) error { + p.lastErr = err + if transErr := p.transition(prepEventFailed); transErr != nil { + return fmt.Errorf("propagate 
failure: %w", transErr) + } + return err +} + +func (p *promptPreparationFSM) onEnterFailed(_ context.Context, _ *fsm.Event) { + if p.lastErr == nil { + p.lastErr = errors.New("prompt preparation failed") + } +} + +func (p *promptPreparationFSM) onEnterCompleted(_ context.Context, _ *fsm.Event) {} + func prepareJobs( + ctx context.Context, pr string, groups map[string][]issueEntry, promptRoot string, + issuesDir string, batchSize int, grouped bool, mode executionMode, -) ([]job, error) { - if mode == ExecutionModePRDTasks { - batchSize = 1 - grouped = false - } - allIssues := flattenAndSortIssues(groups, mode) - if batchSize <= 0 { - batchSize = 1 - } - issueBatches := createIssueBatches(allIssues, batchSize) - jobs := make([]job, 0, len(issueBatches)) - for batchIdx, batchIssues := range issueBatches { - jb, err := buildBatchJob(pr, promptRoot, grouped, batchIdx, batchIssues, mode) - if err != nil { - return nil, err - } - jobs = append(jobs, jb) +) ([]job, bool, error) { + pipeline := newPromptPreparationFSM(&promptPreparationConfig{ + ctx: ctx, + pr: pr, + groups: groups, + promptRoot: promptRoot, + issuesDir: issuesDir, + batchSize: batchSize, + grouped: grouped, + mode: mode, + }) + jobs, groupedWritten, err := pipeline.Run() + if err != nil { + return nil, groupedWritten, err } - return jobs, nil + return jobs, groupedWritten, nil } // buildBatchJob converts a batch of issues into an executable job definition. 
@@ -977,6 +1214,7 @@ func executeJobsWithGracefulShutdown(ctx context.Context, jobs []job, args *cliA return 0, []failInfo{{err: err}}, total, nil } defer execCtx.cleanup() + execCtx.lifecycle = newExecutorLifecycle(ctx, execCtx) _, cancelJobs := execCtx.launchWorkers(ctx) defer cancelJobs() done := execCtx.waitChannel() @@ -998,6 +1236,466 @@ type jobExecutionContext struct { failuresMu sync.Mutex completed int32 wg sync.WaitGroup + lifecycle *executorLifecycle +} + +type executorState string + +const ( + executorStateInitializing executorState = "initializing" + executorStateRunning executorState = "running" + executorStateDraining executorState = "draining" + executorStateShutdown executorState = "shutdown" + executorStateTerminated executorState = "terminated" +) + +type executorEvent string + +const ( + executorEventJobsReady executorEvent = "jobs_ready" + executorEventRunComplete executorEvent = "run_complete" + executorEventCancelSignal executorEvent = "cancel_signal" + executorEventDrainComplete executorEvent = "drain_complete" + executorEventTimeoutExpired executorEvent = "timeout_expired" + executorEventShutdownDone executorEvent = "shutdown_done" +) + +// executorLifecycle coordinates executor state transitions via an FSM. 
+type executorLifecycle struct { + ctx context.Context + execCtx *jobExecutionContext + cancelJobs context.CancelFunc + done <-chan struct{} + fsm *fsm.FSM +} + +func newExecutorLifecycle(ctx context.Context, execCtx *jobExecutionContext) *executorLifecycle { + lc := &executorLifecycle{ctx: ctx, execCtx: execCtx} + lc.fsm = fsm.NewFSM( + string(executorStateInitializing), + fsm.Events{ + { + Name: string(executorEventJobsReady), + Src: []string{string(executorStateInitializing)}, + Dst: string(executorStateRunning), + }, + { + Name: string(executorEventRunComplete), + Src: []string{string(executorStateRunning)}, + Dst: string(executorStateShutdown), + }, + { + Name: string(executorEventCancelSignal), + Src: []string{string(executorStateRunning)}, + Dst: string(executorStateDraining), + }, + { + Name: string(executorEventDrainComplete), + Src: []string{string(executorStateDraining)}, + Dst: string(executorStateShutdown), + }, + { + Name: string(executorEventTimeoutExpired), + Src: []string{string(executorStateDraining)}, + Dst: string(executorStateTerminated), + }, + { + Name: string(executorEventShutdownDone), + Src: []string{string(executorStateShutdown)}, + Dst: string(executorStateTerminated), + }, + }, + fsm.Callbacks{ + "enter_" + string(executorStateShutdown): lc.onEnterShutdown, + }, + ) + return lc +} + +func (e *executorLifecycle) markJobsReady(cancel context.CancelFunc, done <-chan struct{}) error { + e.cancelJobs = cancel + e.done = done + return e.transition(executorEventJobsReady) +} + +func (e *executorLifecycle) awaitCompletion() (int32, []failInfo, int, error) { + if e.done == nil { + return e.resultWithError(fmt.Errorf("executor lifecycle not initialized")) + } + select { + case <-e.done: + if err := e.transition(executorEventRunComplete); err != nil { + return e.resultWithError(err) + } + if err := e.transition(executorEventShutdownDone); err != nil { + return e.resultWithError(err) + } + return e.resultWithError(nil) + case <-e.ctx.Done(): + 
fmt.Fprintf( + os.Stderr, + "\nReceived shutdown signal while executor in %s state; requesting drain...\n", + e.fsm.Current(), + ) + if err := e.transition(executorEventCancelSignal); err != nil { + return e.resultWithError(err) + } + if e.cancelJobs != nil { + e.cancelJobs() + } + return e.awaitShutdownAfterCancel() + } +} + +func (e *executorLifecycle) awaitShutdownAfterCancel() (int32, []failInfo, int, error) { + shutdownCtx, shutdownCancel := context.WithTimeout(context.WithoutCancel(e.ctx), gracefulShutdownTimeout) + defer shutdownCancel() + select { + case <-e.done: + fmt.Fprintf(os.Stderr, "All jobs completed gracefully within %v while draining\n", gracefulShutdownTimeout) + if err := e.transition(executorEventDrainComplete); err != nil { + return e.resultWithError(err) + } + if err := e.transition(executorEventShutdownDone); err != nil { + return e.resultWithError(err) + } + return e.resultWithError(nil) + case <-shutdownCtx.Done(): + fmt.Fprintf(os.Stderr, "Shutdown timeout exceeded (%v), forcing exit\n", gracefulShutdownTimeout) + if err := e.transition(executorEventTimeoutExpired); err != nil { + return e.resultWithError(err) + } + return e.resultWithError(fmt.Errorf("shutdown timeout exceeded")) + } +} + +func (e *executorLifecycle) transition(evt executorEvent) error { + if err := e.fsm.Event(e.ctx, string(evt)); err != nil { + var inTransitionErr fsm.InTransitionError + var noTransitionErr fsm.NoTransitionError + if errors.As(err, &inTransitionErr) || errors.As(err, &noTransitionErr) { + return nil + } + return fmt.Errorf("executor transition %s failed: %w", evt, err) + } + return nil +} + +func (e *executorLifecycle) resultWithError(err error) (int32, []failInfo, int, error) { + failed := atomic.LoadInt32(&e.execCtx.failed) + return failed, e.execCtx.failures, e.execCtx.total, err +} + +func (e *executorLifecycle) onEnterShutdown(_ context.Context, _ *fsm.Event) { + e.execCtx.reportAggregateUsage() +} + +type jobLifecycle struct { + ctx 
context.Context + index int + job *job + execCtx *jobExecutionContext + fsm *fsm.FSM + attempt int + currentTimeout time.Duration + lastExitCode int + lastFailure *failInfo +} + +func newJobLifecycle(ctx context.Context, index int, jb *job, execCtx *jobExecutionContext) *jobLifecycle { + l := &jobLifecycle{ + ctx: ctx, + index: index, + job: jb, + execCtx: execCtx, + } + l.fsm = fsm.NewFSM( + string(jobPhaseQueued), + fsm.Events{ + {Name: string(eventSchedule), Src: []string{string(jobPhaseQueued)}, Dst: string(jobPhaseScheduled)}, + { + Name: string(eventStart), + Src: []string{ + string(jobPhaseScheduled), + string(jobPhaseRetrying), + }, + Dst: string(jobPhaseRunning), + }, + {Name: string(eventRetry), Src: []string{string(jobPhaseRunning)}, Dst: string(jobPhaseRetrying)}, + {Name: string(eventSuccess), Src: []string{string(jobPhaseRunning)}, Dst: string(jobPhaseSucceeded)}, + { + Name: string(eventGiveUp), + Src: []string{ + string(jobPhaseRunning), + string(jobPhaseRetrying), + }, + Dst: string(jobPhaseFailed), + }, + { + Name: string(eventCancel), + Src: []string{ + string(jobPhaseQueued), + string(jobPhaseScheduled), + string(jobPhaseRunning), + string(jobPhaseRetrying), + }, + Dst: string(jobPhaseCanceled), + }, + }, + fsm.Callbacks{ + "enter_" + string(jobPhaseRunning): l.onEnterRunning, + "enter_" + string(jobPhaseRetrying): l.onEnterRetrying, + "enter_" + string(jobPhaseSucceeded): l.onEnterSucceeded, + "enter_" + string(jobPhaseFailed): l.onEnterFailed, + "enter_" + string(jobPhaseCanceled): l.onEnterCanceled, + }, + ) + return l +} + +func (l *jobLifecycle) schedule() { + l.transition(eventSchedule) +} + +func (l *jobLifecycle) startAttempt(attempt int, timeout time.Duration) { + l.attempt = attempt + l.currentTimeout = timeout + l.transition(eventStart) +} + +func (l *jobLifecycle) markRetry(failure failInfo) { + l.lastFailure = &failure + l.lastExitCode = failure.exitCode + l.transition(eventRetry) +} + +func (l *jobLifecycle) markGiveUp(failure 
failInfo) { + l.lastFailure = &failure + l.lastExitCode = failure.exitCode + l.transition(eventGiveUp) +} + +func (l *jobLifecycle) markSuccess() { + l.lastFailure = nil + l.lastExitCode = 0 + l.transition(eventSuccess) +} + +func (l *jobLifecycle) markCanceled(exitCode int) { + l.lastExitCode = exitCode + if exitCode == exitCodeCanceled { + l.lastFailure = &failInfo{ + codeFile: strings.Join(l.job.codeFiles, ", "), + exitCode: exitCodeCanceled, + outLog: l.job.outLog, + errLog: l.job.errLog, + err: fmt.Errorf("job canceled by shutdown"), + } + } else { + l.lastFailure = nil + } + l.transition(eventCancel) +} + +func (l *jobLifecycle) transition(evt jobEvent) { + if err := l.fsm.Event(l.ctx, string(evt)); err != nil { + var inTransitionErr fsm.InTransitionError + var noTransitionErr fsm.NoTransitionError + if errors.As(err, &inTransitionErr) || errors.As(err, &noTransitionErr) { + return + } + fmt.Fprintf(os.Stderr, "job %d transition %s failed: %v\n", l.index+1, evt, err) + } +} + +func (l *jobLifecycle) onEnterRunning(_ context.Context, _ *fsm.Event) { + useUI := l.execCtx.uiCh != nil + if l.attempt == 1 { + notifyJobStart( + useUI, + l.execCtx.uiCh, + l.index, + l.job, + l.execCtx.args.ide, + l.execCtx.args.model, + l.execCtx.args.reasoningEffort, + ) + return + } + if useUI { + l.execCtx.uiCh <- jobStartedMsg{Index: l.index} + } +} + +func (l *jobLifecycle) onEnterRetrying(_ context.Context, _ *fsm.Event) { +} + +func (l *jobLifecycle) onEnterSucceeded(_ context.Context, _ *fsm.Event) { + if l.execCtx.uiCh != nil { + l.execCtx.uiCh <- jobFinishedMsg{Index: l.index, Success: true, ExitCode: 0} + } +} + +func (l *jobLifecycle) onEnterFailed(_ context.Context, _ *fsm.Event) { + if l.lastFailure != nil { + recordFailure(&l.execCtx.failuresMu, &l.execCtx.failures, *l.lastFailure) + } + atomic.AddInt32(&l.execCtx.failed, 1) + if l.execCtx.uiCh != nil { + l.execCtx.uiCh <- jobFinishedMsg{Index: l.index, Success: false, ExitCode: l.lastExitCode} + if l.lastFailure != 
nil { + l.execCtx.uiCh <- jobFailureMsg{Failure: *l.lastFailure} + } + } else if l.lastFailure != nil { + fmt.Fprintf( + os.Stderr, + "\n❌ Job %d (%s) failed with exit code %d: %v\n", + l.index+1, + strings.Join(l.job.codeFiles, ", "), + l.lastExitCode, + l.lastFailure.err, + ) + } +} + +func (l *jobLifecycle) onEnterCanceled(_ context.Context, _ *fsm.Event) { + if l.lastFailure != nil { + recordFailure(&l.execCtx.failuresMu, &l.execCtx.failures, *l.lastFailure) + } + atomic.AddInt32(&l.execCtx.failed, 1) + if l.execCtx.uiCh != nil { + l.execCtx.uiCh <- jobFinishedMsg{Index: l.index, Success: false, ExitCode: exitCodeCanceled} + if l.lastFailure != nil { + l.execCtx.uiCh <- jobFailureMsg{Failure: *l.lastFailure} + } + } else if l.lastFailure != nil { + fmt.Fprintf( + os.Stderr, + "\n⚠️ Job %d (%s) canceled: %v\n", + l.index+1, + strings.Join(l.job.codeFiles, ", "), + l.lastFailure.err, + ) + } +} + +type jobRunner struct { + index int + job *job + execCtx *jobExecutionContext + lifecycle *jobLifecycle +} + +func newJobRunner(ctx context.Context, index int, jb *job, execCtx *jobExecutionContext) *jobRunner { + return &jobRunner{ + index: index, + job: jb, + execCtx: execCtx, + lifecycle: newJobLifecycle(ctx, index, jb, execCtx), + } +} + +func (r *jobRunner) run(ctx context.Context) { + r.lifecycle.schedule() + if r.execCtx.args.dryRun { + r.lifecycle.markSuccess() + return + } + attempts := maxInt(1, r.execCtx.args.maxRetries+1) + timeout := r.execCtx.args.timeout + for attempt := 1; attempt <= attempts; attempt++ { + if ctx.Err() != nil { + r.lifecycle.markCanceled(exitCodeCanceled) + return + } + r.lifecycle.startAttempt(attempt, timeout) + result := r.executeAttempt(ctx, timeout) + nextTimeout, continueLoop := r.handleResult(attempt, attempts, timeout, result) + if !continueLoop { + return + } + timeout = nextTimeout + } +} + +func (r *jobRunner) handleResult( + attempt int, + attempts int, + timeout time.Duration, + result jobAttemptResult, +) (time.Duration, 
bool) { + if result.Successful() { + r.lifecycle.markSuccess() + return timeout, false + } + if result.IsCanceled() { + r.lifecycle.markCanceled(result.exitCode) + return timeout, false + } + if !result.NeedsRetry() || attempt == attempts { + r.lifecycle.markGiveUp(r.ensureFailure(result, "job failed")) + return timeout, false + } + nextTimeout := r.nextTimeout(timeout) + r.lifecycle.markRetry(r.ensureFailure(result, "retrying job")) + r.logRetry(attempt, attempts-1, nextTimeout) + return nextTimeout, true +} + +func (r *jobRunner) ensureFailure(result jobAttemptResult, fallback string) failInfo { + if result.failure != nil { + return *result.failure + } + return failInfo{ + codeFile: strings.Join(r.job.codeFiles, ", "), + exitCode: result.exitCode, + outLog: r.job.outLog, + errLog: r.job.errLog, + err: fmt.Errorf("%s", fallback), + } +} + +func (r *jobRunner) executeAttempt(ctx context.Context, timeout time.Duration) jobAttemptResult { + return executeJobWithTimeout( + ctx, + r.execCtx.args, + r.job, + r.execCtx.cwd, + r.execCtx.uiCh != nil, + r.execCtx.uiCh, + r.index, + timeout, + &r.execCtx.aggregateUsage, + &r.execCtx.aggregateMu, + ) +} + +func (r *jobRunner) nextTimeout(current time.Duration) time.Duration { + if current <= 0 { + return current + } + next := time.Duration(float64(current) * r.execCtx.args.retryBackoffMultiplier) + const maxTimeout = 30 * time.Minute + if next > maxTimeout { + return maxTimeout + } + return next +} + +func (r *jobRunner) logRetry(attempt int, maxRetries int, timeout time.Duration) { + if r.execCtx.uiCh != nil { + return + } + fmt.Fprintf( + os.Stderr, + "\n🔄 [%s] Job %d (%s) retry attempt %d/%d with timeout %v\n", + time.Now().Format("15:04:05"), + r.index+1, + strings.Join(r.job.codeFiles, ", "), + attempt, + maxRetries, + timeout, + ) } func newJobExecutionContext(ctx context.Context, jobs []job, args *cliArgs) (*jobExecutionContext, error) { @@ -1041,19 +1739,7 @@ func (j *jobExecutionContext) executeJob(jobCtx 
context.Context, index int, jb * j.wg.Done() atomic.AddInt32(&j.completed, 1) }() - runOneJob( - jobCtx, - j.args, - index, - jb, - j.cwd, - j.uiCh, - &j.failed, - &j.failuresMu, - &j.failures, - &j.aggregateUsage, - &j.aggregateMu, - ) + newJobRunner(jobCtx, index, jb, j).run(jobCtx) } func (j *jobExecutionContext) waitChannel() <-chan struct{} { @@ -1070,15 +1756,14 @@ func (j *jobExecutionContext) awaitCompletion( done <-chan struct{}, cancelJobs context.CancelFunc, ) (int32, []failInfo, int, error) { - select { - case <-done: - j.reportAggregateUsage() - return j.failed, j.failures, j.total, nil - case <-ctx.Done(): - fmt.Fprintf(os.Stderr, "\nReceived shutdown signal, canceling remaining jobs...\n") + if j.lifecycle == nil { + j.lifecycle = newExecutorLifecycle(ctx, j) + } + if err := j.lifecycle.markJobsReady(cancelJobs, done); err != nil { cancelJobs() - return j.awaitShutdownAfterCancel(done) + return j.lifecycle.resultWithError(err) } + return j.lifecycle.awaitCompletion() } func (j *jobExecutionContext) reportAggregateUsage() { @@ -1090,21 +1775,6 @@ func (j *jobExecutionContext) reportAggregateUsage() { printAggregateTokenUsage(&j.aggregateUsage) } -func (j *jobExecutionContext) awaitShutdownAfterCancel(done <-chan struct{}) (int32, []failInfo, int, error) { - shutdownTimeout := gracefulShutdownTimeout - shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), shutdownTimeout) - defer shutdownCancel() - select { - case <-done: - fmt.Fprintf(os.Stderr, "All jobs completed gracefully within %v\n", shutdownTimeout) - j.reportAggregateUsage() - return j.failed, j.failures, j.total, nil - case <-shutdownCtx.Done(): - fmt.Fprintf(os.Stderr, "Shutdown timeout exceeded (%v), forcing exit\n", shutdownTimeout) - return j.failed, j.failures, j.total, fmt.Errorf("shutdown timeout exceeded") - } -} - func setupUI( ctx context.Context, jobs []job, @@ -1116,7 +1786,7 @@ func setupUI( } total := len(jobs) uiCh := make(chan uiMsg, total*4) - mdl := 
newUIModel(total) + mdl := newUIModel(ctx, total) mdl.setEventSource(uiCh) prog := tea.NewProgram(mdl, tea.WithAltScreen()) go func() { @@ -1151,40 +1821,7 @@ func setupUI( return uiCh, prog } -func runOneJob( - ctx context.Context, - args *cliArgs, - index int, - j *job, - cwd string, - uiCh chan uiMsg, - failed *int32, - failuresMu *sync.Mutex, - failures *[]failInfo, - aggregateUsage *TokenUsage, - aggregateMu *sync.Mutex, -) { - useUI := uiCh != nil - if ctx.Err() != nil { - if useUI { - uiCh <- jobFinishedMsg{Index: index, Success: false, ExitCode: exitCodeCanceled} - } - return - } - notifyJobStart(useUI, uiCh, index, j, args.ide, args.model, args.reasoningEffort) - if args.dryRun { - if useUI { - uiCh <- jobFinishedMsg{Index: index, Success: true, ExitCode: 0} - } - return - } - executeJobWithRetry( - ctx, args, j, cwd, useUI, uiCh, index, - failed, failuresMu, failures, aggregateUsage, aggregateMu, - ) -} - -func executeJobWithRetry( +func executeJobWithTimeout( ctx context.Context, args *cliArgs, j *job, @@ -1192,145 +1829,27 @@ func executeJobWithRetry( useUI bool, uiCh chan uiMsg, index int, - failed *int32, - failuresMu *sync.Mutex, - failures *[]failInfo, - aggregateUsage *TokenUsage, - aggregateMu *sync.Mutex, -) { - currentTimeout := args.timeout - attempt := 0 - maxRetries := uint64(0) - if args.maxRetries > 0 { - // #nosec G115 - maxRetries is validated to be non-negative and reasonable - maxRetries = uint64(args.maxRetries) - } - backoff := retry.WithMaxRetries(maxRetries, retry.NewConstant(1*time.Millisecond)) - err := retry.Do(ctx, backoff, func(retryCtx context.Context) error { - attempt++ - currentTimeout = calculateRetryTimeout( - currentTimeout, - attempt, - args.retryBackoffMultiplier, - args.maxRetries, - index, - j, - useUI, - ) - return executeJobAttempt( - retryCtx, args, j, cwd, useUI, uiCh, index, currentTimeout, - failed, failuresMu, failures, aggregateUsage, aggregateMu, attempt, - ) - }) - logRetryCompletion(err, attempt, index, j, 
useUI) -} - -func calculateRetryTimeout( - currentTimeout time.Duration, - attempt int, - multiplier float64, - maxRetries int, - index int, - j *job, - useUI bool, -) time.Duration { - if attempt > 1 { - currentTimeout = time.Duration(float64(currentTimeout) * multiplier) - if !useUI { - fmt.Fprintf( - os.Stderr, - "\n🔄 Retry attempt %d/%d for job %d (%s) with timeout %v\n", - attempt-1, - maxRetries, - index+1, - strings.Join(j.codeFiles, ", "), - currentTimeout, - ) - } - } - return currentTimeout -} - -func executeJobAttempt( - ctx context.Context, - args *cliArgs, - j *job, - cwd string, - useUI bool, - uiCh chan uiMsg, - index int, - currentTimeout time.Duration, - failed *int32, - failuresMu *sync.Mutex, - failures *[]failInfo, - aggregateUsage *TokenUsage, - aggregateMu *sync.Mutex, - attempt int, -) error { - argsWithTimeout := *args - argsWithTimeout.timeout = currentTimeout - if useUI && attempt > 1 { - uiCh <- jobStartedMsg{Index: index} - } - success, exitCode := executeJobWithTimeoutAndResult( - ctx, &argsWithTimeout, j, cwd, useUI, uiCh, - index, failed, failuresMu, failures, aggregateUsage, aggregateMu, - ) - if !success && exitCode == exitCodeTimeout { - return retry.RetryableError(fmt.Errorf("timeout")) - } - return nil -} - -func logRetryCompletion(err error, attempt int, index int, j *job, useUI bool) { - if err != nil && attempt > 1 && !useUI { - fmt.Fprintf( - os.Stderr, - "\n❌ Job %d (%s) failed after %d retry attempts\n", - index+1, - strings.Join(j.codeFiles, ", "), - attempt-1, - ) - } -} - -func executeJobWithTimeoutAndResult( - ctx context.Context, - args *cliArgs, - j *job, - cwd string, - useUI bool, - uiCh chan uiMsg, - index int, - failed *int32, - failuresMu *sync.Mutex, - failures *[]failInfo, + timeout time.Duration, aggregateUsage *TokenUsage, aggregateMu *sync.Mutex, -) (bool, int) { +) jobAttemptResult { cmd, outF, errF, monitor, err := setupCommandExecution( ctx, args, j, cwd, useUI, uiCh, index, aggregateUsage, aggregateMu, ) 
if err != nil { - failure := recordFailureWithContext(failuresMu, j, failures, err, -1) - atomic.AddInt32(failed, 1) - if useUI && uiCh != nil { - uiCh <- jobFinishedMsg{Index: index, Success: false, ExitCode: -1} - uiCh <- jobFailureMsg{Failure: failure} - } else { - fmt.Fprintf( - os.Stderr, - "\n❌ Failed to prepare job %d (%s): %v\n", - index+1, - strings.Join(j.codeFiles, ", "), - err, - ) - } - return false, -1 + fail := recordFailureWithContext(nil, j, nil, err, -1) + return jobAttemptResult{status: attemptStatusSetupFailed, exitCode: -1, failure: &fail} } - return executeCommandAndHandleResultWithStatus( - ctx, args.timeout, monitor, cmd, outF, errF, j, - index, useUI, uiCh, failed, failuresMu, failures, + return executeCommandAndResolve( + ctx, + timeout, + monitor, + cmd, + outF, + errF, + j, + index, + useUI, ) } @@ -1693,17 +2212,8 @@ func createLogFile(path, _ string) (*os.File, error) { return file, nil } -func handleNilCommand( - useUI bool, - uiCh chan uiMsg, - j *job, - index int, - failed *int32, - failuresMu *sync.Mutex, - failures *[]failInfo, -) (bool, int) { +func handleNilCommand(j *job, index int) jobAttemptResult { codeFileLabel := strings.Join(j.codeFiles, ", ") - atomic.AddInt32(failed, 1) failure := failInfo{ codeFile: codeFileLabel, exitCode: -1, @@ -1711,15 +2221,17 @@ func handleNilCommand( errLog: j.errLog, err: fmt.Errorf("failed to set up command (see logs)"), } - recordFailure(failuresMu, failures, failure) - if useUI { - uiCh <- jobFinishedMsg{Index: index, Success: false, ExitCode: -1} - uiCh <- jobFailureMsg{Failure: failure} - } - return false, -1 + fmt.Fprintf( + os.Stderr, + "\n❌ Failed to set up job %d (%s): %v\n", + index+1, + codeFileLabel, + failure.err, + ) + return jobAttemptResult{status: attemptStatusSetupFailed, exitCode: -1, failure: &failure} } -func executeCommandAndHandleResultWithStatus( +func executeCommandAndResolve( ctx context.Context, timeout time.Duration, monitor *activityMonitor, @@ -1729,13 +2241,9 @@ 
func executeCommandAndHandleResultWithStatus( j *job, index int, useUI bool, - uiCh chan uiMsg, - failed *int32, - failuresMu *sync.Mutex, - failures *[]failInfo, -) (bool, int) { +) jobAttemptResult { if cmd == nil { - return handleNilCommand(useUI, uiCh, j, index, failed, failuresMu, failures) + return handleNilCommand(j, index) } defer func() { if outF != nil { @@ -1746,91 +2254,84 @@ func executeCommandAndHandleResultWithStatus( } }() cmdDone := make(chan error, 1) + cmdDoneSignal := make(chan struct{}) go func() { cmdDone <- cmd.Run() + close(cmdDoneSignal) }() - activityTimeout := startActivityWatchdog(ctx, monitor, timeout, cmdDone) - type result struct { - success bool - exitCode int - } - resultCh := make(chan result, 1) + activityTimeout := startActivityWatchdog(ctx, monitor, timeout, cmdDoneSignal) select { case err := <-cmdDone: - success, exitCode := handleCommandCompletionWithResult( - err, j, index, useUI, uiCh, failed, failuresMu, failures, - ) - resultCh <- result{success, exitCode} + return handleCommandCompletion(err, j, index, useUI) case <-activityTimeout: - handleActivityTimeout(ctx, cmd, cmdDone, j, index, useUI, uiCh, failed, failuresMu, failures, timeout) - resultCh <- result{false, exitCodeTimeout} + return handleActivityTimeout(ctx, cmd, cmdDone, j, index, useUI, timeout) case <-ctx.Done(): - handleCommandCancellation(ctx, cmd, cmdDone, j, index, useUI, uiCh, failed, failuresMu, failures) - resultCh <- result{false, exitCodeCanceled} + return handleCommandCancellation(ctx, cmd, cmdDone, j, index, useUI) } - res := <-resultCh - return res.success, res.exitCode } func startActivityWatchdog( ctx context.Context, monitor *activityMonitor, timeout time.Duration, - cmdDone <-chan error, + cmdDone <-chan struct{}, ) <-chan struct{} { - activityTimeout := make(chan struct{}) - if monitor != nil && timeout > 0 { - go func() { - ticker := time.NewTicker(activityCheckInterval) - defer ticker.Stop() - for { - select { - case <-ticker.C: - if 
monitor.timeSinceLastActivity() > timeout { - close(activityTimeout) - return + if monitor == nil || timeout <= 0 { + return nil + } + activityTimeout := make(chan struct{}, 1) + go func() { + ticker := time.NewTicker(activityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + if monitor.timeSinceLastActivity() > timeout { + select { + case activityTimeout <- struct{}{}: + default: } - case <-cmdDone: - return - case <-ctx.Done(): return } + case <-cmdDone: + return + case <-ctx.Done(): + return } - }() - } + } + }() return activityTimeout } -func handleCommandCompletionWithResult( +func handleCommandCompletion( err error, j *job, index int, useUI bool, - uiCh chan uiMsg, - failed *int32, - failuresMu *sync.Mutex, - failures *[]failInfo, -) (bool, int) { +) jobAttemptResult { if err != nil { ec := exitCodeOf(err) - atomic.AddInt32(failed, 1) codeFileLabel := strings.Join(j.codeFiles, ", ") - failInfo := failInfo{codeFile: codeFileLabel, exitCode: ec, outLog: j.outLog, errLog: j.errLog, err: err} - recordFailure( - failuresMu, - failures, - failInfo, - ) - if useUI { - uiCh <- jobFinishedMsg{Index: index, Success: false, ExitCode: ec} - uiCh <- jobFailureMsg{Failure: failInfo} + failInfo := failInfo{ + codeFile: codeFileLabel, + exitCode: ec, + outLog: j.outLog, + errLog: j.errLog, + err: err, } - return false, ec - } - if useUI { - uiCh <- jobFinishedMsg{Index: index, Success: true, ExitCode: 0} + if !useUI { + fmt.Fprintf( + os.Stderr, + "\n❌ Job %d (%s) failed with exit code %d: %v\n", + index+1, + codeFileLabel, + ec, + err, + ) + } + return jobAttemptResult{status: attemptStatusFailure, exitCode: ec, failure: &failInfo} } - return true, 0 + return jobAttemptResult{status: attemptStatusSuccess, exitCode: 0} } func handleCommandCancellation( @@ -1840,17 +2341,15 @@ func handleCommandCancellation( j *job, index int, useUI bool, - uiCh chan uiMsg, - _ *int32, - _ *sync.Mutex, - _ *[]failInfo, -) { - fmt.Fprintf( - os.Stderr, - "\nCanceling 
job %d (%s) due to shutdown signal\n", - index+1, - strings.Join(j.codeFiles, ", "), - ) +) jobAttemptResult { + if !useUI { + fmt.Fprintf( + os.Stderr, + "\nCanceling job %d (%s) due to shutdown signal\n", + index+1, + strings.Join(j.codeFiles, ", "), + ) + } if cmd.Process != nil { // NOTE: Attempt graceful termination before force killing spawned commands. if err := cmd.Process.Signal(syscall.SIGTERM); err != nil { @@ -1859,18 +2358,28 @@ func handleCommandCancellation( select { case <-cmdDone: - fmt.Fprintf(os.Stderr, "Job %d terminated gracefully\n", index+1) + if !useUI { + fmt.Fprintf(os.Stderr, "Job %d terminated gracefully\n", index+1) + } case <-time.After(processTerminationGracePeriod): // NOTE: Escalate to SIGKILL if the process ignores our grace period. - fmt.Fprintf(os.Stderr, "Job %d did not terminate gracefully, force killing...\n", index+1) + if !useUI { + fmt.Fprintf(os.Stderr, "Job %d did not terminate gracefully, force killing...\n", index+1) + } if err := cmd.Process.Kill(); err != nil { fmt.Fprintf(os.Stderr, "Failed to kill process: %v\n", err) } } } - if useUI { - uiCh <- jobFinishedMsg{Index: index, Success: false, ExitCode: exitCodeCanceled} + codeFileLabel := strings.Join(j.codeFiles, ", ") + failure := failInfo{ + codeFile: codeFileLabel, + exitCode: exitCodeCanceled, + outLog: j.outLog, + errLog: j.errLog, + err: fmt.Errorf("job canceled by shutdown"), } + return jobAttemptResult{status: attemptStatusCanceled, exitCode: exitCodeCanceled, failure: &failure} } func handleActivityTimeout( @@ -1880,14 +2389,9 @@ func handleActivityTimeout( j *job, index int, useUI bool, - uiCh chan uiMsg, - failed *int32, - failuresMu *sync.Mutex, - failures *[]failInfo, timeout time.Duration, -) { +) jobAttemptResult { logTimeoutMessage(index, j, timeout, useUI) - atomic.AddInt32(failed, 1) terminateTimedOutProcess(cmd, cmdDone, index, useUI) codeFileLabel := strings.Join(j.codeFiles, ", ") timeoutErr := fmt.Errorf("activity timeout: no output received for 
%v", timeout) @@ -1898,11 +2402,7 @@ func handleActivityTimeout( errLog: j.errLog, err: timeoutErr, } - recordFailure(failuresMu, failures, failInfo) - if useUI { - uiCh <- jobFinishedMsg{Index: index, Success: false, ExitCode: exitCodeTimeout} - uiCh <- jobFailureMsg{Failure: failInfo} - } + return jobAttemptResult{status: attemptStatusTimeout, exitCode: exitCodeTimeout, failure: &failInfo} } func logTimeoutMessage(index int, j *job, timeout time.Duration, useUI bool) { @@ -2146,11 +2646,20 @@ type ClaudeMessage struct { } `json:"message"` } -type uiViewState int +type uiViewState string + +const ( + uiViewJobs uiViewState = "jobs" + uiViewSummary uiViewState = "summary" + uiViewFailures uiViewState = "failures" +) + +type uiViewEvent string const ( - uiViewJobs uiViewState = iota - uiViewSummary + uiViewEventShowJobs uiViewEvent = "view_jobs" + uiViewEventShowSummary uiViewEvent = "view_summary" + uiViewEventShowFailures uiViewEvent = "view_failures" ) type uiModel struct { @@ -2170,13 +2679,15 @@ type uiModel struct { mainWidth int contentHeight int currentView uiViewState + viewFSM *fsm.FSM + ctx context.Context failures []failInfo aggregateUsage *TokenUsage } type uiMsg any -func newUIModel(total int) *uiModel { +func newUIModel(ctx context.Context, total int) *uiModel { vp := viewport.New(80, 24) // Increased initial height sidebarVp := viewport.New(30, 24) // Increased initial height defaultWidth := 120 @@ -2196,7 +2707,7 @@ func newUIModel(total int) *uiModel { if initialContentHeight < minContentHeight { initialContentHeight = minContentHeight } - return &uiModel{ + mdl := &uiModel{ total: total, viewport: vp, sidebarViewport: sidebarVp, @@ -2207,9 +2718,81 @@ func newUIModel(total int) *uiModel { mainWidth: initialMainWidth, contentHeight: initialContentHeight, currentView: uiViewJobs, + ctx: ctx, failures: []failInfo{}, aggregateUsage: &TokenUsage{}, } + mdl.initViewFSM() + return mdl +} + +func (m *uiModel) initViewFSM() { + m.viewFSM = fsm.NewFSM( + 
string(uiViewJobs), + fsm.Events{ + { + Name: string(uiViewEventShowJobs), + Src: []string{string(uiViewSummary), string(uiViewFailures)}, + Dst: string(uiViewJobs), + }, + { + Name: string(uiViewEventShowSummary), + Src: []string{string(uiViewJobs), string(uiViewFailures)}, + Dst: string(uiViewSummary), + }, + { + Name: string(uiViewEventShowFailures), + Src: []string{string(uiViewJobs), string(uiViewSummary)}, + Dst: string(uiViewFailures), + }, + }, + fsm.Callbacks{ + "before_" + string(uiViewEventShowSummary): m.beforeShowSummary, + "before_" + string(uiViewEventShowFailures): m.beforeShowFailures, + "enter_" + string(uiViewJobs): m.onEnterJobsView, + "enter_" + string(uiViewSummary): m.onEnterSummaryView, + "enter_" + string(uiViewFailures): m.onEnterFailuresView, + }, + ) +} + +func (m *uiModel) beforeShowSummary(_ context.Context, evt *fsm.Event) { + if m.completed+m.failed < m.total { + evt.Cancel(fmt.Errorf("cannot switch to summary while jobs are incomplete")) + } +} + +func (m *uiModel) beforeShowFailures(_ context.Context, evt *fsm.Event) { + if len(m.failures) == 0 { + evt.Cancel(fmt.Errorf("no failures available to display")) + } +} + +func (m *uiModel) onEnterJobsView(_ context.Context, _ *fsm.Event) { + m.currentView = uiViewJobs + m.refreshViewportContent() +} + +func (m *uiModel) onEnterSummaryView(_ context.Context, _ *fsm.Event) { + m.currentView = uiViewSummary +} + +func (m *uiModel) onEnterFailuresView(_ context.Context, _ *fsm.Event) { + m.currentView = uiViewFailures +} + +func (m *uiModel) transitionView(evt uiViewEvent) { + if m.viewFSM == nil { + return + } + if err := m.viewFSM.Event(m.ctx, string(evt)); err != nil { + var inTransitionErr fsm.InTransitionError + var noTransitionErr fsm.NoTransitionError + var invalidEventErr fsm.InvalidEventError + if errors.As(err, &inTransitionErr) || errors.As(err, &noTransitionErr) || errors.As(err, &invalidEventErr) { + return + } + } } func (m *uiModel) setEventSource(ch <-chan uiMsg) { m.events = 
ch } @@ -2292,14 +2875,12 @@ func (m *uiModel) handleKey(v tea.KeyMsg) tea.Cmd { func (m *uiModel) handleViewSwitchKeys(key string) tea.Cmd { switch key { case "s", "tab": - // Switch to summary view only when all tasks are complete - if m.completed+m.failed >= m.total && m.currentView == uiViewJobs { - m.currentView = uiViewSummary + if m.viewFSM != nil && m.currentView != uiViewSummary { + m.transitionView(uiViewEventShowSummary) } case "esc": - // Return to main jobs view from summary - if m.currentView == uiViewSummary { - m.currentView = uiViewJobs + if m.viewFSM != nil && m.currentView != uiViewJobs { + m.transitionView(uiViewEventShowJobs) } } return nil @@ -2498,7 +3079,7 @@ func (m *uiModel) handleJobFinished(v jobFinishedMsg) tea.Cmd { m.selectNextRunningJob() } if m.total > 0 && m.completed+m.failed >= m.total && m.failed > 0 && m.currentView != uiViewSummary { - m.currentView = uiViewSummary + m.transitionView(uiViewEventShowSummary) } m.refreshViewportContent() return m.waitEvent() @@ -2547,6 +3128,22 @@ func (m *uiModel) renderSummaryView() string { return lipgloss.JoinVertical(lipgloss.Left, separator, content) } +func (m *uiModel) renderFailuresView() string { + if len(m.failures) == 0 { + noteStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("245")).MarginTop(2) + return noteStyle.Render("No failures recorded. 
Return with 'esc'.") + } + rows := []string{"Failure Details:"} + for _, f := range m.failures { + rows = append(rows, + fmt.Sprintf("• %s (exit %d)", f.codeFile, f.exitCode), + fmt.Sprintf(" Logs: %s (out), %s (err)", f.outLog, f.errLog), + ) + } + block := lipgloss.NewStyle().Foreground(lipgloss.Color("203")).Render(strings.Join(rows, "\n")) + return lipgloss.JoinVertical(lipgloss.Left, block, m.renderSummaryHelp()) +} + func (m *uiModel) renderSummaryHeader() string { headerStyle := lipgloss.NewStyle().Bold(true).MarginTop(1).MarginBottom(1) if m.failed > 0 { @@ -2606,6 +3203,8 @@ func (m *uiModel) View() string { switch m.currentView { case uiViewSummary: return m.renderSummaryView() + case uiViewFailures: + return m.renderFailuresView() case uiViewJobs: header, headerStyle := m.renderHeader() helpText, helpStyle := m.renderHelp() @@ -3450,12 +4049,11 @@ func buildCriticalExecutionSection() string { sb.WriteString("**VALIDATION REQUIREMENTS**:\n") sb.WriteString("- All tests MUST pass (`make test`)\n") sb.WriteString("- All linting MUST pass (`make lint`)\n") - sb.WriteString("- All type checking MUST pass (`make typecheck`)\n") sb.WriteString("- All subtasks MUST be marked complete\n") sb.WriteString("- Task status MUST be updated to 'completed'\n\n") sb.WriteString("⚠️ **WORK WILL BE INVALIDATED** if:\n") sb.WriteString("- Any requirement is incomplete\n") - sb.WriteString("- Tests/linting/typecheck fail\n") + sb.WriteString("- Tests/linting fails\n") sb.WriteString("- Project standards are violated\n") sb.WriteString("- Workarounds are used instead of proper solutions\n") sb.WriteString("- Task completion steps are skipped\n") diff --git a/sdk/agent/README.md b/sdk/agent/README.md new file mode 100644 index 00000000..2ac690a7 --- /dev/null +++ b/sdk/agent/README.md @@ -0,0 +1,113 @@ +# Agent SDK - Functional Options Pattern + +This package provides a fluent, type-safe API for creating AI agent configurations using the **functional options pattern** with 
**auto-generated option functions**. + +## Quick Start + +```go +package main + +import ( + "context" + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/sdk/agent" +) + +func main() { + ctx := context.Background() + + agentCfg, err := agent.New(ctx, "assistant", + agent.WithInstructions("You are a helpful AI assistant"), + agent.WithModel(agent.Model{ + Config: core.ProviderConfig{ + Provider: core.ProviderOpenAI, + Model: "gpt-4", + }, + }), + agent.WithMaxIterations(10), + ) + if err != nil { + panic(err) + } + + // Use agentCfg in your workflow +} +``` + +## Available Options + +### Core Configuration + +- `WithInstructions(string)` - System instructions for the agent (required) +- `WithModel(Model)` - LLM model configuration +- `WithMaxIterations(int)` - Maximum reasoning iterations (default: 5) + +### Capabilities + +- `WithTools([]tool.Config)` - Available tools for the agent +- `WithMCPs([]mcp.Config)` - Model Context Protocol servers +- `WithActions([]*ActionConfig)` - Structured actions with schemas + +### Context & Memory + +- `WithMemory([]core.MemoryReference)` - Persistent memory access +- `WithKnowledge([]core.KnowledgeBinding)` - Knowledge base integration + +### Environment + +- `WithWith(*core.Input)` - Default input parameters +- `WithEnv(*core.EnvMap)` - Environment variables +- `WithAttachments(attachment.Attachments)` - File attachments + +## Code Generation + +This package uses **auto-generated functional options** from the engine structs: + +```bash +# Regenerate options when engine/agent/config.go changes +cd sdk/agent +go generate +``` + +The `options_generated.go` file is created automatically by parsing `engine/agent/config.go`. 
+ +## Architecture + +- **`constructor.go`** - Main constructor with validation logic +- **`options_generated.go`** - Auto-generated option functions (DO NOT EDIT) +- **`generate.go`** - go:generate directive +- **`action.go`** - Action configuration helpers + +## Validation + +The constructor performs comprehensive validation: + +- ✅ ID must be non-empty and valid +- ✅ Instructions are required +- ✅ Model must have either ref or config +- ✅ Maximum one knowledge binding +- ✅ Memory references must have IDs +- ✅ All inputs are trimmed and normalized + +## Error Handling + +Validation errors are collected and returned as a `BuildError`: + +```go +agentCfg, err := agent.New(ctx, "", // Empty ID + agent.WithInstructions(""), // Empty instructions +) + +// err is *sdkerrors.BuildError with multiple validation errors +``` + +## Testing + +Run tests: + +```bash +make test # All project tests +gotestsum --format pkgname -- -race -parallel=4 ./sdk/agent +``` + +All tests use the functional options pattern and validate the full configuration lifecycle. 
diff --git a/sdk/agent/constructor.go b/sdk/agent/constructor.go new file mode 100644 index 00000000..60751a26 --- /dev/null +++ b/sdk/agent/constructor.go @@ -0,0 +1,111 @@ +package agent + +import ( + "context" + "fmt" + "strings" + + engineagent "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +// New creates an agent configuration using functional options +func New(ctx context.Context, id string, opts ...Option) (*engineagent.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + if log != nil { + log.Debug("creating agent configuration", "agent", id) + } + cfg := &engineagent.Config{ + Resource: string(core.ConfigAgent), + ID: strings.TrimSpace(id), + } + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0, 8) + cfg.ID = strings.TrimSpace(cfg.ID) + if err := validate.ID(ctx, cfg.ID); err != nil { + collected = append(collected, fmt.Errorf("agent id is invalid: %w", err)) + } + cfg.Instructions = strings.TrimSpace(cfg.Instructions) + if err := validate.NonEmpty(ctx, "instructions", cfg.Instructions); err != nil { + collected = append(collected, err) + } + if err := validateModel(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateKnowledge(cfg); err != nil { + collected = append(collected, err) + } + if err := validateMemory(cfg); err != nil { + collected = append(collected, err) + } + filtered := make([]error, 0, len(collected)) + for _, err := range collected { + if err != nil { + filtered = append(filtered, err) + } + } + if len(filtered) > 0 { + return nil, &sdkerrors.BuildError{Errors: filtered} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone agent config: %w", err) + } + return cloned, 
nil +} + +func validateModel(ctx context.Context, cfg *engineagent.Config) error { + if cfg.Model.HasRef() { + cfg.Model.Ref = strings.TrimSpace(cfg.Model.Ref) + if err := validate.NonEmpty(ctx, "model reference", cfg.Model.Ref); err != nil { + return err + } + return nil + } + if cfg.Model.HasConfig() { + provider := strings.ToLower(strings.TrimSpace(string(cfg.Model.Config.Provider))) + modelName := strings.TrimSpace(cfg.Model.Config.Model) + if err := validate.NonEmpty(ctx, "model provider", provider); err != nil { + return err + } + if err := validate.NonEmpty(ctx, "model name", modelName); err != nil { + return err + } + cfg.Model.Config.Provider = core.ProviderName(provider) + cfg.Model.Config.Model = modelName + return nil + } + return nil +} + +func validateKnowledge(cfg *engineagent.Config) error { + if len(cfg.Knowledge) > 1 { + return fmt.Errorf("only one knowledge binding is supported") + } + if len(cfg.Knowledge) == 1 { + binding := cfg.Knowledge[0] + if strings.TrimSpace(binding.ID) == "" { + return fmt.Errorf("knowledge binding id cannot be empty") + } + cfg.Knowledge[0].ID = strings.TrimSpace(binding.ID) + } + return nil +} + +func validateMemory(cfg *engineagent.Config) error { + for idx := range cfg.Memory { + cfg.Memory[idx].ID = strings.TrimSpace(cfg.Memory[idx].ID) + if cfg.Memory[idx].ID == "" { + return fmt.Errorf("memory reference at index %d is missing an id", idx) + } + } + return nil +} diff --git a/sdk/agent/constructor_test.go b/sdk/agent/constructor_test.go new file mode 100644 index 00000000..a8d64c76 --- /dev/null +++ b/sdk/agent/constructor_test.go @@ -0,0 +1,225 @@ +package agent + +import ( + "context" + "errors" + "testing" + + engineagent "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/engine/mcp" + "github.com/compozy/compozy/engine/tool" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" +) + +func TestNew(t *testing.T) { + t.Run("Should create 
agent with minimal configuration", func(t *testing.T) { + ctx := t.Context() + cfg, err := New(ctx, "test-agent", + WithInstructions("You are a helpful assistant"), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg == nil { + t.Fatal("expected config, got nil") + } + if cfg.ID != "test-agent" { + t.Errorf("expected ID 'test-agent', got '%s'", cfg.ID) + } + if cfg.Instructions != "You are a helpful assistant" { + t.Errorf("expected instructions, got '%s'", cfg.Instructions) + } + if cfg.Resource != string(core.ConfigAgent) { + t.Errorf("expected resource 'agent', got '%s'", cfg.Resource) + } + }) + t.Run("Should trim whitespace from ID and instructions", func(t *testing.T) { + ctx := t.Context() + cfg, err := New(ctx, " test-agent ", + WithInstructions(" You are helpful "), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ID != "test-agent" { + t.Errorf("expected trimmed ID 'test-agent', got '%s'", cfg.ID) + } + if cfg.Instructions != "You are helpful" { + t.Errorf("expected trimmed instructions, got '%s'", cfg.Instructions) + } + }) + t.Run("Should fail when context is nil", func(t *testing.T) { + var nilCtx context.Context + _, err := New(nilCtx, "test-agent", + WithInstructions("Test"), + ) + if err == nil { + t.Fatal("expected error for nil context") + } + if err.Error() != "context is required" { + t.Errorf("unexpected error message: %v", err) + } + }) + t.Run("Should fail when ID is empty", func(t *testing.T) { + ctx := t.Context() + _, err := New(ctx, "", + WithInstructions("Test"), + ) + if err == nil { + t.Fatal("expected error for empty ID") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should fail when instructions are empty", func(t *testing.T) { + ctx := t.Context() + _, err := New(ctx, "test-agent") + if err == nil { + t.Fatal("expected error for empty instructions") + } + var buildErr *sdkerrors.BuildError + 
if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should create agent with all options", func(t *testing.T) { + ctx := t.Context() + cfg, err := New(ctx, "full-agent", + WithInstructions("Complex agent"), + WithTools([]tool.Config{{ID: "tool1"}}), + WithMCPs([]mcp.Config{{ID: "mcp1"}}), + WithMaxIterations(10), + WithMemory([]core.MemoryReference{{ID: "mem1"}}), + WithKnowledge([]core.KnowledgeBinding{{ID: "kb1"}}), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(cfg.Tools) != 1 { + t.Errorf("expected 1 tool, got %d", len(cfg.Tools)) + } + if len(cfg.MCPs) != 1 { + t.Errorf("expected 1 MCP, got %d", len(cfg.MCPs)) + } + if cfg.MaxIterations != 10 { + t.Errorf("expected max iterations 10, got %d", cfg.MaxIterations) + } + if len(cfg.Memory) != 1 { + t.Errorf("expected 1 memory ref, got %d", len(cfg.Memory)) + } + if len(cfg.Knowledge) != 1 { + t.Errorf("expected 1 knowledge binding, got %d", len(cfg.Knowledge)) + } + }) + t.Run("Should fail with multiple knowledge bindings", func(t *testing.T) { + ctx := t.Context() + _, err := New(ctx, "test-agent", + WithInstructions("Test"), + WithKnowledge([]core.KnowledgeBinding{ + {ID: "kb1"}, + {ID: "kb2"}, + }), + ) + if err == nil { + t.Fatal("expected error for multiple knowledge bindings") + } + }) + t.Run("Should fail with empty knowledge binding ID", func(t *testing.T) { + ctx := t.Context() + _, err := New(ctx, "test-agent", + WithInstructions("Test"), + WithKnowledge([]core.KnowledgeBinding{{ID: ""}}), + ) + if err == nil { + t.Fatal("expected error for empty knowledge binding ID") + } + }) + t.Run("Should fail with empty memory reference ID", func(t *testing.T) { + ctx := t.Context() + _, err := New(ctx, "test-agent", + WithInstructions("Test"), + WithMemory([]core.MemoryReference{{ID: ""}}), + ) + if err == nil { + t.Fatal("expected error for empty memory reference ID") + } + }) + t.Run("Should create deep copy of configuration", func(t 
*testing.T) { + ctx := t.Context() + cfg1, err := New(ctx, "test-agent", + WithInstructions("Test"), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + cfg2 := &engineagent.Config{} + *cfg2 = *cfg1 + cfg2.Instructions = "Modified" + if cfg1.Instructions == "Modified" { + t.Error("configuration was not deep copied") + } + }) +} + +func TestWithActions(t *testing.T) { + t.Run("Should set actions", func(t *testing.T) { + ctx := t.Context() + action := &engineagent.ActionConfig{ID: "action1"} + cfg, err := New(ctx, "test", + WithInstructions("Test"), + WithActions([]*engineagent.ActionConfig{action}), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(cfg.Actions) != 1 { + t.Errorf("expected 1 action, got %d", len(cfg.Actions)) + } + if cfg.Actions[0].ID != "action1" { + t.Errorf("expected action ID 'action1', got '%s'", cfg.Actions[0].ID) + } + }) +} + +func TestWithModel(t *testing.T) { + t.Run("Should set model with ref", func(t *testing.T) { + ctx := t.Context() + cfg, err := New(ctx, "test", + WithInstructions("Test"), + WithModel(engineagent.Model{Ref: "gpt-4"}), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !cfg.Model.HasRef() { + t.Error("expected model to have ref") + } + if cfg.Model.Ref != "gpt-4" { + t.Errorf("expected model ref 'gpt-4', got '%s'", cfg.Model.Ref) + } + }) + t.Run("Should set model with config", func(t *testing.T) { + ctx := t.Context() + cfg, err := New(ctx, "test", + WithInstructions("Test"), + WithModel(engineagent.Model{ + Config: core.ProviderConfig{ + Provider: core.ProviderOpenAI, + Model: "gpt-4", + }, + }), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !cfg.Model.HasConfig() { + t.Error("expected model to have config") + } + if cfg.Model.Config.Provider != core.ProviderOpenAI { + t.Errorf("expected provider openai, got '%s'", cfg.Model.Config.Provider) + } + }) +} diff --git a/sdk/agent/doc.go b/sdk/agent/doc.go new file mode 100644 index 
00000000..d2a2043d --- /dev/null +++ b/sdk/agent/doc.go @@ -0,0 +1,3 @@ +// Package agent provides builders for configuring agents and their actions when +// composing projects through the Compozy Go SDK. +package agent diff --git a/sdk/agent/generate.go b/sdk/agent/generate.go new file mode 100644 index 00000000..621e8671 --- /dev/null +++ b/sdk/agent/generate.go @@ -0,0 +1,3 @@ +package agent + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/agent/config.go -struct Config -output options_generated.go -package agent diff --git a/sdk/agent/options_generated.go b/sdk/agent/options_generated.go new file mode 100644 index 00000000..8ab8333b --- /dev/null +++ b/sdk/agent/options_generated.go @@ -0,0 +1,214 @@ +// Code generated by optionsgen. DO NOT EDIT. + +package agent + +import ( + agent "github.com/compozy/compozy/engine/agent" + attachment "github.com/compozy/compozy/engine/attachment" + core "github.com/compozy/compozy/engine/core" + mcp "github.com/compozy/compozy/engine/mcp" + tool "github.com/compozy/compozy/engine/tool" +) + +type Option func(*agent.Config) + +// WithTools sets the Tools field +// +// Tools available to the agent for extending its capabilities. +// When tools are defined, the agent automatically has `toolChoice` set to `"auto"`, +// enabling autonomous tool selection and invocation during task execution. +// **Tool types supported:** +// - File system operations (read, write, list) +// - API integrations (HTTP requests, webhooks) +// - Data processing utilities (parsing, transformation) +// - Custom business logic (TypeScript/JavaScript execution) +// Tools are referenced by ID and can be shared across multiple agents. +func WithTools(tools []tool.Config) Option { + return func(cfg *agent.Config) { + cfg.Tools = tools + } +} + +// WithMCPs sets the MCPs field +// +// Model Context Protocol (MCP) server configurations. 
+// MCPs provide standardized interfaces for extending agent capabilities +// with external services and data sources through protocol-based communication. +// **Common MCP integrations:** +// - Database connectors (PostgreSQL, Redis, MongoDB) +// - Search engines (Elasticsearch, Solr) +// - Knowledge bases (vector databases, documentation systems) +// - External APIs (REST, GraphQL, gRPC services) +// MCPs support both stdio and HTTP transport protocols. +func WithMCPs(mCPs []mcp.Config) Option { + return func(cfg *agent.Config) { + cfg.MCPs = mCPs + } +} + +// WithMaxIterations sets the MaxIterations field +// +// Maximum number of reasoning iterations the agent can perform. +// The agent may self-correct and refine its response across multiple iterations +// to improve accuracy and address complex multi-step problems. +// **Default:** `5` iterations +// **Trade-offs:** +// - Higher values enable more thorough problem-solving and self-correction +// - Each iteration consumes additional tokens and increases response latency +// - Configure based on task complexity, accuracy requirements, and cost constraints +func WithMaxIterations(maxIterations int) Option { + return func(cfg *agent.Config) { + cfg.MaxIterations = maxIterations + } +} + +// WithMemory sets the Memory field +// +// Memory references enabling the agent to access persistent context. +// Memory provides stateful interactions across workflow steps and sessions. 
+// **Configuration format:** +// ```yaml +// memory: +// - id: "user_context" # Memory resource ID +// key: "user:{{.user_id}}" # Dynamic key with template +// mode: "read-write" # Access mode (default: "read-write") +// ``` +// **Access modes:** +// - `"read-write"`: Full access to read and modify memory +// - `"read-only"`: Can only read existing memory entries +func WithMemory(memory []core.MemoryReference) Option { + return func(cfg *agent.Config) { + cfg.Memory = memory + } +} + +// WithModel sets the Model field +// +// Model selects which LLM model to use. +// Supports two forms: +// - string: a model ID to be resolved via the ResourceStore (e.g. "openai-gpt-4o-mini") +// - object: an inline core.ProviderConfig with provider/model/params +// During compile/link, string refs are resolved and merged with inline +// fields following project precedence rules. Defaults are filled from the +// project when neither ref nor inline identity is provided. +func WithModel(model agent.Model) Option { + return func(cfg *agent.Config) { + cfg.Model = model + } +} + +// WithAttachments sets the Attachments field +// +// Attachments declared at the agent scope. +func WithAttachments(attachments attachment.Attachments) Option { + return func(cfg *agent.Config) { + cfg.Attachments = attachments + } +} + +// WithResource sets the Resource field +// +// Resource identifier for the autoloader system (must be `"agent"`). +// This field enables automatic discovery and registration of agent configurations. +func WithResource(resource string) Option { + return func(cfg *agent.Config) { + cfg.Resource = resource + } +} + +// WithID sets the ID field +// +// Unique identifier for the agent within the project scope. +// Used for referencing the agent in workflows and other configurations. 
+// - **Examples:** `"code-assistant"`, `"data-analyst"`, `"customer-support"` +func WithID(id string) Option { + return func(cfg *agent.Config) { + cfg.ID = id + } +} + +// WithInstructions sets the Instructions field +// +// Provider configuration is now expressed through the polymorphic `Model` field. +// The previous `Config core.ProviderConfig` field has been removed. +// System instructions that define the agent's personality, behavior, and constraints. +// These instructions guide how the agent interprets tasks and generates responses. +// **Best practices:** +// - Be clear and specific about the agent's role +// - Define boundaries and ethical guidelines +// - Include domain-specific knowledge or constraints +// - Use markdown formatting for better structure +func WithInstructions(instructions string) Option { + return func(cfg *agent.Config) { + cfg.Instructions = instructions + } +} + +// WithActions sets the Actions field +// +// Structured actions the agent can perform with defined input/output schemas. +// Actions provide type-safe interfaces for specific agent capabilities. +// **Example:** +// ```yaml +// actions: +// - id: "review-code" +// prompt: | +// Analyze code {{.input.code}} for quality and improvements +// input: +// type: "object" +// properties: +// code: +// type: "string" +// description: "The code to review" +// output: +// type: "object" +// properties: +// quality: +// type: "string" +// description: "The quality of the code" +// ``` +// $ref: inline:#action-configuration +func WithActions(actions []*agent.ActionConfig) Option { + return func(cfg *agent.Config) { + cfg.Actions = actions + } +} + +// WithWith sets the With field +// +// Default input parameters passed to the agent on every invocation. +// These values are merged with runtime inputs, with runtime values taking precedence. 
+// **Use cases:** +// - Setting default configuration values +// - Providing constant context or settings +// - Injecting workflow-level parameters +func WithWith(with *core.Input) Option { + return func(cfg *agent.Config) { + cfg.With = with + } +} + +// WithEnv sets the Env field +// +// Environment variables available during agent execution. +// Used for configuration, secrets, and runtime settings. +// **Example:** +// ```yaml +// env: +// API_KEY: "{{.env.OPENAI_API_KEY}}" +// DEBUG_MODE: "true" +// ``` +func WithEnv(env *core.EnvMap) Option { + return func(cfg *agent.Config) { + cfg.Env = env + } +} + +// WithKnowledge sets the Knowledge field +// +// Knowledge declares knowledge bindings scoped to this agent. +func WithKnowledge(knowledge []core.KnowledgeBinding) Option { + return func(cfg *agent.Config) { + cfg.Knowledge = knowledge + } +} diff --git a/sdk/agentaction/README.md b/sdk/agentaction/README.md new file mode 100644 index 00000000..c884b994 --- /dev/null +++ b/sdk/agentaction/README.md @@ -0,0 +1,227 @@ +# Agent Action SDK + +The `agentaction` package provides a functional options pattern for creating agent action configurations in Compozy. Actions define structured, type-safe interfaces for specific agent capabilities. 
+ +## Features + +- **Auto-generated functional options** from engine structs +- **Type-safe configuration** with compile-time validation +- **Centralized validation** in constructor +- **Deep copy safety** for configuration isolation +- **Zero boilerplate** - just run `go generate` when engine changes + +## Installation + +```go +import "github.com/compozy/compozy/sdk/v2/agentaction" +``` + +## Quick Start + +### Basic Action + +```go +action, err := agentaction.New(ctx, "review-code", + agentaction.WithPrompt("Analyze code for quality and improvements"), +) +``` + +### Action with Tools + +```go +action, err := agentaction.New(ctx, "analyze-code", + agentaction.WithPrompt("Perform comprehensive code analysis"), + agentaction.WithTools([]tool.Config{ + {ID: "file-reader"}, + {ID: "static-analyzer"}, + }), + agentaction.WithTimeout("5m"), +) +``` + +### Action with Input/Output Schemas + +```go +inputSchema := &schema.Schema{ + "type": "object", + "properties": map[string]any{ + "code": map[string]any{"type": "string"}, + "language": map[string]any{"type": "string"}, + }, + "required": []string{"code", "language"}, +} + +outputSchema := &schema.Schema{ + "type": "object", + "properties": map[string]any{ + "issues": map[string]any{ + "type": "array", + "items": map[string]any{"type": "object"}, + }, + }, +} + +action, err := agentaction.New(ctx, "validate-code", + agentaction.WithPrompt("Validate code against best practices"), + agentaction.WithInputSchema(inputSchema), + agentaction.WithOutputSchema(outputSchema), +) +``` + +### Action with Retry Policy + +```go +action, err := agentaction.New(ctx, "api-call", + agentaction.WithPrompt("Call external API"), + agentaction.WithRetryPolicy(&core.RetryPolicyConfig{ + MaximumAttempts: 5, + InitialInterval: "1s", + BackoffCoefficient: 2.0, + }), + agentaction.WithTimeout("30s"), +) +``` + +### Action with Transitions + +```go +action, err := agentaction.New(ctx, "process-step", + agentaction.WithPrompt("Process data 
step"), + agentaction.WithOnSuccess(&core.SuccessTransition{ + Next: strPtr("next-step"), + }), + agentaction.WithOnError(&core.ErrorTransition{ + Next: strPtr("error-handler"), + }), +) +``` + +## Available Options + +All options are auto-generated from `engine/agent/action_config.go`: + +### Core Configuration + +- `WithID(id string)` - Action identifier +- `WithPrompt(prompt string)` - Action instructions (required) + +### Schema Definition + +- `WithInputSchema(schema *schema.Schema)` - Input validation schema +- `WithOutputSchema(schema *schema.Schema)` - Output validation schema + +### Execution Control + +- `WithTimeout(timeout string)` - Maximum execution duration (e.g., "30s", "5m") +- `WithRetryPolicy(policy *core.RetryPolicyConfig)` - Automatic retry behavior +- `WithTools(tools []tool.Config)` - Action-scoped tools + +### Workflow Integration + +- `WithOnSuccess(transition *core.SuccessTransition)` - Success handler +- `WithOnError(transition *core.ErrorTransition)` - Error handler +- `WithWith(input *core.Input)` - Default input parameters +- `WithAttachments(attachments attachment.Attachments)` - Action-level attachments + +## Validation Rules + +The constructor performs centralized validation: + +1. **Context Required** - Must provide non-nil context +2. **ID Required** - Must be non-empty after trimming +3. **Prompt Required** - Must be non-empty after trimming +4. 
**Automatic Trimming** - ID and prompt are automatically trimmed + +## Error Handling + +The constructor returns `*sdkerrors.BuildError` containing all validation errors: + +```go +action, err := agentaction.New(ctx, "") +if err != nil { + var buildErr *sdkerrors.BuildError + if errors.As(err, &buildErr) { + for _, e := range buildErr.Errors { + fmt.Printf("Validation error: %v\n", e) + } + } +} +``` + +## Code Generation + +When `engine/agent/action_config.go` changes: + +```bash +cd sdk/agentaction +go generate +``` + +This automatically regenerates `options_generated.go` with all options. + +## Comparison with Old Builder Pattern + +### Before (Builder Pattern) + +```go +action, err := agent.NewAction("review-code"). + WithPrompt("Analyze code"). + AddTool("file-reader"). + WithTimeout(30*time.Second). + Build(ctx) +``` + +### After (Functional Options) + +```go +action, err := agentaction.New(ctx, "review-code", + agentaction.WithPrompt("Analyze code"), + agentaction.WithTools([]tool.Config{{ID: "file-reader"}}), + agentaction.WithTimeout("30s"), +) +``` + +### Key Differences + +1. **Context First** - `ctx` is now the first parameter +2. **No Build()** - Constructor validates immediately +3. **Collections as Slices** - `WithTools([]tool.Config)` instead of `AddTool()` +4. **Timeout as String** - `"30s"` instead of `30*time.Second` +5. **Type Safety** - All options are strongly typed + +## Best Practices + +1. **Always use context from test** - `ctx := t.Context()` in tests +2. **Validate inputs** - Constructor handles validation, don't skip it +3. **Use schemas for type safety** - Define input/output schemas for structured data +4. **Set timeouts** - Always specify reasonable timeout values +5. 
**Configure retries** - Use retry policy for unreliable operations + +## Examples + +See `constructor_test.go` for comprehensive usage examples covering: + +- Minimal configuration +- Full configuration with all options +- Error handling and validation +- Schema definition patterns +- Retry and timeout configuration +- Transition workflows + +## Architecture + +- **Package:** `agentaction` (separate from `agent` to avoid option type conflicts) +- **Generated Code:** `options_generated.go` (auto-generated, do not edit) +- **Constructor:** `constructor.go` (manual validation logic) +- **Tests:** `constructor_test.go` (comprehensive test coverage) +- **Generator:** `../internal/codegen/` (shared code generation infrastructure) + +## Contributing + +When adding new fields to `engine/agent/action_config.go`: + +1. Add the field with proper documentation +2. Run `go generate` in `sdk/agentaction/` +3. Update `constructor.go` if validation is needed +4. Add tests in `constructor_test.go` +5. Run `make lint && make test` diff --git a/sdk/agentaction/constructor.go b/sdk/agentaction/constructor.go new file mode 100644 index 00000000..027a4419 --- /dev/null +++ b/sdk/agentaction/constructor.go @@ -0,0 +1,55 @@ +package agentaction + +import ( + "context" + "fmt" + "strings" + + engineagent "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +// New creates an action configuration using functional options. +// +// The action ID must be provided as it uniquely identifies the action within +// an agent's scope. All other configuration is applied through options. 
+// +// **Example:** +// +// action, err := agentaction.New(ctx, "review-code", +// agentaction.WithPrompt("Analyze code for quality and improvements"), +// agentaction.WithTools([]enginetool.Config{{ID: "file-reader"}}), +// agentaction.WithTimeout("30s"), +// ) +func New(ctx context.Context, id string, opts ...Option) (*engineagent.ActionConfig, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating action configuration", "action", id) + cfg := &engineagent.ActionConfig{ + ID: strings.TrimSpace(id), + } + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0) + if err := validate.ID(ctx, cfg.ID); err != nil { + collected = append(collected, fmt.Errorf("action id is invalid: %w", err)) + } + cfg.Prompt = strings.TrimSpace(cfg.Prompt) + if err := validate.NonEmpty(ctx, "prompt", cfg.Prompt); err != nil { + collected = append(collected, err) + } + if len(collected) > 0 { + return nil, &sdkerrors.BuildError{Errors: collected} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone action config: %w", err) + } + return cloned, nil +} diff --git a/sdk/agentaction/constructor_test.go b/sdk/agentaction/constructor_test.go new file mode 100644 index 00000000..515140a7 --- /dev/null +++ b/sdk/agentaction/constructor_test.go @@ -0,0 +1,203 @@ +package agentaction + +import ( + "context" + "testing" + "time" + + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/engine/schema" + "github.com/compozy/compozy/engine/tool" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + t.Run("Should create action with minimal configuration", func(t *testing.T) { + ctx := t.Context() + action, err := New(ctx, "test-action", + WithPrompt("Test prompt"), + ) + require.NoError(t, err) + require.NotNil(t, 
action) + assert.Equal(t, "test-action", action.ID) + assert.Equal(t, "Test prompt", action.Prompt) + }) + t.Run("Should trim whitespace from ID and prompt", func(t *testing.T) { + ctx := t.Context() + action, err := New(ctx, " test-action ", + WithPrompt(" Test prompt "), + ) + require.NoError(t, err) + require.NotNil(t, action) + assert.Equal(t, "test-action", action.ID) + assert.Equal(t, "Test prompt", action.Prompt) + }) + t.Run("Should fail when context is nil", func(t *testing.T) { + var nilCtx context.Context + _, err := New(nilCtx, "test-action", + WithPrompt("Test"), + ) + require.Error(t, err) + assert.EqualError(t, err, "context is required") + }) + t.Run("Should fail when ID is empty", func(t *testing.T) { + ctx := t.Context() + _, err := New(ctx, "", + WithPrompt("Test"), + ) + require.Error(t, err) + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should fail when ID is whitespace only", func(t *testing.T) { + ctx := t.Context() + _, err := New(ctx, " ", + WithPrompt("Test"), + ) + require.Error(t, err) + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should fail when prompt is empty", func(t *testing.T) { + ctx := t.Context() + _, err := New(ctx, "test-action") + require.Error(t, err) + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should fail when prompt is whitespace only", func(t *testing.T) { + ctx := t.Context() + _, err := New(ctx, "test-action", + WithPrompt(" "), + ) + require.Error(t, err) + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should create action with all options", func(t *testing.T) { + ctx := t.Context() + inputSchema := &schema.Schema{"type": "object"} + outputSchema := &schema.Schema{"type": "object"} + withInput := &core.Input{"key": "value"} + tools := []tool.Config{{ID: "tool1"}} + onSuccess := &core.SuccessTransition{Next: strPtr("next-task")} + onError := 
&core.ErrorTransition{Next: strPtr("error-task")} + retryPolicy := &core.RetryPolicyConfig{MaximumAttempts: 3, InitialInterval: "1s"} + action, err := New(ctx, "full-action", + WithPrompt("Complex action"), + WithInputSchema(inputSchema), + WithOutputSchema(outputSchema), + WithWith(withInput), + WithTools(tools), + WithOnSuccess(onSuccess), + WithOnError(onError), + WithRetryPolicy(retryPolicy), + WithTimeout("30s"), + ) + require.NoError(t, err) + require.NotNil(t, action) + assert.NotNil(t, action.InputSchema) + assert.NotNil(t, action.OutputSchema) + assert.NotNil(t, action.With) + assert.Len(t, action.Tools, 1) + assert.NotNil(t, action.OnSuccess) + assert.NotNil(t, action.OnError) + assert.NotNil(t, action.RetryPolicy) + assert.Equal(t, "30s", action.Timeout) + }) + t.Run("Should create deep copy", func(t *testing.T) { + ctx := t.Context() + originalTools := []tool.Config{{ID: "tool1"}} + action, err := New(ctx, "copy-test", + WithPrompt("Test prompt"), + WithTools(originalTools), + ) + require.NoError(t, err) + require.NotNil(t, action) + originalTools[0].ID = "modified" + assert.Equal(t, "tool1", action.Tools[0].ID) + }) + t.Run("Should handle multiple error accumulation", func(t *testing.T) { + ctx := t.Context() + _, err := New(ctx, "") + require.Error(t, err) + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + assert.GreaterOrEqual(t, len(buildErr.Errors), 2) + }) + t.Run("Should work with helper functions for transitions", func(t *testing.T) { + ctx := t.Context() + action, err := New(ctx, "transition-test", + WithPrompt("Test prompt"), + WithOnSuccess(&core.SuccessTransition{Next: strPtr("success-task")}), + WithOnError(&core.ErrorTransition{Next: strPtr("error-task")}), + ) + require.NoError(t, err) + require.NotNil(t, action) + require.NotNil(t, action.OnSuccess) + require.NotNil(t, action.OnError) + assert.Equal(t, "success-task", *action.OnSuccess.Next) + assert.Equal(t, "error-task", *action.OnError.Next) + }) + 
t.Run("Should work with retry policy", func(t *testing.T) { + ctx := t.Context() + action, err := New(ctx, "retry-test", + WithPrompt("Test prompt"), + WithRetryPolicy(&core.RetryPolicyConfig{ + MaximumAttempts: 5, + InitialInterval: "2s", + BackoffCoefficient: 2.0, + }), + ) + require.NoError(t, err) + require.NotNil(t, action) + require.NotNil(t, action.RetryPolicy) + assert.EqualValues(t, 5, action.RetryPolicy.MaximumAttempts) + assert.Equal(t, "2s", action.RetryPolicy.InitialInterval) + }) + t.Run("Should work with timeout", func(t *testing.T) { + ctx := t.Context() + action, err := New(ctx, "timeout-test", + WithPrompt("Test prompt"), + WithTimeout("1m30s"), + ) + require.NoError(t, err) + require.NotNil(t, action) + assert.Equal(t, "1m30s", action.Timeout) + duration, parseErr := time.ParseDuration(action.Timeout) + require.NoError(t, parseErr) + assert.Equal(t, 90*time.Second, duration) + }) + t.Run("Should work with input and output schemas", func(t *testing.T) { + ctx := t.Context() + inputSchema := &schema.Schema{ + "type": "object", + "properties": map[string]any{ + "code": map[string]any{"type": "string"}, + }, + } + outputSchema := &schema.Schema{ + "type": "object", + "properties": map[string]any{ + "quality": map[string]any{"type": "string"}, + }, + } + action, err := New(ctx, "schema-test", + WithPrompt("Test prompt"), + WithInputSchema(inputSchema), + WithOutputSchema(outputSchema), + ) + require.NoError(t, err) + require.NotNil(t, action) + require.NotNil(t, action.InputSchema) + require.NotNil(t, action.OutputSchema) + assert.Equal(t, "object", (*action.InputSchema)["type"]) + assert.Equal(t, "object", (*action.OutputSchema)["type"]) + }) +} + +func strPtr(s string) *string { + return &s +} diff --git a/sdk/agentaction/generate.go b/sdk/agentaction/generate.go new file mode 100644 index 00000000..966c5d2a --- /dev/null +++ b/sdk/agentaction/generate.go @@ -0,0 +1,3 @@ +package agentaction + +//go:generate go run 
../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/agent/action_config.go -struct ActionConfig -output options_generated.go -package agentaction diff --git a/sdk/agentaction/options_generated.go b/sdk/agentaction/options_generated.go new file mode 100644 index 00000000..9ffe2a51 --- /dev/null +++ b/sdk/agentaction/options_generated.go @@ -0,0 +1,131 @@ +// Code generated by optionsgen. DO NOT EDIT. + +package agentaction + +import ( + agent "github.com/compozy/compozy/engine/agent" + attachment "github.com/compozy/compozy/engine/attachment" + core "github.com/compozy/compozy/engine/core" + schema "github.com/compozy/compozy/engine/schema" + tool "github.com/compozy/compozy/engine/tool" +) + +type Option func(*agent.ActionConfig) + +// WithID sets the ID field +// +// Unique identifier for the action within the agent's scope. +// Used to invoke specific actions programmatically. +// - **Examples:** `"analyze-code"`, `"generate-summary"`, `"validate-data"` +func WithID(id string) Option { + return func(cfg *agent.ActionConfig) { + cfg.ID = id + } +} + +// WithPrompt sets the Prompt field +// +// Detailed instructions for the agent when executing this action. +// Should clearly define the expected behavior, output format, and any constraints. +// **Best practices:** +// - Be specific about the desired outcome +// - Include examples if complex formatting is required +// - Define clear success criteria +// - Specify any limitations or boundaries +func WithPrompt(prompt string) Option { + return func(cfg *agent.ActionConfig) { + cfg.Prompt = prompt + } +} + +// WithInputSchema sets the InputSchema field +// +// JSON Schema defining the expected input parameters for this action. +// Enables validation and type checking of inputs before execution. +// If `nil`, the action accepts any input format without validation. 
+// **Schema format:** JSON Schema Draft 7 +func WithInputSchema(inputSchema *schema.Schema) Option { + return func(cfg *agent.ActionConfig) { + cfg.InputSchema = inputSchema + } +} + +// WithOutputSchema sets the OutputSchema field +// +// JSON Schema defining the expected output format from this action. +// Used for validating agent responses and ensuring consistent output structure. +// If `nil`, no output validation is performed. +// **Schema format:** JSON Schema Draft 7 +func WithOutputSchema(outputSchema *schema.Schema) Option { + return func(cfg *agent.ActionConfig) { + cfg.OutputSchema = outputSchema + } +} + +// WithWith sets the With field +// +// Default parameters to provide to the action. +// These are merged with runtime parameters, with runtime values taking precedence. +// **Use cases:** +// - Setting default configuration options +// - Providing constant context values +// - Pre-filling common parameters +func WithWith(with *core.Input) Option { + return func(cfg *agent.ActionConfig) { + cfg.With = with + } +} + +// WithAttachments sets the Attachments field +// +// Attachments at action scope +func WithAttachments(attachments attachment.Attachments) Option { + return func(cfg *agent.ActionConfig) { + cfg.Attachments = attachments + } +} + +// WithTools sets the Tools field +// +// Tools scoped to this action; override agent-level tool availability when provided. +func WithTools(tools []tool.Config) Option { + return func(cfg *agent.ActionConfig) { + cfg.Tools = tools + } +} + +// WithOnSuccess sets the OnSuccess field +// +// OnSuccess defines the transition executed when the action completes successfully. +func WithOnSuccess(onSuccess *core.SuccessTransition) Option { + return func(cfg *agent.ActionConfig) { + cfg.OnSuccess = onSuccess + } +} + +// WithOnError sets the OnError field +// +// OnError defines the transition executed when the action encounters an error. 
+func WithOnError(onError *core.ErrorTransition) Option { + return func(cfg *agent.ActionConfig) { + cfg.OnError = onError + } +} + +// WithRetryPolicy sets the RetryPolicy field +// +// RetryPolicy configures automatic retries for the action when execution fails. +func WithRetryPolicy(retryPolicy *core.RetryPolicyConfig) Option { + return func(cfg *agent.ActionConfig) { + cfg.RetryPolicy = retryPolicy + } +} + +// WithTimeout sets the Timeout field +// +// Timeout specifies the maximum duration allowed for the action execution. +func WithTimeout(timeout string) Option { + return func(cfg *agent.ActionConfig) { + cfg.Timeout = timeout + } +} diff --git a/sdk/client/agent.go b/sdk/client/agent.go new file mode 100644 index 00000000..088de1a3 --- /dev/null +++ b/sdk/client/agent.go @@ -0,0 +1,74 @@ +package client + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/compozy/compozy/engine/infra/server/routes" +) + +// ExecuteAgent triggers an asynchronous agent execution. +func (c *Client) ExecuteAgent( + ctx context.Context, + agentID string, + req *AgentExecuteRequest, +) (*AgentExecuteResponse, error) { + id := strings.TrimSpace(agentID) + if id == "" { + return nil, fmt.Errorf("agent id is required") + } + path := fmt.Sprintf("%s/%s/executions", routes.Agents(), url.PathEscape(id)) + resp, err := c.postJSON(ctx, path, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + result, err := decodeEnvelope[AgentExecuteResponse](resp, http.StatusAccepted) + if err != nil { + return nil, err + } + logExecution(ctx, "agent", id, result.ExecID) + return &result, nil +} + +// ExecuteAgentSync executes an agent synchronously. 
+func (c *Client) ExecuteAgentSync( + ctx context.Context, + agentID string, + req *AgentExecuteRequest, +) (*AgentSyncResponse, error) { + id := strings.TrimSpace(agentID) + if id == "" { + return nil, fmt.Errorf("agent id is required") + } + path := fmt.Sprintf("%s/%s/executions/sync", routes.Agents(), url.PathEscape(id)) + resp, err := c.postJSON(ctx, path, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + result, err := decodeEnvelope[AgentSyncResponse](resp, http.StatusOK) + if err != nil { + return nil, err + } + logExecution(ctx, "agent_sync", id, result.ExecID) + return &result, nil +} + +// ExecuteAgentStream starts an agent execution and streams results. +func (c *Client) ExecuteAgentStream( + ctx context.Context, + agentID string, + req *AgentExecuteRequest, + opts *StreamOptions, +) (*StreamSession, error) { + handle, err := c.ExecuteAgent(ctx, agentID, req) + if err != nil { + return nil, err + } + streamPath := fmt.Sprintf("%s/agents/%s/stream", routes.Executions(), url.PathEscape(handle.ExecID)) + return c.openStream(ctx, streamPath, nil, opts, handle.ExecID, handle.ExecURL) +} diff --git a/sdk/client/client.go b/sdk/client/client.go new file mode 100644 index 00000000..b8538220 --- /dev/null +++ b/sdk/client/client.go @@ -0,0 +1,92 @@ +package client + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/compozy/compozy/pkg/logger" +) + +const defaultUserAgent = "compozy-sdk-go/v2" + +// Client provides typed access to Compozy execution endpoints. +type Client struct { + baseURL *url.URL + httpClient *http.Client + apiKey string + userAgent string +} + +// Option configures client construction. +type Option func(*clientConfig) + +type clientConfig struct { + apiKey string + httpClient *http.Client + userAgent string +} + +// WithAPIKey configures the Authorization bearer token used for API calls. 
+func WithAPIKey(key string) Option { + return func(cfg *clientConfig) { + cfg.apiKey = strings.TrimSpace(key) + } +} + +// WithHTTPClient injects a custom HTTP client. Callers are responsible for TLS and timeout settings. +func WithHTTPClient(httpClient *http.Client) Option { + return func(cfg *clientConfig) { + if httpClient != nil { + cfg.httpClient = httpClient + } + } +} + +// WithUserAgent overrides the default User-Agent header. +func WithUserAgent(agent string) Option { + return func(cfg *clientConfig) { + cfg.userAgent = strings.TrimSpace(agent) + } +} + +// New constructs a Client targeting the provided baseURL (e.g., https://api.compozy.dev). +func New(ctx context.Context, baseURL string, opts ...Option) (*Client, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + if strings.TrimSpace(baseURL) == "" { + return nil, fmt.Errorf("base url is required") + } + parsed, err := url.Parse(baseURL) + if err != nil { + return nil, fmt.Errorf("invalid base url: %w", err) + } + if parsed.Scheme == "" { + return nil, fmt.Errorf("base url must include scheme") + } + if parsed.Host == "" { + return nil, fmt.Errorf("base url must include host") + } + cfg := clientConfig{ + httpClient: http.DefaultClient, + userAgent: defaultUserAgent, + } + for _, opt := range opts { + if opt != nil { + opt(&cfg) + } + } + log := logger.FromContext(ctx) + if log != nil { + log.Debug("initializing sdk client", "base_url", parsed.String()) + } + return &Client{ + baseURL: parsed, + httpClient: cfg.httpClient, + apiKey: cfg.apiKey, + userAgent: cfg.userAgent, + }, nil +} diff --git a/sdk/client/client_test.go b/sdk/client/client_test.go new file mode 100644 index 00000000..fb686c05 --- /dev/null +++ b/sdk/client/client_test.go @@ -0,0 +1,187 @@ +package client_test + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + 
"github.com/compozy/compozy/pkg/logger" + "github.com/compozy/compozy/sdk/v2/client" +) + +func TestExecuteWorkflow(t *testing.T) { + t.Run("Should execute workflow asynchronously and return execution handle", func(t *testing.T) { + t.Parallel() + var called atomic.Int32 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case r.Method == http.MethodPost && r.URL.Path == "/api/v0/workflows/sample/executions": + called.Add(1) + require.Equal(t, "Bearer test-key", r.Header.Get("Authorization")) + body := make(map[string]any) + require.NoError(t, json.NewDecoder(r.Body).Decode(&body)) + require.Equal(t, "subtask", body["task_id"]) + require.Contains(t, body, "input") + require.Nil(t, body["input"]) + writeJSON(t, w, http.StatusAccepted, map[string]any{ + "exec_id": "exec-123", + "exec_url": "/api/v0/executions/exec-123", + "workflow_id": "sample", + }) + default: + http.NotFound(w, r) + } + })) + t.Cleanup(server.Close) + ctx := withTestLogger(t.Context()) + cl, err := client.New(ctx, server.URL, client.WithAPIKey("test-key")) + require.NoError(t, err) + resp, err := cl.ExecuteWorkflow(ctx, "sample", &client.WorkflowExecuteRequest{TaskID: "subtask"}) + require.NoError(t, err) + require.Equal(t, "exec-123", resp.ExecID) + require.Equal(t, "/api/v0/executions/exec-123", resp.ExecURL) + require.Equal(t, int32(1), called.Load()) + }) +} + +func TestExecuteTaskSync(t *testing.T) { + t.Run("Should execute task synchronously and return output", func(t *testing.T) { + t.Parallel() + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost || r.URL.Path != "/api/v0/tasks/build/executions/sync" { + http.NotFound(w, r) + return + } + writeJSON(t, w, http.StatusOK, map[string]any{ + "exec_id": "task-exec", + "output": map[string]any{"artifact": "binary"}, + }) + })) + t.Cleanup(server.Close) + ctx := withTestLogger(t.Context()) + cl, err := client.New(ctx, 
server.URL) + require.NoError(t, err) + resp, err := cl.ExecuteTaskSync(ctx, "build", &client.TaskExecuteRequest{}) + require.NoError(t, err) + require.Equal(t, "task-exec", resp.ExecID) + require.Equal(t, "binary", (*resp.Output)["artifact"]) + }) +} + +func TestExecuteWorkflowStream(t *testing.T) { + t.Run("Should stream workflow events until completion", func(t *testing.T) { + t.Parallel() + execBase := "/api/v0/executions/exec-456" + execPath := execBase + "/stream" + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case r.Method == http.MethodPost && r.URL.Path == "/api/v0/workflows/streamed/executions": + writeJSON(t, w, http.StatusAccepted, map[string]any{ + "exec_id": "exec-456", + "exec_url": execBase, + "workflow_id": "streamed", + }) + case r.Method == http.MethodGet && r.URL.Path == execPath: + w.Header().Set("Content-Type", "text/event-stream") + flusher, ok := w.(http.Flusher) + require.True(t, ok) + fmt.Fprintf(w, "id: 1\nevent: workflow_status\ndata: {\"status\":\"running\"}\n\n") + flusher.Flush() + time.Sleep(10 * time.Millisecond) + fmt.Fprintf(w, "id: 2\nevent: complete\ndata: {\"status\":\"success\"}\n\n") + flusher.Flush() + default: + http.NotFound(w, r) + } + })) + t.Cleanup(server.Close) + ctx := withTestLogger(t.Context()) + cl, err := client.New(ctx, server.URL) + require.NoError(t, err) + session, err := cl.ExecuteWorkflowStream(ctx, "streamed", &client.WorkflowExecuteRequest{}, nil) + require.NoError(t, err) + defer func() { + require.NoError(t, session.Close()) + }() + events := collectEvents(t, session) + require.Len(t, events, 2) + require.Equal(t, int64(1), events[0].ID) + require.Equal(t, "workflow_status", events[0].Type) + require.JSONEq(t, `{"status":"running"}`, string(events[0].Data)) + require.Equal(t, "complete", events[1].Type) + errVal := <-session.Errors() + require.True(t, errors.Is(errVal, io.EOF) || errVal == nil) + }) +} + +func TestExecuteAgentError(t 
*testing.T) { + t.Run("Should wrap API errors returned by the server", func(t *testing.T) { + t.Parallel() + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + writeJSON(t, w, http.StatusNotFound, map[string]any{ + "error": map[string]any{ + "code": "AGENT_NOT_FOUND", + "message": "agent missing", + }, + }) + })) + t.Cleanup(server.Close) + ctx := withTestLogger(t.Context()) + cl, err := client.New(ctx, server.URL) + require.NoError(t, err) + _, execErr := cl.ExecuteAgent(ctx, "missing", &client.AgentExecuteRequest{Prompt: "hi"}) + var apiErr *client.APIError + require.ErrorAs(t, execErr, &apiErr) + require.Equal(t, "AGENT_NOT_FOUND", apiErr.Code) + require.Equal(t, http.StatusNotFound, apiErr.Status) + }) +} + +func collectEvents(t *testing.T, session *client.StreamSession) []client.StreamEvent { + t.Helper() + result := make([]client.StreamEvent, 0, 2) + timeout := time.NewTimer(2 * time.Second) + defer timeout.Stop() + for { + select { + case evt, ok := <-session.Events(): + if !ok { + return result + } + result = append(result, evt) + case <-timeout.C: + t.Fatal("timed out waiting for stream events") + } + } +} + +func writeJSON(t *testing.T, w http.ResponseWriter, status int, data map[string]any) { + t.Helper() + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + payload := map[string]any{ + "status": status, + "message": http.StatusText(status), + } + if data != nil { + if raw, ok := data["error"]; ok { + payload["error"] = raw + } else { + payload["data"] = data + } + } + require.NoError(t, json.NewEncoder(w).Encode(payload)) +} + +func withTestLogger(parent context.Context) context.Context { + return logger.ContextWithLogger(parent, logger.NewForTests()) +} diff --git a/sdk/client/doc.go b/sdk/client/doc.go new file mode 100644 index 00000000..4922dbe2 --- /dev/null +++ b/sdk/client/doc.go @@ -0,0 +1,3 @@ +// Package client provides the high-level Compozy SDK v2 execution client for +// 
interacting with workflow, task, and agent runtime endpoints. +package client diff --git a/sdk/client/execution_helpers.go b/sdk/client/execution_helpers.go new file mode 100644 index 00000000..c0707fb9 --- /dev/null +++ b/sdk/client/execution_helpers.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "fmt" + "net/url" + "strings" +) + +type executionRequestConfig[T any] struct { + ResourceID string + ResourceLabel string + RouteBase string + PathSuffix string + Body any + ExpectedCode int + OnSuccess func(context.Context, string, *T) +} + +func executeRequest[T any](ctx context.Context, client *Client, cfg executionRequestConfig[T]) (*T, error) { + id := strings.TrimSpace(cfg.ResourceID) + if id == "" { + return nil, fmt.Errorf("%s id is required", cfg.ResourceLabel) + } + path := fmt.Sprintf("%s/%s%s", cfg.RouteBase, url.PathEscape(id), cfg.PathSuffix) + resp, err := client.postJSON(ctx, path, cfg.Body) + if err != nil { + return nil, err + } + defer resp.Body.Close() + result, err := decodeEnvelope[T](resp, cfg.ExpectedCode) + if err != nil { + return nil, err + } + if cfg.OnSuccess != nil { + cfg.OnSuccess(ctx, id, &result) + } + return &result, nil +} diff --git a/sdk/client/http.go b/sdk/client/http.go new file mode 100644 index 00000000..672b3279 --- /dev/null +++ b/sdk/client/http.go @@ -0,0 +1,152 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/compozy/compozy/engine/infra/server/router" +) + +type apiEnvelope struct { + Status int `json:"status"` + Message string `json:"message"` + Data json.RawMessage `json:"data"` + Error *router.ErrorInfo `json:"error"` +} + +// APIError represents an error returned by the Compozy API. +type APIError struct { + Status int + Code string + Message string + Details string +} + +// Error implements the error interface. 
+func (e *APIError) Error() string { + if e == nil { + return "" + } + if e.Code != "" && e.Message != "" { + return fmt.Sprintf("%s: %s (status %d)", e.Code, e.Message, e.Status) + } + if e.Message != "" { + return fmt.Sprintf("%s (status %d)", e.Message, e.Status) + } + return fmt.Sprintf("request failed with status %d", e.Status) +} + +func (c *Client) postJSON(ctx context.Context, path string, payload any) (*http.Response, error) { + return c.do(ctx, http.MethodPost, path, nil, payload) +} + +func (c *Client) do( + ctx context.Context, + method string, + path string, + query url.Values, + payload any, +) (*http.Response, error) { + if c == nil || c.baseURL == nil { + return nil, fmt.Errorf("client is not initialized") + } + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + fullURL := c.resolve(path, query) + var body io.Reader + headers := make(http.Header, 4) + headers.Set("Accept", "application/json") + if payload != nil { + data, err := json.Marshal(payload) + if err != nil { + return nil, fmt.Errorf("encode payload: %w", err) + } + body = bytes.NewReader(data) + headers.Set("Content-Type", "application/json") + } + req, err := http.NewRequestWithContext(ctx, method, fullURL, body) + if err != nil { + return nil, fmt.Errorf("create request: %w", err) + } + req.Header = headers + if c.apiKey != "" { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.apiKey)) + } + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + return resp, nil +} + +func (c *Client) resolve(path string, query url.Values) string { + path = strings.TrimSpace(path) + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + ref := &url.URL{Path: path} + if len(query) > 0 { + ref.RawQuery = query.Encode() + } + return c.baseURL.ResolveReference(ref).String() +} + +func decodeEnvelope[T any](resp *http.Response, allowed ...int) (T, 
error) { + var zero T + if resp == nil { + return zero, fmt.Errorf("nil response") + } + decoder := json.NewDecoder(resp.Body) + var envelope apiEnvelope + if err := decoder.Decode(&envelope); err != nil { + return zero, fmt.Errorf("decode response: %w", err) + } + if !statusAllowed(resp.StatusCode, allowed) { + return zero, toAPIError(resp.StatusCode, envelope.Error, envelope.Message) + } + if envelope.Error != nil { + return zero, toAPIError(resp.StatusCode, envelope.Error, envelope.Message) + } + if len(envelope.Data) == 0 { + return zero, nil + } + var out T + if err := json.Unmarshal(envelope.Data, &out); err != nil { + return zero, fmt.Errorf("decode payload: %w", err) + } + return out, nil +} + +func statusAllowed(status int, allowed []int) bool { + if len(allowed) == 0 { + return status >= 200 && status < 300 + } + for _, code := range allowed { + if status == code { + return true + } + } + return false +} + +func toAPIError(status int, info *router.ErrorInfo, message string) *APIError { + if info == nil { + return &APIError{Status: status, Message: message} + } + return &APIError{ + Status: status, + Code: info.Code, + Message: info.Message, + Details: info.Details, + } +} diff --git a/sdk/client/stream.go b/sdk/client/stream.go new file mode 100644 index 00000000..34a8e275 --- /dev/null +++ b/sdk/client/stream.go @@ -0,0 +1,170 @@ +package client + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "sync" + + "github.com/compozy/compozy/pkg/logger" + + "github.com/compozy/compozy/sdk/v2/internal/sse" +) + +func (c *Client) openStream( + ctx context.Context, + path string, + query url.Values, + opts *StreamOptions, + execID string, + execURL string, +) (*StreamSession, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + resp, err := c.executeStreamRequest(ctx, path, query, opts) + if err != nil { + return nil, err + } + streamCtx, cancel := context.WithCancel(ctx) + decoder := 
sse.NewDecoder(resp.Body) + events := make(chan StreamEvent) + errorsCh := make(chan error, 1) + var once sync.Once + closeFn := func() error { + once.Do(cancel) + return closeBody(resp.Body) + } + go c.consumeStream(streamCtx, decoder, events, errorsCh, closeFn) + return newStreamSession(execID, execURL, events, errorsCh, closeFn), nil +} + +func (c *Client) executeStreamRequest( + ctx context.Context, + path string, + query url.Values, + opts *StreamOptions, +) (*http.Response, error) { + req, err := c.buildStreamRequest(ctx, path, query, opts) + if err != nil { + return nil, err + } + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("stream request failed: %w", err) + } + if resp.StatusCode != http.StatusOK { + defer resp.Body.Close() + if _, decodeErr := decodeEnvelope[struct{}](resp, http.StatusOK); decodeErr != nil { + return nil, decodeErr + } + return nil, &APIError{Status: resp.StatusCode, Message: http.StatusText(resp.StatusCode)} + } + if ct := resp.Header.Get("Content-Type"); !strings.Contains(ct, "text/event-stream") { + resp.Body.Close() + return nil, fmt.Errorf("unexpected content-type: %s", ct) + } + return resp, nil +} + +func (c *Client) buildStreamRequest( + ctx context.Context, + path string, + query url.Values, + opts *StreamOptions, +) (*http.Request, error) { + requestURL := c.resolve(path, applyStreamQuery(query, opts)) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, requestURL, http.NoBody) + if err != nil { + return nil, fmt.Errorf("create stream request: %w", err) + } + req.Header.Set("Accept", "text/event-stream") + req.Header.Set("Cache-Control", "no-cache") + req.Header.Set("Connection", "keep-alive") + if opts != nil && opts.LastEventID != nil { + req.Header.Set("Last-Event-ID", fmt.Sprintf("%d", *opts.LastEventID)) + } + if c.apiKey != "" { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.apiKey)) + } + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + 
return req, nil +} + +func (c *Client) consumeStream( + ctx context.Context, + decoder *sse.Decoder, + out chan<- StreamEvent, + errs chan<- error, + closeFn func() error, +) { + log := logger.FromContext(ctx) + defer func() { + if closeErr := closeFn(); closeErr != nil && log != nil { + log.Warn("stream close error", "error", closeErr) + } + close(out) + close(errs) + }() + for { + event, err := decoder.Next(ctx) + if err != nil { + if errors.Is(err, context.Canceled) { + errs <- err + return + } + if errors.Is(err, io.EOF) { + errs <- err + return + } + if log != nil { + log.Warn("stream read error", "error", err) + } + errs <- err + return + } + data := make([]byte, len(event.Data)) + copy(data, event.Data) + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + case out <- StreamEvent{ID: event.ID, Type: event.Type, Data: data}: + } + } +} + +func applyStreamQuery(values url.Values, opts *StreamOptions) url.Values { + clone := url.Values{} + for k, v := range values { + clone[k] = append([]string(nil), v...) + } + if opts == nil { + return clone + } + if opts.PollInterval > 0 { + millis := opts.PollInterval.Milliseconds() + if millis > 0 { + clone.Set("poll_ms", fmt.Sprintf("%d", millis)) + } + } + if len(opts.Events) > 0 { + trimmed := make([]string, 0, len(opts.Events)) + for _, evt := range opts.Events { + token := strings.TrimSpace(evt) + if token != "" { + trimmed = append(trimmed, token) + } + } + if len(trimmed) > 0 { + clone.Set("events", strings.Join(trimmed, ",")) + } + } + return clone +} diff --git a/sdk/client/stream_session.go b/sdk/client/stream_session.go new file mode 100644 index 00000000..c7dfa2b9 --- /dev/null +++ b/sdk/client/stream_session.go @@ -0,0 +1,70 @@ +package client + +import ( + "io" + "sync" +) + +// StreamSession manages a live execution stream. 
+type StreamSession struct { + ExecID string + ExecURL string + + events <-chan StreamEvent + errors <-chan error + closeF func() error + + closeOnce sync.Once +} + +// Events returns a receive-only channel for stream events. +func (s *StreamSession) Events() <-chan StreamEvent { + return s.events +} + +// Errors returns a receive-only channel delivering terminal stream errors. +func (s *StreamSession) Errors() <-chan error { + return s.errors +} + +// Close terminates the stream and releases resources. +func (s *StreamSession) Close() error { + if s == nil { + return nil + } + var err error + s.closeOnce.Do(func() { + if s.closeF != nil { + err = s.closeF() + } + }) + return err +} + +func newStreamSession( + execID string, + execURL string, + events <-chan StreamEvent, + errors <-chan error, + closer func() error, +) *StreamSession { + return &StreamSession{ + ExecID: execID, + ExecURL: execURL, + events: events, + errors: errors, + closeF: func() error { + if closer == nil { + return nil + } + return closer() + }, + } +} + +func closeBody(body io.Closer) error { + if body == nil { + return nil + } + return body.Close() +} diff --git a/sdk/client/task.go b/sdk/client/task.go new file mode 100644 index 00000000..5c5ff76d --- /dev/null +++ b/sdk/client/task.go @@ -0,0 +1,63 @@ +package client + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/compozy/compozy/engine/infra/server/routes" +) + +// ExecuteTask triggers an asynchronous direct task execution. 
+func (c *Client) ExecuteTask( + ctx context.Context, + taskID string, + req *TaskExecuteRequest, +) (*TaskExecuteResponse, error) { + return executeRequest(ctx, c, executionRequestConfig[TaskExecuteResponse]{ + ResourceID: taskID, + ResourceLabel: "task", + RouteBase: routes.Tasks(), + PathSuffix: "/executions", + Body: req, + ExpectedCode: http.StatusAccepted, + OnSuccess: func(execCtx context.Context, id string, res *TaskExecuteResponse) { + logExecution(execCtx, "task", id, res.ExecID) + }, + }) +} + +// ExecuteTaskSync executes a task synchronously and waits for completion. +func (c *Client) ExecuteTaskSync( + ctx context.Context, + taskID string, + req *TaskExecuteRequest, +) (*TaskSyncResponse, error) { + return executeRequest(ctx, c, executionRequestConfig[TaskSyncResponse]{ + ResourceID: taskID, + ResourceLabel: "task", + RouteBase: routes.Tasks(), + PathSuffix: "/executions/sync", + Body: req, + ExpectedCode: http.StatusOK, + OnSuccess: func(execCtx context.Context, id string, res *TaskSyncResponse) { + logExecution(execCtx, "task_sync", id, res.ExecID) + }, + }) +} + +// ExecuteTaskStream starts a task execution and streams events until completion. 
+func (c *Client) ExecuteTaskStream( + ctx context.Context, + taskID string, + req *TaskExecuteRequest, + opts *StreamOptions, +) (*StreamSession, error) { + handle, err := c.ExecuteTask(ctx, taskID, req) + if err != nil { + return nil, err + } + streamPath := fmt.Sprintf("%s/tasks/%s/stream", routes.Executions(), url.PathEscape(handle.ExecID)) + return c.openStream(ctx, streamPath, nil, opts, handle.ExecID, handle.ExecURL) +} diff --git a/sdk/client/types.go b/sdk/client/types.go new file mode 100644 index 00000000..54b10c2b --- /dev/null +++ b/sdk/client/types.go @@ -0,0 +1,56 @@ +package client + +import ( + "time" + + agentrouter "github.com/compozy/compozy/engine/agent/router" + tkrouter "github.com/compozy/compozy/engine/task/router" + wfrouter "github.com/compozy/compozy/engine/workflow/router" +) + +// WorkflowExecuteRequest represents the async workflow execution payload. +type WorkflowExecuteRequest = wfrouter.ExecuteWorkflowRequest + +// WorkflowExecuteResponse mirrors the engine async workflow response. +type WorkflowExecuteResponse = wfrouter.ExecuteWorkflowResponse + +// WorkflowSyncRequest mirrors the workflow sync request payload. +type WorkflowSyncRequest = wfrouter.WorkflowSyncRequest + +// WorkflowSyncResponse mirrors the workflow sync response payload. +type WorkflowSyncResponse = wfrouter.WorkflowSyncResponse + +// TaskExecuteRequest represents direct task execution input. +type TaskExecuteRequest = tkrouter.TaskExecRequest + +// TaskExecuteResponse mirrors the async task execution response. +type TaskExecuteResponse = tkrouter.TaskExecAsyncResponse + +// TaskSyncResponse mirrors the synchronous task execution response. +type TaskSyncResponse = tkrouter.TaskExecSyncResponse + +// AgentExecuteRequest represents agent execution input. +type AgentExecuteRequest = agentrouter.AgentExecRequest + +// AgentExecuteResponse mirrors the async agent execution response. 
+type AgentExecuteResponse = agentrouter.AgentExecAsyncResponse + +// AgentSyncResponse mirrors the synchronous agent execution response. +type AgentSyncResponse = agentrouter.AgentExecSyncResponse + +// StreamOptions customizes streaming behavior for workflow, task, or agent execution streams. +type StreamOptions struct { + // PollInterval controls the poll_ms query parameter. + PollInterval time.Duration + // Events filters emitted event types using the events query parameter. + Events []string + // LastEventID resumes streaming from a specific event id via Last-Event-ID header. + LastEventID *int64 +} + +// StreamEvent encapsulates a single SSE frame from execution streams. +type StreamEvent struct { + ID int64 + Type string + Data []byte +} diff --git a/sdk/client/workflow.go b/sdk/client/workflow.go new file mode 100644 index 00000000..bc63d68c --- /dev/null +++ b/sdk/client/workflow.go @@ -0,0 +1,72 @@ +package client + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/compozy/compozy/engine/infra/server/routes" + "github.com/compozy/compozy/pkg/logger" +) + +// ExecuteWorkflow triggers an asynchronous workflow execution and returns the execution handle. +func (c *Client) ExecuteWorkflow( + ctx context.Context, + workflowID string, + req *WorkflowExecuteRequest, +) (*WorkflowExecuteResponse, error) { + return executeRequest(ctx, c, executionRequestConfig[WorkflowExecuteResponse]{ + ResourceID: workflowID, + ResourceLabel: "workflow", + RouteBase: routes.Workflows(), + PathSuffix: "/executions", + Body: req, + ExpectedCode: http.StatusAccepted, + OnSuccess: func(execCtx context.Context, id string, res *WorkflowExecuteResponse) { + logExecution(execCtx, "workflow", id, res.ExecID) + }, + }) +} + +// ExecuteWorkflowSync executes a workflow and waits for completion. 
+func (c *Client) ExecuteWorkflowSync( + ctx context.Context, + workflowID string, + req *WorkflowSyncRequest, +) (*WorkflowSyncResponse, error) { + return executeRequest(ctx, c, executionRequestConfig[WorkflowSyncResponse]{ + ResourceID: workflowID, + ResourceLabel: "workflow", + RouteBase: routes.Workflows(), + PathSuffix: "/executions/sync", + Body: req, + ExpectedCode: http.StatusOK, + OnSuccess: func(execCtx context.Context, id string, res *WorkflowSyncResponse) { + logExecution(execCtx, "workflow_sync", id, res.ExecID) + }, + }) +} + +// ExecuteWorkflowStream starts an asynchronous workflow execution and streams events until completion. +func (c *Client) ExecuteWorkflowStream( + ctx context.Context, + workflowID string, + req *WorkflowExecuteRequest, + opts *StreamOptions, +) (*StreamSession, error) { + handle, err := c.ExecuteWorkflow(ctx, workflowID, req) + if err != nil { + return nil, err + } + streamPath := fmt.Sprintf("%s/%s/stream", routes.Executions(), url.PathEscape(handle.ExecID)) + return c.openStream(ctx, streamPath, nil, opts, handle.ExecID, handle.ExecURL) +} + +func logExecution(ctx context.Context, kind string, resource string, execID string) { + log := logger.FromContext(ctx) + if log == nil { + return + } + log.Info("execution triggered", "kind", kind, "resource", resource, "exec_id", execID) +} diff --git a/sdk/compozy/README.md b/sdk/compozy/README.md new file mode 100644 index 00000000..af1617ca --- /dev/null +++ b/sdk/compozy/README.md @@ -0,0 +1,17 @@ +# Compozy SDK Codegen + +This package relies on generated helpers for functional options, resource loading, registration, and client-backed execution. 
To regenerate the Go sources run: + +```bash +cd sdk/compozy +go generate +``` + +The generator lives under `sdk/internal/sdkcodegen` and produces: + +- `options_generated.go` +- `engine_execution.go` +- `engine_loading.go` +- `engine_registration.go` + +Generated files are formatted and deterministic; rerunning `go generate` should not introduce diffs if the source spec has not changed. diff --git a/sdk/compozy/app_test.go b/sdk/compozy/app_test.go new file mode 100644 index 00000000..8bce568b --- /dev/null +++ b/sdk/compozy/app_test.go @@ -0,0 +1,58 @@ +package compozy + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + engineproject "github.com/compozy/compozy/engine/project" + engineworkflow "github.com/compozy/compozy/engine/workflow" +) + +func TestNew(t *testing.T) { + t.Parallel() + t.Run("Should construct engine with workflow", func(t *testing.T) { + t.Parallel() + workflowCfg := &engineworkflow.Config{ID: "greeting"} + engine, err := New(t.Context(), WithWorkflow(workflowCfg)) + require.NoError(t, err) + require.NotNil(t, engine) + require.Len(t, engine.workflows, 1) + assert.NotSame(t, workflowCfg, engine.workflows[0]) + assert.Equal(t, workflowCfg, engine.workflows[0]) + assert.Equal(t, ModeStandalone, engine.mode) + assert.Equal(t, defaultHost, engine.host) + }) + t.Run("Should honor project overrides", func(t *testing.T) { + t.Parallel() + projectCfg := &engineproject.Config{Name: "demo"} + engine, err := New( + t.Context(), + WithProject(projectCfg), + WithHost(" "), + WithPort(9090), + WithMode(ModeDistributed), + ) + require.NoError(t, err) + require.NotNil(t, engine) + assert.Same(t, projectCfg, engine.project) + assert.Equal(t, ModeDistributed, engine.mode) + assert.Equal(t, defaultHost, engine.host) + assert.Equal(t, 9090, engine.port) + }) + t.Run("Should error when context is nil", func(t *testing.T) { + t.Parallel() + var nilCtx context.Context + _, err := New(nilCtx, 
WithPort(8080)) + require.Error(t, err) + assert.Contains(t, err.Error(), "context is required") + }) + t.Run("Should require at least one resource", func(t *testing.T) { + t.Parallel() + _, err := New(t.Context()) + require.Error(t, err) + assert.Contains(t, err.Error(), "at least one resource") + }) +} diff --git a/sdk/compozy/cleanup_test.go b/sdk/compozy/cleanup_test.go new file mode 100644 index 00000000..cdaa9291 --- /dev/null +++ b/sdk/compozy/cleanup_test.go @@ -0,0 +1,61 @@ +package compozy + +import ( + "context" + "errors" + "fmt" + "testing" + + engineproject "github.com/compozy/compozy/engine/project" + "github.com/compozy/compozy/engine/resources" + enginetool "github.com/compozy/compozy/engine/tool" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type failingStore struct { + resources.ResourceStore +} + +func (f *failingStore) Put(_ context.Context, _ resources.ResourceKey, _ any) (resources.ETag, error) { + return "", fmt.Errorf("forced failure") +} + +func TestEngineCleanupUtilities(t *testing.T) { + t.Parallel() + t.Run("Should join mode cleanup errors", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx} + called := 0 + engine.modeCleanups = []modeCleanup{ + func(context.Context) error { + called++ + return nil + }, + func(context.Context) error { + called++ + return errors.New("cleanup failure") + }, + } + err := engine.cleanupModeResources(ctx) + require.Error(t, err) + assert.Equal(t, 2, called) + }) + t.Run("Should close resource store safely", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx} + store := resources.NewMemoryResourceStore() + require.NotPanics(t, func() { engine.cleanupStore(ctx, store) }) + }) + t.Run("Should fail tool registration when store errors", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx} + engine.project = &engineproject.Config{Name: 
"cleanup"} + engine.resourceStore = &failingStore{ResourceStore: resources.NewMemoryResourceStore()} + require.Error(t, engine.RegisterTool(&enginetool.Config{ID: "cleanup-tool"})) + assert.Empty(t, engine.tools) + }) +} diff --git a/sdk/compozy/codegen/generator_test.go b/sdk/compozy/codegen/generator_test.go new file mode 100644 index 00000000..eb4477fa --- /dev/null +++ b/sdk/compozy/codegen/generator_test.go @@ -0,0 +1,37 @@ +package codegen + +import ( + "crypto/sha256" + "encoding/hex" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGeneratedFilesHashes(t *testing.T) { + // This test locks generated outputs to make intentional template changes explicit. + // When updates are expected, run `go test -run TestGeneratedFilesHashes -v` + // and refresh the hashes from the failure output. + files := map[string]string{ + "options_generated.go": "c827fddefb3ca3a92e9148f83b8ddab434033e3a0b015877ae5686c5312a5a60", + "engine_execution.go": "4a398c36ef0d122a0fa10e4b2deaa4948d524ccaf809264b3ded3ca8ebaa32da", + "engine_loading.go": "4c67e1323d10f6ebf6d270e4838ae57058fd70488d5930f968e6e79be7bf4058", + "engine_registration.go": "d5af56637e802639fb4d14b5462e799b131072a0437441d1dd83c1e4a3078161", + } + root := filepath.Clean("..") + for name, expected := range files { + name := name + expected := expected + t.Run("Should match generated hash for "+name, func(t *testing.T) { + path := filepath.Join(root, name) + data, err := os.ReadFile(path) + require.NoError(t, err, "failed to read %s", name) + sum := sha256.Sum256(data) + hash := hex.EncodeToString(sum[:]) + assert.Equal(t, expected, hash, "hash mismatch for %s", name) + }) + } +} diff --git a/sdk/compozy/config/yaml_loader_test.go b/sdk/compozy/config/yaml_loader_test.go new file mode 100644 index 00000000..d0023e4f --- /dev/null +++ b/sdk/compozy/config/yaml_loader_test.go @@ -0,0 +1,43 @@ +package compozy_test + +import ( + "os" + 
"path/filepath" + "testing" + + enginecore "github.com/compozy/compozy/engine/core" + enginetask "github.com/compozy/compozy/engine/task" + engineworkflow "github.com/compozy/compozy/engine/workflow" + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + "github.com/stretchr/testify/require" +) + +func TestLoadToolsFromDirPropagatesFilePathOnError(t *testing.T) { + t.Run("Should include file name when YAML parsing fails", func(t *testing.T) { + ctx := logger.ContextWithLogger(t.Context(), logger.NewForTests()) + service := appconfig.NewService() + manager := appconfig.NewManager(ctx, service) + _, err := manager.Load(ctx, appconfig.NewDefaultProvider()) + require.NoError(t, err) + ctx = appconfig.ContextWithManager(ctx, manager) + wf := &engineworkflow.Config{ID: "loader"} + wf.Tasks = []enginetask.Config{ + { + BaseConfig: enginetask.BaseConfig{ + ID: "only", + OnSuccess: &enginecore.SuccessTransition{}, + }, + }, + } + engine, err := compozy.New(ctx, compozy.WithWorkflow(wf)) + require.NoError(t, err) + dir := t.TempDir() + file := filepath.Join(dir, "bad.yaml") + require.NoError(t, os.WriteFile(file, []byte("::invalid::"), 0o600)) + err = engine.LoadToolsFromDir(ctx, dir) + require.Error(t, err) + require.Contains(t, err.Error(), "bad.yaml") + }) +} diff --git a/sdk/compozy/constants.go b/sdk/compozy/constants.go new file mode 100644 index 00000000..2f2d0348 --- /dev/null +++ b/sdk/compozy/constants.go @@ -0,0 +1,9 @@ +package compozy + +const ( + httpScheme = "http" + loopbackHostname = "127.0.0.1" + registrationUpdatedBy = "sdk" + resourceSourceProgrammatic = "sdk:programmatic" + resourceSourceYAML = "sdk:yaml" +) diff --git a/sdk/compozy/constructor.go b/sdk/compozy/constructor.go new file mode 100644 index 00000000..65cfd911 --- /dev/null +++ b/sdk/compozy/constructor.go @@ -0,0 +1,200 @@ +package compozy + +import ( + "context" + "fmt" + "strings" + + engineagent 
"github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + engineschema "github.com/compozy/compozy/engine/schema" + enginetool "github.com/compozy/compozy/engine/tool" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" +) + +// New constructs an Engine using the provided functional options. +func New(ctx context.Context, opts ...Option) (*Engine, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + cfg := defaultConfig() + for _, opt := range opts { + if opt == nil { + continue + } + opt(cfg) + } + cfg.normalize() + if err := cfg.validate(); err != nil { + return nil, err + } + clones, err := buildResourceClones(cfg) + if err != nil { + return nil, err + } + engine := &Engine{ + ctx: ctx, + mode: cfg.mode, + host: cfg.host, + port: cfg.port, + project: cfg.project, + workflows: clones.workflows, + agents: clones.agents, + tools: clones.tools, + knowledgeBases: clones.knowledgeBases, + memories: clones.memories, + mcps: clones.mcps, + schemas: clones.schemas, + models: clones.models, + schedules: clones.schedules, + webhooks: clones.webhooks, + standaloneTemporal: cfg.standaloneTemporal, + standaloneRedis: cfg.standaloneRedis, + } + return engine, nil +} + +func (c *config) normalize() { + c.host = strings.TrimSpace(c.host) + if c.host == "" { + c.host = defaultHost + } +} + +func (c *config) validate() error { + if c.resourceCount() == 0 { + return fmt.Errorf("at least one resource must be registered") + } + return nil +} + +func (c *config) resourceCount() int { + count := 0 + if c.project != nil { + count++ + } + count += len(c.workflows) + count += len(c.agents) + count += len(c.tools) + count += 
len(c.knowledgeBases) + count += len(c.memories) + count += len(c.mcps) + count += len(c.schemas) + count += len(c.models) + count += len(c.schedules) + count += len(c.webhooks) + return count +} + +type resourceClones struct { + workflows []*engineworkflow.Config + agents []*engineagent.Config + tools []*enginetool.Config + knowledgeBases []*engineknowledge.BaseConfig + memories []*enginememory.Config + mcps []*enginemcp.Config + schemas []*engineschema.Schema + models []*core.ProviderConfig + schedules []*projectschedule.Config + webhooks []*enginewebhook.Config +} + +func buildResourceClones(cfg *config) (*resourceClones, error) { + clones := &resourceClones{} + var err error + if clones.workflows, err = cloneWorkflowConfigs(cfg.workflows); err != nil { + return nil, err + } + if clones.agents, err = cloneAgentConfigs(cfg.agents); err != nil { + return nil, err + } + if clones.tools, err = cloneToolConfigs(cfg.tools); err != nil { + return nil, err + } + if clones.knowledgeBases, err = cloneKnowledgeConfigs(cfg.knowledgeBases); err != nil { + return nil, err + } + if clones.memories, err = cloneMemoryConfigs(cfg.memories); err != nil { + return nil, err + } + if clones.mcps, err = cloneMCPConfigs(cfg.mcps); err != nil { + return nil, err + } + if clones.schemas, err = cloneSchemaConfigs(cfg.schemas); err != nil { + return nil, err + } + if clones.models, err = cloneModelConfigs(cfg.models); err != nil { + return nil, err + } + if clones.schedules, err = cloneScheduleConfigs(cfg.schedules); err != nil { + return nil, err + } + if clones.webhooks, err = cloneWebhookConfigs(cfg.webhooks); err != nil { + return nil, err + } + return clones, nil +} + +func cloneWorkflowConfigs(values []*engineworkflow.Config) ([]*engineworkflow.Config, error) { + return cloneConfigSlice(values, "workflow") +} + +func cloneAgentConfigs(values []*engineagent.Config) ([]*engineagent.Config, error) { + return cloneConfigSlice(values, "agent") +} + +func cloneToolConfigs(values 
[]*enginetool.Config) ([]*enginetool.Config, error) { + return cloneConfigSlice(values, "tool") +} + +func cloneKnowledgeConfigs(values []*engineknowledge.BaseConfig) ([]*engineknowledge.BaseConfig, error) { + return cloneConfigSlice(values, "knowledge base") +} + +func cloneMemoryConfigs(values []*enginememory.Config) ([]*enginememory.Config, error) { + return cloneConfigSlice(values, "memory") +} + +func cloneMCPConfigs(values []*enginemcp.Config) ([]*enginemcp.Config, error) { + return cloneConfigSlice(values, "mcp") +} + +func cloneSchemaConfigs(values []*engineschema.Schema) ([]*engineschema.Schema, error) { + return cloneConfigSlice(values, "schema") +} + +func cloneModelConfigs(values []*core.ProviderConfig) ([]*core.ProviderConfig, error) { + return cloneConfigSlice(values, "model") +} + +func cloneScheduleConfigs(values []*projectschedule.Config) ([]*projectschedule.Config, error) { + return cloneConfigSlice(values, "schedule") +} + +func cloneWebhookConfigs(values []*enginewebhook.Config) ([]*enginewebhook.Config, error) { + return cloneConfigSlice(values, "webhook") +} + +func cloneConfigSlice[T any](values []*T, label string) ([]*T, error) { + if len(values) == 0 { + return make([]*T, 0), nil + } + cloned := make([]*T, 0, len(values)) + for _, value := range values { + if value == nil { + cloned = append(cloned, nil) + continue + } + clonedValue, err := core.DeepCopy(value) + if err != nil { + return nil, fmt.Errorf("clone %s configs: %w", label, err) + } + cloned = append(cloned, clonedValue) + } + return cloned, nil +} diff --git a/sdk/compozy/constructor_clone_test.go b/sdk/compozy/constructor_clone_test.go new file mode 100644 index 00000000..9cb6acbd --- /dev/null +++ b/sdk/compozy/constructor_clone_test.go @@ -0,0 +1,139 @@ +package compozy + +import ( + "testing" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + 
enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + engineschema "github.com/compozy/compozy/engine/schema" + enginetool "github.com/compozy/compozy/engine/tool" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCloneConfigEmptySlices(t *testing.T) { + t.Parallel() + cases := []struct { + name string + run func(t *testing.T) + }{ + {"Should clone workflow configs when input is nil", func(t *testing.T) { + clones, err := cloneWorkflowConfigs(nil) + require.NoError(t, err) + assert.Empty(t, clones) + }}, + {"Should clone agent configs when input is nil", func(t *testing.T) { + clones, err := cloneAgentConfigs(nil) + require.NoError(t, err) + assert.Empty(t, clones) + }}, + {"Should clone tool configs when input is nil", func(t *testing.T) { + clones, err := cloneToolConfigs(nil) + require.NoError(t, err) + assert.Empty(t, clones) + }}, + {"Should clone knowledge configs when input is nil", func(t *testing.T) { + clones, err := cloneKnowledgeConfigs(nil) + require.NoError(t, err) + assert.Empty(t, clones) + }}, + {"Should clone memory configs when input is nil", func(t *testing.T) { + clones, err := cloneMemoryConfigs(nil) + require.NoError(t, err) + assert.Empty(t, clones) + }}, + {"Should clone MCP configs when input is nil", func(t *testing.T) { + clones, err := cloneMCPConfigs(nil) + require.NoError(t, err) + assert.Empty(t, clones) + }}, + {"Should clone schema configs when input is nil", func(t *testing.T) { + clones, err := cloneSchemaConfigs(nil) + require.NoError(t, err) + assert.Empty(t, clones) + }}, + {"Should clone model configs when input is nil", func(t *testing.T) { + clones, err := cloneModelConfigs(nil) + require.NoError(t, err) + assert.Empty(t, clones) + }}, 
+ {"Should clone schedule configs when input is nil", func(t *testing.T) { + clones, err := cloneScheduleConfigs(nil) + require.NoError(t, err) + assert.Empty(t, clones) + }}, + {"Should clone webhook configs when input is nil", func(t *testing.T) { + clones, err := cloneWebhookConfigs(nil) + require.NoError(t, err) + assert.Empty(t, clones) + }}, + } + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + tc.run(t) + }) + } +} + +func TestCloneConfigDeepCopy(t *testing.T) { + t.Parallel() + t.Run("Should deep copy workflow configs", func(t *testing.T) { + t.Parallel() + original := &engineworkflow.Config{ID: "deep-copy"} + clones, err := cloneWorkflowConfigs([]*engineworkflow.Config{original}) + require.NoError(t, err) + require.Len(t, clones, 1) + assert.NotSame(t, original, clones[0]) + assert.Equal(t, original, clones[0]) + clones[0].ID = "mutated" + assert.Equal(t, "deep-copy", original.ID) + }) +} + +func TestBuildResourceClones(t *testing.T) { + t.Parallel() + t.Run("Should clone all resource configurations", func(t *testing.T) { + cfg := &config{ + workflows: []*engineworkflow.Config{{ID: "wf"}}, + agents: []*engineagent.Config{{ID: "agent"}}, + tools: []*enginetool.Config{{ID: "tool"}}, + knowledgeBases: []*engineknowledge.BaseConfig{{ID: "kb"}}, + memories: []*enginememory.Config{{ID: "mem"}}, + mcps: []*enginemcp.Config{{ID: "mcp"}}, + schemas: []*engineschema.Schema{{"type": "object"}}, + models: []*enginecore.ProviderConfig{{Provider: enginecore.ProviderName("openai"), Model: "gpt"}}, + schedules: []*projectschedule.Config{{ID: "schedule", WorkflowID: "wf"}}, + webhooks: []*enginewebhook.Config{{Slug: "hook"}}, + } + clones, err := buildResourceClones(cfg) + require.NoError(t, err) + require.Len(t, clones.workflows, 1) + require.Len(t, clones.agents, 1) + require.Len(t, clones.tools, 1) + require.Len(t, clones.knowledgeBases, 1) + require.Len(t, clones.memories, 1) + require.Len(t, clones.mcps, 1) + require.Len(t, 
clones.schemas, 1) + require.Len(t, clones.models, 1) + require.Len(t, clones.schedules, 1) + require.Len(t, clones.webhooks, 1) + assert.NotSame(t, cfg.workflows[0], clones.workflows[0]) + assert.NotSame(t, cfg.agents[0], clones.agents[0]) + assert.NotSame(t, cfg.tools[0], clones.tools[0]) + assert.NotSame(t, cfg.knowledgeBases[0], clones.knowledgeBases[0]) + assert.NotSame(t, cfg.memories[0], clones.memories[0]) + assert.NotSame(t, cfg.mcps[0], clones.mcps[0]) + assert.NotSame(t, cfg.schemas[0], clones.schemas[0]) + assert.NotSame(t, cfg.models[0], clones.models[0]) + assert.NotSame(t, cfg.schedules[0], clones.schedules[0]) + assert.NotSame(t, cfg.webhooks[0], clones.webhooks[0]) + }) +} diff --git a/sdk/compozy/distributed.go b/sdk/compozy/distributed.go new file mode 100644 index 00000000..78886107 --- /dev/null +++ b/sdk/compozy/distributed.go @@ -0,0 +1,93 @@ +package compozy + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + "github.com/compozy/compozy/engine/infra/cache" + "github.com/compozy/compozy/engine/resources" + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" +) + +const defaultTemporalReachability = 5 * time.Second + +func (e *Engine) bootstrapDistributed(ctx context.Context, cfg *appconfig.Config) (*modeRuntimeState, error) { + state := &modeRuntimeState{} + if cfg == nil { + return nil, fmt.Errorf("configuration is required") + } + if err := validateDistributedConfig(cfg); err != nil { + return nil, err + } + cacheCfg := cache.FromAppConfig(cfg) + redisClient, err := cache.NewRedis(ctx, cacheCfg) + if err != nil { + return nil, fmt.Errorf("connect redis: %w", err) + } + state.addCleanup(func(_ context.Context) error { + return redisClient.Close() + }) + state.resourceStore = resources.NewRedisResourceStore(redisClient) + if err := ensureTemporalReachable(ctx, cfg); err != nil { + state.cleanupOnError(context.WithoutCancel(ctx)) + return nil, err + } + log := logger.FromContext(ctx) + if 
log != nil { + log.Info("connected to distributed dependencies", + "temporal_host", cfg.Temporal.HostPort, + "redis", describeRedisEndpoint(cfg), + ) + } + return state, nil +} + +func validateDistributedConfig(cfg *appconfig.Config) error { + if strings.TrimSpace(cfg.Temporal.HostPort) == "" { + return fmt.Errorf("temporal.host_port must be configured for distributed mode") + } + if hasRedisConnection(cfg) { + return nil + } + return fmt.Errorf("redis connection details are required for distributed mode") +} + +func ensureTemporalReachable(ctx context.Context, cfg *appconfig.Config) error { + timeout := cfg.Server.Timeouts.TemporalReachability + if timeout <= 0 { + timeout = defaultTemporalReachability + } + dialer := &net.Dialer{Timeout: timeout} + dialCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), timeout) + defer cancel() + conn, err := dialer.DialContext(dialCtx, "tcp", cfg.Temporal.HostPort) + if err != nil { + return fmt.Errorf("reach temporal at %s: %w", cfg.Temporal.HostPort, err) + } + _ = conn.Close() + return nil +} + +func hasRedisConnection(cfg *appconfig.Config) bool { + if strings.TrimSpace(cfg.Redis.URL) != "" { + return true + } + if strings.TrimSpace(cfg.Redis.Host) == "" { + return false + } + return strings.TrimSpace(cfg.Redis.Port) != "" +} + +func describeRedisEndpoint(cfg *appconfig.Config) string { + if strings.TrimSpace(cfg.Redis.URL) != "" { + return cfg.Redis.URL + } + if strings.TrimSpace(cfg.Redis.Host) == "" { + return "" + } + return net.JoinHostPort(cfg.Redis.Host, cfg.Redis.Port) +} diff --git a/sdk/compozy/distributed_test.go b/sdk/compozy/distributed_test.go new file mode 100644 index 00000000..b999b70b --- /dev/null +++ b/sdk/compozy/distributed_test.go @@ -0,0 +1,60 @@ +package compozy + +import ( + "context" + "net" + "testing" + + "github.com/alicebob/miniredis/v2" + "github.com/compozy/compozy/engine/resources" + engineworkflow "github.com/compozy/compozy/engine/workflow" + appconfig 
"github.com/compozy/compozy/pkg/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidateDistributedConfigRequiresRedis(t *testing.T) { + cfg := &appconfig.Config{} + cfg.Temporal.HostPort = "localhost:7233" + assert.Error(t, validateDistributedConfig(cfg)) +} + +func TestBootstrapDistributedCreatesRedisStore(t *testing.T) { + ctx := lifecycleTestContext(t) + cfg := appconfig.FromContext(ctx) + require.NotNil(t, cfg) + cfg.Mode = string(ModeDistributed) + listenCfg := net.ListenConfig{} + temporalListener, err := listenCfg.Listen(context.WithoutCancel(t.Context()), "tcp", "127.0.0.1:0") + require.NoError(t, err) + defer temporalListener.Close() + cfg.Temporal.HostPort = temporalListener.Addr().String() + mr := miniredis.NewMiniRedis() + require.NoError(t, mr.Start()) + defer mr.Close() + cfg.Redis.URL = "redis://" + mr.Addr() + cfg.Redis.Mode = string(ModeDistributed) + engine, err := New(ctx, WithMode(ModeDistributed), WithWorkflow(&engineworkflow.Config{ID: "distributed-store"})) + require.NoError(t, err) + store, err := engine.buildResourceStore(ctx, cfg) + require.NoError(t, err) + assert.IsType(t, &resources.RedisResourceStore{}, store) + assert.NoError(t, store.Close()) + assert.NoError(t, engine.cleanupModeResources(ctx)) +} + +func TestHasRedisConnectionCombinations(t *testing.T) { + t.Parallel() + assert.True(t, hasRedisConnection(&appconfig.Config{Redis: appconfig.RedisConfig{URL: "redis://cache"}})) + assert.False(t, hasRedisConnection(&appconfig.Config{Redis: appconfig.RedisConfig{Host: "localhost"}})) + assert.True(t, hasRedisConnection(&appconfig.Config{Redis: appconfig.RedisConfig{Host: "localhost", Port: "6379"}})) +} + +func TestDescribeRedisEndpointFormatsOutput(t *testing.T) { + t.Parallel() + cfg := &appconfig.Config{Redis: appconfig.RedisConfig{URL: "redis://override"}} + assert.Equal(t, "redis://override", describeRedisEndpoint(cfg)) + cfg = &appconfig.Config{Redis: 
appconfig.RedisConfig{Host: "cache", Port: "6380"}} + assert.Equal(t, "cache:6380", describeRedisEndpoint(cfg)) + assert.Equal(t, "", describeRedisEndpoint(&appconfig.Config{})) +} diff --git a/sdk/compozy/doc.go b/sdk/compozy/doc.go new file mode 100644 index 00000000..c809e289 --- /dev/null +++ b/sdk/compozy/doc.go @@ -0,0 +1,3 @@ +// Package compozy provides the composable SDK entrypoint for building and executing +// Compozy applications using pure Go declarations. +package compozy diff --git a/sdk/compozy/engine.go b/sdk/compozy/engine.go new file mode 100644 index 00000000..78de1050 --- /dev/null +++ b/sdk/compozy/engine.go @@ -0,0 +1,76 @@ +package compozy + +import ( + "context" + "net" + "net/http" + "sync" + + engineagent "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + "github.com/compozy/compozy/engine/resources" + engineschema "github.com/compozy/compozy/engine/schema" + enginetool "github.com/compozy/compozy/engine/tool" + "github.com/compozy/compozy/engine/tool/inline" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/go-chi/chi/v5" + + sdkclient "github.com/compozy/compozy/sdk/v2/client" +) + +// Engine represents an instantiated Compozy SDK core. 
+type Engine struct { + ctx context.Context + + mode Mode + host string + port int + + project *engineproject.Config + workflows []*engineworkflow.Config + agents []*engineagent.Config + tools []*enginetool.Config + knowledgeBases []*engineknowledge.BaseConfig + memories []*enginememory.Config + mcps []*enginemcp.Config + schemas []*engineschema.Schema + models []*core.ProviderConfig + schedules []*projectschedule.Config + webhooks []*enginewebhook.Config + + standaloneTemporal *StandaloneTemporalConfig + standaloneRedis *StandaloneRedisConfig + + resourceStore resources.ResourceStore + router *chi.Mux + server *http.Server + listener net.Listener + client *sdkclient.Client + + configSnapshot *appconfig.Config + + serverCancel context.CancelFunc + serverWG sync.WaitGroup + + modeCleanups []modeCleanup + inlineManager *inline.Manager + + stateMu sync.RWMutex + started bool + + startMu sync.Mutex + stopMu sync.Mutex + + errMu sync.Mutex + serverErr error + startErr error + stopErr error + baseURL string +} diff --git a/sdk/compozy/engine_execution.go b/sdk/compozy/engine_execution.go new file mode 100644 index 00000000..e7839962 --- /dev/null +++ b/sdk/compozy/engine_execution.go @@ -0,0 +1,327 @@ +// Code generated by compozygen. DO NOT EDIT. +package compozy + +import ( + "context" + "fmt" + core "github.com/compozy/compozy/engine/core" + client "github.com/compozy/compozy/sdk/v2/client" + "strconv" + "strings" + "time" +) + +// ExecuteWorkflow triggers asynchronous workflow execution via the client. 
+func (e *Engine) ExecuteWorkflow(ctx context.Context, workflowID string, req *ExecuteRequest) (*ExecuteResponse, error) { + cli, err := ensureClient(e) + if err != nil { + return nil, err + } + payload := buildWorkflowExecuteRequest(req) + resp, err := cli.ExecuteWorkflow(ctx, workflowID, payload) + if err != nil { + return nil, err + } + return newExecuteResponse(resp.ExecID, resp.ExecURL), nil +} + +// ExecuteWorkflowSync performs synchronous workflow execution and returns the result. +func (e *Engine) ExecuteWorkflowSync(ctx context.Context, workflowID string, req *ExecuteSyncRequest) (*ExecuteSyncResponse, error) { + cli, err := ensureClient(e) + if err != nil { + return nil, err + } + payload := buildWorkflowSyncRequest(req) + resp, err := cli.ExecuteWorkflowSync(ctx, workflowID, payload) + if err != nil { + return nil, err + } + return buildSyncResponse(resp.ExecID, resp.Output), nil +} + +// ExecuteWorkflowStream starts workflow execution and returns a stream session. +func (e *Engine) ExecuteWorkflowStream(ctx context.Context, workflowID string, req *ExecuteRequest, opts *client.StreamOptions) (*client.StreamSession, error) { + cli, err := ensureClient(e) + if err != nil { + return nil, err + } + payload := buildWorkflowExecuteRequest(req) + return cli.ExecuteWorkflowStream(ctx, workflowID, payload, opts) +} + +// ExecuteTask triggers asynchronous task execution via the client. +func (e *Engine) ExecuteTask(ctx context.Context, taskID string, req *ExecuteRequest) (*ExecuteResponse, error) { + cli, err := ensureClient(e) + if err != nil { + return nil, err + } + payload := buildTaskExecuteRequest(req) + resp, err := cli.ExecuteTask(ctx, taskID, payload) + if err != nil { + return nil, err + } + return newExecuteResponse(resp.ExecID, resp.ExecURL), nil +} + +// ExecuteTaskSync performs synchronous task execution and returns the result. 
+func (e *Engine) ExecuteTaskSync(ctx context.Context, taskID string, req *ExecuteSyncRequest) (*ExecuteSyncResponse, error) { + cli, err := ensureClient(e) + if err != nil { + return nil, err + } + payload := buildTaskSyncRequest(req) + resp, err := cli.ExecuteTaskSync(ctx, taskID, payload) + if err != nil { + return nil, err + } + return buildSyncResponse(resp.ExecID, resp.Output), nil +} + +// ExecuteTaskStream starts task execution and returns a stream session. +func (e *Engine) ExecuteTaskStream(ctx context.Context, taskID string, req *ExecuteRequest, opts *client.StreamOptions) (*client.StreamSession, error) { + cli, err := ensureClient(e) + if err != nil { + return nil, err + } + payload := buildTaskExecuteRequest(req) + return cli.ExecuteTaskStream(ctx, taskID, payload, opts) +} + +// ExecuteAgent triggers asynchronous agent execution via the client. +func (e *Engine) ExecuteAgent(ctx context.Context, agentID string, req *ExecuteRequest) (*ExecuteResponse, error) { + cli, err := ensureClient(e) + if err != nil { + return nil, err + } + payload := buildAgentExecuteRequest(req) + resp, err := cli.ExecuteAgent(ctx, agentID, payload) + if err != nil { + return nil, err + } + return newExecuteResponse(resp.ExecID, resp.ExecURL), nil +} + +// ExecuteAgentSync performs synchronous agent execution and returns the result. +func (e *Engine) ExecuteAgentSync(ctx context.Context, agentID string, req *ExecuteSyncRequest) (*ExecuteSyncResponse, error) { + cli, err := ensureClient(e) + if err != nil { + return nil, err + } + payload := buildAgentSyncRequest(req) + resp, err := cli.ExecuteAgentSync(ctx, agentID, payload) + if err != nil { + return nil, err + } + return buildSyncResponse(resp.ExecID, resp.Output), nil +} + +// ExecuteAgentStream starts agent execution and returns a stream session. 
+func (e *Engine) ExecuteAgentStream(ctx context.Context, agentID string, req *ExecuteRequest, opts *client.StreamOptions) (*client.StreamSession, error) { + cli, err := ensureClient(e) + if err != nil { + return nil, err + } + payload := buildAgentExecuteRequest(req) + return cli.ExecuteAgentStream(ctx, agentID, payload, opts) +} +func ensureClient(e *Engine) (*client.Client, error) { + if e == nil { + return nil, fmt.Errorf("engine is nil") + } + if e.client == nil { + return nil, fmt.Errorf("engine client is not initialized") + } + return e.client, nil +} +func newExecuteResponse(execID string, execURL string) *ExecuteResponse { + return &ExecuteResponse{ + ExecID: execID, + ExecURL: execURL, + } +} +func buildSyncResponse(execID string, output *core.Output) *ExecuteSyncResponse { + return &ExecuteSyncResponse{ + ExecID: execID, + Output: copyOutput(output), + } +} +func copyInput(values map[string]any) core.Input { + if len(values) == 0 { + return nil + } + cloned := core.CopyMaps(values) + if len(cloned) == 0 { + return nil + } + return core.Input(cloned) +} +func copyOutput(output *core.Output) map[string]any { + if output == nil { + return nil + } + return output.AsMap() +} +func stringFromOptions(options map[string]any, key string) string { + if options == nil { + return "" + } + raw, ok := options[key] + if !ok { + return "" + } + str, isString := raw.(string) + if isString { + return strings.TrimSpace(str) + } + stringer, isStringer := raw.(fmt.Stringer) + if isStringer { + return strings.TrimSpace(stringer.String()) + } + return "" +} +func intFromOptions(options map[string]any, key string) *int { + if options == nil { + return nil + } + raw, ok := options[key] + if !ok { + return nil + } + var value int + intValue, isInt := raw.(int) + if isInt { + value = intValue + } else { + strValue, isString := raw.(string) + if !isString { + return nil + } + parsed, err := strconv.Atoi(strings.TrimSpace(strValue)) + if err != nil { + return nil + } + value = 
parsed + } + if value <= 0 { + return nil + } + return &value +} +func durationSeconds(value *time.Duration) *int { + if value == nil { + return nil + } + secs := int(value.Seconds()) + if secs <= 0 { + return nil + } + return &secs +} +func buildWorkflowExecuteRequest(req *ExecuteRequest) *client.WorkflowExecuteRequest { + payload := &client.WorkflowExecuteRequest{} + if req == nil { + return payload + } + inputCopy := copyInput(req.Input) + if inputCopy != nil { + payload.Input = inputCopy + } + task := stringFromOptions(req.Options, "task_id") + if task != "" { + payload.TaskID = task + } + return payload +} +func buildWorkflowSyncRequest(req *ExecuteSyncRequest) *client.WorkflowSyncRequest { + payload := &client.WorkflowSyncRequest{} + if req == nil { + return payload + } + inputCopy := copyInput(req.Input) + if inputCopy != nil { + payload.Input = inputCopy + } + secs := durationSeconds(req.Timeout) + if secs != nil { + payload.Timeout = *secs + } + task := stringFromOptions(req.Options, "task_id") + if task != "" { + payload.TaskID = task + } + return payload +} +func buildTaskExecuteRequest(req *ExecuteRequest) *client.TaskExecuteRequest { + payload := &client.TaskExecuteRequest{} + if req == nil { + return payload + } + payload.With = copyInput(req.Input) + timeoutOpt := intFromOptions(req.Options, "timeout") + if timeoutOpt != nil { + payload.Timeout = timeoutOpt + } + return payload +} +func buildTaskSyncRequest(req *ExecuteSyncRequest) *client.TaskExecuteRequest { + payload := &client.TaskExecuteRequest{} + if req == nil { + return payload + } + payload.With = copyInput(req.Input) + secs := durationSeconds(req.Timeout) + if secs != nil { + payload.Timeout = secs + return payload + } + timeoutOpt := intFromOptions(req.Options, "timeout") + if timeoutOpt != nil { + payload.Timeout = timeoutOpt + } + return payload +} +func buildAgentExecuteRequest(req *ExecuteRequest) *client.AgentExecuteRequest { + payload := &client.AgentExecuteRequest{} + if req == nil 
{ + return payload + } + payload.With = copyInput(req.Input) + action := stringFromOptions(req.Options, "action") + if action != "" { + payload.Action = action + } + prompt := stringFromOptions(req.Options, "prompt") + if prompt != "" { + payload.Prompt = prompt + } + timeoutOpt := intFromOptions(req.Options, "timeout") + if timeoutOpt != nil { + payload.Timeout = *timeoutOpt + } + return payload +} +func buildAgentSyncRequest(req *ExecuteSyncRequest) *client.AgentExecuteRequest { + payload := &client.AgentExecuteRequest{} + if req == nil { + return payload + } + payload.With = copyInput(req.Input) + action := stringFromOptions(req.Options, "action") + if action != "" { + payload.Action = action + } + prompt := stringFromOptions(req.Options, "prompt") + if prompt != "" { + payload.Prompt = prompt + } + secs := durationSeconds(req.Timeout) + if secs != nil { + payload.Timeout = *secs + return payload + } + timeoutOpt := intFromOptions(req.Options, "timeout") + if timeoutOpt != nil { + payload.Timeout = *timeoutOpt + } + return payload +} diff --git a/sdk/compozy/engine_loading.go b/sdk/compozy/engine_loading.go new file mode 100644 index 00000000..5dce2b1e --- /dev/null +++ b/sdk/compozy/engine_loading.go @@ -0,0 +1,271 @@ +// Code generated by compozygen. DO NOT EDIT. 
+package compozy + +import ( + "context" + "fmt" + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + engineschema "github.com/compozy/compozy/engine/schema" + enginetool "github.com/compozy/compozy/engine/tool" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" +) + +// LoadProject loads a project configuration from disk. +func (e *Engine) LoadProject(ctx context.Context, path string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + cfg, abs, err := loadYAML[*engineproject.Config](ctx, e, path) + if err != nil { + return fmt.Errorf("load project config: %w", err) + } + if err := e.registerProject(cfg, registrationSourceYAML); err != nil { + return fmt.Errorf("%s: %w", abs, err) + } + return nil +} + +// LoadProjectsFromDir loads projects configurations from a directory. +func (e *Engine) LoadProjectsFromDir(ctx context.Context, dir string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + return e.loadFromDir(ctx, dir, e.LoadProject) +} + +// LoadWorkflow loads a workflow configuration from disk. +func (e *Engine) LoadWorkflow(ctx context.Context, path string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + cfg, abs, err := loadYAML[*engineworkflow.Config](ctx, e, path) + if err != nil { + return fmt.Errorf("load workflow config: %w", err) + } + if err := e.registerWorkflow(cfg, registrationSourceYAML); err != nil { + return fmt.Errorf("%s: %w", abs, err) + } + return nil +} + +// LoadWorkflowsFromDir loads workflows configurations from a directory. 
+func (e *Engine) LoadWorkflowsFromDir(ctx context.Context, dir string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + return e.loadFromDir(ctx, dir, e.LoadWorkflow) +} + +// LoadAgent loads a agent configuration from disk. +func (e *Engine) LoadAgent(ctx context.Context, path string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + cfg, abs, err := loadYAML[*engineagent.Config](ctx, e, path) + if err != nil { + return fmt.Errorf("load agent config: %w", err) + } + if err := e.registerAgent(cfg, registrationSourceYAML); err != nil { + return fmt.Errorf("%s: %w", abs, err) + } + return nil +} + +// LoadAgentsFromDir loads agents configurations from a directory. +func (e *Engine) LoadAgentsFromDir(ctx context.Context, dir string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + return e.loadFromDir(ctx, dir, e.LoadAgent) +} + +// LoadTool loads a tool configuration from disk. +func (e *Engine) LoadTool(ctx context.Context, path string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + cfg, abs, err := loadYAML[*enginetool.Config](ctx, e, path) + if err != nil { + return fmt.Errorf("load tool config: %w", err) + } + if err := e.registerTool(cfg, registrationSourceYAML); err != nil { + return fmt.Errorf("%s: %w", abs, err) + } + return nil +} + +// LoadToolsFromDir loads tools configurations from a directory. +func (e *Engine) LoadToolsFromDir(ctx context.Context, dir string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + return e.loadFromDir(ctx, dir, e.LoadTool) +} + +// LoadKnowledge loads a knowledge base configuration from disk. 
+func (e *Engine) LoadKnowledge(ctx context.Context, path string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + cfg, abs, err := loadYAML[*engineknowledge.BaseConfig](ctx, e, path) + if err != nil { + return fmt.Errorf("load knowledge config: %w", err) + } + if err := e.registerKnowledge(cfg, registrationSourceYAML); err != nil { + return fmt.Errorf("%s: %w", abs, err) + } + return nil +} + +// LoadKnowledgeBasesFromDir loads knowledges configurations from a directory. +func (e *Engine) LoadKnowledgeBasesFromDir(ctx context.Context, dir string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + return e.loadFromDir(ctx, dir, e.LoadKnowledge) +} + +// LoadMemory loads a memory configuration from disk. +func (e *Engine) LoadMemory(ctx context.Context, path string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + cfg, abs, err := loadYAML[*enginememory.Config](ctx, e, path) + if err != nil { + return fmt.Errorf("load memory config: %w", err) + } + if err := e.registerMemory(cfg, registrationSourceYAML); err != nil { + return fmt.Errorf("%s: %w", abs, err) + } + return nil +} + +// LoadMemoriesFromDir loads memorys configurations from a directory. +func (e *Engine) LoadMemoriesFromDir(ctx context.Context, dir string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + return e.loadFromDir(ctx, dir, e.LoadMemory) +} + +// LoadMCP loads a mcp configuration from disk. +func (e *Engine) LoadMCP(ctx context.Context, path string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + cfg, abs, err := loadYAML[*enginemcp.Config](ctx, e, path) + if err != nil { + return fmt.Errorf("load mcp config: %w", err) + } + if err := e.registerMCP(cfg, registrationSourceYAML); err != nil { + return fmt.Errorf("%s: %w", abs, err) + } + return nil +} + +// LoadMCPsFromDir loads mcps configurations from a directory. 
+func (e *Engine) LoadMCPsFromDir(ctx context.Context, dir string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + return e.loadFromDir(ctx, dir, e.LoadMCP) +} + +// LoadSchema loads a schema configuration from disk. +func (e *Engine) LoadSchema(ctx context.Context, path string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + cfg, abs, err := loadYAML[*engineschema.Schema](ctx, e, path) + if err != nil { + return fmt.Errorf("load schema config: %w", err) + } + if err := e.registerSchema(cfg, registrationSourceYAML); err != nil { + return fmt.Errorf("%s: %w", abs, err) + } + return nil +} + +// LoadSchemasFromDir loads schemas configurations from a directory. +func (e *Engine) LoadSchemasFromDir(ctx context.Context, dir string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + return e.loadFromDir(ctx, dir, e.LoadSchema) +} + +// LoadModel loads a model configuration from disk. +func (e *Engine) LoadModel(ctx context.Context, path string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + cfg, abs, err := loadYAML[*enginecore.ProviderConfig](ctx, e, path) + if err != nil { + return fmt.Errorf("load model config: %w", err) + } + if err := e.registerModel(cfg, registrationSourceYAML); err != nil { + return fmt.Errorf("%s: %w", abs, err) + } + return nil +} + +// LoadModelsFromDir loads models configurations from a directory. +func (e *Engine) LoadModelsFromDir(ctx context.Context, dir string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + return e.loadFromDir(ctx, dir, e.LoadModel) +} + +// LoadSchedule loads a schedule configuration from disk. 
+func (e *Engine) LoadSchedule(ctx context.Context, path string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + cfg, abs, err := loadYAML[*projectschedule.Config](ctx, e, path) + if err != nil { + return fmt.Errorf("load schedule config: %w", err) + } + if err := e.registerSchedule(cfg, registrationSourceYAML); err != nil { + return fmt.Errorf("%s: %w", abs, err) + } + return nil +} + +// LoadSchedulesFromDir loads schedules configurations from a directory. +func (e *Engine) LoadSchedulesFromDir(ctx context.Context, dir string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + return e.loadFromDir(ctx, dir, e.LoadSchedule) +} + +// LoadWebhook loads a webhook configuration from disk. +func (e *Engine) LoadWebhook(ctx context.Context, path string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + cfg, abs, err := loadYAML[*enginewebhook.Config](ctx, e, path) + if err != nil { + return fmt.Errorf("load webhook config: %w", err) + } + if err := e.registerWebhook(cfg, registrationSourceYAML); err != nil { + return fmt.Errorf("%s: %w", abs, err) + } + return nil +} + +// LoadWebhooksFromDir loads webhooks configurations from a directory. 
+func (e *Engine) LoadWebhooksFromDir(ctx context.Context, dir string) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + return e.loadFromDir(ctx, dir, e.LoadWebhook) +} diff --git a/sdk/compozy/engine_loading_errors_test.go b/sdk/compozy/engine_loading_errors_test.go new file mode 100644 index 00000000..45a3b385 --- /dev/null +++ b/sdk/compozy/engine_loading_errors_test.go @@ -0,0 +1,58 @@ +package compozy + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLoadFunctionsRequireEngineInstance(t *testing.T) { + t.Parallel() + var engine *Engine + tests := []struct { + name string + call func(context.Context) error + }{ + {"LoadProject", func(ctx context.Context) error { return engine.LoadProject(ctx, "config.yaml") }}, + {"LoadProjectsFromDir", func(ctx context.Context) error { return engine.LoadProjectsFromDir(ctx, "configs") }}, + {"LoadWorkflow", func(ctx context.Context) error { return engine.LoadWorkflow(ctx, "workflow.yaml") }}, + { + "LoadWorkflowsFromDir", + func(ctx context.Context) error { return engine.LoadWorkflowsFromDir(ctx, "workflows") }, + }, + {"LoadAgent", func(ctx context.Context) error { return engine.LoadAgent(ctx, "agent.yaml") }}, + {"LoadAgentsFromDir", func(ctx context.Context) error { return engine.LoadAgentsFromDir(ctx, "agents") }}, + {"LoadTool", func(ctx context.Context) error { return engine.LoadTool(ctx, "tool.yaml") }}, + {"LoadToolsFromDir", func(ctx context.Context) error { return engine.LoadToolsFromDir(ctx, "tools") }}, + {"LoadKnowledge", func(ctx context.Context) error { return engine.LoadKnowledge(ctx, "knowledge.yaml") }}, + { + "LoadKnowledgeBasesFromDir", + func(ctx context.Context) error { return engine.LoadKnowledgeBasesFromDir(ctx, "knowledge") }, + }, + {"LoadMemory", func(ctx context.Context) error { return engine.LoadMemory(ctx, "memory.yaml") }}, + {"LoadMemoriesFromDir", func(ctx context.Context) error { return 
engine.LoadMemoriesFromDir(ctx, "memories") }}, + {"LoadMCP", func(ctx context.Context) error { return engine.LoadMCP(ctx, "mcp.yaml") }}, + {"LoadMCPsFromDir", func(ctx context.Context) error { return engine.LoadMCPsFromDir(ctx, "mcps") }}, + {"LoadSchema", func(ctx context.Context) error { return engine.LoadSchema(ctx, "schema.yaml") }}, + {"LoadSchemasFromDir", func(ctx context.Context) error { return engine.LoadSchemasFromDir(ctx, "schemas") }}, + {"LoadModel", func(ctx context.Context) error { return engine.LoadModel(ctx, "model.yaml") }}, + {"LoadModelsFromDir", func(ctx context.Context) error { return engine.LoadModelsFromDir(ctx, "models") }}, + {"LoadSchedule", func(ctx context.Context) error { return engine.LoadSchedule(ctx, "schedule.yaml") }}, + { + "LoadSchedulesFromDir", + func(ctx context.Context) error { return engine.LoadSchedulesFromDir(ctx, "schedules") }, + }, + {"LoadWebhook", func(ctx context.Context) error { return engine.LoadWebhook(ctx, "webhook.yaml") }}, + {"LoadWebhooksFromDir", func(ctx context.Context) error { return engine.LoadWebhooksFromDir(ctx, "webhooks") }}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + err := tc.call(t.Context()) + require.Error(t, err) + assert.Contains(t, err.Error(), "engine is nil") + }) + } +} diff --git a/sdk/compozy/engine_registration.go b/sdk/compozy/engine_registration.go new file mode 100644 index 00000000..791d35ff --- /dev/null +++ b/sdk/compozy/engine_registration.go @@ -0,0 +1,512 @@ +// Code generated by compozygen. DO NOT EDIT. 
+package compozy + +import ( + "context" + "errors" + "fmt" + "strings" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + "github.com/compozy/compozy/engine/resources" + engineschema "github.com/compozy/compozy/engine/schema" + enginetool "github.com/compozy/compozy/engine/tool" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" +) + +type registrationSource string + +const ( + registrationSourceProgrammatic registrationSource = registrationSource(resourceSourceProgrammatic) + registrationSourceYAML registrationSource = registrationSource(resourceSourceYAML) +) + +func (s registrationSource) metaSource() string { + if s == "" { + return resourceSourceProgrammatic + } + return string(s) +} + +func (e *Engine) RegisterProject(cfg *engineproject.Config) error { + return e.registerProject(cfg, registrationSourceProgrammatic) +} + +func (e *Engine) registerProject(cfg *engineproject.Config, source registrationSource) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + if cfg == nil { + return fmt.Errorf("project config is required") + } + name := strings.TrimSpace(cfg.Name) + if name == "" { + return fmt.Errorf("project name is required") + } + e.stateMu.Lock() + if e.project != nil { + existing := strings.TrimSpace(e.project.Name) + e.stateMu.Unlock() + return fmt.Errorf("project %s already registered", existing) + } + e.project = cfg + store := e.resourceStore + e.stateMu.Unlock() + if err := e.persistResource(e.ctx, store, name, resources.ResourceProject, name, cfg, source); err != nil { + e.stateMu.Lock() + if e.project 
== cfg { + e.project = nil + } + e.stateMu.Unlock() + return err + } + return nil +} + +func (e *Engine) RegisterWorkflow(cfg *engineworkflow.Config) error { + return e.registerWorkflow(cfg, registrationSourceProgrammatic) +} + +func (e *Engine) registerWorkflow(cfg *engineworkflow.Config, source registrationSource) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + if cfg == nil { + return fmt.Errorf("workflow config is required") + } + id := strings.TrimSpace(cfg.ID) + if id == "" { + return fmt.Errorf("workflow id is required") + } + e.stateMu.Lock() + if containsConfig(e.workflows, func(existing *engineworkflow.Config) bool { + return strings.TrimSpace(existing.ID) == id + }) { + e.stateMu.Unlock() + return fmt.Errorf("workflow %s already registered", id) + } + e.workflows = append(e.workflows, cfg) + store := e.resourceStore + projectName := projectNameOf(e.project) + e.stateMu.Unlock() + if err := e.persistResource(e.ctx, store, projectName, resources.ResourceWorkflow, id, cfg, source); err != nil { + e.stateMu.Lock() + e.workflows = removeConfig(e.workflows, func(value *engineworkflow.Config) bool { + return value == cfg + }) + e.stateMu.Unlock() + return err + } + return nil +} + +func (e *Engine) RegisterAgent(cfg *engineagent.Config) error { + return e.registerAgent(cfg, registrationSourceProgrammatic) +} + +func (e *Engine) registerAgent(cfg *engineagent.Config, source registrationSource) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + if cfg == nil { + return fmt.Errorf("agent config is required") + } + id := strings.TrimSpace(cfg.ID) + if id == "" { + return fmt.Errorf("agent id is required") + } + e.stateMu.Lock() + if containsConfig(e.agents, func(existing *engineagent.Config) bool { + return strings.TrimSpace(existing.ID) == id + }) { + e.stateMu.Unlock() + return fmt.Errorf("agent %s already registered", id) + } + e.agents = append(e.agents, cfg) + store := e.resourceStore + projectName := projectNameOf(e.project) 
+ e.stateMu.Unlock() + if err := e.persistResource(e.ctx, store, projectName, resources.ResourceAgent, id, cfg, source); err != nil { + e.stateMu.Lock() + e.agents = removeConfig(e.agents, func(value *engineagent.Config) bool { + return value == cfg + }) + e.stateMu.Unlock() + return err + } + return nil +} + +func (e *Engine) RegisterTool(cfg *enginetool.Config) error { + return e.registerTool(cfg, registrationSourceProgrammatic) +} + +func (e *Engine) registerTool(cfg *enginetool.Config, source registrationSource) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + if cfg == nil { + return fmt.Errorf("tool config is required") + } + id := strings.TrimSpace(cfg.ID) + if id == "" { + return fmt.Errorf("tool id is required") + } + e.stateMu.Lock() + if containsConfig(e.tools, func(existing *enginetool.Config) bool { + return strings.TrimSpace(existing.ID) == id + }) { + e.stateMu.Unlock() + return fmt.Errorf("tool %s already registered", id) + } + e.tools = append(e.tools, cfg) + store := e.resourceStore + projectName := projectNameOf(e.project) + e.stateMu.Unlock() + if err := e.persistResource(e.ctx, store, projectName, resources.ResourceTool, id, cfg, source); err != nil { + e.stateMu.Lock() + e.tools = removeConfig(e.tools, func(value *enginetool.Config) bool { + return value == cfg + }) + e.stateMu.Unlock() + return err + } + return nil +} + +func (e *Engine) RegisterKnowledge(cfg *engineknowledge.BaseConfig) error { + return e.registerKnowledge(cfg, registrationSourceProgrammatic) +} + +func (e *Engine) registerKnowledge(cfg *engineknowledge.BaseConfig, source registrationSource) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + if cfg == nil { + return fmt.Errorf("knowledge config is required") + } + id := strings.TrimSpace(cfg.ID) + if id == "" { + return fmt.Errorf("knowledge base id is required") + } + e.stateMu.Lock() + if containsConfig(e.knowledgeBases, func(existing *engineknowledge.BaseConfig) bool { + return 
strings.TrimSpace(existing.ID) == id + }) { + e.stateMu.Unlock() + return fmt.Errorf("knowledge base %s already registered", id) + } + e.knowledgeBases = append(e.knowledgeBases, cfg) + store := e.resourceStore + projectName := projectNameOf(e.project) + e.stateMu.Unlock() + if err := e.persistResource(e.ctx, store, projectName, resources.ResourceKnowledgeBase, id, cfg, source); err != nil { + e.stateMu.Lock() + e.knowledgeBases = removeConfig(e.knowledgeBases, func(value *engineknowledge.BaseConfig) bool { + return value == cfg + }) + e.stateMu.Unlock() + return err + } + return nil +} + +func (e *Engine) RegisterMemory(cfg *enginememory.Config) error { + return e.registerMemory(cfg, registrationSourceProgrammatic) +} + +func (e *Engine) registerMemory(cfg *enginememory.Config, source registrationSource) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + if cfg == nil { + return fmt.Errorf("memory config is required") + } + id := strings.TrimSpace(cfg.ID) + if id == "" { + return fmt.Errorf("memory id is required") + } + e.stateMu.Lock() + if containsConfig(e.memories, func(existing *enginememory.Config) bool { + return strings.TrimSpace(existing.ID) == id + }) { + e.stateMu.Unlock() + return fmt.Errorf("memory %s already registered", id) + } + e.memories = append(e.memories, cfg) + store := e.resourceStore + projectName := projectNameOf(e.project) + e.stateMu.Unlock() + if err := e.persistResource(e.ctx, store, projectName, resources.ResourceMemory, id, cfg, source); err != nil { + e.stateMu.Lock() + e.memories = removeConfig(e.memories, func(value *enginememory.Config) bool { + return value == cfg + }) + e.stateMu.Unlock() + return err + } + return nil +} + +func (e *Engine) RegisterMCP(cfg *enginemcp.Config) error { + return e.registerMCP(cfg, registrationSourceProgrammatic) +} + +func (e *Engine) registerMCP(cfg *enginemcp.Config, source registrationSource) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + if cfg == nil { + 
return fmt.Errorf("mcp config is required") + } + id := strings.TrimSpace(cfg.ID) + if id == "" { + return fmt.Errorf("mcp id is required") + } + e.stateMu.Lock() + if containsConfig(e.mcps, func(existing *enginemcp.Config) bool { + return strings.TrimSpace(existing.ID) == id + }) { + e.stateMu.Unlock() + return fmt.Errorf("mcp %s already registered", id) + } + e.mcps = append(e.mcps, cfg) + store := e.resourceStore + projectName := projectNameOf(e.project) + e.stateMu.Unlock() + if err := e.persistResource(e.ctx, store, projectName, resources.ResourceMCP, id, cfg, source); err != nil { + e.stateMu.Lock() + e.mcps = removeConfig(e.mcps, func(value *enginemcp.Config) bool { + return value == cfg + }) + e.stateMu.Unlock() + return err + } + return nil +} + +func (e *Engine) RegisterSchema(cfg *engineschema.Schema) error { + return e.registerSchema(cfg, registrationSourceProgrammatic) +} + +func (e *Engine) registerSchema(cfg *engineschema.Schema, source registrationSource) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + if cfg == nil { + return fmt.Errorf("schema config is required") + } + id := strings.TrimSpace(engineschema.GetID(cfg)) + if id == "" { + return fmt.Errorf("schema id is required") + } + e.stateMu.Lock() + if containsConfig(e.schemas, func(existing *engineschema.Schema) bool { + return strings.TrimSpace(engineschema.GetID(existing)) == id + }) { + e.stateMu.Unlock() + return fmt.Errorf("schema %s already registered", id) + } + e.schemas = append(e.schemas, cfg) + store := e.resourceStore + projectName := projectNameOf(e.project) + e.stateMu.Unlock() + if err := e.persistResource(e.ctx, store, projectName, resources.ResourceSchema, id, cfg, source); err != nil { + e.stateMu.Lock() + e.schemas = removeConfig(e.schemas, func(value *engineschema.Schema) bool { + return value == cfg + }) + e.stateMu.Unlock() + return err + } + return nil +} + +func (e *Engine) RegisterModel(cfg *enginecore.ProviderConfig) error { + return 
e.registerModel(cfg, registrationSourceProgrammatic) +} + +func (e *Engine) registerModel(cfg *enginecore.ProviderConfig, source registrationSource) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + if cfg == nil { + return fmt.Errorf("model config is required") + } + provider := strings.TrimSpace(string(cfg.Provider)) + model := strings.TrimSpace(cfg.Model) + id := provider + ":" + model + if provider == "" || model == "" { + return fmt.Errorf("model identifier is required") + } + e.stateMu.Lock() + if containsConfig(e.models, func(existing *enginecore.ProviderConfig) bool { + key := strings.TrimSpace(string(existing.Provider)) + ":" + strings.TrimSpace(existing.Model) + return key == id + }) { + e.stateMu.Unlock() + return fmt.Errorf("model %s already registered", id) + } + e.models = append(e.models, cfg) + store := e.resourceStore + projectName := projectNameOf(e.project) + e.stateMu.Unlock() + if err := e.persistResource(e.ctx, store, projectName, resources.ResourceModel, id, cfg, source); err != nil { + e.stateMu.Lock() + e.models = removeConfig(e.models, func(value *enginecore.ProviderConfig) bool { + return value == cfg + }) + e.stateMu.Unlock() + return err + } + return nil +} + +func (e *Engine) RegisterSchedule(cfg *projectschedule.Config) error { + return e.registerSchedule(cfg, registrationSourceProgrammatic) +} + +func (e *Engine) registerSchedule(cfg *projectschedule.Config, source registrationSource) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + if cfg == nil { + return fmt.Errorf("schedule config is required") + } + id := strings.TrimSpace(cfg.ID) + if id == "" { + return fmt.Errorf("schedule id is required") + } + e.stateMu.Lock() + if containsConfig(e.schedules, func(existing *projectschedule.Config) bool { + return strings.TrimSpace(existing.ID) == id + }) { + e.stateMu.Unlock() + return fmt.Errorf("schedule %s already registered", id) + } + e.schedules = append(e.schedules, cfg) + store := e.resourceStore + 
projectName := projectNameOf(e.project) + e.stateMu.Unlock() + if err := e.persistResource(e.ctx, store, projectName, resources.ResourceSchedule, id, cfg, source); err != nil { + e.stateMu.Lock() + e.schedules = removeConfig(e.schedules, func(value *projectschedule.Config) bool { + return value == cfg + }) + e.stateMu.Unlock() + return err + } + return nil +} + +func (e *Engine) RegisterWebhook(cfg *enginewebhook.Config) error { + return e.registerWebhook(cfg, registrationSourceProgrammatic) +} + +func (e *Engine) registerWebhook(cfg *enginewebhook.Config, source registrationSource) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + if cfg == nil { + return fmt.Errorf("webhook config is required") + } + slug := strings.TrimSpace(cfg.Slug) + if slug == "" { + return fmt.Errorf("webhook slug is required") + } + e.stateMu.Lock() + if containsConfig(e.webhooks, func(existing *enginewebhook.Config) bool { + return strings.TrimSpace(existing.Slug) == slug + }) { + e.stateMu.Unlock() + return fmt.Errorf("webhook %s already registered", slug) + } + e.webhooks = append(e.webhooks, cfg) + store := e.resourceStore + projectName := projectNameOf(e.project) + e.stateMu.Unlock() + if err := e.persistResource(e.ctx, store, projectName, resources.ResourceWebhook, slug, cfg, source); err != nil { + e.stateMu.Lock() + e.webhooks = removeConfig(e.webhooks, func(value *enginewebhook.Config) bool { + return value == cfg + }) + e.stateMu.Unlock() + return err + } + return nil +} + +func (e *Engine) persistResource( + ctx context.Context, + store resources.ResourceStore, + projectName string, + typ resources.ResourceType, + id string, + value any, + source registrationSource, +) error { + if ctx == nil || store == nil { + return nil + } + keyProject := strings.TrimSpace(projectName) + resourceID := strings.TrimSpace(id) + if typ == resources.ResourceProject { + keyProject = resourceID + } else if keyProject == "" { + return nil + } + if resourceID == "" { + return 
fmt.Errorf("%s id is required", string(typ)) + } + key := resources.ResourceKey{Project: keyProject, Type: typ, ID: resourceID} + if _, _, err := store.Get(ctx, key); err == nil { + return fmt.Errorf("%s %s already registered", string(typ), resourceID) + } else if err != nil && !errors.Is(err, resources.ErrNotFound) { + return fmt.Errorf("inspect %s %s registration state: %w", string(typ), resourceID, err) + } + if _, err := store.Put(ctx, key, value); err != nil { + return fmt.Errorf("store %s %s: %w", string(typ), resourceID, err) + } + if err := resources.WriteMeta(ctx, store, key.Project, typ, resourceID, source.metaSource(), registrationUpdatedBy); err != nil { + return fmt.Errorf("write %s %s metadata: %w", string(typ), resourceID, err) + } + return nil +} + +func projectNameOf(cfg *engineproject.Config) string { + if cfg == nil { + return "" + } + return strings.TrimSpace(cfg.Name) +} + +func containsConfig[T any](values []*T, predicate func(*T) bool) bool { + for _, value := range values { + if value == nil { + continue + } + if predicate(value) { + return true + } + } + return false +} + +func removeConfig[T any](values []*T, predicate func(*T) bool) []*T { + for i, value := range values { + if value == nil { + continue + } + if predicate(value) { + return append(values[:i], values[i+1:]...) + } + } + return values +} diff --git a/sdk/compozy/errors.go b/sdk/compozy/errors.go new file mode 100644 index 00000000..0185e04f --- /dev/null +++ b/sdk/compozy/errors.go @@ -0,0 +1,12 @@ +package compozy + +import "errors" + +var ( + // ErrAlreadyStarted indicates the engine has already been started. + ErrAlreadyStarted = errors.New("engine already started") + // ErrNotStarted indicates the engine has not been started yet. + ErrNotStarted = errors.New("engine not started") + // ErrConfigUnavailable indicates no configuration is available on the context. 
+ ErrConfigUnavailable = errors.New("configuration is unavailable in context") +) diff --git a/sdk/compozy/execution_client_test.go b/sdk/compozy/execution_client_test.go new file mode 100644 index 00000000..f4d0ed93 --- /dev/null +++ b/sdk/compozy/execution_client_test.go @@ -0,0 +1,469 @@ +package compozy + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "strings" + "sync" + "testing" + "time" + + "github.com/compozy/compozy/pkg/logger" + sdkclient "github.com/compozy/compozy/sdk/v2/client" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEngineExecutionDelegation(t *testing.T) { + t.Run("Should delegate workflow async execution to sdk client", func(t *testing.T) { + t.Parallel() + transport := newStubTransport( + t, + jsonHandler( //nolint:bodyclose // response closed via test cleanup + t, + http.MethodPost, + "/api/v0/workflows/workflow-123/executions", + http.StatusAccepted, + map[string]any{ + "exec_id": "exec-123", + "exec_url": "http://testing.local/api/v0/executions/exec-123", + "workflow_id": "workflow-123", + }, + func(body map[string]any) { + assert.Equal(t, map[string]any{"name": "demo"}, body["input"]) + assert.Equal(t, "task-xyz", body["task_id"]) + }, + ), + ) + engine := engineWithClient(t, transport) + ctx := executionContext(t) + resp, err := engine.ExecuteWorkflow(ctx, "workflow-123", &ExecuteRequest{ + Input: map[string]any{"name": "demo"}, + Options: map[string]any{"task_id": "task-xyz"}, + }) + require.NoError(t, err) + assert.Equal(t, "exec-123", resp.ExecID) + assert.Equal(t, "http://testing.local/api/v0/executions/exec-123", resp.ExecURL) + transport.AssertExhausted(t) + }) + + t.Run("Should translate workflow sync request timeout to seconds", func(t *testing.T) { + t.Parallel() + transport := newStubTransport( + t, + jsonHandler( //nolint:bodyclose // response closed via test cleanup + t, + http.MethodPost, + "/api/v0/workflows/sample/executions/sync", + http.StatusOK, + 
map[string]any{ + "exec_id": "sync-321", + "output": map[string]any{"result": "ok"}, + }, + func(body map[string]any) { + assert.Equal(t, float64(2), body["timeout"]) + assert.Equal(t, map[string]any{"input": "value"}, body["input"]) + }, + ), + ) + engine := engineWithClient(t, transport) + ctx := executionContext(t) + resp, err := engine.ExecuteWorkflowSync(ctx, "sample", &ExecuteSyncRequest{ + Input: map[string]any{"input": "value"}, + Timeout: durationPtr(2 * time.Second), + Options: map[string]any{"task_id": "ignored"}, + }) + require.NoError(t, err) + assert.Equal(t, "sync-321", resp.ExecID) + assert.Equal(t, map[string]any{"result": "ok"}, resp.Output) + transport.AssertExhausted(t) + }) + + t.Run("Should translate workflow stream execution to sdk stream session", func(t *testing.T) { + t.Parallel() + sseData := "event: started\nid: 1\ndata: {\"status\":\"running\"}\n\n" + transport := newStubTransport( + t, + jsonHandler( //nolint:bodyclose // response closed via test cleanup + t, + http.MethodPost, + "/api/v0/workflows/streamer/executions", + http.StatusAccepted, + map[string]any{ + "exec_id": "exec-stream", + "exec_url": "http://testing.local/api/v0/executions/exec-stream", + }, + func(body map[string]any) { + assert.Equal(t, map[string]any{"foo": "bar"}, body["input"]) + }, + ), + streamHandler( //nolint:bodyclose // response closed via test cleanup + t, + http.MethodGet, + "/api/v0/executions/exec-stream/stream", + sseData, + ), + ) + engine := engineWithClient(t, transport) + ctx := executionContext(t) + session, err := engine.ExecuteWorkflowStream(ctx, "streamer", &ExecuteRequest{ + Input: map[string]any{"foo": "bar"}, + }, nil) + require.NoError(t, err) + require.NotNil(t, session) + assert.Equal(t, "exec-stream", session.ExecID) + + select { + case evt := <-session.Events(): + assert.Equal(t, int64(1), evt.ID) + assert.Equal(t, "started", evt.Type) + assert.JSONEq(t, `{"status":"running"}`, string(evt.Data)) + case <-time.After(time.Second): + 
t.Fatal("expected stream event") + } + + select { + case err := <-session.Errors(): + require.ErrorIs(t, err, io.EOF) + case <-time.After(time.Second): + t.Fatal("expected terminal stream error") + } + require.NoError(t, session.Close()) + transport.AssertExhausted(t) + }) + + t.Run("Should delegate task sync execution with derived timeout", func(t *testing.T) { + t.Parallel() + transport := newStubTransport( + t, + jsonHandler( //nolint:bodyclose // response closed via test cleanup + t, + http.MethodPost, + "/api/v0/tasks/task-555/executions/sync", + http.StatusOK, + map[string]any{ + "exec_id": "task-sync", + }, + func(body map[string]any) { + assert.Equal(t, map[string]any{"key": "value"}, body["with"]) + assert.Equal(t, float64(5), body["timeout"]) + }, + ), + ) + engine := engineWithClient(t, transport) + ctx := executionContext(t) + resp, err := engine.ExecuteTaskSync(ctx, "task-555", &ExecuteSyncRequest{ + Input: map[string]any{"key": "value"}, + Timeout: durationPtr(5 * time.Second), + }) + require.NoError(t, err) + assert.Equal(t, "task-sync", resp.ExecID) + transport.AssertExhausted(t) + }) + + t.Run("Should delegate agent async execution with options mapped", func(t *testing.T) { + t.Parallel() + transport := newStubTransport( + t, + jsonHandler( //nolint:bodyclose // response closed via test cleanup + t, + http.MethodPost, + "/api/v0/agents/support/executions", + http.StatusAccepted, + map[string]any{ + "exec_id": "agent-async", + "exec_url": "http://testing.local/api/v0/executions/agents/agent-async", + }, + func(body map[string]any) { + assert.Equal(t, map[string]any{"topic": "billing"}, body["with"]) + assert.Equal(t, "summarize", body["action"]) + assert.Equal(t, "help the user", body["prompt"]) + assert.Equal(t, float64(7), body["timeout"]) + }, + ), + ) + engine := engineWithClient(t, transport) + ctx := executionContext(t) + resp, err := engine.ExecuteAgent(ctx, "support", &ExecuteRequest{ + Input: map[string]any{"topic": "billing"}, + Options: 
map[string]any{ + "action": "summarize", + "prompt": "help the user", + "timeout": 7, + }, + }) + require.NoError(t, err) + assert.Equal(t, "agent-async", resp.ExecID) + transport.AssertExhausted(t) + }) + + t.Run("Should delegate task async execution with timeout option parsing", func(t *testing.T) { + t.Parallel() + transport := newStubTransport( + t, + jsonHandler( //nolint:bodyclose // response closed via test cleanup + t, + http.MethodPost, + "/api/v0/tasks/job/executions", + http.StatusAccepted, + map[string]any{ + "exec_id": "task-async", + "exec_url": "http://testing.local/api/v0/executions/tasks/task-async", + }, + func(body map[string]any) { + assert.Equal(t, map[string]any{"payload": "data"}, body["with"]) + assert.Equal(t, float64(15), body["timeout"]) + }, + ), + ) + engine := engineWithClient(t, transport) + ctx := executionContext(t) + resp, err := engine.ExecuteTask(ctx, "job", &ExecuteRequest{ + Input: map[string]any{"payload": "data"}, + Options: map[string]any{"timeout": "15"}, + }) + require.NoError(t, err) + assert.Equal(t, "task-async", resp.ExecID) + transport.AssertExhausted(t) + }) + + t.Run("Should translate task stream execution", func(t *testing.T) { + t.Parallel() + sseData := "event: data\nid: 1\ndata: {}\n\n" + transport := newStubTransport( + t, + jsonHandler( //nolint:bodyclose // response closed via test cleanup + t, + http.MethodPost, + "/api/v0/tasks/stream/executions", + http.StatusAccepted, + map[string]any{ + "exec_id": "task-stream", + "exec_url": "http://testing.local/api/v0/executions/tasks/task-stream", + }, + func(body map[string]any) { + assert.Equal(t, map[string]any{"foo": "bar"}, body["with"]) + }, + ), + streamHandler( //nolint:bodyclose // response closed via test cleanup + t, + http.MethodGet, + "/api/v0/executions/tasks/task-stream/stream", + sseData, + ), + ) + engine := engineWithClient(t, transport) + ctx := executionContext(t) + session, err := engine.ExecuteTaskStream(ctx, "stream", &ExecuteRequest{ + Input: 
map[string]any{"foo": "bar"}, + }, nil) + require.NoError(t, err) + require.NotNil(t, session) + assert.Equal(t, "task-stream", session.ExecID) + select { + case <-session.Events(): + case <-time.After(time.Second): + t.Fatal("expected stream event") + } + require.NoError(t, session.Close()) + transport.AssertExhausted(t) + }) + + t.Run("Should delegate agent sync execution with prompt mapping", func(t *testing.T) { + t.Parallel() + transport := newStubTransport( + t, + jsonHandler( //nolint:bodyclose // response closed via test cleanup + t, + http.MethodPost, + "/api/v0/agents/assistant/executions/sync", + http.StatusOK, + map[string]any{ + "exec_id": "agent-sync", + "output": map[string]any{"value": "ok"}, + }, + func(body map[string]any) { + assert.Equal(t, map[string]any{"input": "value"}, body["with"]) + assert.Equal(t, "answer", body["prompt"]) + assert.Equal(t, "summarize", body["action"]) + assert.Equal(t, float64(3), body["timeout"]) + }, + ), + ) + engine := engineWithClient(t, transport) + ctx := executionContext(t) + resp, err := engine.ExecuteAgentSync(ctx, "assistant", &ExecuteSyncRequest{ + Input: map[string]any{"input": "value"}, + Options: map[string]any{ + "action": "summarize", + "prompt": "answer", + "timeout": 3, + }, + }) + require.NoError(t, err) + assert.Equal(t, "agent-sync", resp.ExecID) + assert.Equal(t, map[string]any{"value": "ok"}, resp.Output) + transport.AssertExhausted(t) + }) + + t.Run("Should translate agent stream execution", func(t *testing.T) { + t.Parallel() + sseData := "event: delta\nid: 5\ndata: {}\n\n" + transport := newStubTransport( + t, + jsonHandler( //nolint:bodyclose // response closed via test cleanup + t, + http.MethodPost, + "/api/v0/agents/agent-stream/executions", + http.StatusAccepted, + map[string]any{ + "exec_id": "agent-stream-exec", + "exec_url": "http://testing.local/api/v0/executions/agents/agent-stream-exec", + }, + func(body map[string]any) { + assert.Equal(t, map[string]any{"foo": "bar"}, body["with"]) 
+ }, + ), + streamHandler( //nolint:bodyclose // response closed via test cleanup + t, + http.MethodGet, + "/api/v0/executions/agents/agent-stream-exec/stream", + sseData, + ), + ) + engine := engineWithClient(t, transport) + ctx := executionContext(t) + session, err := engine.ExecuteAgentStream(ctx, "agent-stream", &ExecuteRequest{ + Input: map[string]any{"foo": "bar"}, + }, nil) + require.NoError(t, err) + require.NotNil(t, session) + assert.Equal(t, "agent-stream-exec", session.ExecID) + select { + case <-session.Events(): + case <-time.After(time.Second): + t.Fatal("expected agent stream event") + } + require.NoError(t, session.Close()) + transport.AssertExhausted(t) + }) +} + +type stubTransport struct { + t *testing.T + handlers []func(*http.Request) (*http.Response, error) + mu sync.Mutex + index int +} + +func newStubTransport( + t *testing.T, + handlers ...func(*http.Request) (*http.Response, error), +) *stubTransport { + return &stubTransport{t: t, handlers: handlers} +} + +func (s *stubTransport) RoundTrip(req *http.Request) (*http.Response, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.index >= len(s.handlers) { + s.t.Fatalf("unexpected request %s %s", req.Method, req.URL) + } + handler := s.handlers[s.index] + s.index++ + return handler(req) +} + +func (s *stubTransport) AssertExhausted(t *testing.T) { + t.Helper() + s.mu.Lock() + defer s.mu.Unlock() + if s.index != len(s.handlers) { + t.Fatalf("expected %d requests, got %d", len(s.handlers), s.index) + } +} + +func jsonHandler( + t *testing.T, + expectedMethod string, + expectedPath string, + status int, + data map[string]any, + validate func(body map[string]any), +) func(*http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + require.Equal(t, expectedMethod, req.Method) + require.Equal(t, expectedPath, req.URL.Path) + defer req.Body.Close() + payload, err := io.ReadAll(req.Body) + require.NoError(t, err) + if len(payload) > 0 { + var body 
map[string]any + require.NoError(t, json.Unmarshal(payload, &body)) + if validate != nil { + validate(body) + } + } else if validate != nil { + validate(map[string]any{}) + } + bodyData, err := json.Marshal(map[string]any{ + "status": status, + "message": http.StatusText(status), + "data": data, + }) + require.NoError(t, err) + resp := &http.Response{ + StatusCode: status, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(bytes.NewReader(bodyData)), + Request: req, + } + t.Cleanup(func() { + require.NoError(t, resp.Body.Close()) + }) + return resp, nil + } +} + +func streamHandler( + t *testing.T, + expectedMethod string, + expectedPath string, + payload string, +) func(*http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + require.Equal(t, expectedMethod, req.Method) + require.Equal(t, expectedPath, req.URL.Path) + resp := &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"text/event-stream"}}, + Body: io.NopCloser(strings.NewReader(payload)), + Request: req, + } + t.Cleanup(func() { + require.NoError(t, resp.Body.Close()) + }) + return resp, nil + } +} + +func engineWithClient(t *testing.T, transport http.RoundTripper) *Engine { + t.Helper() + httpClient := &http.Client{Transport: transport} + ctx := executionContext(t) + client, err := sdkclient.New(ctx, "http://testing.local", sdkclient.WithHTTPClient(httpClient)) + require.NoError(t, err) + return &Engine{ctx: ctx, client: client} +} + +func executionContext(t *testing.T) context.Context { + t.Helper() + return logger.ContextWithLogger(t.Context(), logger.NewForTests()) +} + +func durationPtr(d time.Duration) *time.Duration { + return &d +} diff --git a/sdk/compozy/execution_helpers_test.go b/sdk/compozy/execution_helpers_test.go new file mode 100644 index 00000000..a6587a80 --- /dev/null +++ b/sdk/compozy/execution_helpers_test.go @@ -0,0 +1,72 @@ +package compozy + +import ( + 
"testing" + "time" + + client "github.com/compozy/compozy/sdk/v2/client" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type stringerStub string + +func (s stringerStub) String() string { + return string(s) +} + +func TestStringFromOptionsHandlesVariants(t *testing.T) { + t.Parallel() + assert.Equal(t, "", stringFromOptions(nil, "key")) + assert.Equal(t, "", stringFromOptions(map[string]any{}, "key")) + assert.Equal(t, "value", stringFromOptions(map[string]any{"key": " value "}, "key")) + assert.Equal(t, "stringer", stringFromOptions(map[string]any{"key": stringerStub(" stringer ")}, "key")) + assert.Equal(t, "", stringFromOptions(map[string]any{"key": 10}, "key")) +} + +func TestBuildTaskSyncRequestPrefersDuration(t *testing.T) { + t.Parallel() + timeout := 5 * time.Second + req := &ExecuteSyncRequest{ + Input: map[string]any{"key": "value"}, + Options: map[string]any{"timeout": 30}, + Timeout: &timeout, + } + payload := buildTaskSyncRequest(req) + require.NotNil(t, payload.Timeout) + assert.Equal(t, 5, *payload.Timeout) + require.NotNil(t, payload.With) + payload.With["key"] = "changed" + assert.Equal(t, "value", req.Input["key"]) +} + +func TestBuildTaskSyncRequestUsesTimeoutOption(t *testing.T) { + t.Parallel() + req := &ExecuteSyncRequest{ + Input: map[string]any{"key": "value"}, + Options: map[string]any{"timeout": "12"}, + } + payload := buildTaskSyncRequest(req) + require.NotNil(t, payload.Timeout) + assert.Equal(t, 12, *payload.Timeout) +} + +func TestBuildWorkflowExecuteRequestHandlesNilInput(t *testing.T) { + t.Parallel() + payload := buildWorkflowExecuteRequest(nil) + assert.Empty(t, payload.Input) + assert.Empty(t, payload.TaskID) +} + +func TestEnsureClientValidatesEngine(t *testing.T) { + t.Parallel() + _, err := ensureClient(nil) + assert.Error(t, err) + engine := &Engine{} + _, err = ensureClient(engine) + assert.Error(t, err) + engine.client = &client.Client{} + cli, err := ensureClient(engine) + 
require.NoError(t, err) + assert.NotNil(t, cli) +} diff --git a/sdk/compozy/generate.go b/sdk/compozy/generate.go new file mode 100644 index 00000000..6494ffae --- /dev/null +++ b/sdk/compozy/generate.go @@ -0,0 +1,3 @@ +package compozy + +//go:generate go run ./../internal/sdkcodegen/cmd/sdkcodegen -out=. diff --git a/sdk/compozy/integration/distributed_integration_test.go b/sdk/compozy/integration/distributed_integration_test.go new file mode 100644 index 00000000..ddfa448b --- /dev/null +++ b/sdk/compozy/integration/distributed_integration_test.go @@ -0,0 +1,91 @@ +//go:build integration +// +build integration + +package compozy + +import ( + "context" + "fmt" + "net/http" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/alicebob/miniredis/v2" + "github.com/compozy/compozy/engine/resources" + "github.com/compozy/compozy/engine/worker/embedded" + engineworkflow "github.com/compozy/compozy/engine/workflow" + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDistributedIntegrationLifecycle(t *testing.T) { + t.Run("Should run distributed integration lifecycle", func(t *testing.T) { + ctx := lifecycleTestContext(t) + cfg := appconfig.FromContext(ctx) + require.NotNil(t, cfg) + cfg.Mode = string(ModeDistributed) + mr := miniredis.NewMiniRedis() + require.NoError(t, mr.Start()) + t.Cleanup(mr.Close) + cfg.Redis.URL = "redis://" + mr.Addr() + cfg.Redis.Mode = string(ModeDistributed) + temporalPort := allocateTemporalFrontendPort(ctx, t) + temporalCfg := &embedded.Config{ + DatabaseFile: filepath.Join(t.TempDir(), "temporal.db"), + FrontendPort: temporalPort, + BindIP: "127.0.0.1", + Namespace: cfg.Temporal.Namespace, + ClusterName: "integration-distributed", + EnableUI: false, + RequireUI: false, + // Align UI port offset with standalone defaults. 
+ UIPort: temporalPort + 1000, + LogLevel: cfg.Temporal.Standalone.LogLevel, + StartTimeout: cfg.Temporal.Standalone.StartTimeout, + } + if strings.TrimSpace(temporalCfg.LogLevel) == "" { + temporalCfg.LogLevel = "warn" + } + if temporalCfg.StartTimeout <= 0 { + temporalCfg.StartTimeout = 30 * time.Second + } + temporalServer, err := embedded.NewServer(ctx, temporalCfg) + require.NoError(t, err) + require.NoError(t, temporalServer.Start(ctx)) + t.Cleanup(func() { + shutdownTimeout := cfg.Server.Timeouts.WorkerShutdown + if shutdownTimeout <= 0 { + shutdownTimeout = temporalCfg.StartTimeout + } + shutdownCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), shutdownTimeout) + defer cancel() + require.NoError(t, temporalServer.Stop(shutdownCtx)) + }) + cfg.Temporal.HostPort = temporalServer.FrontendAddress() + engine, err := New( + ctx, + WithMode(ModeDistributed), + WithWorkflow(&engineworkflow.Config{ID: "integration-distributed"}), + ) + require.NoError(t, err) + require.NoError(t, engine.Start(ctx)) + t.Cleanup(func() { + require.NoError(t, engine.Stop(ctx)) + engine.Wait() + }) + assert.True(t, engine.IsStarted()) + server := engine.Server() + require.NotNil(t, server) + resp, err := http.Get(fmt.Sprintf("http://%s", server.Addr)) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, http.StatusNotFound, resp.StatusCode) + store := engine.ResourceStore() + require.NotNil(t, store) + _, ok := store.(*resources.RedisResourceStore) + assert.True(t, ok) + }) +} diff --git a/sdk/compozy/integration/hybrid_yaml_integration_test.go b/sdk/compozy/integration/hybrid_yaml_integration_test.go new file mode 100644 index 00000000..b15c93df --- /dev/null +++ b/sdk/compozy/integration/hybrid_yaml_integration_test.go @@ -0,0 +1,64 @@ +//go:build integration +// +build integration + +package compozy + +import ( + "os" + "path/filepath" + "strings" + "testing" + + enginecore "github.com/compozy/compozy/engine/core" + enginetask 
"github.com/compozy/compozy/engine/task" + enginetool "github.com/compozy/compozy/engine/tool" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHybridYAMLIntegration(t *testing.T) { + t.Run("Should validate hybrid YAML integration", func(t *testing.T) { + ctx := lifecycleTestContext(t) + engine, err := New(ctx, WithWorkflow(programmaticWorkflow())) + require.NoError(t, err) + dir := t.TempDir() + workflowDir := filepath.Join(dir, "workflows") + require.NoError(t, os.MkdirAll(workflowDir, 0o755)) + yamlWorkflow := strings.TrimSpace(`id: yaml-workflow +tasks: + - id: entry + final: true + tool: + resource: tool + id: yaml-tool + type: http +`) + file := filepath.Join(workflowDir, "workflow.yaml") + require.NoError(t, os.WriteFile(file, []byte(yamlWorkflow), 0o600)) + require.NoError(t, engine.LoadWorkflowsFromDir(ctx, workflowDir)) + report, err := engine.ValidateReferences() + require.NoError(t, err) + assert.True(t, report.Valid) + }) +} + +func programmaticWorkflow() *engineworkflow.Config { + next := "finish" + cfg := &engineworkflow.Config{ID: "programmatic"} + cfg.Tasks = []enginetask.Config{ + { + ID: "start", + Tool: &enginetool.Config{ID: "prog-tool", Resource: "tool", Type: "http"}, + OnSuccess: &enginecore.SuccessTransition{ + Next: &next, + }, + }, + { + ID: "finish", + Final: true, + Tool: &enginetool.Config{ID: "prog-tool", Resource: "tool", Type: "http"}, + }, + } + return cfg +} diff --git a/sdk/compozy/integration/standalone_integration_test.go b/sdk/compozy/integration/standalone_integration_test.go new file mode 100644 index 00000000..e5e40319 --- /dev/null +++ b/sdk/compozy/integration/standalone_integration_test.go @@ -0,0 +1,39 @@ +//go:build integration +// +build integration + +package compozy + +import ( + "fmt" + "net/http" + "testing" + + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/stretchr/testify/assert" 
+ "github.com/stretchr/testify/require" +) + +func TestStandaloneIntegrationLifecycle(t *testing.T) { + t.Run("Should start standalone lifecycle", func(t *testing.T) { + ctx := lifecycleTestContext(t) + engine, err := New( + ctx, + WithWorkflow(&engineworkflow.Config{ID: "integration-standalone"}), + ) + require.NoError(t, err) + require.NoError(t, engine.Start(ctx)) + t.Cleanup(func() { + require.NoError(t, engine.Stop(ctx)) + engine.Wait() + }) + assert.True(t, engine.IsStarted()) + server := engine.Server() + require.NotNil(t, server) + resp, err := http.Get(fmt.Sprintf("http://%s", server.Addr)) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, http.StatusNotFound, resp.StatusCode) + store := engine.ResourceStore() + require.NotNil(t, store) + }) +} diff --git a/sdk/compozy/lifecycle.go b/sdk/compozy/lifecycle.go new file mode 100644 index 00000000..c6845431 --- /dev/null +++ b/sdk/compozy/lifecycle.go @@ -0,0 +1,547 @@ +package compozy + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-chi/chi/v5" + + "github.com/compozy/compozy/engine/resources" + "github.com/compozy/compozy/engine/tool/inline" + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" + sdkclient "github.com/compozy/compozy/sdk/v2/client" +) + +const ( + defaultHTTPReadHeaderTimeout = 5 * time.Second + defaultHTTPShutdownTimeout = 5 * time.Second +) + +// Start boots the engine lifecycle by initializing the resource store, HTTP server, and SDK client. 
+func (e *Engine) Start(ctx context.Context) error { + if ctx == nil { + return fmt.Errorf("context is required") + } + e.startMu.Lock() + defer e.startMu.Unlock() + if e.IsStarted() { + return ErrAlreadyStarted + } + cfg := appconfig.FromContext(ctx) + if cfg == nil { + return ErrConfigUnavailable + } + store, err := e.buildResourceStore(ctx, cfg) + if err != nil { + e.recordStartError(err) + return err + } + httpState, err := e.startHTTPComponents(ctx, cfg) + if err != nil { + e.cleanupStore(ctx, store) + cleanupErr := e.cleanupModeResources(ctx) + if cleanupErr != nil { + err = errors.Join(err, fmt.Errorf("cleanup mode resources: %w", cleanupErr)) + } + e.recordStartError(err) + return err + } + inlineManager, err := e.initInlineManager(ctx, cfg, store) + if err != nil { + e.cleanupHTTPState(ctx, httpState) + e.cleanupStore(ctx, store) + cleanupErr := e.cleanupModeResources(ctx) + if cleanupErr != nil { + err = errors.Join(err, fmt.Errorf("cleanup mode resources: %w", cleanupErr)) + } + e.recordStartError(err) + return err + } + e.applyStartState(cfg, store, httpState) + if inlineManager != nil { + e.stateMu.Lock() + e.inlineManager = inlineManager + e.stateMu.Unlock() + } + if log := logger.FromContext(ctx); log != nil { + log.Info("engine started", "mode", string(e.mode), "base_url", httpState.baseURL) + } + return nil +} + +type httpState struct { + router *chi.Mux + server *http.Server + listener net.Listener + cancel context.CancelFunc + client *sdkclient.Client + baseURL string + port int +} + +type stopState struct { + server *http.Server + listener net.Listener + store resources.ResourceStore + cancel context.CancelFunc + cfg *appconfig.Config +} + +func (e *Engine) startHTTPComponents(ctx context.Context, cfg *appconfig.Config) (*httpState, error) { + router := chi.NewRouter() + listenHost, listenPort := e.resolveListenAddress(cfg) + listener, actualPort, err := e.listen(ctx, listenHost, listenPort) + if err != nil { + return nil, err + } + serverCtx, 
cancel := context.WithCancel(ctx) + server := e.newHTTPServer(serverCtx, router, cfg, listener.Addr().String()) + client, baseURL, err := e.newClient(ctx, listenHost, actualPort) + if err != nil { + cancel() + _ = listener.Close() + return nil, err + } + e.serverWG = sync.WaitGroup{} + e.launchServer(ctx, server, listener) + return &httpState{ + router: router, + server: server, + listener: listener, + cancel: cancel, + client: client, + baseURL: baseURL, + port: actualPort, + }, nil +} + +func (e *Engine) initInlineManager( + ctx context.Context, + cfg *appconfig.Config, + store resources.ResourceStore, +) (*inline.Manager, error) { + if store == nil { + return nil, fmt.Errorf("resource store is required for inline manager") + } + projectName := projectNameOf(e.project) + if projectName == "" { + return nil, nil + } + root := strings.TrimSpace(cfg.CLI.CWD) + if root == "" { + wd, err := os.Getwd() + if err != nil { + return nil, fmt.Errorf("resolve project root: %w", err) + } + root = wd + } + manager, err := inline.NewManager(ctx, inline.Options{ + ProjectRoot: root, + ProjectName: projectName, + Store: store, + UserEntrypoint: strings.TrimSpace(cfg.Runtime.EntrypointPath), + }) + if err != nil { + return nil, err + } + if err := manager.Start(ctx); err != nil { + _ = manager.Close(ctx) + return nil, err + } + cfg.Runtime.EntrypointPath = manager.EntrypointPath() + return manager, nil +} + +func (e *Engine) cleanupHTTPState(ctx context.Context, state *httpState) { + if state == nil { + return + } + if state.cancel != nil { + state.cancel() + } + if state.server != nil { + shutdownCtx, cancel := context.WithTimeout(ctx, defaultHTTPShutdownTimeout) + if cancel != nil { + defer cancel() + } + if err := state.server.Shutdown(shutdownCtx); err != nil && !errors.Is(err, http.ErrServerClosed) { + if log := logger.FromContext(ctx); log != nil { + log.Warn("failed to shutdown http server", "error", err) + } + } + } + if state.listener != nil { + if err := 
state.listener.Close(); err != nil && !errors.Is(err, net.ErrClosed) { + if log := logger.FromContext(ctx); log != nil { + log.Warn("failed to close listener", "error", err) + } + } + } +} + +func (e *Engine) applyStartState(cfg *appconfig.Config, store resources.ResourceStore, httpState *httpState) { + e.stateMu.Lock() + e.resourceStore = store + e.router = httpState.router + e.server = httpState.server + e.listener = httpState.listener + e.client = httpState.client + e.configSnapshot = cfg + e.serverCancel = httpState.cancel + e.started = true + e.baseURL = httpState.baseURL + e.port = httpState.port + e.stopErr = nil + e.stateMu.Unlock() + e.errMu.Lock() + e.startErr = nil + e.errMu.Unlock() +} + +// Stop gracefully shuts down the engine and all managed resources. +func (e *Engine) Stop(ctx context.Context) error { + if ctx == nil { + return fmt.Errorf("context is required") + } + e.stopMu.Lock() + defer e.stopMu.Unlock() + if !e.IsStarted() && e.server == nil && e.resourceStore == nil { + return e.stopErr + } + log := logger.FromContext(ctx) + inlineManager := e.takeInlineManager() + if inlineManager != nil { + if err := inlineManager.Close(ctx); err != nil && log != nil { + log.Warn("failed to close inline manager", "error", err) + } + } + state := e.detachStopState() + if state.cancel != nil { + state.cancel() + } + shutdownCtx, cancel := deriveShutdownContext(ctx, state.cfg) + if cancel != nil { + defer cancel() + } + errs := e.shutdownResources(shutdownCtx, ctx, state) + return e.finalizeStop(ctx, errs) +} + +func (e *Engine) detachStopState() stopState { + e.stateMu.Lock() + defer e.stateMu.Unlock() + state := stopState{ + server: e.server, + listener: e.listener, + store: e.resourceStore, + cancel: e.serverCancel, + cfg: e.configSnapshot, + } + e.router = nil + e.server = nil + e.listener = nil + e.resourceStore = nil + e.serverCancel = nil + e.client = nil + e.configSnapshot = nil + e.baseURL = "" + e.port = 0 + e.started = false + return state +} + 
+func (e *Engine) takeInlineManager() *inline.Manager { + e.stateMu.Lock() + defer e.stateMu.Unlock() + manager := e.inlineManager + e.inlineManager = nil + return manager +} + +func deriveShutdownContext(ctx context.Context, cfg *appconfig.Config) (context.Context, context.CancelFunc) { + if cfg == nil { + return ctx, nil + } + timeout := cfg.Server.Timeouts.ServerShutdown + if timeout <= 0 { + return ctx, nil + } + return context.WithTimeout(ctx, timeout) +} + +func (e *Engine) shutdownResources(shutdownCtx context.Context, baseCtx context.Context, state stopState) []error { + var errs []error + if state.server != nil { + if err := state.server.Shutdown(shutdownCtx); err != nil && !errors.Is(err, http.ErrServerClosed) { + errs = append(errs, fmt.Errorf("shutdown http server: %w", err)) + } + } + if state.listener != nil { + if err := state.listener.Close(); err != nil && !errors.Is(err, net.ErrClosed) { + errs = append(errs, fmt.Errorf("close listener: %w", err)) + } + } + e.serverWG.Wait() + if state.store != nil { + if err := state.store.Close(); err != nil { + errs = append(errs, fmt.Errorf("close resource store: %w", err)) + } + } + if cleanupErr := e.cleanupModeResources(context.WithoutCancel(baseCtx)); cleanupErr != nil { + errs = append(errs, fmt.Errorf("cleanup mode resources: %w", cleanupErr)) + } + if serverErr := e.serverFailure(); serverErr != nil { + errs = append(errs, serverErr) + } + return errs +} + +func (e *Engine) finalizeStop(ctx context.Context, errs []error) error { + log := logger.FromContext(ctx) + if len(errs) > 0 { + err := errors.Join(errs...) + e.errMu.Lock() + e.stopErr = err + e.errMu.Unlock() + if log != nil { + log.Error("engine stopped with errors", "error", err) + } + return err + } + e.errMu.Lock() + e.serverErr = nil + e.stopErr = nil + e.errMu.Unlock() + if log != nil { + log.Info("engine stopped") + } + return nil +} + +// Wait blocks until the engine HTTP server goroutine completes. 
+func (e *Engine) Wait() { + e.serverWG.Wait() +} + +// Server returns the active HTTP server instance. +func (e *Engine) Server() *http.Server { + e.stateMu.RLock() + defer e.stateMu.RUnlock() + return e.server +} + +// Router returns the current HTTP router instance. +func (e *Engine) Router() *chi.Mux { + e.stateMu.RLock() + defer e.stateMu.RUnlock() + return e.router +} + +// Config returns the configuration snapshot captured at startup. +func (e *Engine) Config() *appconfig.Config { + e.stateMu.RLock() + defer e.stateMu.RUnlock() + return e.configSnapshot +} + +// ResourceStore returns the active resource store. +func (e *Engine) ResourceStore() resources.ResourceStore { + e.stateMu.RLock() + defer e.stateMu.RUnlock() + return e.resourceStore +} + +// Mode returns the configured engine mode. +func (e *Engine) Mode() Mode { + return e.mode +} + +// IsStarted reports whether the engine lifecycle has been started. +func (e *Engine) IsStarted() bool { + e.stateMu.RLock() + defer e.stateMu.RUnlock() + return e.started +} + +func (e *Engine) buildResourceStore(ctx context.Context, cfg *appconfig.Config) (resources.ResourceStore, error) { + state, err := e.bootstrapMode(ctx, cfg) + if err != nil { + return nil, err + } + if state == nil || state.resourceStore == nil { + if state != nil { + state.cleanupOnError(context.WithoutCancel(ctx)) + } + return nil, fmt.Errorf("mode runtime did not provide resource store") + } + e.stateMu.Lock() + e.modeCleanups = state.cleanups + e.stateMu.Unlock() + return state.resourceStore, nil +} + +func (e *Engine) newHTTPServer( + ctx context.Context, + router *chi.Mux, + cfg *appconfig.Config, + addr string, +) *http.Server { + server := &http.Server{ + Addr: addr, + Handler: router, + BaseContext: func(net.Listener) context.Context { return ctx }, + ReadHeaderTimeout: defaultHTTPReadHeaderTimeout, + } + if cfg != nil { + timeouts := cfg.Server.Timeouts + server.ReadTimeout = timeouts.HTTPRead + server.WriteTimeout = timeouts.HTTPWrite 
+ server.IdleTimeout = timeouts.HTTPIdle + if timeouts.HTTPReadHeader > 0 { + server.ReadHeaderTimeout = timeouts.HTTPReadHeader + } + } + return server +} + +func (e *Engine) resolveListenAddress(cfg *appconfig.Config) (string, int) { + host := e.host + port := e.port + if port <= 0 && cfg != nil && cfg.Server.Port > 0 { + port = cfg.Server.Port + } + if host == "" && cfg != nil && cfg.Server.Host != "" { + host = cfg.Server.Host + } + if host == "" { + host = loopbackHostname + } + return host, port +} + +func (e *Engine) listen(ctx context.Context, host string, port int) (net.Listener, int, error) { + if ctx == nil { + return nil, 0, fmt.Errorf("context is required") + } + if host == "" { + host = loopbackHostname + } + portStr := "0" + if port > 0 { + portStr = strconv.Itoa(port) + } + address := net.JoinHostPort(host, portStr) + lc := net.ListenConfig{} + listener, err := lc.Listen(ctx, "tcp", address) + if err != nil { + return nil, 0, fmt.Errorf("listen on %s: %w", address, err) + } + tcpAddr, ok := listener.Addr().(*net.TCPAddr) + if !ok { + _ = listener.Close() + return nil, 0, fmt.Errorf("expected tcp address, got %T", listener.Addr()) + } + return listener, tcpAddr.Port, nil +} + +func (e *Engine) newClient(ctx context.Context, host string, port int) (*sdkclient.Client, string, error) { + clientHost := sanitizeHostForClient(host) + hostPort := net.JoinHostPort(clientHost, strconv.Itoa(port)) + baseURL := fmt.Sprintf("%s://%s", httpScheme, hostPort) + client, err := sdkclient.New(ctx, baseURL) + if err != nil { + return nil, "", fmt.Errorf("initialize sdk client: %w", err) + } + return client, baseURL, nil +} + +func (e *Engine) launchServer(ctx context.Context, srv *http.Server, ln net.Listener) { + log := logger.FromContext(ctx) + if log != nil { + log.Debug("starting http server", "address", ln.Addr().String()) + } + e.serverWG.Go(func() { + if err := srv.Serve(ln); err != nil && !errors.Is(err, http.ErrServerClosed) { + if log != nil { + 
log.Error("http server failed", "error", err) + } + e.recordServerError(fmt.Errorf("http server failure: %w", err)) + return + } + }) +} + +func sanitizeHostForClient(host string) string { + if host == "" || host == "0.0.0.0" || host == "::" { + return loopbackHostname + } + return host +} + +func (e *Engine) recordServerError(err error) { + if err == nil { + return + } + e.errMu.Lock() + e.serverErr = err + e.errMu.Unlock() +} + +func (e *Engine) cleanupModeResources(ctx context.Context) error { + cleanups := e.extractModeCleanups() + if len(cleanups) == 0 { + return nil + } + var errs []error + for i := len(cleanups) - 1; i >= 0; i-- { + fn := cleanups[i] + if fn == nil { + continue + } + if err := fn(ctx); err != nil { + errs = append(errs, err) + } + } + return errors.Join(errs...) +} + +func (e *Engine) extractModeCleanups() []modeCleanup { + e.stateMu.Lock() + defer e.stateMu.Unlock() + cleanups := e.modeCleanups + e.modeCleanups = nil + return cleanups +} + +func (e *Engine) serverFailure() error { + e.errMu.Lock() + defer e.errMu.Unlock() + return e.serverErr +} + +func (e *Engine) cleanupStore(ctx context.Context, store resources.ResourceStore) { + if store == nil { + return + } + if err := store.Close(); err != nil { + log := logger.FromContext(ctx) + if log != nil { + log.Warn("failed to close resource store during cleanup", "error", err) + } + } +} + +func (e *Engine) recordStartError(err error) { + e.errMu.Lock() + e.startErr = err + e.errMu.Unlock() +} diff --git a/sdk/compozy/lifecycle_helpers_test.go b/sdk/compozy/lifecycle_helpers_test.go new file mode 100644 index 00000000..6d5cc2ad --- /dev/null +++ b/sdk/compozy/lifecycle_helpers_test.go @@ -0,0 +1,199 @@ +package compozy + +import ( + "context" + "errors" + "net" + "net/http" + "testing" + "time" + + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" + "github.com/go-chi/chi/v5" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +func TestEngineRecordServerError(t *testing.T) { + t.Run("Should cache last server error", func(t *testing.T) { + t.Parallel() + engine := &Engine{} + engine.recordServerError(nil) + assert.Nil(t, engine.serverFailure()) + expected := errors.New("server exploded") + engine.recordServerError(expected) + assert.Equal(t, expected, engine.serverFailure()) + }) +} + +func TestEngineStartRequiresContext(t *testing.T) { + t.Run("Should reject missing context", func(t *testing.T) { + t.Parallel() + engine := &Engine{} + var nilCtx context.Context + err := engine.Start(nilCtx) + assert.Error(t, err) + }) +} + +func TestEngineStartUnsupportedMode(t *testing.T) { + t.Run("Should return error for unsupported engine mode", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{mode: Mode("legacy")} + err := engine.Start(ctx) + require.Error(t, err) + assert.Contains(t, err.Error(), "unsupported engine mode") + assert.Equal(t, err, engine.startErr) + }) +} + +func TestEngineStopReturnsCachedError(t *testing.T) { + t.Run("Should return cached stop error", func(t *testing.T) { + t.Parallel() + engine := &Engine{stopErr: errors.New("cached stop")} + ctx := logger.ContextWithLogger(t.Context(), logger.NewForTests()) + err := engine.Stop(ctx) + require.Error(t, err) + assert.Equal(t, "cached stop", err.Error()) + }) +} + +func TestEngineCleanupModeResourcesAggregatesErrors(t *testing.T) { + t.Run("Should aggregate failures from mode cleanup", func(t *testing.T) { + t.Parallel() + engine := &Engine{} + engine.modeCleanups = []modeCleanup{ + func(context.Context) error { return nil }, + func(context.Context) error { return errors.New("first failure") }, + func(context.Context) error { return errors.New("second failure") }, + } + err := engine.cleanupModeResources(logger.ContextWithLogger(t.Context(), logger.NewForTests())) + require.Error(t, err) + assert.Contains(t, err.Error(), "first failure") + 
assert.Contains(t, err.Error(), "second failure") + assert.NoError(t, engine.cleanupModeResources(t.Context())) + }) +} + +func TestSanitizeHostForClientVariants(t *testing.T) { + t.Run("Should normalize wildcard hosts", func(t *testing.T) { + t.Parallel() + assert.Equal(t, loopbackHostname, sanitizeHostForClient("")) + assert.Equal(t, loopbackHostname, sanitizeHostForClient("0.0.0.0")) + assert.Equal(t, loopbackHostname, sanitizeHostForClient("::")) + assert.Equal(t, "example.com", sanitizeHostForClient("example.com")) + }) +} + +func TestEngineCleanupStoreLogsOnCloseFailure(t *testing.T) { + t.Run("Should log close failures when cleaning stores", func(t *testing.T) { + t.Parallel() + store := newResourceStoreStub() + store.closeErr = errors.New("close failure") + engine := &Engine{} + ctx := logger.ContextWithLogger(t.Context(), logger.NewForTests()) + engine.cleanupStore(ctx, store) + engine.cleanupStore(ctx, nil) + }) +} + +func TestEngineListenAllocatesPort(t *testing.T) { + t.Run("Should listen on ephemeral port when unspecified", func(t *testing.T) { + engine := &Engine{} + listener, port, err := engine.listen(t.Context(), "", 0) + require.NoError(t, err) + require.NotZero(t, port) + require.NotNil(t, listener) + require.NoError(t, listener.Close()) + }) +} + +func TestEngineListenFailsForOccupiedPort(t *testing.T) { + t.Run("Should fail when desired port is occupied", func(t *testing.T) { + listenCfg := net.ListenConfig{} + ln, err := listenCfg.Listen(context.WithoutCancel(t.Context()), "tcp", "127.0.0.1:0") + require.NoError(t, err) + addr := ln.Addr().(*net.TCPAddr) + engine := &Engine{} + _, _, listenErr := engine.listen(t.Context(), "127.0.0.1", addr.Port) + require.Error(t, listenErr) + assert.Contains(t, listenErr.Error(), "listen on") + require.NoError(t, ln.Close()) + }) +} + +func TestEngineNewHTTPServerAppliesTimeouts(t *testing.T) { + t.Run("Should apply configured timeouts to http server", func(t *testing.T) { + t.Parallel() + ctx := 
logger.ContextWithLogger(t.Context(), logger.NewForTests()) + cfg := appconfig.FromContext(lifecycleTestContext(t)) + require.NotNil(t, cfg) + cfg.Server.Timeouts.HTTPRead = time.Second + cfg.Server.Timeouts.HTTPWrite = 2 * time.Second + cfg.Server.Timeouts.HTTPIdle = 3 * time.Second + cfg.Server.Timeouts.HTTPReadHeader = 500 * time.Millisecond + engine := &Engine{} + server := engine.newHTTPServer(ctx, chi.NewRouter(), cfg, "127.0.0.1:0") + assert.Equal(t, time.Second, server.ReadTimeout) + assert.Equal(t, 2*time.Second, server.WriteTimeout) + assert.Equal(t, 3*time.Second, server.IdleTimeout) + assert.Equal(t, 500*time.Millisecond, server.ReadHeaderTimeout) + }) +} + +func TestEngineNewClientSanitizesHost(t *testing.T) { + t.Run("Should sanitize host before creating client", func(t *testing.T) { + t.Parallel() + ctx := logger.ContextWithLogger(t.Context(), logger.NewForTests()) + engine := &Engine{} + client, baseURL, err := engine.newClient(ctx, "0.0.0.0", 8080) + require.NoError(t, err) + assert.Contains(t, baseURL, "127.0.0.1:8080") + require.NotNil(t, client) + }) +} + +type failingListener struct{} + +func (f *failingListener) Accept() (net.Conn, error) { + return nil, listenerError{} +} + +func (f *failingListener) Close() error { + return nil +} + +func (f *failingListener) Addr() net.Addr { + return &net.TCPAddr{IP: net.IPv4zero, Port: 0} +} + +type listenerError struct{} + +func (listenerError) Error() string { + return "accept failure" +} + +func (listenerError) Timeout() bool { + return false +} + +func (listenerError) Temporary() bool { + return false +} + +func TestEngineLaunchServerRecordsFailures(t *testing.T) { + t.Run("Should record server failures from launch goroutine", func(t *testing.T) { + t.Parallel() + engine := &Engine{} + log := logger.NewForTests() + ctx := logger.ContextWithLogger(t.Context(), log) + server := &http.Server{} + engine.launchServer(ctx, server, &failingListener{}) + engine.serverWG.Wait() + err := engine.serverFailure() 
+ require.Error(t, err) + assert.Contains(t, err.Error(), "http server failure") + }) +} diff --git a/sdk/compozy/lifecycle_test.go b/sdk/compozy/lifecycle_test.go new file mode 100644 index 00000000..5c0b76a0 --- /dev/null +++ b/sdk/compozy/lifecycle_test.go @@ -0,0 +1,294 @@ +package compozy + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "sync/atomic" + "testing" + "time" + + engineproject "github.com/compozy/compozy/engine/project" + "github.com/compozy/compozy/engine/resources" + enginetool "github.com/compozy/compozy/engine/tool" + "github.com/compozy/compozy/engine/tool/inline" + engineworkflow "github.com/compozy/compozy/engine/workflow" + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var temporalPortCounter int32 = 20000 + +type failingWatchStore struct { + *resources.MemoryResourceStore +} + +func (f *failingWatchStore) Watch( + _ context.Context, + _ string, + _ resources.ResourceType, +) (<-chan resources.Event, error) { + return nil, errors.New("watch failed") +} + +func TestEngineLifecycle(t *testing.T) { + t.Run("Should start and stop standalone engine with memory store", func(t *testing.T) { + ctx := lifecycleTestContext(t) + engine, err := New( + ctx, + WithWorkflow(&engineworkflow.Config{ID: "workflow-start"}), + WithHost(loopbackHostname), + WithPort(0), + ) + require.NoError(t, err) + require.NotNil(t, engine) + + err = engine.Start(ctx) + require.NoError(t, err) + assert.True(t, engine.IsStarted()) + + store := engine.ResourceStore() + require.IsType(t, &resources.MemoryResourceStore{}, store) + assert.NotNil(t, engine.Server()) + assert.NotNil(t, engine.Router()) + assert.NotNil(t, engine.Config()) + assert.Equal(t, ModeStandalone, engine.Mode()) + + err = engine.Start(ctx) + require.ErrorIs(t, err, ErrAlreadyStarted) + + require.NoError(t, engine.Stop(ctx)) + 
assert.False(t, engine.IsStarted()) + assert.Nil(t, engine.Server()) + assert.Nil(t, engine.Router()) + + require.NoError(t, engine.Stop(ctx)) + engine.Wait() + + memStore := store.(*resources.MemoryResourceStore) + _, putErr := memStore.Put(ctx, resources.ResourceKey{ + Project: "test", + Type: resources.ResourceWorkflow, + ID: "after-stop", + }, &engineworkflow.Config{ID: "after-stop"}) + require.Error(t, putErr) + assert.ErrorContains(t, putErr, "store is closed") + }) + + t.Run("Should fail to start distributed mode without external configuration", func(t *testing.T) { + ctx := lifecycleTestContext(t) + cfg := appconfig.FromContext(ctx) + require.NotNil(t, cfg) + cfg.Mode = string(ModeDistributed) + cfg.Redis.URL = "" + cfg.Redis.Host = "" + cfg.Redis.Port = "" + engine, err := New(ctx, WithMode(ModeDistributed), WithWorkflow(&engineworkflow.Config{ID: "distributed"})) + require.NoError(t, err) + err = engine.Start(ctx) + require.Error(t, err) + assert.ErrorContains(t, err, "redis") + assert.False(t, engine.IsStarted()) + }) + + t.Run("Should start inline manager when project and tools provided", func(t *testing.T) { + ctx := lifecycleTestContext(t) + engine, err := New( + ctx, + WithProject(&engineproject.Config{Name: "inline-project"}), + WithTool(&enginetool.Config{ + ID: "inline-tool", + Code: "export default () => 'ok';", + }), + WithWorkflow(&engineworkflow.Config{ID: "wf-inline"}), + WithHost(loopbackHostname), + WithPort(0), + ) + require.NoError(t, err) + require.NotNil(t, engine) + require.NoError(t, engine.Start(ctx)) + require.NotNil(t, engine.inlineManager) + require.NoError(t, engine.Stop(ctx)) + engine.Wait() + }) + + t.Run("Should fail to start with invalid host", func(t *testing.T) { + ctx := lifecycleTestContext(t) + engine, err := New( + ctx, + WithWorkflow(&engineworkflow.Config{ID: "invalid-host"}), + WithHost("256.0.0.1"), + WithPort(0), + ) + require.NoError(t, err) + err = engine.Start(ctx) + require.Error(t, err) + assert.Contains(t, 
err.Error(), "listen") + }) +} + +func TestEngineInitInlineManager(t *testing.T) { + ctx := logger.ContextWithLogger(t.Context(), logger.NewForTests()) + cfg := appconfig.Default() + cfg.CLI.CWD = t.TempDir() + store := resources.NewMemoryResourceStore() + defer func() { + require.NoError(t, store.Close()) + }() + toolCfg := &enginetool.Config{ + ID: "inline-tool", + Code: "export default () => 'inline';", + } + _, err := store.Put(ctx, resources.ResourceKey{ + Project: "inline-project", + Type: resources.ResourceTool, + ID: "inline-tool", + }, toolCfg) + require.NoError(t, err) + engine := &Engine{ + ctx: ctx, + project: &engineproject.Config{Name: "inline-project"}, + } + manager, err := engine.initInlineManager(ctx, cfg, store) + require.NoError(t, err) + require.NotNil(t, manager) + defer func() { + require.NoError(t, manager.Close(ctx)) + }() + entrypointPath := manager.EntrypointPath() + require.NotEmpty(t, entrypointPath) + require.Equal(t, entrypointPath, cfg.Runtime.EntrypointPath) + data, readErr := os.ReadFile(entrypointPath) + require.NoError(t, readErr) + assert.Contains(t, string(data), "inline-tool") +} + +func TestEngineInitInlineManager_NoProject(t *testing.T) { + ctx := logger.ContextWithLogger(t.Context(), logger.NewForTests()) + cfg := appconfig.Default() + store := resources.NewMemoryResourceStore() + defer func() { + require.NoError(t, store.Close()) + }() + engine := &Engine{} + manager, err := engine.initInlineManager(ctx, cfg, store) + require.NoError(t, err) + require.Nil(t, manager) +} + +func TestEngineInitInlineManager_NilStore(t *testing.T) { + ctx := logger.ContextWithLogger(t.Context(), logger.NewForTests()) + cfg := appconfig.Default() + engine := &Engine{project: &engineproject.Config{Name: "proj"}} + manager, err := engine.initInlineManager(ctx, cfg, nil) + require.Error(t, err) + require.Nil(t, manager) +} + +func TestEngineInitInlineManager_WatchFailure(t *testing.T) { + ctx := logger.ContextWithLogger(t.Context(), 
logger.NewForTests()) + cfg := appconfig.Default() + cfg.CLI.CWD = t.TempDir() + store := &failingWatchStore{resources.NewMemoryResourceStore()} + defer func() { + require.NoError(t, store.Close()) + }() + engine := &Engine{project: &engineproject.Config{Name: "proj"}} + manager, err := engine.initInlineManager(ctx, cfg, store) + require.Error(t, err) + require.Nil(t, manager) +} + +func TestEngineTakeInlineManager(t *testing.T) { + engine := &Engine{} + dummy := &inline.Manager{} + engine.inlineManager = dummy + taken := engine.takeInlineManager() + require.Equal(t, dummy, taken) + require.Nil(t, engine.inlineManager) +} + +func TestCleanupHTTPState(t *testing.T) { + ctx := logger.ContextWithLogger(t.Context(), logger.NewForTests()) + listener, err := (&net.ListenConfig{}).Listen(context.WithoutCancel(ctx), "tcp", "127.0.0.1:0") + require.NoError(t, err) + server := &http.Server{} + done := make(chan struct{}) + go func() { + _ = server.Serve(listener) + close(done) + }() + time.Sleep(10 * time.Millisecond) + cleanupCtx, cancel := context.WithCancel(ctx) + state := &httpState{ + server: server, + listener: listener, + cancel: cancel, + } + engine := &Engine{} + engine.cleanupHTTPState(cleanupCtx, state) + select { + case <-done: + case <-time.After(2 * time.Second): + t.Fatalf("server did not shut down") + } +} + +func TestAllocateTemporalFrontendPortWraps(t *testing.T) { + ctx := logger.ContextWithLogger(t.Context(), logger.NewForTests()) + atomic.StoreInt32(&temporalPortCounter, 59990) + port := allocateTemporalFrontendPort(ctx, t) + require.Greater(t, port, 20000) + require.Less(t, port, 65000) +} + +func lifecycleTestContext(t *testing.T) context.Context { + t.Helper() + ctx := logger.ContextWithLogger(t.Context(), logger.NewForTests()) + service := appconfig.NewService() + manager := appconfig.NewManager(ctx, service) + _, err := manager.Load(ctx, appconfig.NewDefaultProvider()) + require.NoError(t, err) + ctx = appconfig.ContextWithManager(ctx, manager) + 
cfg := appconfig.FromContext(ctx) + require.NotNil(t, cfg) + port := allocateTemporalFrontendPort(ctx, t) + cfg.Temporal.Standalone.FrontendPort = port + cfg.Temporal.Standalone.UIPort = port + 1000 + cfg.Temporal.Standalone.DatabaseFile = filepath.Join(t.TempDir(), "temporal.db") + return ctx +} + +func allocateTemporalFrontendPort(ctx context.Context, t *testing.T) int { + t.Helper() + for attempts := 0; attempts < 2000; attempts++ { + port := int(atomic.AddInt32(&temporalPortCounter, 1)) + if port > 60000 { + atomic.StoreInt32(&temporalPortCounter, 20000) + port = int(atomic.AddInt32(&temporalPortCounter, 1)) + } + if reserveTCPPort(ctx, port) && reserveTCPPort(ctx, port+1000) { + return port + } + } + t.Fatalf("unable to allocate temporal port") + return 0 +} + +func reserveTCPPort(ctx context.Context, port int) bool { + if port <= 0 || port > 65000 { + return false + } + ln, err := (&net.ListenConfig{}).Listen(context.WithoutCancel(ctx), "tcp", fmt.Sprintf("127.0.0.1:%d", port)) + if err != nil { + return false + } + _ = ln.Close() + return true +} diff --git a/sdk/compozy/loader.go b/sdk/compozy/loader.go new file mode 100644 index 00000000..5b8fb14d --- /dev/null +++ b/sdk/compozy/loader.go @@ -0,0 +1,228 @@ +package compozy + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" + yamlv3 "gopkg.in/yaml.v3" +) + +var yamlExtensions = []string{".yaml", ".yml"} + +const defaultFilteredFilesCapacity = 8 + +func requireContext(ctx context.Context) error { + if ctx == nil { + return fmt.Errorf("context is required") + } + return contextError(ctx) +} + +func contextError(ctx context.Context) error { + if ctx == nil { + return fmt.Errorf("context is required") + } + if err := ctx.Err(); err != nil { + return fmt.Errorf("operation canceled: %w", err) + } + return nil +} + +type filePathSetter interface { + SetFilePath(string) +} + +type 
cwdSetter interface { + SetCWD(string) error +} + +func loadYAML[T any](ctx context.Context, engine *Engine, path string) (T, string, error) { + var zero T + log := logger.FromContext(ctx) + logFailure := func(target string, err error) { + if err == nil || log == nil { + return + } + log.Error("failed to load YAML", "path", target, "error", err) + } + requestedPath := strings.TrimSpace(path) + if err := requireContext(ctx); err != nil { + logFailure(requestedPath, err) + return zero, "", err + } + trimmed, cfg, err := prepareLoadContext(engine, path) + if err != nil { + logFailure(requestedPath, err) + return zero, "", err + } + if err := contextError(ctx); err != nil { + logFailure(trimmed, err) + return zero, "", err + } + data, abs, err := readYAMLFile(trimmed, cfg.Limits.MaxConfigFileSize) + if err != nil { + logFailure(trimmed, err) + return zero, "", err + } + if err := contextError(ctx); err != nil { + logFailure(trimmed, err) + return zero, "", err + } + value, err := decodeYAML[T](data, trimmed) + if err != nil { + logFailure(trimmed, err) + return zero, "", err + } + if err := contextError(ctx); err != nil { + logFailure(trimmed, err) + return zero, "", err + } + applyYAMLMetadata(engine.ctx, value, abs) + return value, abs, nil +} + +func prepareLoadContext(engine *Engine, rawPath string) (string, *appconfig.Config, error) { + if engine == nil { + return "", nil, fmt.Errorf("engine is nil") + } + trimmed := strings.TrimSpace(rawPath) + if trimmed == "" { + return "", nil, fmt.Errorf("path is required") + } + if engine.ctx == nil { + return "", nil, fmt.Errorf("engine context is not set") + } + cfg := appconfig.FromContext(engine.ctx) + if cfg == nil { + return "", nil, fmt.Errorf("configuration unavailable") + } + return trimmed, cfg, nil +} + +func readYAMLFile(path string, maxSize int) ([]byte, string, error) { + info, err := os.Stat(path) + if err != nil { + return nil, "", fmt.Errorf("stat %s: %w", path, err) + } + if limit := int64(maxSize); limit > 
0 && info.Size() > limit { + return nil, "", fmt.Errorf("%s exceeds maximum size of %d bytes", path, maxSize) + } + data, err := os.ReadFile(path) + if err != nil { + return nil, "", fmt.Errorf("read %s: %w", path, err) + } + abs, err := filepath.Abs(path) + if err != nil { + abs = path + } + return data, abs, nil +} + +func decodeYAML[T any](data []byte, path string) (T, error) { + var value T + if err := yamlv3.Unmarshal(data, &value); err != nil { + return value, fmt.Errorf("decode %s: %w", path, err) + } + return value, nil +} + +func applyYAMLMetadata(ctx context.Context, value any, abs string) { + if abs == "" { + return + } + if setter, ok := value.(filePathSetter); ok { + setter.SetFilePath(abs) + } + if setter, ok := value.(cwdSetter); ok { + if err := setter.SetCWD(filepath.Dir(abs)); err != nil { + log := logger.FromContext(ctx) + if log != nil { + log.Error("failed to set cwd", "path", abs, "error", err) + } + } + } +} + +func filteredYAMLFiles(ctx context.Context, dir string) ([]string, error) { + if err := contextError(ctx); err != nil { + return nil, err + } + entries, err := os.ReadDir(dir) + if err != nil { + return nil, fmt.Errorf("read dir %s: %w", dir, err) + } + filesCap := len(entries) + if filesCap < defaultFilteredFilesCapacity { + filesCap = defaultFilteredFilesCapacity + } + files := make([]string, 0, filesCap) + for _, entry := range entries { + if err := contextError(ctx); err != nil { + return nil, err + } + if entry.IsDir() { + continue + } + if !isYAMLExtension(entry.Name()) { + continue + } + files = append(files, filepath.Join(dir, entry.Name())) + } + sort.Strings(files) + return files, nil +} + +func (e *Engine) loadFromDir(ctx context.Context, dir string, loader func(context.Context, string) error) error { + if e == nil { + return fmt.Errorf("engine is nil") + } + if err := requireContext(ctx); err != nil { + return err + } + cleaned := strings.TrimSpace(dir) + if cleaned == "" { + return fmt.Errorf("directory is required") + } + 
files, err := filteredYAMLFiles(ctx, cleaned) + if err != nil { + return err + } + if len(files) == 0 { + return nil + } + var errs []error + log := logger.FromContext(e.ctx) + for _, file := range files { + if err := contextError(ctx); err != nil { + return err + } + if err := loader(ctx, file); err != nil { + wrapped := fmt.Errorf("%s: %w", file, err) + if log != nil { + log.Error("failed to load yaml file", "path", file, "error", err) + } + errs = append(errs, wrapped) + } + } + if len(errs) > 0 { + return errors.Join(errs...) + } + return nil +} + +func isYAMLExtension(name string) bool { + lower := strings.ToLower(name) + for _, ext := range yamlExtensions { + if strings.HasSuffix(lower, ext) { + return true + } + } + return false +} diff --git a/sdk/compozy/loader_performance_test.go b/sdk/compozy/loader_performance_test.go new file mode 100644 index 00000000..ba750a0a --- /dev/null +++ b/sdk/compozy/loader_performance_test.go @@ -0,0 +1,105 @@ +package compozy_test + +import ( + "context" + "fmt" + "net" + "os" + "path/filepath" + "testing" + "time" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + enginetask "github.com/compozy/compozy/engine/task" + engineworkflow "github.com/compozy/compozy/engine/workflow" + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLoadWorkflowsPerformanceBudget(t *testing.T) { + t.Run("Should load workflows within 100ms per file", func(t *testing.T) { + ctx := loaderTestContext(t) + engine, err := compozy.New( + ctx, + compozy.WithAgent(&engineagent.Config{ + ID: "seed-agent", + Instructions: "Seed agent", + Model: engineagent.Model{ + Config: enginecore.ProviderConfig{ + Provider: enginecore.ProviderName("openai"), + Model: "gpt-4o-mini", + }, + }, + }), + 
compozy.WithWorkflow(&engineworkflow.Config{ + ID: "seed-workflow", + Tasks: []enginetask.Config{ + { + BaseConfig: enginetask.BaseConfig{ID: "seed-task"}, + }, + }, + }), + ) + require.NoError(t, err) + dir := t.TempDir() + fileCount := 10 + for i := 0; i < fileCount; i++ { + path := filepath.Join(dir, fmt.Sprintf("workflow_%02d.yaml", i)) + content := fmt.Sprintf( + "id: wf-%02d\n"+ + "tasks:\n"+ + " - id: task-%02d\n"+ + " type: basic\n"+ + " agent:\n"+ + " id: inline-agent-%02d\n"+ + " instructions: \"Respond to loader benchmarks\"\n"+ + " model:\n"+ + " config:\n"+ + " provider: openai\n"+ + " model: gpt-4o-mini\n"+ + " action: respond\n"+ + " final: true\n", + i, + i, + i, + ) + require.NoError(t, os.WriteFile(path, []byte(content), 0o600)) + } + start := time.Now() + err = engine.LoadWorkflowsFromDir(ctx, dir) + require.NoError(t, err) + perFile := time.Since(start) / time.Duration(fileCount) + assert.LessOrEqual(t, perFile, 100*time.Millisecond, "per-file load budget exceeded: %v", perFile) + report, err := engine.ValidateReferences() + require.NoError(t, err) + assert.GreaterOrEqual(t, report.ResourceCount, fileCount+2) + }) +} + +func loaderTestContext(t *testing.T) context.Context { + t.Helper() + ctx := logger.ContextWithLogger(t.Context(), logger.NewForTests()) + service := appconfig.NewService() + manager := appconfig.NewManager(ctx, service) + _, err := manager.Load(ctx, appconfig.NewDefaultProvider()) + require.NoError(t, err) + ctx = appconfig.ContextWithManager(ctx, manager) + cfg := appconfig.FromContext(ctx) + require.NotNil(t, cfg) + listenCfg := net.ListenConfig{} + for { + ln, err := listenCfg.Listen(context.WithoutCancel(ctx), "tcp", "127.0.0.1:0") + require.NoError(t, err) + addr := ln.Addr().(*net.TCPAddr) + require.NoError(t, ln.Close()) + if addr.Port <= 64535 { + cfg.Temporal.Standalone.FrontendPort = addr.Port + break + } + } + return ctx +} diff --git a/sdk/compozy/loader_test.go b/sdk/compozy/loader_test.go new file mode 100644 index 
00000000..94d06269 --- /dev/null +++ b/sdk/compozy/loader_test.go @@ -0,0 +1,167 @@ +package compozy + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + enginetool "github.com/compozy/compozy/engine/tool" + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLoadYAMLSuccess(t *testing.T) { + t.Run("Should load YAML configuration successfully", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx} + dir := t.TempDir() + file := filepath.Join(dir, "tool.yaml") + content := strings.TrimSpace(`resource: tool +id: yaml-tool +type: http +`) + require.NoError(t, os.WriteFile(file, []byte(content), 0o600)) + cfg, abs, err := loadYAML[*enginetool.Config](ctx, engine, file) + require.NoError(t, err) + assert.Equal(t, "yaml-tool", cfg.ID) + assert.Equal(t, "tool", cfg.Resource) + assert.Equal(t, filepath.Clean(file), abs) + }) +} + +func TestLoadFromDirAccumulatesErrors(t *testing.T) { + t.Run("Should accumulate loader errors and report them", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx} + dir := t.TempDir() + good := filepath.Join(dir, "good.yaml") + bad := filepath.Join(dir, "bad.yml") + require.NoError(t, os.WriteFile(good, []byte("kind: ok"), 0o600)) + require.NoError(t, os.WriteFile(bad, []byte("kind: bad"), 0o600)) + seen := make([]string, 0) + loader := func(_ context.Context, path string) error { + seen = append(seen, filepath.Base(path)) + if strings.Contains(path, "bad") { + return fmt.Errorf("failed") + } + return nil + } + err := engine.loadFromDir(ctx, dir, loader) + require.Error(t, err) + assert.Len(t, seen, 2) + assert.Contains(t, err.Error(), "bad.yml") + }) +} + +func TestLoadYAMLErrorConditions(t *testing.T) { + t.Run("Should return error when engine is nil", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + _, _, 
err := loadYAML[*enginetool.Config](ctx, nil, "file.yaml") + assert.Error(t, err) + assert.Contains(t, err.Error(), "engine is nil") + }) + t.Run("Should validate that path is not blank", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx} + _, _, err := loadYAML[*enginetool.Config](ctx, engine, " ") + assert.Error(t, err) + assert.Contains(t, err.Error(), "path is required") + }) + t.Run("Should require non nil context", func(t *testing.T) { + t.Parallel() + engine := &Engine{ctx: lifecycleTestContext(t)} + //lint:ignore SA1012 testing nil context handling + _, _, err := loadYAML[*enginetool.Config](nil, engine, "file.yaml") + assert.Error(t, err) + assert.Contains(t, err.Error(), "context is required") + }) + t.Run("Should validate engine context is set", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{} + _, _, err := loadYAML[*enginetool.Config](ctx, engine, "file.yaml") + assert.Error(t, err) + assert.Contains(t, err.Error(), "engine context is not set") + }) +} + +func TestLoadYAMLHandlesCanceledContext(t *testing.T) { + t.Run("Should fail when context is canceled", func(t *testing.T) { + t.Parallel() + baseCtx := lifecycleTestContext(t) + engine := &Engine{ctx: baseCtx} + dir := t.TempDir() + file := filepath.Join(dir, "tool.yaml") + require.NoError(t, os.WriteFile(file, []byte("resource: tool\nid: ctx-tool\ntype: http\n"), 0o600)) + cancelCtx, cancel := context.WithCancel(baseCtx) + cancel() + _, _, err := loadYAML[*enginetool.Config](cancelCtx, engine, file) + require.Error(t, err) + assert.Contains(t, err.Error(), "operation canceled") + }) +} + +func TestLoadYAMLStatFailure(t *testing.T) { + t.Run("Should report stat errors for missing file", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx} + _, _, err := loadYAML[*enginetool.Config](ctx, engine, "missing.yaml") + require.Error(t, err) + assert.Contains(t, err.Error(), 
"stat missing.yaml") + }) +} + +func TestLoadYAMLDecodeFailure(t *testing.T) { + t.Run("Should report decode failure for invalid YAML", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx} + cfg := appconfig.FromContext(ctx) + require.NotNil(t, cfg) + bad := filepath.Join(t.TempDir(), "invalid.yaml") + require.NoError(t, os.WriteFile(bad, []byte("{"), 0o600)) + _, _, err := loadYAML[*enginetool.Config](ctx, engine, bad) + require.Error(t, err) + assert.Contains(t, err.Error(), "decode") + }) +} + +func TestLoadFromDirValidatesInputs(t *testing.T) { + t.Run("Should require engine instance", func(t *testing.T) { + t.Parallel() + var engine *Engine + ctx := lifecycleTestContext(t) + err := engine.loadFromDir(ctx, "", nil) + require.Error(t, err) + }) + t.Run("Should require directory path and loader", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx} + err := engine.loadFromDir(ctx, "", func(context.Context, string) error { return nil }) + require.Error(t, err) + }) +} + +func TestLoadFromDirHandlesCanceledContext(t *testing.T) { + t.Run("Should fail when context is canceled", func(t *testing.T) { + t.Parallel() + baseCtx := lifecycleTestContext(t) + engine := &Engine{ctx: baseCtx} + dir := t.TempDir() + cancelCtx, cancel := context.WithCancel(baseCtx) + cancel() + err := engine.loadFromDir(cancelCtx, dir, func(context.Context, string) error { return nil }) + require.Error(t, err) + assert.Contains(t, err.Error(), "operation canceled") + }) +} diff --git a/sdk/compozy/loading_test.go b/sdk/compozy/loading_test.go new file mode 100644 index 00000000..7d9482f0 --- /dev/null +++ b/sdk/compozy/loading_test.go @@ -0,0 +1,223 @@ +package compozy + +import ( + "context" + "os" + "path/filepath" + "testing" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + 
enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + "github.com/compozy/compozy/engine/resources" + engineschema "github.com/compozy/compozy/engine/schema" + enginetask "github.com/compozy/compozy/engine/task" + enginetool "github.com/compozy/compozy/engine/tool" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" + mcpproxy "github.com/compozy/compozy/pkg/mcp-proxy" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" +) + +func TestEngineLoaders(t *testing.T) { + t.Run("Should load single resources", func(t *testing.T) { + ctx := lifecycleTestContext(t) + baseWorkflow := &engineworkflow.Config{ + ID: "seed", + Tasks: []enginetask.Config{{BaseConfig: enginetask.BaseConfig{ID: "task"}}}, + } + engine, err := New(ctx, WithWorkflow(baseWorkflow)) + require.NoError(t, err) + engine.resourceStore = resources.NewMemoryResourceStore() + dir := t.TempDir() + + writeYAML(t, filepath.Join(dir, "project.yaml"), engineproject.Config{Name: "single-project"}) + require.NoError(t, engine.LoadProject(ctx, filepath.Join(dir, "project.yaml"))) + + writeYAML(t, filepath.Join(dir, "workflow.yaml"), engineworkflow.Config{ + ID: "wf-single", + Tasks: []enginetask.Config{{BaseConfig: enginetask.BaseConfig{ID: "task-single"}}}, + }) + require.NoError(t, engine.LoadWorkflow(ctx, filepath.Join(dir, "workflow.yaml"))) + + writeYAML(t, filepath.Join(dir, "agent.yaml"), engineagent.Config{ + ID: "agent-single", + Instructions: "respond helpfully", + Model: engineagent.Model{ + Config: enginecore.ProviderConfig{ + Provider: enginecore.ProviderName("openai"), + Model: "gpt-4o-mini", + }, + }, + }) + require.NoError(t, engine.LoadAgent(ctx, filepath.Join(dir, "agent.yaml"))) + + writeYAML(t, filepath.Join(dir, "tool.yaml"), 
enginetool.Config{ID: "tool-single"}) + require.NoError(t, engine.LoadTool(ctx, filepath.Join(dir, "tool.yaml"))) + + writeYAML(t, filepath.Join(dir, "knowledge.yaml"), engineknowledge.BaseConfig{ID: "kb-single"}) + require.NoError(t, engine.LoadKnowledge(ctx, filepath.Join(dir, "knowledge.yaml"))) + + writeYAML(t, filepath.Join(dir, "memory.yaml"), enginememory.Config{ID: "memory-single"}) + require.NoError(t, engine.LoadMemory(ctx, filepath.Join(dir, "memory.yaml"))) + + writeYAML(t, filepath.Join(dir, "mcp.yaml"), enginemcp.Config{ + ID: "mcp-single", + Command: "echo", + Transport: mcpproxy.TransportStdio, + }) + require.NoError(t, engine.LoadMCP(ctx, filepath.Join(dir, "mcp.yaml"))) + + writeYAML(t, filepath.Join(dir, "schema.yaml"), engineschema.Schema{"id": "schema-single", "type": "object"}) + require.NoError(t, engine.LoadSchema(ctx, filepath.Join(dir, "schema.yaml"))) + + writeYAML(t, filepath.Join(dir, "model.yaml"), enginecore.ProviderConfig{ + Provider: enginecore.ProviderName("anthropic"), + Model: "claude", + }) + require.NoError(t, engine.LoadModel(ctx, filepath.Join(dir, "model.yaml"))) + + writeYAML(t, filepath.Join(dir, "schedule.yaml"), projectschedule.Config{ + ID: "schedule-single", + WorkflowID: "wf-single", + Cron: "*/15 * * * *", + }) + require.NoError(t, engine.LoadSchedule(ctx, filepath.Join(dir, "schedule.yaml"))) + + writeYAML(t, filepath.Join(dir, "webhook.yaml"), enginewebhook.Config{ + Slug: "webhook-single", + Events: []enginewebhook.EventConfig{ + {Name: "created", Filter: "true", Input: map[string]string{"field": "value"}}, + }, + }) + require.NoError(t, engine.LoadWebhook(ctx, filepath.Join(dir, "webhook.yaml"))) + }) + + t.Run("Should load resources from directories", func(t *testing.T) { + ctx := lifecycleTestContext(t) + baseWorkflow := &engineworkflow.Config{ + ID: "seed-dir", + Tasks: []enginetask.Config{{BaseConfig: enginetask.BaseConfig{ID: "task-dir"}}}, + } + engine, err := New(ctx, WithWorkflow(baseWorkflow)) + 
require.NoError(t, err) + engine.resourceStore = resources.NewMemoryResourceStore() + dir := t.TempDir() + + projectDir := filepath.Join(dir, "projects") + require.NoError(t, os.MkdirAll(projectDir, 0o755)) + writeYAML(t, filepath.Join(projectDir, "project.yaml"), engineproject.Config{Name: "dir-project"}) + require.NoError(t, engine.LoadProjectsFromDir(ctx, projectDir)) + + workflowDir := filepath.Join(dir, "workflows") + require.NoError(t, os.MkdirAll(workflowDir, 0o755)) + writeYAML(t, filepath.Join(workflowDir, "workflow.yaml"), engineworkflow.Config{ + ID: "wf-dir", + Tasks: []enginetask.Config{{BaseConfig: enginetask.BaseConfig{ID: "task-dir"}}}, + }) + require.NoError(t, engine.LoadWorkflowsFromDir(ctx, workflowDir)) + + agentDir := filepath.Join(dir, "agents") + require.NoError(t, os.MkdirAll(agentDir, 0o755)) + writeYAML(t, filepath.Join(agentDir, "agent.yaml"), engineagent.Config{ + ID: "agent-dir", + Instructions: "assist users", + Model: engineagent.Model{ + Config: enginecore.ProviderConfig{ + Provider: enginecore.ProviderName("openai"), + Model: "gpt-4o-mini", + }, + }, + }) + require.NoError(t, engine.LoadAgentsFromDir(ctx, agentDir)) + + toolDir := filepath.Join(dir, "tools") + require.NoError(t, os.MkdirAll(toolDir, 0o755)) + writeYAML(t, filepath.Join(toolDir, "tool.yaml"), enginetool.Config{ID: "tool-dir"}) + require.NoError(t, engine.LoadToolsFromDir(ctx, toolDir)) + + knowledgeDir := filepath.Join(dir, "knowledge") + require.NoError(t, os.MkdirAll(knowledgeDir, 0o755)) + writeYAML(t, filepath.Join(knowledgeDir, "kb.yaml"), engineknowledge.BaseConfig{ID: "kb-dir"}) + require.NoError(t, engine.LoadKnowledgeBasesFromDir(ctx, knowledgeDir)) + + memoryDir := filepath.Join(dir, "memories") + require.NoError(t, os.MkdirAll(memoryDir, 0o755)) + writeYAML(t, filepath.Join(memoryDir, "memory.yaml"), enginememory.Config{ID: "memory-dir"}) + require.NoError(t, engine.LoadMemoriesFromDir(ctx, memoryDir)) + + mcpDir := filepath.Join(dir, "mcps") + 
require.NoError(t, os.MkdirAll(mcpDir, 0o755)) + writeYAML(t, filepath.Join(mcpDir, "mcp.yaml"), enginemcp.Config{ + ID: "mcp-dir", + Command: "echo", + Transport: mcpproxy.TransportStdio, + }) + require.NoError(t, engine.LoadMCPsFromDir(ctx, mcpDir)) + + schemaDir := filepath.Join(dir, "schemas") + require.NoError(t, os.MkdirAll(schemaDir, 0o755)) + writeYAML(t, filepath.Join(schemaDir, "schema.yaml"), engineschema.Schema{"id": "schema-dir", "type": "object"}) + require.NoError(t, engine.LoadSchemasFromDir(ctx, schemaDir)) + + modelDir := filepath.Join(dir, "models") + require.NoError(t, os.MkdirAll(modelDir, 0o755)) + writeYAML(t, filepath.Join(modelDir, "model.yaml"), enginecore.ProviderConfig{ + Provider: enginecore.ProviderName("anthropic"), + Model: "claude-3", + }) + require.NoError(t, engine.LoadModelsFromDir(ctx, modelDir)) + + scheduleDir := filepath.Join(dir, "schedules") + require.NoError(t, os.MkdirAll(scheduleDir, 0o755)) + writeYAML(t, filepath.Join(scheduleDir, "schedule.yaml"), projectschedule.Config{ + ID: "schedule-dir", + WorkflowID: "wf-dir", + Cron: "0 * * * *", + }) + require.NoError(t, engine.LoadSchedulesFromDir(ctx, scheduleDir)) + + webhookDir := filepath.Join(dir, "webhooks") + require.NoError(t, os.MkdirAll(webhookDir, 0o755)) + writeYAML(t, filepath.Join(webhookDir, "webhook.yaml"), enginewebhook.Config{ + Slug: "webhook-dir", + Events: []enginewebhook.EventConfig{ + {Name: "updated", Filter: "true", Input: map[string]string{"foo": "bar"}}, + }, + }) + require.NoError(t, engine.LoadWebhooksFromDir(ctx, webhookDir)) + }) + + t.Run("Should respect context cancellation when loading resources", func(t *testing.T) { + ctx := lifecycleTestContext(t) + baseWorkflow := &engineworkflow.Config{ + ID: "seed-canceled", + Tasks: []enginetask.Config{{BaseConfig: enginetask.BaseConfig{ID: "task-canceled"}}}, + } + engine, err := New(ctx, WithWorkflow(baseWorkflow)) + require.NoError(t, err) + engine.resourceStore = resources.NewMemoryResourceStore() 
+ dir := t.TempDir() + writeYAML(t, filepath.Join(dir, "project.yaml"), engineproject.Config{Name: "cancel-project"}) + cancelCtx, cancel := context.WithCancel(ctx) + cancel() + err = engine.LoadProject(cancelCtx, filepath.Join(dir, "project.yaml")) + require.Error(t, err) + require.Contains(t, err.Error(), "operation canceled") + err = engine.LoadProjectsFromDir(cancelCtx, dir) + require.Error(t, err) + require.Contains(t, err.Error(), "operation canceled") + }) +} + +func writeYAML(t *testing.T, path string, value any) { + t.Helper() + bytes, err := yaml.Marshal(value) + require.NoError(t, err) + require.NoError(t, os.WriteFile(path, bytes, 0o600)) +} diff --git a/sdk/compozy/migration/example_compat_test.go b/sdk/compozy/migration/example_compat_test.go new file mode 100644 index 00000000..8e255e6e --- /dev/null +++ b/sdk/compozy/migration/example_compat_test.go @@ -0,0 +1,104 @@ +package compozy_test + +import ( + "context" + "net" + "testing" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + engineproject "github.com/compozy/compozy/engine/project" + enginetask "github.com/compozy/compozy/engine/task" + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" + "github.com/compozy/compozy/sdk/v2/agent" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + "github.com/compozy/compozy/sdk/v2/task" + "github.com/compozy/compozy/sdk/v2/workflow" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + // maxTemporalPort matches Temporal's documented upper port bound for development clusters. + maxTemporalPort = 64535 + // maxTemporalPortAttempts prevents infinite retries when searching for an available port. 
+ maxTemporalPortAttempts = 50 +) + +func TestMigrationGuideExampleCompatibility(t *testing.T) { + t.Run("Should assemble engine from migrated sdk resources", func(t *testing.T) { + ctx := migrationTestContext(t) + model := engineagent.Model{ + Config: enginecore.ProviderConfig{ + Provider: enginecore.ProviderName("openai"), + Model: "gpt-4o-mini", + }, + } + agentCfg, err := agent.New( + ctx, + "migration-assistant", + agent.WithInstructions("Guide users through the migration workflow."), + agent.WithModel(model), + ) + require.NoError(t, err) + withParams := enginecore.Input{ + "name": "{{ .workflow.input.name }}", + } + taskCfg, err := task.New( + ctx, + "welcome", + task.WithAction("prepare-migration"), + task.WithAgent(agentCfg), + task.WithWith(&withParams), + task.WithFinal(true), + ) + require.NoError(t, err) + outputs := enginecore.Output{ + "message": "{{ .tasks.welcome.output.message }}", + } + workflowCfg, err := workflow.New( + ctx, + "migration-demo", + workflow.WithTasks([]enginetask.Config{*taskCfg}), + workflow.WithOutputs(&outputs), + ) + require.NoError(t, err) + engine, err := compozy.New( + ctx, + compozy.WithProject(&engineproject.Config{Name: "migration"}), + compozy.WithAgent(agentCfg), + compozy.WithWorkflow(workflowCfg), + ) + require.NoError(t, err) + report, err := engine.ValidateReferences() + require.NoError(t, err) + assert.True(t, report.Valid) + assert.GreaterOrEqual(t, report.ResourceCount, 3) + }) +} + +func migrationTestContext(t *testing.T) context.Context { + t.Helper() + ctx := logger.ContextWithLogger(t.Context(), logger.NewForTests()) + service := appconfig.NewService() + manager := appconfig.NewManager(ctx, service) + _, err := manager.Load(ctx, appconfig.NewDefaultProvider()) + require.NoError(t, err) + ctx = appconfig.ContextWithManager(ctx, manager) + cfg := appconfig.FromContext(ctx) + require.NotNil(t, cfg) + listenCfg := net.ListenConfig{} + for attempt := 0; attempt < maxTemporalPortAttempts; attempt++ { + ln, err := 
listenCfg.Listen(context.WithoutCancel(t.Context()), "tcp", "127.0.0.1:0") + require.NoError(t, err) + addr := ln.Addr().(*net.TCPAddr) + require.NoError(t, ln.Close()) + if addr.Port <= maxTemporalPort { + cfg.Temporal.Standalone.FrontendPort = addr.Port + return ctx + } + } + t.Fatalf("failed to allocate Temporal frontend port within %d attempts", maxTemporalPortAttempts) + return nil +} diff --git a/sdk/compozy/mode.go b/sdk/compozy/mode.go new file mode 100644 index 00000000..5fcab94d --- /dev/null +++ b/sdk/compozy/mode.go @@ -0,0 +1,67 @@ +package compozy + +import ( + "context" + "errors" + "fmt" + + appconfig "github.com/compozy/compozy/pkg/config" + + "github.com/compozy/compozy/engine/resources" + "github.com/compozy/compozy/pkg/logger" +) + +type modeCleanup func(context.Context) error + +type modeRuntimeState struct { + resourceStore resources.ResourceStore + cleanups []modeCleanup +} + +func (s *modeRuntimeState) addCleanup(fn modeCleanup) { + if s == nil || fn == nil { + return + } + s.cleanups = append(s.cleanups, fn) +} + +func (s *modeRuntimeState) cleanup(ctx context.Context) error { + if s == nil { + return nil + } + var errs []error + for i := len(s.cleanups) - 1; i >= 0; i-- { + fn := s.cleanups[i] + if fn == nil { + continue + } + if err := fn(ctx); err != nil { + errs = append(errs, err) + } + } + s.cleanups = nil + return errors.Join(errs...) 
+} + +func (s *modeRuntimeState) cleanupOnError(ctx context.Context) { + if err := s.cleanup(ctx); err != nil { + log := logger.FromContext(ctx) + if log != nil { + log.Error("mode cleanup failed", "error", err) + } + } +} + +func (e *Engine) bootstrapMode(ctx context.Context, cfg *appconfig.Config) (*modeRuntimeState, error) { + if e == nil { + return nil, fmt.Errorf("engine is nil") + } + switch e.mode { + case ModeStandalone: + return e.bootstrapStandalone(ctx, cfg) + case ModeDistributed: + return e.bootstrapDistributed(ctx, cfg) + default: + return nil, fmt.Errorf("unsupported engine mode %q", e.mode) + } +} diff --git a/sdk/compozy/mode_test.go b/sdk/compozy/mode_test.go new file mode 100644 index 00000000..7c0abc88 --- /dev/null +++ b/sdk/compozy/mode_test.go @@ -0,0 +1,49 @@ +package compozy + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestModeRuntimeStateCleanup(t *testing.T) { + t.Parallel() + t.Run("Should invoke cleanup functions exactly once and ignore nil entries", func(t *testing.T) { + t.Parallel() + state := &modeRuntimeState{} + counter := 0 + state.addCleanup(func(context.Context) error { + counter++ + return nil + }) + state.addCleanup(nil) + err := state.cleanup(t.Context()) + assert.NoError(t, err) + assert.Equal(t, 1, counter) + }) + t.Run("Should invoke cleanup on error paths and log failures", func(t *testing.T) { + t.Parallel() + state := &modeRuntimeState{} + counter := 0 + state.addCleanup(func(context.Context) error { + counter++ + return errors.New("failure") + }) + state.cleanupOnError(t.Context()) + assert.Equal(t, 1, counter) + }) +} + +func TestBootstrapModeUnsupported(t *testing.T) { + t.Parallel() + t.Run("Should return error for unsupported mode", func(t *testing.T) { + t.Parallel() + engine := &Engine{mode: Mode("legacy")} + _, err := engine.bootstrapMode(t.Context(), nil) + require.Error(t, err) + assert.Contains(t, err.Error(), 
"unsupported engine mode") + }) +} diff --git a/sdk/compozy/options.go b/sdk/compozy/options.go new file mode 100644 index 00000000..18e92de2 --- /dev/null +++ b/sdk/compozy/options.go @@ -0,0 +1,111 @@ +package compozy + +import ( + "strings" + + engineagent "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + engineschema "github.com/compozy/compozy/engine/schema" + enginetool "github.com/compozy/compozy/engine/tool" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" +) + +// Option configures the Compozy engine during construction. +type Option func(*config) + +const ( + defaultMode = ModeStandalone + defaultHost = "127.0.0.1" +) + +type config struct { + mode Mode + host string + port int + project *engineproject.Config + workflows []*engineworkflow.Config + agents []*engineagent.Config + tools []*enginetool.Config + knowledgeBases []*engineknowledge.BaseConfig + memories []*enginememory.Config + mcps []*enginemcp.Config + schemas []*engineschema.Schema + models []*core.ProviderConfig + schedules []*projectschedule.Config + webhooks []*enginewebhook.Config + standaloneTemporal *StandaloneTemporalConfig + standaloneRedis *StandaloneRedisConfig +} + +func defaultConfig() *config { + return &config{ + mode: defaultMode, + host: defaultHost, + workflows: make([]*engineworkflow.Config, 0), + agents: make([]*engineagent.Config, 0), + tools: make([]*enginetool.Config, 0), + knowledgeBases: make([]*engineknowledge.BaseConfig, 0), + memories: make([]*enginememory.Config, 0), + mcps: make([]*enginemcp.Config, 0), + schemas: make([]*engineschema.Schema, 0), + models: 
make([]*core.ProviderConfig, 0), + schedules: make([]*projectschedule.Config, 0), + webhooks: make([]*enginewebhook.Config, 0), + } +} + +// WithMode sets the deployment mode for the engine. +func WithMode(mode Mode) Option { + return func(c *config) { + if c == nil { + return + } + c.mode = mode + } +} + +// WithHost overrides the bind host for the embedded server. +func WithHost(host string) Option { + return func(c *config) { + if c == nil { + return + } + c.host = strings.TrimSpace(host) + } +} + +// WithPort sets the HTTP port for the embedded server. +func WithPort(port int) Option { + return func(c *config) { + if c == nil { + return + } + c.port = port + } +} + +// WithStandaloneTemporal configures the embedded Temporal server for standalone mode. +func WithStandaloneTemporal(cfg *StandaloneTemporalConfig) Option { + return func(c *config) { + if c == nil { + return + } + c.standaloneTemporal = cfg + } +} + +// WithStandaloneRedis configures the embedded Redis server for standalone mode. +func WithStandaloneRedis(cfg *StandaloneRedisConfig) Option { + return func(c *config) { + if c == nil { + return + } + c.standaloneRedis = cfg + } +} diff --git a/sdk/compozy/options_generated.go b/sdk/compozy/options_generated.go new file mode 100644 index 00000000..89a6c421 --- /dev/null +++ b/sdk/compozy/options_generated.go @@ -0,0 +1,126 @@ +// Code generated by compozygen. DO NOT EDIT. 
+package compozy + +import ( + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + engineschema "github.com/compozy/compozy/engine/schema" + enginetool "github.com/compozy/compozy/engine/tool" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" +) + +// WithProject registers a project configuration for the engine. +func WithProject(cfg *engineproject.Config) Option { + return func(c *config) { + if c == nil || cfg == nil { + return + } + c.project = cfg + } +} + +// WithWorkflow registers a workflow configuration for the engine. +func WithWorkflow(cfg *engineworkflow.Config) Option { + return func(c *config) { + if c == nil || cfg == nil { + return + } + c.workflows = append(c.workflows, cfg) + } +} + +// WithAgent registers an agent configuration for the engine. +func WithAgent(cfg *engineagent.Config) Option { + return func(c *config) { + if c == nil || cfg == nil { + return + } + c.agents = append(c.agents, cfg) + } +} + +// WithTool registers a tool configuration for the engine. +func WithTool(cfg *enginetool.Config) Option { + return func(c *config) { + if c == nil || cfg == nil { + return + } + c.tools = append(c.tools, cfg) + } +} + +// WithKnowledge registers a knowledge base configuration for the engine. +func WithKnowledge(cfg *engineknowledge.BaseConfig) Option { + return func(c *config) { + if c == nil || cfg == nil { + return + } + c.knowledgeBases = append(c.knowledgeBases, cfg) + } +} + +// WithMemory registers a memory configuration for the engine. 
+func WithMemory(cfg *enginememory.Config) Option { + return func(c *config) { + if c == nil || cfg == nil { + return + } + c.memories = append(c.memories, cfg) + } +} + +// WithMCP registers an MCP configuration for the engine. +func WithMCP(cfg *enginemcp.Config) Option { + return func(c *config) { + if c == nil || cfg == nil { + return + } + c.mcps = append(c.mcps, cfg) + } +} + +// WithSchema registers a schema configuration for the engine. +func WithSchema(cfg *engineschema.Schema) Option { + return func(c *config) { + if c == nil || cfg == nil { + return + } + c.schemas = append(c.schemas, cfg) + } +} + +// WithModel registers a model configuration for the engine. +func WithModel(cfg *enginecore.ProviderConfig) Option { + return func(c *config) { + if c == nil || cfg == nil { + return + } + c.models = append(c.models, cfg) + } +} + +// WithSchedule registers a schedule configuration for the engine. +func WithSchedule(cfg *projectschedule.Config) Option { + return func(c *config) { + if c == nil || cfg == nil { + return + } + c.schedules = append(c.schedules, cfg) + } +} + +// WithWebhook registers a webhook configuration for the engine. 
+func WithWebhook(cfg *enginewebhook.Config) Option { + return func(c *config) { + if c == nil || cfg == nil { + return + } + c.webhooks = append(c.webhooks, cfg) + } +} diff --git a/sdk/compozy/options_test.go b/sdk/compozy/options_test.go new file mode 100644 index 00000000..f9f4470b --- /dev/null +++ b/sdk/compozy/options_test.go @@ -0,0 +1,243 @@ +package compozy + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + engineagent "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + engineschema "github.com/compozy/compozy/engine/schema" + enginetool "github.com/compozy/compozy/engine/tool" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" +) + +func TestDefaultConfigInitializesCollections(t *testing.T) { + t.Parallel() + cfg := defaultConfig() + require.NotNil(t, cfg) + assert.Equal(t, defaultMode, cfg.mode) + assert.Equal(t, defaultHost, cfg.host) + assert.Empty(t, cfg.workflows) + assert.Empty(t, cfg.agents) + assert.Empty(t, cfg.tools) + assert.Empty(t, cfg.knowledgeBases) + assert.Empty(t, cfg.memories) + assert.Empty(t, cfg.mcps) + assert.Empty(t, cfg.schemas) + assert.Empty(t, cfg.models) + assert.Empty(t, cfg.schedules) + assert.Empty(t, cfg.webhooks) +} + +func TestOptionsApplyBasics(t *testing.T) { + t.Parallel() + tests := []struct { + name string + option Option + check func(*config) + }{ + { + name: "WithMode", + option: WithMode(ModeDistributed), + check: func(cfg *config) { + assert.Equal(t, ModeDistributed, cfg.mode) + }, + }, + { + name: "WithHost", + option: WithHost(" 0.0.0.0 "), + check: 
func(cfg *config) { + assert.Equal(t, "0.0.0.0", cfg.host) + }, + }, + { + name: "WithPort", + option: WithPort(8080), + check: func(cfg *config) { + assert.Equal(t, 8080, cfg.port) + }, + }, + { + name: "WithProject", + option: func() Option { + projectCfg := &engineproject.Config{Name: "demo"} + return WithProject(projectCfg) + }(), + check: func(cfg *config) { + require.NotNil(t, cfg.project) + assert.Equal(t, "demo", cfg.project.Name) + }, + }, + } + for _, tc := range tests { + caseEntry := tc + t.Run(caseEntry.name, func(t *testing.T) { + applyAndCheckOption(t, caseEntry.option, caseEntry.check) + }) + } +} + +func TestWithWorkflowOption(t *testing.T) { + t.Parallel() + cfg := &engineworkflow.Config{ID: "wf"} + applyAndCheckOption(t, WithWorkflow(cfg), func(c *config) { + require.Len(t, c.workflows, 1) + assert.Equal(t, "wf", c.workflows[0].ID) + }) +} + +func TestWithAgentOption(t *testing.T) { + t.Parallel() + cfg := &engineagent.Config{ID: "agent"} + applyAndCheckOption(t, WithAgent(cfg), func(c *config) { + require.Len(t, c.agents, 1) + assert.Equal(t, "agent", c.agents[0].ID) + }) +} + +func TestWithToolOption(t *testing.T) { + t.Parallel() + cfg := &enginetool.Config{ID: "tool"} + applyAndCheckOption(t, WithTool(cfg), func(c *config) { + require.Len(t, c.tools, 1) + assert.Equal(t, "tool", c.tools[0].ID) + }) +} + +func TestWithKnowledgeOption(t *testing.T) { + t.Parallel() + cfg := &engineknowledge.BaseConfig{ID: "kb"} + applyAndCheckOption(t, WithKnowledge(cfg), func(c *config) { + require.Len(t, c.knowledgeBases, 1) + assert.Equal(t, "kb", c.knowledgeBases[0].ID) + }) +} + +func TestWithMemoryOption(t *testing.T) { + t.Parallel() + cfg := &enginememory.Config{ID: "mem"} + applyAndCheckOption(t, WithMemory(cfg), func(c *config) { + require.Len(t, c.memories, 1) + assert.Equal(t, "mem", c.memories[0].ID) + }) +} + +func TestWithMCPOption(t *testing.T) { + t.Parallel() + cfg := &enginemcp.Config{ID: "mcp"} + applyAndCheckOption(t, WithMCP(cfg), func(c 
*config) { + require.Len(t, c.mcps, 1) + assert.Equal(t, "mcp", c.mcps[0].ID) + }) +} + +func TestWithSchemaOption(t *testing.T) { + t.Parallel() + value := engineschema.Schema{"type": "object"} + applyAndCheckOption(t, WithSchema(&value), func(c *config) { + require.Len(t, c.schemas, 1) + assert.Equal(t, "object", (*c.schemas[0])["type"]) + }) +} + +func TestWithModelOption(t *testing.T) { + t.Parallel() + cfg := &core.ProviderConfig{Provider: core.ProviderName("openai"), Model: "gpt-4"} + applyAndCheckOption(t, WithModel(cfg), func(c *config) { + require.Len(t, c.models, 1) + assert.Equal(t, core.ProviderName("openai"), c.models[0].Provider) + assert.Equal(t, "gpt-4", c.models[0].Model) + }) +} + +func TestWithScheduleOption(t *testing.T) { + t.Parallel() + cfg := &projectschedule.Config{ID: "schedule"} + applyAndCheckOption(t, WithSchedule(cfg), func(c *config) { + require.Len(t, c.schedules, 1) + assert.Equal(t, "schedule", c.schedules[0].ID) + }) +} + +func TestWithWebhookOption(t *testing.T) { + t.Parallel() + cfg := &enginewebhook.Config{Slug: "webhook"} + applyAndCheckOption(t, WithWebhook(cfg), func(c *config) { + require.Len(t, c.webhooks, 1) + assert.Equal(t, "webhook", c.webhooks[0].Slug) + }) +} + +func TestOptionsApplyStandaloneConfigs(t *testing.T) { + t.Parallel() + tests := []struct { + name string + option Option + check func(*config) + }{ + { + name: "WithStandaloneTemporal", + option: WithStandaloneTemporal(&StandaloneTemporalConfig{FrontendPort: 7233}), + check: func(cfg *config) { + require.NotNil(t, cfg.standaloneTemporal) + assert.Equal(t, 7233, cfg.standaloneTemporal.FrontendPort) + }, + }, + { + name: "WithStandaloneRedis", + option: WithStandaloneRedis(&StandaloneRedisConfig{Port: 6379}), + check: func(cfg *config) { + require.NotNil(t, cfg.standaloneRedis) + assert.Equal(t, 6379, cfg.standaloneRedis.Port) + }, + }, + } + for _, tc := range tests { + caseEntry := tc + t.Run(caseEntry.name, func(t *testing.T) { + applyAndCheckOption(t, 
caseEntry.option, caseEntry.check) + }) + } +} + +func TestWithNilResources(t *testing.T) { + t.Parallel() + tests := []struct { + name string + option Option + }{ + {name: "WithWorkflow", option: WithWorkflow(nil)}, + {name: "WithAgent", option: WithAgent(nil)}, + {name: "WithTool", option: WithTool(nil)}, + {name: "WithKnowledge", option: WithKnowledge(nil)}, + {name: "WithMemory", option: WithMemory(nil)}, + {name: "WithMCP", option: WithMCP(nil)}, + {name: "WithSchema", option: WithSchema(nil)}, + {name: "WithModel", option: WithModel(nil)}, + {name: "WithSchedule", option: WithSchedule(nil)}, + {name: "WithWebhook", option: WithWebhook(nil)}, + } + for _, tc := range tests { + caseEntry := tc + t.Run(caseEntry.name, func(t *testing.T) { + cfg := defaultConfig() + caseEntry.option(cfg) + assert.Zero(t, cfg.resourceCount()) + }) + } +} + +func applyAndCheckOption(t *testing.T, option Option, check func(*config)) { + cfg := defaultConfig() + require.NotNil(t, cfg) + option(cfg) + check(cfg) +} diff --git a/sdk/compozy/registration_errors_test.go b/sdk/compozy/registration_errors_test.go new file mode 100644 index 00000000..41fa17c8 --- /dev/null +++ b/sdk/compozy/registration_errors_test.go @@ -0,0 +1,471 @@ +package compozy + +import ( + "context" + "errors" + "testing" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + "github.com/compozy/compozy/engine/resources" + engineschema "github.com/compozy/compozy/engine/schema" + enginetool "github.com/compozy/compozy/engine/tool" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type resourceStoreStub struct { + items map[resources.ResourceKey]any + putErr error + getErr error + metaErr bool + closeErr error +} + +func newResourceStoreStub() *resourceStoreStub { + return &resourceStoreStub{items: make(map[resources.ResourceKey]any)} +} + +func (s *resourceStoreStub) Put(_ context.Context, key resources.ResourceKey, value any) (resources.ETag, error) { + if s.metaErr && key.Type == resources.ResourceMeta { + return "", errors.New("meta failure") + } + if s.putErr != nil && key.Type != resources.ResourceMeta { + return "", s.putErr + } + s.items[key] = value + return "", nil +} + +func (s *resourceStoreStub) PutIfMatch( + _ context.Context, + _ resources.ResourceKey, + _ any, + _ resources.ETag, +) (resources.ETag, error) { + return "", nil +} + +func (s *resourceStoreStub) Get(_ context.Context, key resources.ResourceKey) (any, resources.ETag, error) { + if s.getErr != nil && key.Type != resources.ResourceMeta { + return nil, "", s.getErr + } + value, ok := s.items[key] + if !ok { + return nil, "", resources.ErrNotFound + } + return value, "", nil +} + +func (s *resourceStoreStub) Delete(context.Context, resources.ResourceKey) error { + return nil +} + +func (s *resourceStoreStub) List(context.Context, string, resources.ResourceType) ([]resources.ResourceKey, error) { + return nil, nil +} + +func (s *resourceStoreStub) Watch(context.Context, string, resources.ResourceType) (<-chan resources.Event, error) { + return nil, nil +} + +func (s *resourceStoreStub) ListWithValues( + context.Context, + string, + resources.ResourceType, +) ([]resources.StoredItem, error) { + return nil, nil +} + +func (s *resourceStoreStub) ListWithValuesPage( + context.Context, + string, + resources.ResourceType, + int, + int, +) ([]resources.StoredItem, int, error) { + return nil, 0, nil +} + +func (s *resourceStoreStub) Close() error { + return s.closeErr +} + +func 
TestPersistResourceRequiresIdentifiers(t *testing.T) { + t.Run("Should return error when workflow id missing", func(t *testing.T) { + engine := &Engine{ctx: t.Context()} + store := newResourceStoreStub() + err := engine.persistResource( + engine.ctx, + store, + "proj", + resources.ResourceWorkflow, + "", + map[string]any{}, + registrationSourceProgrammatic, + ) + assert.Error(t, err) + assert.Contains(t, err.Error(), "workflow id is required") + }) + t.Run("Should persist workflow with nil context", func(t *testing.T) { + engine := &Engine{ctx: t.Context()} + store := newResourceStoreStub() + var nilCtx context.Context + err := engine.persistResource( + nilCtx, + store, + "proj", + resources.ResourceWorkflow, + "wf", + map[string]any{}, + registrationSourceProgrammatic, + ) + assert.NoError(t, err) + }) + t.Run("Should persist workflow with nil store", func(t *testing.T) { + engine := &Engine{ctx: t.Context()} + err := engine.persistResource( + engine.ctx, + nil, + "proj", + resources.ResourceWorkflow, + "wf", + map[string]any{}, + registrationSourceProgrammatic, + ) + assert.NoError(t, err) + }) +} + +func TestPersistResourceDetectsExistingResource(t *testing.T) { + t.Run("Should detect existing resource", func(t *testing.T) { + store := newResourceStoreStub() + ctx := t.Context() + key := resources.ResourceKey{Project: "proj", Type: resources.ResourceWorkflow, ID: "wf"} + store.items[key] = map[string]any{"id": "wf"} + engine := &Engine{ctx: ctx} + err := engine.persistResource( + ctx, + store, + "proj", + resources.ResourceWorkflow, + "wf", + map[string]any{}, + registrationSourceProgrammatic, + ) + assert.Error(t, err) + assert.Contains(t, err.Error(), "already registered") + }) +} + +func TestPersistResourceHandlesStorePutErrors(t *testing.T) { + t.Run("Should surface store put failures", func(t *testing.T) { + store := newResourceStoreStub() + store.putErr = errors.New("store failure") + engine := &Engine{ctx: t.Context()} + err := engine.persistResource( + 
engine.ctx, + store, + "proj", + resources.ResourceWorkflow, + "wf", + map[string]any{}, + registrationSourceProgrammatic, + ) + assert.Error(t, err) + assert.Contains(t, err.Error(), "store workflow wf") + }) +} + +func TestPersistResourceReportsMetaWriteFailure(t *testing.T) { + t.Run("Should report metadata write failure", func(t *testing.T) { + store := newResourceStoreStub() + store.metaErr = true + engine := &Engine{ctx: t.Context()} + err := engine.persistResource( + engine.ctx, + store, + "proj", + resources.ResourceWorkflow, + "wf", + map[string]any{}, + registrationSourceProgrammatic, + ) + assert.Error(t, err) + assert.Contains(t, err.Error(), "write workflow wf metadata") + }) +} + +func TestRegisterProjectResetsStateOnPersistFailure(t *testing.T) { + t.Run("Should reset project state on persist failure", func(t *testing.T) { + t.Parallel() + store := newResourceStoreStub() + store.putErr = errors.New("persist failure") + engine := &Engine{ctx: t.Context(), resourceStore: store} + cfg := &engineproject.Config{Name: "helios"} + err := engine.registerProject(cfg, registrationSourceProgrammatic) + require.Error(t, err) + engine.stateMu.RLock() + defer engine.stateMu.RUnlock() + assert.Nil(t, engine.project) + }) +} + +func TestRegisterResourceNilConfigValidation(t *testing.T) { + tests := []struct { + name string + call func(*Engine) error + want string + }{ + { + name: "return error when project config missing", + call: func(engine *Engine) error { + var cfg *engineproject.Config + return engine.registerProject(cfg, registrationSourceProgrammatic) + }, + want: "project config is required", + }, + { + name: "return error when workflow config missing", + call: func(engine *Engine) error { + var cfg *engineworkflow.Config + return engine.registerWorkflow(cfg, registrationSourceProgrammatic) + }, + want: "workflow config is required", + }, + { + name: "return error when agent config missing", + call: func(engine *Engine) error { + var cfg *engineagent.Config 
+ return engine.registerAgent(cfg, registrationSourceProgrammatic) + }, + want: "agent config is required", + }, + { + name: "return error when tool config missing", + call: func(engine *Engine) error { + var cfg *enginetool.Config + return engine.registerTool(cfg, registrationSourceProgrammatic) + }, + want: "tool config is required", + }, + { + name: "return error when knowledge config missing", + call: func(engine *Engine) error { + var cfg *engineknowledge.BaseConfig + return engine.registerKnowledge(cfg, registrationSourceProgrammatic) + }, + want: "knowledge config is required", + }, + { + name: "return error when memory config missing", + call: func(engine *Engine) error { + var cfg *enginememory.Config + return engine.registerMemory(cfg, registrationSourceProgrammatic) + }, + want: "memory config is required", + }, + { + name: "return error when mcp config missing", + call: func(engine *Engine) error { + var cfg *enginemcp.Config + return engine.registerMCP(cfg, registrationSourceProgrammatic) + }, + want: "mcp config is required", + }, + { + name: "return error when schema config missing", + call: func(engine *Engine) error { + var cfg *engineschema.Schema + return engine.registerSchema(cfg, registrationSourceProgrammatic) + }, + want: "schema config is required", + }, + { + name: "return error when model config missing", + call: func(engine *Engine) error { + var cfg *enginecore.ProviderConfig + return engine.registerModel(cfg, registrationSourceProgrammatic) + }, + want: "model config is required", + }, + { + name: "return error when schedule config missing", + call: func(engine *Engine) error { + var cfg *projectschedule.Config + return engine.registerSchedule(cfg, registrationSourceProgrammatic) + }, + want: "schedule config is required", + }, + { + name: "return error when webhook config missing", + call: func(engine *Engine) error { + var cfg *enginewebhook.Config + return engine.registerWebhook(cfg, registrationSourceProgrammatic) + }, + want: 
"webhook config is required", + }, + } + for _, tc := range tests { + caseEntry := tc + t.Run("Should "+caseEntry.name, func(t *testing.T) { + engine := &Engine{ctx: t.Context(), resourceStore: newResourceStoreStub()} + err := caseEntry.call(engine) + require.Error(t, err) + assert.Contains(t, err.Error(), caseEntry.want) + }) + } +} + +func TestRegisterResourceEmptyIdentifier(t *testing.T) { + tests := []struct { + name string + call func(*Engine) error + want string + }{ + { + name: "return error when project name missing", + call: func(engine *Engine) error { + return engine.registerProject(&engineproject.Config{}, registrationSourceProgrammatic) + }, + want: "project name is required", + }, + { + name: "return error when workflow id missing", + call: func(engine *Engine) error { + return engine.registerWorkflow(&engineworkflow.Config{}, registrationSourceProgrammatic) + }, + want: "workflow id is required", + }, + { + name: "return error when agent id missing", + call: func(engine *Engine) error { + return engine.registerAgent(&engineagent.Config{}, registrationSourceProgrammatic) + }, + want: "agent id is required", + }, + { + name: "return error when tool id missing", + call: func(engine *Engine) error { + return engine.registerTool(&enginetool.Config{}, registrationSourceProgrammatic) + }, + want: "tool id is required", + }, + { + name: "return error when knowledge id missing", + call: func(engine *Engine) error { + return engine.registerKnowledge(&engineknowledge.BaseConfig{}, registrationSourceProgrammatic) + }, + want: "knowledge base id is required", + }, + { + name: "return error when memory id missing", + call: func(engine *Engine) error { + return engine.registerMemory(&enginememory.Config{}, registrationSourceProgrammatic) + }, + want: "memory id is required", + }, + { + name: "return error when mcp id missing", + call: func(engine *Engine) error { + return engine.registerMCP(&enginemcp.Config{}, registrationSourceProgrammatic) + }, + want: "mcp id 
is required", + }, + { + name: "return error when schema id missing", + call: func(engine *Engine) error { + schema := engineschema.Schema{} + return engine.registerSchema(&schema, registrationSourceProgrammatic) + }, + want: "schema id is required", + }, + { + name: "return error when model identifier missing", + call: func(engine *Engine) error { + return engine.registerModel(&enginecore.ProviderConfig{}, registrationSourceProgrammatic) + }, + want: "model identifier is required", + }, + { + name: "return error when schedule id missing", + call: func(engine *Engine) error { + return engine.registerSchedule(&projectschedule.Config{}, registrationSourceProgrammatic) + }, + want: "schedule id is required", + }, + { + name: "return error when webhook slug missing", + call: func(engine *Engine) error { + return engine.registerWebhook(&enginewebhook.Config{}, registrationSourceProgrammatic) + }, + want: "webhook slug is required", + }, + } + for _, tc := range tests { + caseEntry := tc + t.Run("Should "+caseEntry.name, func(t *testing.T) { + engine := &Engine{ctx: t.Context(), resourceStore: newResourceStoreStub()} + err := caseEntry.call(engine) + require.Error(t, err) + assert.Contains(t, err.Error(), caseEntry.want) + }) + } +} + +func TestRegisterResourceDuplicateDetection(t *testing.T) { + t.Run("Should detect duplicate registrations across resources", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx, resourceStore: newResourceStoreStub()} + require.NoError(t, engine.registerProject(&engineproject.Config{Name: "dup"}, registrationSourceProgrammatic)) + require.NoError(t, engine.registerWorkflow(&engineworkflow.Config{ID: "wf"}, registrationSourceProgrammatic)) + require.NoError(t, engine.registerTool(&enginetool.Config{ID: "tool"}, registrationSourceProgrammatic)) + require.NoError( + t, + engine.registerKnowledge(&engineknowledge.BaseConfig{ID: "kb"}, registrationSourceProgrammatic), + ) + require.NoError(t, 
engine.registerMemory(&enginememory.Config{ID: "mem"}, registrationSourceProgrammatic)) + require.NoError(t, engine.registerMCP(&enginemcp.Config{ID: "mcp"}, registrationSourceProgrammatic)) + schema := engineschema.Schema{"id": "schema-1", "type": "object"} + require.NoError(t, engine.registerSchema(&schema, registrationSourceProgrammatic)) + require.NoError( + t, + engine.registerModel( + &enginecore.ProviderConfig{Provider: enginecore.ProviderName("openai"), Model: "gpt"}, + registrationSourceProgrammatic, + ), + ) + require.NoError( + t, + engine.registerSchedule(&projectschedule.Config{ID: "schedule"}, registrationSourceProgrammatic), + ) + require.NoError(t, engine.registerWebhook(&enginewebhook.Config{Slug: "hook"}, registrationSourceProgrammatic)) + assert.Error(t, engine.registerProject(&engineproject.Config{Name: "dup"}, registrationSourceProgrammatic)) + assert.Error(t, engine.registerWorkflow(&engineworkflow.Config{ID: "wf"}, registrationSourceProgrammatic)) + assert.Error(t, engine.registerTool(&enginetool.Config{ID: "tool"}, registrationSourceProgrammatic)) + assert.Error(t, engine.registerKnowledge(&engineknowledge.BaseConfig{ID: "kb"}, registrationSourceProgrammatic)) + assert.Error(t, engine.registerMemory(&enginememory.Config{ID: "mem"}, registrationSourceProgrammatic)) + assert.Error(t, engine.registerMCP(&enginemcp.Config{ID: "mcp"}, registrationSourceProgrammatic)) + assert.Error(t, engine.registerSchema(&schema, registrationSourceProgrammatic)) + assert.Error( + t, + engine.registerModel( + &enginecore.ProviderConfig{Provider: enginecore.ProviderName("openai"), Model: "gpt"}, + registrationSourceProgrammatic, + ), + ) + assert.Error( + t, + engine.registerSchedule(&projectschedule.Config{ID: "schedule"}, registrationSourceProgrammatic), + ) + assert.Error(t, engine.registerWebhook(&enginewebhook.Config{Slug: "hook"}, registrationSourceProgrammatic)) + }) +} diff --git a/sdk/compozy/registration_test.go b/sdk/compozy/registration_test.go new 
file mode 100644 index 00000000..4a77a1bb --- /dev/null +++ b/sdk/compozy/registration_test.go @@ -0,0 +1,232 @@ +package compozy + +import ( + "testing" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + "github.com/compozy/compozy/engine/resources" + engineschema "github.com/compozy/compozy/engine/schema" + enginetask "github.com/compozy/compozy/engine/task" + enginetool "github.com/compozy/compozy/engine/tool" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" + mcpproxy "github.com/compozy/compozy/pkg/mcp-proxy" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRegisterProject(t *testing.T) { + t.Run("Should register project and reject duplicates", func(t *testing.T) { + engine := newSeedEngine(t) + require.NoError(t, engine.RegisterProject(&engineproject.Config{Name: "reg-project"})) + assert.Equal(t, "reg-project", engine.project.Name) + assert.Error(t, engine.RegisterProject(&engineproject.Config{Name: "reg-project"})) + }) +} + +func TestRegisterWorkflow(t *testing.T) { + t.Run("Should register secondary workflow", func(t *testing.T) { + engine := newSeedEngine(t) + requireProjectRegistered(t, engine, "reg-project") + require.NoError(t, engine.RegisterWorkflow(&engineworkflow.Config{ + ID: "secondary", + Tasks: []enginetask.Config{ + {BaseConfig: enginetask.BaseConfig{ID: "secondary-task"}}, + }, + })) + assert.Len(t, engine.workflows, 2) + }) +} + +func TestRegisterAgent(t *testing.T) { + t.Run("Should register agent and detect duplicate", func(t *testing.T) { + engine := 
newSeedEngine(t) + requireProjectRegistered(t, engine, "reg-project") + require.NoError(t, engine.RegisterAgent(&engineagent.Config{ + ID: "agent-alpha", + Instructions: "Provide assistance", + Model: engineagent.Model{ + Config: enginecore.ProviderConfig{ + Provider: enginecore.ProviderName("openai"), + Model: "gpt-4o-mini", + }, + }, + })) + assert.Error(t, engine.RegisterAgent(&engineagent.Config{ID: "agent-alpha"})) + assert.Len(t, engine.agents, 1) + }) +} + +func TestRegisterTool(t *testing.T) { + t.Run("Should register tool", func(t *testing.T) { + engine := newSeedEngine(t) + requireProjectRegistered(t, engine, "reg-project") + require.NoError(t, engine.RegisterTool(&enginetool.Config{ID: "tool-alpha"})) + assert.Len(t, engine.tools, 1) + assert.Error(t, engine.RegisterTool(&enginetool.Config{ID: "tool-alpha"})) + }) +} + +func TestRegisterKnowledgeBase(t *testing.T) { + t.Run("Should register knowledge base", func(t *testing.T) { + engine := newSeedEngine(t) + requireProjectRegistered(t, engine, "reg-project") + require.NoError(t, engine.RegisterKnowledge(&engineknowledge.BaseConfig{ID: "kb-alpha"})) + assert.Len(t, engine.knowledgeBases, 1) + assert.Error(t, engine.RegisterKnowledge(&engineknowledge.BaseConfig{ID: "kb-alpha"})) + }) +} + +func TestRegisterMemory(t *testing.T) { + t.Run("Should register memory", func(t *testing.T) { + engine := newSeedEngine(t) + requireProjectRegistered(t, engine, "reg-project") + require.NoError(t, engine.RegisterMemory(&enginememory.Config{ID: "memory-alpha"})) + assert.Len(t, engine.memories, 1) + assert.Error(t, engine.RegisterMemory(&enginememory.Config{ID: "memory-alpha"})) + }) +} + +func TestRegisterMCP(t *testing.T) { + t.Run("Should register mcp", func(t *testing.T) { + engine := newSeedEngine(t) + requireProjectRegistered(t, engine, "reg-project") + require.NoError(t, engine.RegisterMCP(&enginemcp.Config{ + ID: "mcp-alpha", + Command: "echo", + Transport: mcpproxy.TransportStdio, + })) + assert.Len(t, 
engine.mcps, 1) + assert.Error(t, engine.RegisterMCP(&enginemcp.Config{ID: "mcp-alpha"})) + }) +} + +func TestRegisterSchema(t *testing.T) { + t.Run("Should register schema", func(t *testing.T) { + engine := newSeedEngine(t) + requireProjectRegistered(t, engine, "reg-project") + schema := engineschema.Schema{"id": "schema-alpha", "type": "object"} + require.NoError(t, engine.RegisterSchema(&schema)) + assert.Len(t, engine.schemas, 1) + assert.Error(t, engine.RegisterSchema(&schema)) + }) +} + +func TestRegisterModel(t *testing.T) { + t.Run("Should register model", func(t *testing.T) { + engine := newSeedEngine(t) + requireProjectRegistered(t, engine, "reg-project") + require.NoError(t, engine.RegisterModel(&enginecore.ProviderConfig{ + Provider: enginecore.ProviderName("anthropic"), + Model: "claude", + })) + assert.Len(t, engine.models, 1) + assert.Error(t, engine.RegisterModel(&enginecore.ProviderConfig{ + Provider: enginecore.ProviderName("anthropic"), + Model: "claude", + })) + }) +} + +func TestRegisterSchedule(t *testing.T) { + t.Run("Should register schedule", func(t *testing.T) { + engine := newSeedEngine(t) + requireProjectRegistered(t, engine, "reg-project") + require.NoError(t, engine.RegisterWorkflow(&engineworkflow.Config{ + ID: "secondary", + Tasks: []enginetask.Config{ + {BaseConfig: enginetask.BaseConfig{ID: "secondary-task"}}, + }, + })) + require.NoError(t, engine.RegisterSchedule(&projectschedule.Config{ + ID: "schedule-alpha", + WorkflowID: "secondary", + Cron: "*/5 * * * *", + })) + assert.Len(t, engine.schedules, 1) + projectName := projectNameOf(engine.project) + value, _, err := engine.resourceStore.Get(engine.ctx, resources.ResourceKey{ + Project: projectName, + Type: resources.ResourceSchedule, + ID: "schedule-alpha", + }) + require.NoError(t, err) + require.NotNil(t, value) + assert.Error(t, engine.RegisterSchedule(&projectschedule.Config{ + ID: "schedule-alpha", + WorkflowID: "secondary", + Cron: "*/5 * * * *", + })) + }) +} + +func 
TestRegisterWebhook(t *testing.T) { + t.Run("Should register webhook", func(t *testing.T) { + engine := newSeedEngine(t) + requireProjectRegistered(t, engine, "reg-project") + require.NoError(t, engine.RegisterWebhook(&enginewebhook.Config{ + Slug: "webhook-alpha", + Events: []enginewebhook.EventConfig{ + { + Name: "created", + Filter: "true", + Input: map[string]string{"field": "value"}, + }, + }, + })) + assert.Len(t, engine.webhooks, 1) + projectName := projectNameOf(engine.project) + stored, _, err := engine.resourceStore.Get(engine.ctx, resources.ResourceKey{ + Project: projectName, + Type: resources.ResourceWebhook, + ID: "webhook-alpha", + }) + require.NoError(t, err) + require.NotNil(t, stored) + assert.Error(t, engine.RegisterWebhook(&enginewebhook.Config{Slug: "webhook-alpha"})) + }) +} + +func TestRegisterScheduleRollsBackOnPersistError(t *testing.T) { + engine := newSeedEngine(t) + requireProjectRegistered(t, engine, "reg-project") + engine.resourceStore = &failingStore{ResourceStore: resources.NewMemoryResourceStore()} + err := engine.RegisterSchedule(&projectschedule.Config{ID: "schedule-fail", WorkflowID: "seed", Cron: "* * * * *"}) + require.Error(t, err) + assert.Len(t, engine.schedules, 0) +} + +func TestRegisterWebhookRollsBackOnPersistError(t *testing.T) { + engine := newSeedEngine(t) + requireProjectRegistered(t, engine, "reg-project") + engine.resourceStore = &failingStore{ResourceStore: resources.NewMemoryResourceStore()} + err := engine.RegisterWebhook(&enginewebhook.Config{Slug: "webhook-fail"}) + require.Error(t, err) + assert.Len(t, engine.webhooks, 0) +} + +func newSeedEngine(t *testing.T) *Engine { + t.Helper() + ctx := lifecycleTestContext(t) + baseWorkflow := &engineworkflow.Config{ + ID: "seed", + Tasks: []enginetask.Config{ + {BaseConfig: enginetask.BaseConfig{ID: "seed-task"}}, + }, + } + engine, err := New(ctx, WithWorkflow(baseWorkflow)) + require.NoError(t, err) + engine.resourceStore = resources.NewMemoryResourceStore() + 
return engine +} + +func requireProjectRegistered(t *testing.T, engine *Engine, name string) { + t.Helper() + require.NoError(t, engine.RegisterProject(&engineproject.Config{Name: name})) +} diff --git a/sdk/compozy/resources_graph_test.go b/sdk/compozy/resources_graph_test.go new file mode 100644 index 00000000..89c76c06 --- /dev/null +++ b/sdk/compozy/resources_graph_test.go @@ -0,0 +1,77 @@ +package compozy + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestExtractCycle(t *testing.T) { + t.Run("Should derive cycle from traversal history", func(t *testing.T) { + path := []string{"task:alpha/a", "task:alpha/b", "task:alpha/c"} + cycle := extractCycle(path, "task:alpha/b") + assert.Equal(t, []string{"task:alpha/b", "task:alpha/c", "task:alpha/b"}, cycle) + }) + t.Run("Should return target when path empty", func(t *testing.T) { + cycle := extractCycle(nil, "task:alpha/b") + assert.Equal(t, []string{"task:alpha/b"}, cycle) + }) + t.Run("Should return target when not in path", func(t *testing.T) { + path := []string{"task:alpha/a", "task:alpha/c"} + cycle := extractCycle(path, "task:alpha/b") + assert.Equal(t, []string{"task:alpha/b"}, cycle) + }) + t.Run("Should handle target at start", func(t *testing.T) { + path := []string{"task:alpha/b", "task:alpha/c"} + cycle := extractCycle(path, "task:alpha/b") + assert.Equal(t, []string{"task:alpha/b", "task:alpha/c", "task:alpha/b"}, cycle) + }) + t.Run("Should handle target at end", func(t *testing.T) { + path := []string{"task:alpha/a", "task:alpha/b"} + cycle := extractCycle(path, "task:alpha/b") + assert.Equal(t, []string{"task:alpha/b", "task:alpha/b"}, cycle) + }) + t.Run("Should handle single element path", func(t *testing.T) { + path := []string{"task:alpha/b"} + cycle := extractCycle(path, "task:alpha/b") + assert.Equal(t, []string{"task:alpha/b", "task:alpha/b"}, cycle) + }) + t.Run("Should use latest occurrence when target repeats", func(t *testing.T) { + path := 
[]string{"task:alpha/a", "task:alpha/b", "task:alpha/c", "task:alpha/b"} + cycle := extractCycle(path, "task:alpha/b") + assert.Equal(t, []string{"task:alpha/b", "task:alpha/b"}, cycle) + }) +} + +func TestParseNode(t *testing.T) { + t.Run("Should split node identifier into type and id", func(t *testing.T) { + typ, id := parseNode("workflow:sample") + assert.Equal(t, "workflow", typ) + assert.Equal(t, "sample", id) + }) + t.Run("Should return type and empty id when delimiter missing", func(t *testing.T) { + typ, id := parseNode("invalidnode") + assert.Equal(t, "invalidnode", typ) + assert.Equal(t, "", id) + }) + t.Run("Should return empty parts when input empty", func(t *testing.T) { + typ, id := parseNode("") + assert.Equal(t, "", typ) + assert.Equal(t, "", id) + }) + t.Run("Should split only on first delimiter", func(t *testing.T) { + typ, id := parseNode("workflow:sample:extra") + assert.Equal(t, "workflow", typ) + assert.Equal(t, "sample:extra", id) + }) + t.Run("Should handle delimiter only input", func(t *testing.T) { + typ, id := parseNode(":") + assert.Equal(t, "", typ) + assert.Equal(t, "", id) + }) + t.Run("Should trim whitespace around type and id", func(t *testing.T) { + typ, id := parseNode(" workflow: sample ") + assert.Equal(t, "workflow", typ) + assert.Equal(t, "sample", id) + }) +} diff --git a/sdk/compozy/standalone.go b/sdk/compozy/standalone.go new file mode 100644 index 00000000..67b37fb6 --- /dev/null +++ b/sdk/compozy/standalone.go @@ -0,0 +1,187 @@ +package compozy + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + "github.com/compozy/compozy/engine/infra/cache" + "github.com/compozy/compozy/engine/resources" + "github.com/compozy/compozy/engine/worker/embedded" + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" +) + +func (e *Engine) bootstrapStandalone(ctx context.Context, cfg *appconfig.Config) (*modeRuntimeState, error) { + state := &modeRuntimeState{} + log := 
logger.FromContext(ctx) + if cfg == nil { + return nil, fmt.Errorf("configuration is required") + } + mergeStandaloneRedisConfig(cfg, e.standaloneRedis) + embeddedCfg := mergeStandaloneTemporalConfig(cfg, e.standaloneTemporal) + server, err := embedded.NewServer(ctx, embeddedCfg) + if err != nil { + return nil, fmt.Errorf("prepare embedded temporal server: %w", err) + } + if err := server.Start(ctx); err != nil { + state.addCleanup(func(cleanupCtx context.Context) error { + return stopEmbeddedTemporal(cleanupCtx, server, embeddedCfg.StartTimeout, cfg) + }) + state.cleanupOnError(context.WithoutCancel(ctx)) + return nil, fmt.Errorf("start embedded temporal server: %w", err) + } + cfg.Temporal.HostPort = server.FrontendAddress() + state.addCleanup(func(cleanupCtx context.Context) error { + return stopEmbeddedTemporal(cleanupCtx, server, embeddedCfg.StartTimeout, cfg) + }) + if log != nil { + log.Info("embedded temporal server started", + "frontend_addr", server.FrontendAddress(), + "namespace", embeddedCfg.Namespace, + "cluster", embeddedCfg.ClusterName, + ) + } + mini, err := cache.NewMiniredisStandalone(ctx) + if err != nil { + state.cleanupOnError(context.WithoutCancel(ctx)) + return nil, fmt.Errorf("start embedded redis: %w", err) + } + state.addCleanup(func(cleanupCtx context.Context) error { + return mini.Close(context.WithoutCancel(cleanupCtx)) + }) + redisClient := mini.Client() + addr := redisClient.Options().Addr + updateRedisConfig(cfg, addr) + useRedisStore := e.shouldUseStandaloneRedis(cfg) + if log != nil { + storeKind := "memory" + if useRedisStore { + storeKind = "redis" + } + log.Info("embedded redis ready", "addr", addr, "resource_store", storeKind) + } + state.resourceStore = selectStandaloneStore(redisClient, useRedisStore) + return state, nil +} + +func mergeStandaloneTemporalConfig(cfg *appconfig.Config, override *StandaloneTemporalConfig) *embedded.Config { + base := cfg.Temporal.Standalone + if override != nil { + if override.DatabaseFile != 
"" { + base.DatabaseFile = override.DatabaseFile + } + if override.FrontendPort != 0 { + base.FrontendPort = override.FrontendPort + } + if override.BindIP != "" { + base.BindIP = override.BindIP + } + if override.Namespace != "" { + base.Namespace = override.Namespace + } + if override.ClusterName != "" { + base.ClusterName = override.ClusterName + } + if override.UIPort != 0 { + base.UIPort = override.UIPort + } + if override.LogLevel != "" { + base.LogLevel = override.LogLevel + } + if override.StartTimeout > 0 { + base.StartTimeout = override.StartTimeout + } + base.EnableUI = override.EnableUI + } + cfg.Temporal.Standalone = base + return &embedded.Config{ + DatabaseFile: base.DatabaseFile, + FrontendPort: base.FrontendPort, + BindIP: base.BindIP, + Namespace: base.Namespace, + ClusterName: base.ClusterName, + EnableUI: base.EnableUI, + RequireUI: base.RequireUI, + UIPort: base.UIPort, + LogLevel: base.LogLevel, + StartTimeout: base.StartTimeout, + } +} + +func mergeStandaloneRedisConfig(cfg *appconfig.Config, override *StandaloneRedisConfig) { + if cfg == nil || override == nil { + return + } + if override.Persistence { + cfg.Redis.Standalone.Persistence.Enabled = true + } + if override.PersistenceDir != "" { + cfg.Redis.Standalone.Persistence.DataDir = override.PersistenceDir + } + if override.SnapshotInterval > 0 { + cfg.Redis.Standalone.Persistence.SnapshotInterval = override.SnapshotInterval + } +} + +func stopEmbeddedTemporal( + ctx context.Context, + server *embedded.Server, + startup time.Duration, + cfg *appconfig.Config, +) error { + if server == nil { + return nil + } + shutdown := time.Duration(0) + if cfg != nil { + shutdown = cfg.Server.Timeouts.WorkerShutdown + } + if shutdown <= 0 { + shutdown = startup + } + stopCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), shutdown) + defer cancel() + return server.Stop(stopCtx) +} + +func selectStandaloneStore(client cache.RedisInterface, useRedis bool) resources.ResourceStore { + if 
useRedis && client != nil { + return resources.NewRedisResourceStore(client) + } + return resources.NewMemoryResourceStore() +} + +func (e *Engine) shouldUseStandaloneRedis(cfg *appconfig.Config) bool { + if e != nil && e.standaloneRedis != nil { + return true + } + if cfg == nil { + return false + } + return strings.EqualFold(strings.TrimSpace(cfg.Redis.Mode), string(ModeStandalone)) +} + +func updateRedisConfig(cfg *appconfig.Config, addr string) { + if cfg == nil || addr == "" { + return + } + cfg.Redis.URL = fmt.Sprintf("redis://%s", addr) + host, port := parseRedisHostPort(addr) + if host != "" { + cfg.Redis.Host = host + } + if port != "" { + cfg.Redis.Port = port + } +} + +func parseRedisHostPort(addr string) (string, string) { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return addr, "" + } + return host, port +} diff --git a/sdk/compozy/standalone_test.go b/sdk/compozy/standalone_test.go new file mode 100644 index 00000000..b811fae6 --- /dev/null +++ b/sdk/compozy/standalone_test.go @@ -0,0 +1,109 @@ +package compozy + +import ( + "testing" + "time" + + "github.com/compozy/compozy/engine/resources" + engineworkflow "github.com/compozy/compozy/engine/workflow" + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStandaloneResourceStoreDefaultsToMemory(t *testing.T) { + ctx := lifecycleTestContext(t) + cfg := appconfig.FromContext(ctx) + require.NotNil(t, cfg) + engine, err := New(ctx, WithWorkflow(&engineworkflow.Config{ID: "standalone-memory"})) + require.NoError(t, err) + store, err := engine.buildResourceStore(ctx, cfg) + require.NoError(t, err) + assert.IsType(t, &resources.MemoryResourceStore{}, store) + assert.NoError(t, store.Close()) + assert.NoError(t, engine.cleanupModeResources(ctx)) +} + +func TestStandaloneResourceStoreUsesRedisWhenConfigured(t *testing.T) { + ctx := lifecycleTestContext(t) + cfg := appconfig.FromContext(ctx) + 
require.NotNil(t, cfg) + engine, err := New(ctx, + WithWorkflow(&engineworkflow.Config{ID: "standalone-redis"}), + WithStandaloneRedis(&StandaloneRedisConfig{Persistence: false}), + ) + require.NoError(t, err) + store, err := engine.buildResourceStore(ctx, cfg) + require.NoError(t, err) + assert.IsType(t, &resources.RedisResourceStore{}, store) + assert.NoError(t, store.Close()) + assert.NoError(t, engine.cleanupModeResources(ctx)) +} + +func TestMergeStandaloneTemporalConfigOverrides(t *testing.T) { + ctx := lifecycleTestContext(t) + cfg := appconfig.FromContext(ctx) + require.NotNil(t, cfg) + override := &StandaloneTemporalConfig{ + DatabaseFile: "custom.db", + FrontendPort: 7234, + BindIP: "0.0.0.0", + Namespace: "custom-ns", + ClusterName: "custom-cluster", + EnableUI: true, + UIPort: 7443, + LogLevel: "debug", + StartTimeout: 3 * time.Second, + } + embeddedCfg := mergeStandaloneTemporalConfig(cfg, override) + assert.Equal(t, "custom.db", cfg.Temporal.Standalone.DatabaseFile) + assert.Equal(t, 7234, cfg.Temporal.Standalone.FrontendPort) + assert.Equal(t, "0.0.0.0", cfg.Temporal.Standalone.BindIP) + assert.Equal(t, "custom-ns", cfg.Temporal.Standalone.Namespace) + assert.Equal(t, "custom-cluster", cfg.Temporal.Standalone.ClusterName) + assert.True(t, cfg.Temporal.Standalone.EnableUI) + assert.Equal(t, 7443, cfg.Temporal.Standalone.UIPort) + assert.Equal(t, "debug", cfg.Temporal.Standalone.LogLevel) + assert.Equal(t, 3*time.Second, cfg.Temporal.Standalone.StartTimeout) + assert.Equal(t, cfg.Temporal.Standalone.DatabaseFile, embeddedCfg.DatabaseFile) + assert.Equal(t, cfg.Temporal.Standalone.FrontendPort, embeddedCfg.FrontendPort) + assert.Equal(t, cfg.Temporal.Standalone.Namespace, embeddedCfg.Namespace) + assert.Equal(t, cfg.Temporal.Standalone.ClusterName, embeddedCfg.ClusterName) + assert.Equal(t, cfg.Temporal.Standalone.EnableUI, embeddedCfg.EnableUI) + assert.Equal(t, cfg.Temporal.Standalone.UIPort, embeddedCfg.UIPort) + assert.Equal(t, 
cfg.Temporal.Standalone.LogLevel, embeddedCfg.LogLevel) + assert.Equal(t, cfg.Temporal.Standalone.StartTimeout, embeddedCfg.StartTimeout) +} + +func TestMergeStandaloneRedisConfigOverrides(t *testing.T) { + ctx := lifecycleTestContext(t) + cfg := appconfig.FromContext(ctx) + require.NotNil(t, cfg) + override := &StandaloneRedisConfig{ + Persistence: true, + PersistenceDir: "/tmp/redis-data", + SnapshotInterval: 5 * time.Minute, + } + mergeStandaloneRedisConfig(cfg, override) + assert.True(t, cfg.Redis.Standalone.Persistence.Enabled) + assert.Equal(t, "/tmp/redis-data", cfg.Redis.Standalone.Persistence.DataDir) + assert.Equal(t, 5*time.Minute, cfg.Redis.Standalone.Persistence.SnapshotInterval) + mergeStandaloneRedisConfig(nil, override) + mergeStandaloneRedisConfig(cfg, nil) +} + +func TestUpdateRedisConfigAppliesConnectionDetails(t *testing.T) { + cfg := &appconfig.Config{} + updateRedisConfig(cfg, "127.0.0.1:6379") + assert.Equal(t, "redis://127.0.0.1:6379", cfg.Redis.URL) + assert.Equal(t, "127.0.0.1", cfg.Redis.Host) + assert.Equal(t, "6379", cfg.Redis.Port) + updateRedisConfig(cfg, "") + assert.Equal(t, "redis://127.0.0.1:6379", cfg.Redis.URL) +} + +func TestParseRedisHostPortHandlesMissingPort(t *testing.T) { + host, port := parseRedisHostPort("cache.local") + assert.Equal(t, "cache.local", host) + assert.Equal(t, "", port) +} diff --git a/sdk/compozy/types.go b/sdk/compozy/types.go new file mode 100644 index 00000000..e101ec2d --- /dev/null +++ b/sdk/compozy/types.go @@ -0,0 +1,97 @@ +package compozy + +import "time" + +// ExecuteRequest is the unified request type for asynchronous executions. +type ExecuteRequest struct { + Input map[string]any `json:"input,omitempty"` + Options map[string]any `json:"options,omitempty"` +} + +// ExecuteSyncRequest is the unified request type for synchronous executions. 
+type ExecuteSyncRequest struct { + Input map[string]any `json:"input,omitempty"` + Options map[string]any `json:"options,omitempty"` + Timeout *time.Duration `json:"timeout,omitempty"` +} + +// ExecuteResponse contains metadata about an asynchronous execution handle. +type ExecuteResponse struct { + ExecID string `json:"exec_id"` + ExecURL string `json:"exec_url"` +} + +// ExecuteSyncResponse represents the outcome of a synchronous execution. +type ExecuteSyncResponse struct { + ExecID string `json:"exec_id"` + Output map[string]any `json:"output"` +} + +// Mode declares the deployment strategy for the Compozy engine. +type Mode string + +const ( + // ModeStandalone runs the engine with embedded dependencies. + ModeStandalone Mode = "standalone" + // ModeDistributed connects the engine to externally managed infrastructure. + ModeDistributed Mode = "distributed" +) + +// StandaloneTemporalConfig configures the embedded Temporal server when running in standalone mode. +type StandaloneTemporalConfig struct { + DatabaseFile string + FrontendPort int + BindIP string + Namespace string + ClusterName string + EnableUI bool + UIPort int + LogLevel string + StartTimeout time.Duration +} + +// StandaloneRedisConfig configures the embedded Redis server when running in standalone mode. +type StandaloneRedisConfig struct { + Port int + Persistence bool + PersistenceDir string + SnapshotInterval time.Duration + MaxMemory int64 +} + +// ValidationError captures a validation failure discovered during reference checks. +type ValidationError struct { + ResourceType string `json:"resource_type"` + ResourceID string `json:"resource_id"` + Message string `json:"message"` +} + +// ValidationWarning captures a non-fatal validation warning. +type ValidationWarning struct { + ResourceType string `json:"resource_type"` + ResourceID string `json:"resource_id"` + Message string `json:"message"` +} + +// CircularDependency describes a detected dependency loop between resources. 
+type CircularDependency struct { + Chain []string `json:"chain"` +} + +// MissingReference captures an unresolved reference discovered during validation. +type MissingReference struct { + ResourceType string `json:"resource_type"` + ResourceID string `json:"resource_id"` + Reference string `json:"reference"` +} + +// ValidationReport contains the aggregate results of reference validation. +type ValidationReport struct { + Valid bool + Errors []ValidationError + Warnings []ValidationWarning + ResourceCount int + CircularDeps []CircularDependency + MissingRefs []MissingReference + DependencyGraph map[string][]string +} diff --git a/sdk/compozy/validation.go b/sdk/compozy/validation.go new file mode 100644 index 00000000..448d81a9 --- /dev/null +++ b/sdk/compozy/validation.go @@ -0,0 +1,429 @@ +package compozy + +import ( + "fmt" + "sort" + "strings" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + engineschema "github.com/compozy/compozy/engine/schema" + enginetask "github.com/compozy/compozy/engine/task" + enginetool "github.com/compozy/compozy/engine/tool" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" +) + +type dependencyGraph map[string][]string + +type validationContext struct { + report *ValidationReport + nodes map[string]struct{} + graph dependencyGraph +} + +func newValidationContext(report *ValidationReport) *validationContext { + return &validationContext{ + report: report, + nodes: make(map[string]struct{}), + graph: make(dependencyGraph), + } +} + +func (vc *validationContext) addNode(node string) { + if node == "" 
{ + return + } + vc.nodes[node] = struct{}{} +} + +func (vc *validationContext) addEdge(from string, to string) { + if from == "" || to == "" { + return + } + deps := vc.graph[from] + if !containsString(deps, to) { + vc.graph[from] = append(deps, to) + } +} + +func containsString(values []string, target string) bool { + for _, value := range values { + if value == target { + return true + } + } + return false +} + +func (vc *validationContext) registerProject(project *engineproject.Config) { + if project == nil { + return + } + name := strings.TrimSpace(project.Name) + if name == "" { + vc.report.Errors = append( + vc.report.Errors, + ValidationError{ResourceType: "project", ResourceID: "", Message: "project name is required"}, + ) + return + } + vc.addNode(projectNode(name)) +} + +func (vc *validationContext) registerWorkflows(workflows []*engineworkflow.Config) { + for _, wf := range workflows { + vc.registerWorkflow(wf) + } +} + +func (vc *validationContext) registerWorkflow(wf *engineworkflow.Config) { + if wf == nil { + return + } + id := strings.TrimSpace(wf.ID) + if id == "" { + vc.report.Errors = append( + vc.report.Errors, + ValidationError{ResourceType: "workflow", ResourceID: "", Message: "workflow id is required"}, + ) + return + } + wfNode := workflowNode(id) + vc.addNode(wfNode) + vc.registerWorkflowAgents(id, wf.Agents) + vc.registerWorkflowTools(id, wf.Tools) + vc.registerWorkflowKnowledge(id, wf.KnowledgeBases) + vc.registerWorkflowTasks(id, wfNode, wf.Tasks) +} + +func (vc *validationContext) registerWorkflowAgents(workflowID string, agents []engineagent.Config) { + for i := range agents { + agentID := strings.TrimSpace(agents[i].ID) + if agentID == "" { + vc.report.Warnings = append( + vc.report.Warnings, + ValidationWarning{ + ResourceType: "workflow", + ResourceID: workflowID, + Message: "workflow agent with empty id ignored", + }, + ) + continue + } + vc.addNode(agentNode(agentID)) + } +} + +func (vc *validationContext) 
registerWorkflowTools(workflowID string, tools []enginetool.Config) { + for i := range tools { + toolID := strings.TrimSpace(tools[i].ID) + if toolID == "" { + vc.report.Warnings = append( + vc.report.Warnings, + ValidationWarning{ + ResourceType: "workflow", + ResourceID: workflowID, + Message: "workflow tool with empty id ignored", + }, + ) + continue + } + vc.addNode(toolNode(toolID)) + } +} + +func (vc *validationContext) registerWorkflowKnowledge(workflowID string, knowledge []engineknowledge.BaseConfig) { + for i := range knowledge { + kbID := strings.TrimSpace(knowledge[i].ID) + if kbID == "" { + vc.report.Warnings = append( + vc.report.Warnings, + ValidationWarning{ + ResourceType: "workflow", + ResourceID: workflowID, + Message: "workflow knowledge base with empty id ignored", + }, + ) + continue + } + vc.addNode(knowledgeNode(kbID)) + } +} + +func (vc *validationContext) registerWorkflowTasks(workflowID string, workflowNode string, tasks []enginetask.Config) { + seen := make(map[string]struct{}) + for i := range tasks { + taskCfg := &tasks[i] + taskID := strings.TrimSpace(taskCfg.ID) + if taskID == "" { + vc.report.Errors = append( + vc.report.Errors, + ValidationError{ResourceType: "workflow", ResourceID: workflowID, Message: "task id is required"}, + ) + continue + } + if _, ok := seen[taskID]; ok { + vc.report.Errors = append( + vc.report.Errors, + ValidationError{ + ResourceType: "workflow", + ResourceID: workflowID, + Message: fmt.Sprintf("duplicate task id %s", taskID), + }, + ) + } else { + seen[taskID] = struct{}{} + } + taskNodeID := taskNode(workflowID, taskID) + vc.addNode(taskNodeID) + vc.addEdge(workflowNode, taskNodeID) + vc.registerTaskBindings(workflowID, taskID, taskCfg) + } +} + +func (vc *validationContext) registerTaskBindings(workflowID string, taskID string, taskCfg *enginetask.Config) { + if taskCfg == nil { + return + } + node := taskNode(workflowID, taskID) + if taskCfg.Agent != nil { + agentID := 
strings.TrimSpace(taskCfg.Agent.ID) + if agentID != "" { + vc.addEdge(node, agentNode(agentID)) + } + } + if taskCfg.Tool != nil { + toolID := strings.TrimSpace(taskCfg.Tool.ID) + if toolID != "" { + vc.addEdge(node, toolNode(toolID)) + } + } + if taskCfg.OnSuccess != nil && taskCfg.OnSuccess.Next != nil { + next := strings.TrimSpace(*taskCfg.OnSuccess.Next) + if next != "" { + vc.addEdge(node, taskNode(workflowID, next)) + } + } + if taskCfg.OnError != nil && taskCfg.OnError.Next != nil { + next := strings.TrimSpace(*taskCfg.OnError.Next) + if next != "" { + vc.addEdge(node, taskNode(workflowID, next)) + } + } +} + +func (vc *validationContext) registerAgents(agents []*engineagent.Config) { + registerSimpleResources(vc, "agent", agents, func(cfg *engineagent.Config) string { return cfg.ID }) +} + +func (vc *validationContext) registerTools(tools []*enginetool.Config) { + registerSimpleResources(vc, "tool", tools, func(cfg *enginetool.Config) string { return cfg.ID }) +} + +func (vc *validationContext) registerKnowledgeBases(kb []*engineknowledge.BaseConfig) { + registerSimpleResources(vc, "knowledge", kb, func(cfg *engineknowledge.BaseConfig) string { return cfg.ID }) +} + +func (vc *validationContext) registerMemories(memories []*enginememory.Config) { + registerSimpleResources(vc, "memory", memories, func(cfg *enginememory.Config) string { return cfg.ID }) +} + +func (vc *validationContext) registerMCPs(mcps []*enginemcp.Config) { + registerSimpleResources(vc, "mcp", mcps, func(cfg *enginemcp.Config) string { return cfg.ID }) +} + +func (vc *validationContext) registerSchemas(schemas []*engineschema.Schema) { + registerSimpleResources( + vc, + "schema", + schemas, + engineschema.GetID, + ) +} + +func (vc *validationContext) registerModels(models []*enginecore.ProviderConfig) { + registerSimpleResources(vc, "model", models, func(cfg *enginecore.ProviderConfig) string { + provider := strings.TrimSpace(string(cfg.Provider)) + model := strings.TrimSpace(cfg.Model) + 
if provider == "" && model == "" { + return "" + } + return provider + ":" + model + }) +} + +func (vc *validationContext) registerSchedules(schedules []*projectschedule.Config) { + registerSimpleResources(vc, "schedule", schedules, func(cfg *projectschedule.Config) string { return cfg.ID }) +} + +func (vc *validationContext) registerWebhooks(webhooks []*enginewebhook.Config) { + registerSimpleResources(vc, "webhook", webhooks, func(cfg *enginewebhook.Config) string { return cfg.Slug }) +} + +func registerSimpleResources[T any](vc *validationContext, typ string, values []*T, idFn func(*T) string) { + for _, value := range values { + if value == nil { + continue + } + id := strings.TrimSpace(idFn(value)) + if id == "" { + vc.report.Warnings = append( + vc.report.Warnings, + ValidationWarning{ResourceType: typ, ResourceID: "", Message: "resource with empty id ignored"}, + ) + continue + } + vc.addNode(fmt.Sprintf("%s:%s", typ, id)) + } +} + +func workflowNode(id string) string { + return fmt.Sprintf("workflow:%s", id) +} + +func projectNode(name string) string { + return fmt.Sprintf("project:%s", name) +} + +func agentNode(id string) string { + return fmt.Sprintf("agent:%s", id) +} + +func toolNode(id string) string { + return fmt.Sprintf("tool:%s", id) +} + +func knowledgeNode(id string) string { + return fmt.Sprintf("knowledge:%s", id) +} + +func taskNode(workflowID string, taskID string) string { + return fmt.Sprintf("task:%s/%s", workflowID, taskID) +} + +func (vc *validationContext) finalize(report *ValidationReport) { + for node, deps := range vc.graph { + sorted := append([]string(nil), deps...) 
+ sort.Strings(sorted) + report.DependencyGraph[node] = sorted + } + report.ResourceCount = len(vc.nodes) +} + +func (vc *validationContext) detectMissingReferences() { + for from, deps := range vc.graph { + for _, dep := range deps { + if _, ok := vc.nodes[dep]; ok { + continue + } + typ, id := parseNode(from) + vc.report.MissingRefs = append(vc.report.MissingRefs, MissingReference{ + ResourceType: typ, + ResourceID: id, + Reference: dep, + }) + } + } +} + +func (vc *validationContext) detectCycles() { + visited := make(map[string]bool) + stack := make(map[string]bool) + path := make([]string, 0) + var dfs func(string) + dfs = func(node string) { + visited[node] = true + stack[node] = true + path = append(path, node) + for _, dep := range vc.graph[node] { + if !visited[dep] { + dfs(dep) + } else if stack[dep] { + cycle := extractCycle(path, dep) + vc.report.CircularDeps = append(vc.report.CircularDeps, CircularDependency{Chain: cycle}) + } + } + stack[node] = false + path = path[:len(path)-1] + } + for node := range vc.graph { + if !visited[node] { + dfs(node) + } + } +} + +func extractCycle(path []string, target string) []string { + idx := -1 + for i := len(path) - 1; i >= 0; i-- { + if path[i] == target { + idx = i + break + } + } + if idx == -1 { + return append([]string(nil), target) + } + cycle := append([]string(nil), path[idx:]...) 
+ cycle = append(cycle, target) + return cycle +} + +func parseNode(node string) (string, string) { + trimmed := strings.TrimSpace(node) + parts := strings.SplitN(trimmed, ":", 2) + if len(parts) != 2 { + return strings.TrimSpace(node), "" + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) +} + +func (e *Engine) ValidateReferences() (*ValidationReport, error) { + if e == nil { + return nil, fmt.Errorf("engine is nil") + } + report := &ValidationReport{ + Valid: true, + Errors: make([]ValidationError, 0), + Warnings: make([]ValidationWarning, 0), + CircularDeps: make([]CircularDependency, 0), + MissingRefs: make([]MissingReference, 0), + DependencyGraph: make(map[string][]string), + } + vc := newValidationContext(report) + e.stateMu.RLock() + project := e.project + workflows := append([]*engineworkflow.Config(nil), e.workflows...) + agents := append([]*engineagent.Config(nil), e.agents...) + tools := append([]*enginetool.Config(nil), e.tools...) + knowledge := append([]*engineknowledge.BaseConfig(nil), e.knowledgeBases...) + memories := append([]*enginememory.Config(nil), e.memories...) + mcps := append([]*enginemcp.Config(nil), e.mcps...) + schemas := append([]*engineschema.Schema(nil), e.schemas...) + models := append([]*enginecore.ProviderConfig(nil), e.models...) + schedules := append([]*projectschedule.Config(nil), e.schedules...) + webhooks := append([]*enginewebhook.Config(nil), e.webhooks...) 
+ e.stateMu.RUnlock() + vc.registerProject(project) + vc.registerWorkflows(workflows) + vc.registerAgents(agents) + vc.registerTools(tools) + vc.registerKnowledgeBases(knowledge) + vc.registerMemories(memories) + vc.registerMCPs(mcps) + vc.registerSchemas(schemas) + vc.registerModels(models) + vc.registerSchedules(schedules) + vc.registerWebhooks(webhooks) + vc.detectMissingReferences() + vc.detectCycles() + vc.finalize(report) + report.Valid = len(report.Errors) == 0 && len(report.MissingRefs) == 0 && len(report.CircularDeps) == 0 + return report, nil +} diff --git a/sdk/compozy/validation_nodes_test.go b/sdk/compozy/validation_nodes_test.go new file mode 100644 index 00000000..a5c2c1c1 --- /dev/null +++ b/sdk/compozy/validation_nodes_test.go @@ -0,0 +1,149 @@ +package compozy + +import ( + "testing" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + engineproject "github.com/compozy/compozy/engine/project" + enginetask "github.com/compozy/compozy/engine/task" + enginetool "github.com/compozy/compozy/engine/tool" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWorkflowValidationNodes(t *testing.T) { + t.Run("Should register workflow-level resources in dependency graph", func(t *testing.T) { + engine := newWorkflowValidationEngine(t) + registerGraphWorkflow(t, engine) + report, err := engine.ValidateReferences() + require.NoError(t, err) + assert.True(t, report.Valid) + assert.Contains(t, report.DependencyGraph, "workflow:graph-workflow") + taskDeps, ok := report.DependencyGraph["task:graph-workflow/step-start"] + require.True(t, ok) + assert.Contains(t, taskDeps, "agent:task-agent") + assert.Contains(t, taskDeps, "task:graph-workflow/step-final") + finalDeps, ok := report.DependencyGraph["task:graph-workflow/step-final"] + 
require.True(t, ok) + assert.Contains(t, finalDeps, "tool:task-tool") + assert.GreaterOrEqual(t, report.ResourceCount, 4) + assert.Empty(t, report.Warnings) + }) +} + +func TestWorkflowValidationMissingReferences(t *testing.T) { + t.Run("Should report missing agent when task binding unresolved", func(t *testing.T) { + ctx := lifecycleTestContext(t) + engine, err := New( + ctx, + WithWorkflow( + &engineworkflow.Config{ + ID: "seed", + Tasks: []enginetask.Config{{BaseConfig: enginetask.BaseConfig{ID: "seed-task"}}}, + }, + ), + ) + require.NoError(t, err) + require.NoError(t, engine.RegisterProject(&engineproject.Config{Name: "graph-project"})) + next := "step-final" + workflow := &engineworkflow.Config{ + ID: "graph-workflow", + Tasks: []enginetask.Config{ + { + BaseConfig: enginetask.BaseConfig{ + ID: "step-start", + Agent: &engineagent.Config{ID: "task-agent"}, + OnSuccess: &enginecore.SuccessTransition{Next: &next}, + }, + }, + }, + } + require.NoError(t, engine.RegisterWorkflow(workflow)) + report, err := engine.ValidateReferences() + require.NoError(t, err) + assert.False(t, report.Valid) + references := make([]string, len(report.MissingRefs)) + for i := range report.MissingRefs { + references[i] = report.MissingRefs[i].Reference + } + assert.Contains(t, references, "agent:task-agent") + }) +} + +func TestWorkflowValidationWarnings(t *testing.T) { + t.Run("Should emit warnings for workflow resources with empty identifiers", func(t *testing.T) { + report := &ValidationReport{Warnings: make([]ValidationWarning, 0)} + vc := newValidationContext(report) + vc.registerWorkflowAgents("wf-empty", []engineagent.Config{{ID: " "}}) + vc.registerWorkflowTools("wf-empty", []enginetool.Config{{ID: ""}}) + vc.registerWorkflowKnowledge("wf-empty", []engineknowledge.BaseConfig{{ID: ""}}) + require.Len(t, report.Warnings, 3) + assert.Contains(t, report.Warnings[0].Message, "agent with empty id") + assert.Contains(t, report.Warnings[1].Message, "workflow tool with empty id") + 
assert.Contains(t, report.Warnings[2].Message, "knowledge base with empty id") + }) +} + +func newWorkflowValidationEngine(t *testing.T) *Engine { + t.Helper() + ctx := lifecycleTestContext(t) + engine, err := New( + ctx, + WithWorkflow( + &engineworkflow.Config{ + ID: "seed", + Tasks: []enginetask.Config{{BaseConfig: enginetask.BaseConfig{ID: "seed-task"}}}, + }, + ), + ) + require.NoError(t, err) + require.NoError(t, engine.RegisterProject(&engineproject.Config{Name: "graph-project"})) + require.NoError( + t, + engine.RegisterAgent( + &engineagent.Config{ + ID: "task-agent", + Instructions: "assist", + Model: engineagent.Model{ + Config: enginecore.ProviderConfig{ + Provider: enginecore.ProviderName("openai"), + Model: "gpt-4o-mini", + }, + }, + }, + ), + ) + require.NoError(t, engine.RegisterTool(&enginetool.Config{ID: "task-tool"})) + require.NoError(t, engine.RegisterKnowledge(&engineknowledge.BaseConfig{ID: "task-kb"})) + return engine +} + +func registerGraphWorkflow(t *testing.T, engine *Engine) { + t.Helper() + next := "step-final" + wf := &engineworkflow.Config{ + ID: "graph-workflow", + Agents: []engineagent.Config{{ID: "workflow-agent"}}, + Tools: []enginetool.Config{{ID: "workflow-tool"}}, + KnowledgeBases: []engineknowledge.BaseConfig{{ID: "workflow-kb"}}, + Tasks: []enginetask.Config{ + { + BaseConfig: enginetask.BaseConfig{ + ID: "step-start", + Agent: &engineagent.Config{ID: "task-agent"}, + OnSuccess: &enginecore.SuccessTransition{Next: &next}, + }, + }, + { + BaseConfig: enginetask.BaseConfig{ + ID: "step-final", + Tool: &enginetool.Config{ID: "task-tool"}, + }, + }, + }, + } + require.NoError(t, engine.RegisterWorkflow(wf)) +} diff --git a/sdk/compozy/validation_test.go b/sdk/compozy/validation_test.go new file mode 100644 index 00000000..841a1e78 --- /dev/null +++ b/sdk/compozy/validation_test.go @@ -0,0 +1,161 @@ +package compozy + +import ( + "testing" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore 
"github.com/compozy/compozy/engine/core" + enginemcp "github.com/compozy/compozy/engine/mcp" + enginememory "github.com/compozy/compozy/engine/memory" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + engineschema "github.com/compozy/compozy/engine/schema" + enginetask "github.com/compozy/compozy/engine/task" + enginewebhook "github.com/compozy/compozy/engine/webhook" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidateReferencesDetectsMissingTask(t *testing.T) { + t.Parallel() + t.Run("Should detect missing task references", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx} + wf := &engineworkflow.Config{ID: "missing"} + next := "ghost" + wf.Tasks = []enginetask.Config{ + { + BaseConfig: enginetask.BaseConfig{ + ID: "start", + OnSuccess: &enginecore.SuccessTransition{Next: &next}, + }, + }, + } + require.NoError(t, engine.RegisterWorkflow(wf)) + report, err := engine.ValidateReferences() + require.NoError(t, err) + assert.False(t, report.Valid) + require.NotEmpty(t, report.MissingRefs) + assert.Contains(t, report.MissingRefs[0].Reference, "ghost") + }) +} + +func TestValidateReferencesDetectsCycle(t *testing.T) { + t.Parallel() + t.Run("Should detect circular workflow references", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx} + wf := &engineworkflow.Config{ID: "cycle"} + nextB := "b" + nextA := "a" + wf.Tasks = []enginetask.Config{ + { + BaseConfig: enginetask.BaseConfig{ + ID: "a", + OnSuccess: &enginecore.SuccessTransition{Next: &nextB}, + }, + }, + { + BaseConfig: enginetask.BaseConfig{ + ID: "b", + OnSuccess: &enginecore.SuccessTransition{Next: &nextA}, + }, + }, + } + require.NoError(t, engine.RegisterWorkflow(wf)) + report, err := engine.ValidateReferences() + require.NoError(t, err) + assert.False(t, report.Valid) + 
require.NotEmpty(t, report.CircularDeps) + assert.Greater(t, len(report.CircularDeps[0].Chain), 0) + }) +} + +func TestRegisterWorkflowPreventsDuplicateIDs(t *testing.T) { + t.Parallel() + t.Run("Should reject duplicate workflow registrations", func(t *testing.T) { + t.Parallel() + ctx := lifecycleTestContext(t) + engine := &Engine{ctx: ctx} + wf := &engineworkflow.Config{ID: "dup"} + wf.Tasks = []enginetask.Config{ + { + BaseConfig: enginetask.BaseConfig{ID: "only"}, + }, + } + require.NoError(t, engine.RegisterWorkflow(wf)) + err := engine.RegisterWorkflow(wf) + require.Error(t, err) + assert.Contains(t, err.Error(), "already registered") + }) +} + +func TestRegisterSimpleResourcesWarnsOnMissingIDs(t *testing.T) { + t.Parallel() + t.Run("Should warn when simple resources miss IDs", func(t *testing.T) { + t.Parallel() + report := &ValidationReport{DependencyGraph: make(map[string][]string)} + vc := newValidationContext(report) + vc.registerAgents([]*engineagent.Config{{ID: "agent-1"}, {}}) + require.Len(t, vc.report.Warnings, 1) + assert.Contains(t, vc.report.Warnings[0].Message, "empty id") + assert.Contains(t, vc.nodes, "agent:agent-1") + }) +} + +func TestRegisterModelsSkipsEmptyEntries(t *testing.T) { + t.Parallel() + t.Run("Should warn and skip empty model registrations", func(t *testing.T) { + t.Parallel() + report := &ValidationReport{DependencyGraph: make(map[string][]string)} + vc := newValidationContext(report) + openai := enginecore.ProviderConfig{Provider: enginecore.ProviderName("openai"), Model: "gpt-4o-mini"} + vc.registerModels([]*enginecore.ProviderConfig{&openai, {}}) + assert.Contains(t, vc.nodes, "model:openai:gpt-4o-mini") + require.Len(t, vc.report.Warnings, 1) + assert.Equal(t, "model", vc.report.Warnings[0].ResourceType) + assert.Contains(t, vc.report.Warnings[0].Message, "empty id") + }) +} + +func TestRegisterAdditionalResourcesEmitWarnings(t *testing.T) { + t.Parallel() + t.Run("Should warn when registering additional resources with 
empty IDs", func(t *testing.T) { + t.Parallel() + report := &ValidationReport{DependencyGraph: make(map[string][]string)} + vc := newValidationContext(report) + schema := engineschema.Schema{"id": "schema-1", "type": "object"} + vc.registerMemories([]*enginememory.Config{{ID: "mem-1"}, {}}) + vc.registerMCPs([]*enginemcp.Config{{ID: "mcp-1"}, {}}) + vc.registerSchemas([]*engineschema.Schema{&schema, {}}) + vc.registerSchedules([]*projectschedule.Config{{ID: "schedule-1"}, {}}) + vc.registerWebhooks([]*enginewebhook.Config{{Slug: "hook-1"}, {}}) + assert.Contains(t, vc.nodes, "memory:mem-1") + assert.Contains(t, vc.nodes, "mcp:mcp-1") + assert.Contains(t, vc.nodes, "schema:schema-1") + assert.Contains(t, vc.nodes, "schedule:schedule-1") + assert.Contains(t, vc.nodes, "webhook:hook-1") + require.GreaterOrEqual(t, len(vc.report.Warnings), 5) + }) +} + +func TestValidationContextAddHelpers(t *testing.T) { + t.Parallel() + t.Run("Should guard helper operations against empty values", func(t *testing.T) { + t.Parallel() + report := &ValidationReport{DependencyGraph: make(map[string][]string)} + vc := newValidationContext(report) + vc.addNode("") + assert.Empty(t, vc.nodes) + vc.addEdge("", "target") + assert.Empty(t, vc.graph) + vc.addEdge("source", "") + assert.Empty(t, vc.graph) + vc.addNode("source") + vc.addNode("target") + vc.addEdge("source", "target") + assert.Len(t, vc.graph["source"], 1) + }) +} diff --git a/sdk/doc.go b/sdk/doc.go new file mode 100644 index 00000000..e82b404c --- /dev/null +++ b/sdk/doc.go @@ -0,0 +1,4 @@ +// Package sdk exposes the public entry points and shared documentation for the +// Compozy Go SDK. Builders and integration helpers live in subpackages under +// this module and should be imported from their domain-specific packages. 
+package sdk diff --git a/sdk/examples/00_shared.go b/sdk/examples/00_shared.go new file mode 100644 index 00000000..1cd305d4 --- /dev/null +++ b/sdk/examples/00_shared.go @@ -0,0 +1,159 @@ +package main + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + engineagent "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" + sdkagent "github.com/compozy/compozy/sdk/v2/agent" + compozy "github.com/compozy/compozy/sdk/v2/compozy" +) + +const fallbackShutdown = 5 * time.Second + +func withEngine(ctx context.Context, opts []compozy.Option, run func(context.Context, *compozy.Engine) error) error { + if len(opts) == 0 { + return fmt.Errorf("at least one engine option must be provided") + } + engine, err := compozy.New(ctx, opts...) + if err != nil { + return fmt.Errorf("create engine: %w", err) + } + report, err := engine.ValidateReferences() + if err != nil { + return fmt.Errorf("validate references: %w", err) + } + if err := ensureValid(report); err != nil { + return err + } + if err := engine.Start(ctx); err != nil { + return fmt.Errorf("start engine: %w", err) + } + defer stopEngine(ctx, engine) + return run(ctx, engine) +} + +func ensureValid(report *compozy.ValidationReport) error { + if report == nil || report.Valid { + return nil + } + issues := make([]string, 0, len(report.Errors)+len(report.MissingRefs)+len(report.CircularDeps)) + for _, item := range report.Errors { + issues = append(issues, fmt.Sprintf("error:%s:%s:%s", item.ResourceType, item.ResourceID, item.Message)) + } + for _, miss := range report.MissingRefs { + issues = append(issues, fmt.Sprintf("missing:%s:%s->%s", miss.ResourceType, miss.ResourceID, miss.Reference)) + } + for _, cycle := range report.CircularDeps { + issues = append(issues, fmt.Sprintf("cycle:%s", strings.Join(cycle.Chain, "->"))) + } + if 
len(issues) == 0 { + return fmt.Errorf("engine validation failed without diagnostics") + } + return fmt.Errorf("engine validation failed: %s", strings.Join(issues, "; ")) +} + +func stopEngine(ctx context.Context, engine *compozy.Engine) { + if engine == nil { + return + } + cfg := config.FromContext(ctx) + deadline := fallbackShutdown + if cfg != nil && cfg.Server.Timeouts.ServerShutdown > 0 { + deadline = cfg.Server.Timeouts.ServerShutdown + } + stopCtx := ctx + var cancel context.CancelFunc + if deadline > 0 { + stopCtx, cancel = context.WithTimeout(context.WithoutCancel(ctx), deadline) + } + if cancel != nil { + defer cancel() + } + if err := engine.Stop(stopCtx); err != nil { + logger.FromContext(ctx).Warn("engine stop failed", "error", err) + } +} + +func inputPtr(values map[string]any) *core.Input { + if len(values) == 0 { + return nil + } + clone := core.CopyMaps(values) + result := core.NewInput(clone) + return &result +} + +func outputPtr(values map[string]any) *core.Output { + if len(values) == 0 { + return nil + } + clone := core.CopyMaps(values) + result := core.Output(clone) + return &result +} + +func stringOutput(values map[string]any, key string) string { + if len(values) == 0 { + return "" + } + text, ok := values[key].(string) + if !ok { + return "" + } + return text +} + +func ensureOpenAIKey(ctx context.Context) (string, error) { + key := strings.TrimSpace(os.Getenv("OPENAI_API_KEY")) + if key == "" { + return "", fmt.Errorf("OPENAI_API_KEY environment variable is required") + } + if log := logger.FromContext(ctx); log != nil { + log.Debug("OPENAI_API_KEY detected") + } + return key, nil +} + +func newOpenAIModel(ctx context.Context, model string) (*engineagent.Model, error) { + key, err := ensureOpenAIKey(ctx) + if err != nil { + return nil, err + } + return &engineagent.Model{ + Config: core.ProviderConfig{Provider: core.ProviderOpenAI, Model: model, APIKey: key}, + }, nil +} + +func newAgentWithModel( + ctx context.Context, + id string, + 
instructions string, + model *engineagent.Model, + extra ...sdkagent.Option, +) (*engineagent.Config, error) { + if model == nil { + return nil, fmt.Errorf("model configuration is required") + } + options := []sdkagent.Option{ + sdkagent.WithInstructions(strings.TrimSpace(instructions)), + sdkagent.WithModel(*model), + } + options = append(options, extra...) + cfg, err := sdkagent.New(ctx, id, options...) + if err != nil { + return nil, fmt.Errorf("build agent %s: %w", id, err) + } + return cfg, nil +} + +func knowledgeSource(path string) engineknowledge.SourceConfig { + return engineknowledge.SourceConfig{Type: engineknowledge.SourceTypeMarkdownGlob, Path: path} +} diff --git a/sdk/examples/01_simple_workflow.go b/sdk/examples/01_simple_workflow.go new file mode 100644 index 00000000..5c23cd08 --- /dev/null +++ b/sdk/examples/01_simple_workflow.go @@ -0,0 +1,72 @@ +package main + +import ( + "context" + "fmt" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + enginetask "github.com/compozy/compozy/engine/task" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/pkg/logger" + sdkagent "github.com/compozy/compozy/sdk/v2/agent" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + sdktask "github.com/compozy/compozy/sdk/v2/task" + sdkworkflow "github.com/compozy/compozy/sdk/v2/workflow" +) + +// RunSimpleWorkflow executes a minimal workflow using a mock model-backed agent. 
+func RunSimpleWorkflow(ctx context.Context) error { + agentCfg, workflowCfg, err := buildSimpleWorkflow(ctx) + if err != nil { + return err + } + options := []compozy.Option{ + compozy.WithAgent(agentCfg), + compozy.WithWorkflow(workflowCfg), + } + return withEngine(ctx, options, func(execCtx context.Context, engine *compozy.Engine) error { + request := &compozy.ExecuteSyncRequest{Input: map[string]any{"name": "Casey"}} + response, err := engine.ExecuteWorkflowSync(execCtx, workflowCfg.ID, request) + if err != nil { + return fmt.Errorf("execute workflow: %w", err) + } + message := stringOutput(response.Output, "greeting") + if message == "" { + message = stringOutput(response.Output, enginecore.OutputRootKey) + } + fmt.Printf("Simple workflow greeting: %s\n", message) + logger.FromContext(execCtx).Info("simple workflow completed", "greeting", message) + return nil + }) +} + +func buildSimpleWorkflow(ctx context.Context) (*engineagent.Config, *engineworkflow.Config, error) { + model := &engineagent.Model{ + Config: enginecore.ProviderConfig{Provider: enginecore.ProviderMock, Model: "mock-greeting"}, + } + agentCfg, err := newAgentWithModel(ctx, "friendly-assistant", "You create short, warm greetings.", model, + sdkagent.WithMaxIterations(1), + ) + if err != nil { + return nil, nil, err + } + taskCfg, err := sdktask.New(ctx, "compose-greeting", + sdktask.WithAgent(agentCfg), + sdktask.WithPrompt("Greet {{ .workflow.input.name }} in a single cheerful sentence."), + sdktask.WithOutputs(inputPtr(map[string]any{"message": "{{ .task.output }}"})), + sdktask.WithFinal(true), + ) + if err != nil { + return nil, nil, fmt.Errorf("create task: %w", err) + } + workflowCfg, err := sdkworkflow.New(ctx, "simple-workflow", + sdkworkflow.WithDescription("Greets a user with a friendly message"), + sdkworkflow.WithTasks([]enginetask.Config{*taskCfg}), + sdkworkflow.WithOutputs(outputPtr(map[string]any{"greeting": "{{ .tasks.compose-greeting.output.message }}"})), + ) + if err != nil 
{ + return nil, nil, fmt.Errorf("create workflow: %w", err) + } + return agentCfg, workflowCfg, nil +} diff --git a/sdk/examples/02_parallel_tasks.go b/sdk/examples/02_parallel_tasks.go new file mode 100644 index 00000000..cdb13535 --- /dev/null +++ b/sdk/examples/02_parallel_tasks.go @@ -0,0 +1,112 @@ +package main + +import ( + "context" + "fmt" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + enginetask "github.com/compozy/compozy/engine/task" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/pkg/logger" + sdkagent "github.com/compozy/compozy/sdk/v2/agent" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + sdktask "github.com/compozy/compozy/sdk/v2/task" + sdkworkflow "github.com/compozy/compozy/sdk/v2/workflow" +) + +// RunParallelTasks demonstrates fan-out/fan-in execution with parallel tasks and aggregation. +func RunParallelTasks(ctx context.Context) error { + agentCfg, workflowCfg, err := buildParallelWorkflow(ctx) + if err != nil { + return err + } + options := []compozy.Option{ + compozy.WithAgent(agentCfg), + compozy.WithWorkflow(workflowCfg), + } + return withEngine(ctx, options, func(execCtx context.Context, engine *compozy.Engine) error { + input := map[string]any{"project": "Helios"} + resp, err := engine.ExecuteWorkflowSync(execCtx, workflowCfg.ID, &compozy.ExecuteSyncRequest{Input: input}) + if err != nil { + return fmt.Errorf("execute workflow: %w", err) + } + summary := stringOutput(resp.Output, "summary") + fmt.Printf("Parallel execution summary:\n%s\n", summary) + logger.FromContext(execCtx).Info("parallel workflow completed", "summary", summary) + return nil + }) +} + +func buildParallelWorkflow(ctx context.Context) (*engineagent.Config, *engineworkflow.Config, error) { + model := &engineagent.Model{ + Config: enginecore.ProviderConfig{Provider: enginecore.ProviderMock, Model: "mock-updates"}, + } + agentCfg, err := 
newAgentWithModel(ctx, "status-reporter", "Summarize team progress in one sentence.", model, + sdkagent.WithMaxIterations(1), + ) + if err != nil { + return nil, nil, err + } + branches, err := buildBranches(ctx, agentCfg) + if err != nil { + return nil, nil, err + } + next := "summarize-updates" + parallelCfg, err := sdktask.NewParallel(ctx, "collect-updates", branches, + sdktask.WithStrategy(enginetask.StrategyWaitAll), + sdktask.WithOnSuccess(&enginecore.SuccessTransition{Next: &next}), + ) + if err != nil { + return nil, nil, fmt.Errorf("create parallel task: %w", err) + } + summaryCfg, err := sdktask.New( + ctx, + next, + sdktask.WithAgent(agentCfg), + sdktask.WithPrompt( + "Project {{ .workflow.input.project }} is underway. Draft a concise summary using these team updates:\n"+ + "- Research: {{ .tasks.research-update.output.status }}\n"+ + "- Product: {{ .tasks.product-update.output.status }}\n"+ + "- Support: {{ .tasks.support-update.output.status }}", + ), + sdktask.WithOutputs(inputPtr(map[string]any{"summary": "{{ .task.output }}"})), + sdktask.WithFinal(true), + ) + if err != nil { + return nil, nil, fmt.Errorf("create summary task: %w", err) + } + workflowCfg, err := sdkworkflow.New(ctx, "parallel-tasks", + sdkworkflow.WithDescription("Runs three updates in parallel and aggregates results"), + sdkworkflow.WithTasks([]enginetask.Config{*parallelCfg, *summaryCfg}), + sdkworkflow.WithOutputs(outputPtr(map[string]any{"summary": "{{ .tasks.summarize-updates.output.summary }}"})), + ) + if err != nil { + return nil, nil, fmt.Errorf("create workflow: %w", err) + } + return agentCfg, workflowCfg, nil +} + +func buildBranches(ctx context.Context, agentCfg *engineagent.Config) ([]enginetask.Config, error) { + definitions := []struct { + id string + prompt string + }{ + {"research-update", "Provide a research milestone update for project {{ .workflow.input.project }}."}, + {"product-update", "Summarize product development progress for project {{ 
.workflow.input.project }}."}, + {"support-update", "Report a customer support highlight for project {{ .workflow.input.project }}."}, + } + branches := make([]enginetask.Config, 0, len(definitions)) + for _, spec := range definitions { + taskCfg, err := sdktask.New(ctx, spec.id, + sdktask.WithAgent(agentCfg), + sdktask.WithPrompt(spec.prompt), + sdktask.WithOutputs(inputPtr(map[string]any{"status": "{{ .task.output }}"})), + ) + if err != nil { + return nil, fmt.Errorf("create branch %s: %w", spec.id, err) + } + branches = append(branches, *taskCfg) + } + return branches, nil +} diff --git a/sdk/examples/03_knowledge_rag.go b/sdk/examples/03_knowledge_rag.go new file mode 100644 index 00000000..239e61f5 --- /dev/null +++ b/sdk/examples/03_knowledge_rag.go @@ -0,0 +1,222 @@ +package main + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + engineproject "github.com/compozy/compozy/engine/project" + enginetask "github.com/compozy/compozy/engine/task" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/pkg/logger" + sdkagent "github.com/compozy/compozy/sdk/v2/agent" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + sdkknowledge "github.com/compozy/compozy/sdk/v2/knowledge" + sdkproject "github.com/compozy/compozy/sdk/v2/project" + sdktask "github.com/compozy/compozy/sdk/v2/task" + sdkworkflow "github.com/compozy/compozy/sdk/v2/workflow" +) + +// RunKnowledgeRag ingests markdown notes into a knowledge base and answers a question using retrieval. 
+func RunKnowledgeRag(ctx context.Context) error { + _, err := ensureOpenAIKey(ctx) + if err != nil { + return err + } + dir, err := writeKnowledgeDocuments() + if err != nil { + return err + } + defer func() { + _ = os.RemoveAll(dir) + }() + options, workflowID, err := buildKnowledgeOptions(ctx, dir) + if err != nil { + return err + } + return withEngine(ctx, options, func(execCtx context.Context, engine *compozy.Engine) error { + question := "What are the two pillars of the Helios launch plan?" + req := &compozy.ExecuteSyncRequest{Input: map[string]any{"question": question}} + resp, err := engine.ExecuteWorkflowSync(execCtx, workflowID, req) + if err != nil { + return fmt.Errorf("execute workflow: %w", err) + } + answer := stringOutput(resp.Output, "answer") + fmt.Printf("Knowledge grounded answer:\n%s\n", strings.TrimSpace(answer)) + logger.FromContext(execCtx).Info("knowledge workflow completed", "answer", answer) + return nil + }) +} + +func buildKnowledgeOptions(ctx context.Context, dir string) ([]compozy.Option, string, error) { + key, err := ensureOpenAIKey(ctx) + if err != nil { + return nil, "", err + } + embedderCfg, vectorCfg, kbCfg, err := createKnowledgeDefinitions(ctx, dir, key) + if err != nil { + return nil, "", err + } + agentCfg, workflowCfg, err := createKnowledgeWorkflow(ctx, kbCfg) + if err != nil { + return nil, "", err + } + projectCfg, err := createKnowledgeProject(ctx, workflowCfg, embedderCfg, vectorCfg, kbCfg) + if err != nil { + return nil, "", err + } + options := []compozy.Option{ + compozy.WithProject(projectCfg), + compozy.WithAgent(agentCfg), + compozy.WithKnowledge(kbCfg), + compozy.WithWorkflow(workflowCfg), + } + return options, workflowCfg.ID, nil +} + +func createKnowledgeDefinitions( + ctx context.Context, + dir string, + key string, +) (*engineknowledge.EmbedderConfig, *engineknowledge.VectorDBConfig, *engineknowledge.BaseConfig, error) { + embedderCfg, err := sdkknowledge.NewEmbedder(ctx, "openai-notes", "openai", 
"text-embedding-3-small", + sdkknowledge.WithAPIKey(key), + sdkknowledge.WithDimension(1536), + ) + if err != nil { + return nil, nil, nil, fmt.Errorf("create embedder: %w", err) + } + vectorCfg, err := sdkknowledge.NewVectorDB(ctx, "notes-vector", string(engineknowledge.VectorDBTypeFilesystem), + sdkknowledge.WithPath(filepath.Join(dir, "notes.db")), + sdkknowledge.WithVectorDBDimension(1536), + ) + if err != nil { + return nil, nil, nil, fmt.Errorf("create vector db: %w", err) + } + overlap := 48 + kbCfg, err := sdkknowledge.NewBase( + ctx, + "launch-notes", + sdkknowledge.WithEmbedder(embedderCfg.ID), + sdkknowledge.WithVectorDB(vectorCfg.ID), + sdkknowledge.WithIngest(engineknowledge.IngestOnStart), + sdkknowledge.WithChunking( + engineknowledge.ChunkingConfig{ + Strategy: engineknowledge.ChunkStrategyRecursiveTextSplitter, + Size: 400, + Overlap: &overlap, + }, + ), + sdkknowledge.WithSources([]engineknowledge.SourceConfig{knowledgeSource(filepath.Join(dir, "*.md"))}), + ) + if err != nil { + return nil, nil, nil, fmt.Errorf("create knowledge base: %w", err) + } + return embedderCfg, vectorCfg, kbCfg, nil +} + +func createKnowledgeWorkflow( + ctx context.Context, + kbCfg *engineknowledge.BaseConfig, +) (*engineagent.Config, *engineworkflow.Config, error) { + model, err := newOpenAIModel(ctx, "gpt-4o-mini") + if err != nil { + return nil, nil, err + } + binding := enginecore.KnowledgeBinding{ID: kbCfg.ID, TopK: intPtr(3), MinScore: floatPtr(0.15)} + agentCfg, err := newAgentWithModel( + ctx, + "knowledge-expert", + "Answer using only supplied documents and cite sections by title.", + model, + sdkagent.WithKnowledge([]enginecore.KnowledgeBinding{binding}), + ) + if err != nil { + return nil, nil, err + } + taskCfg, err := sdktask.New( + ctx, + "answer-question", + sdktask.WithAgent(agentCfg), + sdktask.WithPrompt( + "You are reviewing internal launch notes. Using the retrieved context, answer: {{ .workflow.input.question }}. 
"+ + "Cite section names in parentheses.", + ), + sdktask.WithOutputs(inputPtr(map[string]any{"answer": "{{ .task.output }}"})), + sdktask.WithFinal(true), + ) + if err != nil { + return nil, nil, fmt.Errorf("create task: %w", err) + } + workflowCfg, err := sdkworkflow.New(ctx, "knowledge-rag", + sdkworkflow.WithDescription("Retrieves launch documentation and answers a question"), + sdkworkflow.WithTasks([]enginetask.Config{*taskCfg}), + sdkworkflow.WithOutputs(outputPtr(map[string]any{"answer": "{{ .tasks.answer-question.output.answer }}"})), + ) + if err != nil { + return nil, nil, fmt.Errorf("create workflow: %w", err) + } + return agentCfg, workflowCfg, nil +} + +func createKnowledgeProject( + ctx context.Context, + workflowCfg *engineworkflow.Config, + embedderCfg *engineknowledge.EmbedderConfig, + vectorCfg *engineknowledge.VectorDBConfig, + kbCfg *engineknowledge.BaseConfig, +) (*engineproject.Config, error) { + projectCfg, err := sdkproject.New(ctx, "knowledge-demo", + sdkproject.WithWorkflows([]*engineproject.WorkflowSourceConfig{{Source: workflowCfg.ID}}), + sdkproject.WithEmbedders([]engineknowledge.EmbedderConfig{*embedderCfg}), + sdkproject.WithVectorDBs([]engineknowledge.VectorDBConfig{*vectorCfg}), + sdkproject.WithKnowledgeBases([]engineknowledge.BaseConfig{*kbCfg}), + ) + if err != nil { + return nil, fmt.Errorf("create project: %w", err) + } + return projectCfg, nil +} + +func writeKnowledgeDocuments() (string, error) { + dir, err := os.MkdirTemp("", "compozy-notes-") + if err != nil { + return "", fmt.Errorf("create temp dir: %w", err) + } + content := strings.TrimSpace(`# Helios Launch Brief + +## Strategy Pillars + +1. Customer immersion weeks to gather qualitative insight. +2. Progressive rollout that starts with design partners before public launch. + +## Rollout Timeline + +- **July**: finalize onboarding scripts and training docs. +- **August**: private preview with five enterprise design partners. 
+- **September**: GA announcement with coordinated marketing campaign. + +## Success Criteria + +- Net Promoter Score above 45 from preview customers. +- Onboarding time under 30 minutes for new tenant. +`) + filePath := filepath.Join(dir, "helios.md") + if err := os.WriteFile(filePath, []byte(content), 0o600); err != nil { + return "", fmt.Errorf("write knowledge document: %w", err) + } + return dir, nil +} + +func intPtr(v int) *int { + return &v +} + +func floatPtr(v float64) *float64 { + return &v +} diff --git a/sdk/examples/04_memory_conversation.go b/sdk/examples/04_memory_conversation.go new file mode 100644 index 00000000..028efd35 --- /dev/null +++ b/sdk/examples/04_memory_conversation.go @@ -0,0 +1,119 @@ +package main + +import ( + "context" + "fmt" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + enginememory "github.com/compozy/compozy/engine/memory" + memcore "github.com/compozy/compozy/engine/memory/core" + enginetask "github.com/compozy/compozy/engine/task" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/pkg/logger" + sdkagent "github.com/compozy/compozy/sdk/v2/agent" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + sdkmemory "github.com/compozy/compozy/sdk/v2/memory" + sdktask "github.com/compozy/compozy/sdk/v2/task" + sdkworkflow "github.com/compozy/compozy/sdk/v2/workflow" +) + +// RunMemoryConversation demonstrates shared memory usage across sequential agent turns. 
+func RunMemoryConversation(ctx context.Context) error { + memoryCfg, agentCfg, workflowCfg, err := buildMemoryConversation(ctx) + if err != nil { + return err + } + options := []compozy.Option{ + compozy.WithMemory(memoryCfg), + compozy.WithAgent(agentCfg), + compozy.WithWorkflow(workflowCfg), + } + return withEngine(ctx, options, func(execCtx context.Context, engine *compozy.Engine) error { + input := map[string]any{ + "session_id": "alpha", + "first_message": "I joined Helios yesterday, any onboarding tips?", + "second_message": "Thanks! How should I prepare for the design partner sync?", + } + req := &compozy.ExecuteSyncRequest{Input: input} + resp, err := engine.ExecuteWorkflowSync(execCtx, workflowCfg.ID, req) + if err != nil { + return fmt.Errorf("execute workflow: %w", err) + } + firstReply := stringOutput(resp.Output, "opening_reply") + followupReply := stringOutput(resp.Output, "followup_reply") + fmt.Println("Conversation summary:") + fmt.Printf("- Assistant (turn 1): %s\n", firstReply) + fmt.Printf("- Assistant (turn 2): %s\n", followupReply) + logger.FromContext(execCtx).Info("memory workflow completed", "turn1", firstReply, "turn2", followupReply) + return nil + }) +} + +func buildMemoryConversation( + ctx context.Context, +) (*enginememory.Config, *engineagent.Config, *engineworkflow.Config, error) { + memoryCfg, err := sdkmemory.New(ctx, "conversation-store", "message_count_based", + sdkmemory.WithPersistence(memcore.PersistenceConfig{Type: memcore.InMemoryPersistence, TTL: "30m"}), + sdkmemory.WithMaxMessages(20), + ) + if err != nil { + return nil, nil, nil, fmt.Errorf("create memory config: %w", err) + } + agentModel := &engineagent.Model{ + Config: enginecore.ProviderConfig{Provider: enginecore.ProviderMock, Model: "mock-conversation"}, + } + memRef := enginecore.MemoryReference{ + ID: memoryCfg.ID, + Mode: enginecore.MemoryModeReadWrite, + Key: "session:{{ .workflow.input.session_id }}", + } + agentCfg, err := newAgentWithModel( + ctx, + 
"conversation-guide", + "Maintain warm, contextual dialog across turns.", + agentModel, + sdkagent.WithMemory([]enginecore.MemoryReference{memRef}), + ) + if err != nil { + return nil, nil, nil, err + } + openingTask, err := sdktask.New( + ctx, + "opening-turn", + sdktask.WithAgent(agentCfg), + sdktask.WithPrompt( + "User: {{ .workflow.input.first_message }}. Respond with a concise welcome and a practical suggestion.", + ), + sdktask.WithOutputs(inputPtr(map[string]any{"reply": "{{ .task.output }}"})), + ) + if err != nil { + return nil, nil, nil, fmt.Errorf("create opening task: %w", err) + } + followTask, err := sdktask.New( + ctx, + "followup-turn", + sdktask.WithAgent(agentCfg), + sdktask.WithPrompt( + "Review prior exchange stored in memory. Earlier you said '{{ .tasks.opening-turn.output.reply }}'. "+ + "Now the user adds '{{ .workflow.input.second_message }}'. Continue the conversation building on prior advice.", + ), + sdktask.WithOutputs(inputPtr(map[string]any{"reply": "{{ .task.output }}"})), + sdktask.WithFinal(true), + ) + if err != nil { + return nil, nil, nil, fmt.Errorf("create follow-up task: %w", err) + } + workflowCfg, err := sdkworkflow.New(ctx, "memory-conversation", + sdkworkflow.WithDescription("Shows multi-turn conversation with shared memory state"), + sdkworkflow.WithTasks([]enginetask.Config{*openingTask, *followTask}), + sdkworkflow.WithOutputs(outputPtr(map[string]any{ + "opening_reply": "{{ .tasks.opening-turn.output.reply }}", + "followup_reply": "{{ .tasks.followup-turn.output.reply }}", + })), + ) + if err != nil { + return nil, nil, nil, fmt.Errorf("create workflow: %w", err) + } + return memoryCfg, agentCfg, workflowCfg, nil +} diff --git a/sdk/examples/05_runtime_native_tools.go b/sdk/examples/05_runtime_native_tools.go new file mode 100644 index 00000000..1bc48627 --- /dev/null +++ b/sdk/examples/05_runtime_native_tools.go @@ -0,0 +1,115 @@ +package main + +import ( + "context" + "fmt" + "time" + + enginetask 
"github.com/compozy/compozy/engine/task" + enginetool "github.com/compozy/compozy/engine/tool" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/pkg/logger" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + sdktask "github.com/compozy/compozy/sdk/v2/task" + sdktool "github.com/compozy/compozy/sdk/v2/tool" + sdkworkflow "github.com/compozy/compozy/sdk/v2/workflow" +) + +// RunRuntimeNativeTools registers a native Go tool and an inline Bun script and executes them together. +func RunRuntimeNativeTools(ctx context.Context) error { + nativeTool, inlineTool, err := buildTools(ctx) + if err != nil { + return err + } + workflowCfg, err := buildToolWorkflow(ctx) + if err != nil { + return err + } + options := []compozy.Option{ + compozy.WithTool(nativeTool), + compozy.WithTool(inlineTool), + compozy.WithWorkflow(workflowCfg), + } + return withEngine(ctx, options, func(execCtx context.Context, engine *compozy.Engine) error { + input := map[string]any{"name": "Avery"} + req := &compozy.ExecuteSyncRequest{Input: input} + resp, err := engine.ExecuteWorkflowSync(execCtx, workflowCfg.ID, req) + if err != nil { + return fmt.Errorf("execute workflow: %w", err) + } + timestamp := stringOutput(resp.Output, "generated_at") + message := stringOutput(resp.Output, "message") + fmt.Printf("Hybrid tool run at %s produced message:\n%s\n", timestamp, message) + logger.FromContext(execCtx).Info("tool workflow completed", "timestamp", timestamp) + return nil + }) +} + +func buildTools(ctx context.Context) (*enginetool.Config, *enginetool.Config, error) { + native, err := sdktool.New( + ctx, + "timestamp-native", + sdktool.WithName("Timestamp (Native)"), + sdktool.WithDescription("Returns the current UTC timestamp"), + sdktool.WithNativeHandler( + func(_ context.Context, input map[string]any, cfg map[string]any) (map[string]any, error) { + now := time.Now().UTC().Format(time.RFC3339Nano) + return map[string]any{"timestamp": now, "config": cfg, 
"input": input}, nil + }, + ), + ) + if err != nil { + return nil, nil, fmt.Errorf("create native tool: %w", err) + } + inlineCode := "export default async function(input) {\n" + + " const name = input?.name ?? \"friend\";\n" + + " const timestamp = input?.timestamp ?? \"unknown time\";\n" + + " return { message: `Hello ${name}! This workflow ran at ${timestamp}.` };\n" + + "}\n" + inline, err := sdktool.New( + ctx, + "greet-inline", + sdktool.WithName("Inline Greeter"), + sdktool.WithDescription("Formats a friendly greeting inside the Bun runtime"), + sdktool.WithRuntime("bun"), + sdktool.WithCode(inlineCode), + ) + if err != nil { + return nil, nil, fmt.Errorf("create inline tool: %w", err) + } + return native, inline, nil +} + +func buildToolWorkflow(ctx context.Context) (*engineworkflow.Config, error) { + stampTask, err := sdktask.New(ctx, "capture-timestamp", + sdktask.WithTool(&enginetool.Config{ID: "timestamp-native"}), + sdktask.WithOutputs(inputPtr(map[string]any{"timestamp": "{{ .task.output.timestamp }}"})), + ) + if err != nil { + return nil, fmt.Errorf("create timestamp task: %w", err) + } + greetTask, err := sdktask.New(ctx, "compose-greeting", + sdktask.WithTool(&enginetool.Config{ID: "greet-inline"}), + sdktask.WithWith(inputPtr(map[string]any{ + "name": "{{ .workflow.input.name }}", + "timestamp": "{{ .tasks.capture-timestamp.output.timestamp }}", + })), + sdktask.WithOutputs(inputPtr(map[string]any{"message": "{{ .task.output.message }}"})), + sdktask.WithFinal(true), + ) + if err != nil { + return nil, fmt.Errorf("create greeting task: %w", err) + } + workflowCfg, err := sdkworkflow.New(ctx, "runtime-native-tools", + sdkworkflow.WithDescription("Combines native Go and Bun inline tools"), + sdkworkflow.WithTasks([]enginetask.Config{*stampTask, *greetTask}), + sdkworkflow.WithOutputs(outputPtr(map[string]any{ + "generated_at": "{{ .tasks.capture-timestamp.output.timestamp }}", + "message": "{{ .tasks.compose-greeting.output.message }}", + })), + ) 
+ if err != nil { + return nil, fmt.Errorf("create tool workflow: %w", err) + } + return workflowCfg, nil +} diff --git a/sdk/examples/06_scheduled_workflow.go b/sdk/examples/06_scheduled_workflow.go new file mode 100644 index 00000000..f945cd4e --- /dev/null +++ b/sdk/examples/06_scheduled_workflow.go @@ -0,0 +1,100 @@ +package main + +import ( + "context" + "fmt" + "time" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + enginetask "github.com/compozy/compozy/engine/task" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/pkg/logger" + sdkagent "github.com/compozy/compozy/sdk/v2/agent" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + sdktask "github.com/compozy/compozy/sdk/v2/task" + sdkworkflow "github.com/compozy/compozy/sdk/v2/workflow" +) + +// RunScheduledWorkflow simulates a schedule tick and executes the registered workflow once. 
+func RunScheduledWorkflow(ctx context.Context) error { + agentCfg, workflowCfg, err := buildScheduledWorkflow(ctx) + if err != nil { + return err + } + tickTime := time.Now().UTC().Format(time.RFC3339Nano) + scheduleCfg := &projectschedule.Config{ + ID: "demo-schedule", + WorkflowID: workflowCfg.ID, + Cron: "*/5 * * * *", + Description: "Runs every five minutes; this example triggers it manually once.", + Input: map[string]any{"report": "morning", "tick_time": tickTime}, + } + options := []compozy.Option{ + compozy.WithAgent(agentCfg), + compozy.WithWorkflow(workflowCfg), + compozy.WithSchedule(scheduleCfg), + } + return withEngine(ctx, options, func(execCtx context.Context, engine *compozy.Engine) error { + log := logger.FromContext(execCtx) + log.Info("simulating first schedule tick", "schedule", scheduleCfg.ID) + resp, err := engine.ExecuteWorkflowSync( + execCtx, + workflowCfg.ID, + &compozy.ExecuteSyncRequest{Input: scheduleCfg.Input}, + ) + if err != nil { + return fmt.Errorf("execute scheduled workflow: %w", err) + } + report := stringOutput(resp.Output, "summary") + fmt.Printf("Schedule tick at %s generated summary:\n%s\n", tickTime, report) + log.Info("schedule run completed", "summary", report) + return nil + }) +} + +func buildScheduledWorkflow(ctx context.Context) (*engineagent.Config, *engineworkflow.Config, error) { + agentModel := &engineagent.Model{ + Config: enginecore.ProviderConfig{Provider: enginecore.ProviderMock, Model: "mock-schedule"}, + } + agentCfg, err := newAgentWithModel(ctx, "status-writer", "Produce short operational updates.", agentModel, + sdkagent.WithMaxIterations(1), + ) + if err != nil { + return nil, nil, err + } + statusTask, err := sdktask.New(ctx, "generate-status", + sdktask.WithAgent(agentCfg), + sdktask.WithPrompt("Create a one-line status update for the {{ .workflow.input.report }} shift."), + sdktask.WithOutputs(inputPtr(map[string]any{"note": "{{ .task.output }}"})), + ) + if err != nil { + return nil, nil, 
fmt.Errorf("create status task: %w", err) + } + summaryTask, err := sdktask.New( + ctx, + "wrap-up", + sdktask.WithAgent(agentCfg), + sdktask.WithPrompt("Compose a handoff summary referencing '{{ .tasks.generate-status.output.note }}'."), + sdktask.WithOutputs( + inputPtr(map[string]any{"summary": "{{ .task.output }}", "timestamp": "{{ .workflow.input.tick_time }}"}), + ), + sdktask.WithFinal(true), + ) + if err != nil { + return nil, nil, fmt.Errorf("create summary task: %w", err) + } + workflowCfg, err := sdkworkflow.New(ctx, "scheduled-workflow", + sdkworkflow.WithDescription("Produces a shift summary for the schedule tick"), + sdkworkflow.WithTasks([]enginetask.Config{*statusTask, *summaryTask}), + sdkworkflow.WithOutputs(outputPtr(map[string]any{ + "summary": "{{ .tasks.wrap-up.output.summary }}", + "generated_at": "{{ .tasks.wrap-up.output.timestamp }}", + })), + ) + if err != nil { + return nil, nil, fmt.Errorf("create schedule workflow: %w", err) + } + return agentCfg, workflowCfg, nil +} diff --git a/sdk/examples/07_signal_communication.go b/sdk/examples/07_signal_communication.go new file mode 100644 index 00000000..e3bce5f0 --- /dev/null +++ b/sdk/examples/07_signal_communication.go @@ -0,0 +1,76 @@ +package main + +import ( + "context" + "fmt" + + enginecore "github.com/compozy/compozy/engine/core" + enginetask "github.com/compozy/compozy/engine/task" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/pkg/logger" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + sdktask "github.com/compozy/compozy/sdk/v2/task" + sdkworkflow "github.com/compozy/compozy/sdk/v2/workflow" +) + +// RunSignalCommunication demonstrates signal emission and wait coordination within a workflow. 
+func RunSignalCommunication(ctx context.Context) error { + workflowCfg, err := buildSignalWorkflow(ctx) + if err != nil { + return err + } + options := []compozy.Option{compozy.WithWorkflow(workflowCfg)} + return withEngine(ctx, options, func(execCtx context.Context, engine *compozy.Engine) error { + input := map[string]any{"request_id": "42", "message": "Data sync completed"} + req := &compozy.ExecuteSyncRequest{Input: input} + resp, err := engine.ExecuteWorkflowSync(execCtx, workflowCfg.ID, req) + if err != nil { + return fmt.Errorf("execute signal workflow: %w", err) + } + received := stringOutput(resp.Output, "acknowledged") + fmt.Printf("Wait task received payload: %s\n", received) + logger.FromContext(execCtx).Info("signal workflow completed", "payload", received) + return nil + }) +} + +func buildSignalWorkflow(ctx context.Context) (*engineworkflow.Config, error) { + next := "await-signal" + signalTask, err := sdktask.New(ctx, "publish-signal", + sdktask.WithType(enginetask.TaskTypeSignal), + sdktask.WithSignal(&enginetask.SignalConfig{ + ID: "request-{{ .workflow.input.request_id }}-done", + Payload: map[string]any{ + "message": "{{ .workflow.input.message }}", + "request_id": "{{ .workflow.input.request_id }}", + }, + }), + sdktask.WithOnSuccess(&enginecore.SuccessTransition{Next: &next}), + ) + if err != nil { + return nil, fmt.Errorf("create signal task: %w", err) + } + waitTask, err := sdktask.New(ctx, next, + sdktask.WithType(enginetask.TaskTypeWait), + sdktask.WithWaitFor("request-{{ .workflow.input.request_id }}-done"), + sdktask.WithTimeout("10s"), + sdktask.WithOutputs(inputPtr(map[string]any{"acknowledged": "{{ signal.payload.message }}"})), + sdktask.WithFinal(true), + ) + if err != nil { + return nil, fmt.Errorf("create wait task: %w", err) + } + workflowCfg, err := sdkworkflow.New( + ctx, + "signal-communication", + sdkworkflow.WithDescription("Uses signal and wait tasks to hand off payload"), + 
sdkworkflow.WithTasks([]enginetask.Config{*signalTask, *waitTask}), + sdkworkflow.WithOutputs( + outputPtr(map[string]any{"acknowledged": "{{ .tasks.await-signal.output.acknowledged }}"}), + ), + ) + if err != nil { + return nil, fmt.Errorf("create signal workflow: %w", err) + } + return workflowCfg, nil +} diff --git a/sdk/examples/08_model_routing.go b/sdk/examples/08_model_routing.go new file mode 100644 index 00000000..38b6fbe3 --- /dev/null +++ b/sdk/examples/08_model_routing.go @@ -0,0 +1,86 @@ +package main + +import ( + "context" + "fmt" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + enginetask "github.com/compozy/compozy/engine/task" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/pkg/logger" + sdkagent "github.com/compozy/compozy/sdk/v2/agent" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + sdktask "github.com/compozy/compozy/sdk/v2/task" + sdkworkflow "github.com/compozy/compozy/sdk/v2/workflow" +) + +// RunModelRouting shows how per-task model overrides change execution strategy. 
+func RunModelRouting(ctx context.Context) error { + agentCfg, workflowCfg, err := buildModelRoutingWorkflow(ctx) + if err != nil { + return err + } + options := []compozy.Option{ + compozy.WithAgent(agentCfg), + compozy.WithWorkflow(workflowCfg), + } + return withEngine(ctx, options, func(execCtx context.Context, engine *compozy.Engine) error { + resp, err := engine.ExecuteWorkflowSync(execCtx, workflowCfg.ID, &compozy.ExecuteSyncRequest{}) + if err != nil { + return fmt.Errorf("execute routing workflow: %w", err) + } + primary := stringOutput(resp.Output, "primary_model") + secondary := stringOutput(resp.Output, "secondary_model") + fmt.Printf("Primary task used: %s\nFallback task used: %s\n", primary, secondary) + logger.FromContext(execCtx).Info("model routing completed", "primary", primary, "secondary", secondary) + return nil + }) +} + +func buildModelRoutingWorkflow(ctx context.Context) (*engineagent.Config, *engineworkflow.Config, error) { + defaultModel := &engineagent.Model{ + Config: enginecore.ProviderConfig{Provider: enginecore.ProviderMock, Model: "mock-primary"}, + } + agentCfg, err := newAgentWithModel( + ctx, + "router-agent", + "Mention the supplied model name in your reply.", + defaultModel, + sdkagent.WithMaxIterations(1), + ) + if err != nil { + return nil, nil, err + } + primaryTask, err := sdktask.New(ctx, "primary-route", + sdktask.WithAgent(agentCfg), + sdktask.WithPrompt("You are using model mock-primary. Reply with 'Primary via mock-primary'."), + sdktask.WithOutputs(inputPtr(map[string]any{"model_used": "{{ .task.output }}"})), + ) + if err != nil { + return nil, nil, fmt.Errorf("create primary task: %w", err) + } + secondaryModel := enginecore.ProviderConfig{Provider: enginecore.ProviderMock, Model: "mock-secondary"} + secondaryTask, err := sdktask.New(ctx, "secondary-route", + sdktask.WithAgent(agentCfg), + sdktask.WithModelConfig(secondaryModel), + sdktask.WithPrompt("You are using model mock-secondary. 
Reply with 'Fallback via mock-secondary'."), + sdktask.WithOutputs(inputPtr(map[string]any{"model_used": "{{ .task.output }}"})), + sdktask.WithFinal(true), + ) + if err != nil { + return nil, nil, fmt.Errorf("create secondary task: %w", err) + } + workflowCfg, err := sdkworkflow.New(ctx, "model-routing", + sdkworkflow.WithDescription("Demonstrates per-task model overrides"), + sdkworkflow.WithTasks([]enginetask.Config{*primaryTask, *secondaryTask}), + sdkworkflow.WithOutputs(outputPtr(map[string]any{ + "primary_model": "{{ .tasks.primary-route.output.model_used }}", + "secondary_model": "{{ .tasks.secondary-route.output.model_used }}", + })), + ) + if err != nil { + return nil, nil, fmt.Errorf("create routing workflow: %w", err) + } + return agentCfg, workflowCfg, nil +} diff --git a/sdk/examples/09_debugging_tracing.go b/sdk/examples/09_debugging_tracing.go new file mode 100644 index 00000000..b968fc97 --- /dev/null +++ b/sdk/examples/09_debugging_tracing.go @@ -0,0 +1,81 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + enginetask "github.com/compozy/compozy/engine/task" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/pkg/logger" + sdkagent "github.com/compozy/compozy/sdk/v2/agent" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + sdktask "github.com/compozy/compozy/sdk/v2/task" + sdkworkflow "github.com/compozy/compozy/sdk/v2/workflow" +) + +// RunDebuggingAndTracing executes a workflow and prints execution metadata for debugging. 
+func RunDebuggingAndTracing(ctx context.Context) error { + agentCfg, workflowCfg, err := buildDebugWorkflow(ctx) + if err != nil { + return err + } + options := []compozy.Option{ + compozy.WithAgent(agentCfg), + compozy.WithWorkflow(workflowCfg), + } + return withEngine(ctx, options, func(execCtx context.Context, engine *compozy.Engine) error { + resp, err := engine.ExecuteWorkflowSync(execCtx, workflowCfg.ID, &compozy.ExecuteSyncRequest{}) + if err != nil { + return fmt.Errorf("execute workflow: %w", err) + } + telemetry := map[string]any{ + "exec_id": resp.ExecID, + "output": resp.Output, + "workflow_id": workflowCfg.ID, + } + blob, err := json.MarshalIndent(telemetry, "", " ") + if err != nil { + return fmt.Errorf("marshal telemetry: %w", err) + } + fmt.Printf("Telemetry snapshot:\n%s\n", string(blob)) + logger.FromContext(execCtx).Info("debug workflow completed", "exec_id", resp.ExecID) + return nil + }) +} + +func buildDebugWorkflow(ctx context.Context) (*engineagent.Config, *engineworkflow.Config, error) { + agentModel := &engineagent.Model{ + Config: enginecore.ProviderConfig{Provider: enginecore.ProviderMock, Model: "mock-debug"}, + } + agentCfg, err := newAgentWithModel(ctx, "debug-assistant", "Summarize diagnostic context succinctly.", agentModel, + sdkagent.WithMaxIterations(1), + ) + if err != nil { + return nil, nil, err + } + inspectTask, err := sdktask.New(ctx, "collect-diagnostics", + sdktask.WithAgent(agentCfg), + sdktask.WithPrompt("Provide a sentence describing that debugging instrumentation is active."), + sdktask.WithOutputs(inputPtr(map[string]any{"details": "{{ .task.output }}"})), + sdktask.WithFinal(true), + ) + if err != nil { + return nil, nil, fmt.Errorf("create debug task: %w", err) + } + workflowCfg, err := sdkworkflow.New( + ctx, + "debugging-and-tracing", + sdkworkflow.WithDescription("Captures execution metadata for troubleshooting"), + sdkworkflow.WithTasks([]enginetask.Config{*inspectTask}), + sdkworkflow.WithOutputs( + 
outputPtr(map[string]any{"diagnostics": "{{ .tasks.collect-diagnostics.output.details }}"}), + ), + ) + if err != nil { + return nil, nil, fmt.Errorf("create debug workflow: %w", err) + } + return agentCfg, workflowCfg, nil +} diff --git a/sdk/examples/10_complete_project.go b/sdk/examples/10_complete_project.go new file mode 100644 index 00000000..627977fd --- /dev/null +++ b/sdk/examples/10_complete_project.go @@ -0,0 +1,254 @@ +package main + +import ( + "context" + "fmt" + "os" + "time" + + engineagent "github.com/compozy/compozy/engine/agent" + enginecore "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + enginememory "github.com/compozy/compozy/engine/memory" + memcore "github.com/compozy/compozy/engine/memory/core" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + enginetask "github.com/compozy/compozy/engine/task" + enginetool "github.com/compozy/compozy/engine/tool" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/pkg/logger" + sdkagent "github.com/compozy/compozy/sdk/v2/agent" + compozy "github.com/compozy/compozy/sdk/v2/compozy" + sdkmemory "github.com/compozy/compozy/sdk/v2/memory" + sdkproject "github.com/compozy/compozy/sdk/v2/project" + sdktask "github.com/compozy/compozy/sdk/v2/task" + sdktool "github.com/compozy/compozy/sdk/v2/tool" + sdkworkflow "github.com/compozy/compozy/sdk/v2/workflow" +) + +// RunCompleteProject wires agents, tools, knowledge, memory, and schedules into a single execution. 
+func RunCompleteProject(ctx context.Context) error { + setup, err := prepareCompleteProject(ctx) + if err != nil { + return err + } + defer setup.cleanup() + composition, err := composeCompleteProject(ctx, setup) + if err != nil { + return err + } + return executeCompleteProject(ctx, composition) +} + +// completeProjectSetup holds reusable resources for the full project example. +type completeProjectSetup struct { + dir string + embedderCfg *engineknowledge.EmbedderConfig + vectorCfg *engineknowledge.VectorDBConfig + kbCfg *engineknowledge.BaseConfig + memoryCfg *enginememory.Config + toolCfg *enginetool.Config +} + +// cleanup removes temporary assets generated during setup. +func (s *completeProjectSetup) cleanup() { + if s == nil || s.dir == "" { + return + } + _ = os.RemoveAll(s.dir) +} + +// prepareCompleteProject builds shared resources and artifacts used throughout the example. +func prepareCompleteProject(ctx context.Context) (*completeProjectSetup, error) { + key, err := ensureOpenAIKey(ctx) + if err != nil { + return nil, err + } + dir, err := writeKnowledgeDocuments() + if err != nil { + return nil, err + } + cleanup := func(e error) (*completeProjectSetup, error) { + _ = os.RemoveAll(dir) + return nil, e + } + embedderCfg, vectorCfg, kbCfg, err := createKnowledgeDefinitions(ctx, dir, key) + if err != nil { + return cleanup(err) + } + memoryCfg, err := sdkmemory.New(ctx, "project-memory", "message_count_based", + sdkmemory.WithPersistence(memcore.PersistenceConfig{Type: memcore.InMemoryPersistence, TTL: "1h"}), + sdkmemory.WithMaxMessages(50), + ) + if err != nil { + return cleanup(fmt.Errorf("create memory: %w", err)) + } + toolCfg, err := sdktool.New(ctx, "report-clock", + sdktool.WithName("Report Clock"), + sdktool.WithDescription("Returns a timestamp for the final report"), + sdktool.WithNativeHandler(func(context.Context, map[string]any, map[string]any) (map[string]any, error) { + return map[string]any{"timestamp": timeNow()}, nil + }), + ) + if 
err != nil { + return cleanup(fmt.Errorf("create tool: %w", err)) + } + return &completeProjectSetup{ + dir: dir, + embedderCfg: embedderCfg, + vectorCfg: vectorCfg, + kbCfg: kbCfg, + memoryCfg: memoryCfg, + toolCfg: toolCfg, + }, nil +} + +// completeProjectComposition stores the runtime configuration for executing the workflow. +type completeProjectComposition struct { + workflowCfg *engineworkflow.Config + options []compozy.Option +} + +// composeCompleteProject assembles engine options alongside workflow definitions. +func composeCompleteProject(ctx context.Context, setup *completeProjectSetup) (*completeProjectComposition, error) { + agentCfg, err := buildCompleteAgent(ctx, setup.kbCfg, setup.memoryCfg) + if err != nil { + return nil, err + } + workflowCfg, err := buildCompleteWorkflow(ctx, agentCfg) + if err != nil { + return nil, err + } + scheduleCfg := &projectschedule.Config{ + ID: "daily-summary", + WorkflowID: workflowCfg.ID, + Cron: "0 8 * * *", + Description: "Generates the morning launch update", + Enabled: boolPtr(true), + } + projectCfg, err := createCompleteProject( + ctx, + workflowCfg, + setup.embedderCfg, + setup.vectorCfg, + setup.kbCfg, + setup.memoryCfg, + setup.toolCfg, + scheduleCfg, + ) + if err != nil { + return nil, err + } + options := []compozy.Option{ + compozy.WithProject(projectCfg), + compozy.WithAgent(agentCfg), + compozy.WithTool(setup.toolCfg), + compozy.WithKnowledge(setup.kbCfg), + compozy.WithMemory(setup.memoryCfg), + compozy.WithWorkflow(workflowCfg), + compozy.WithSchedule(scheduleCfg), + } + return &completeProjectComposition{workflowCfg: workflowCfg, options: options}, nil +} + +// executeCompleteProject runs the composed workflow and prints the final report output. 
func executeCompleteProject(ctx context.Context, composition *completeProjectComposition) error {
	return withEngine(ctx, composition.options, func(execCtx context.Context, engine *compozy.Engine) error {
		input := map[string]any{"session_id": "helios-demo", "topic": "Helios launch readiness"}
		resp, err := engine.ExecuteWorkflowSync(
			execCtx,
			composition.workflowCfg.ID,
			&compozy.ExecuteSyncRequest{Input: input},
		)
		if err != nil {
			return fmt.Errorf("execute complete project workflow: %w", err)
		}
		report := stringOutput(resp.Output, "final_report")
		fmt.Printf("Project report (%s):\n%s\n", input["topic"], report)
		logger.FromContext(execCtx).Info("complete project finished", "exec_id", resp.ExecID)
		return nil
	})
}

// buildCompleteAgent configures the analyst agent with a knowledge binding and
// a session-scoped read/write memory reference keyed by the workflow input.
func buildCompleteAgent(
	ctx context.Context,
	kbCfg *engineknowledge.BaseConfig,
	memoryCfg *enginememory.Config,
) (*engineagent.Config, error) {
	model, err := newOpenAIModel(ctx, "gpt-4o-mini")
	if err != nil {
		return nil, err
	}
	binding := enginecore.KnowledgeBinding{ID: kbCfg.ID, TopK: intPtr(2), MinScore: floatPtr(0.2)}
	memRef := enginecore.MemoryReference{
		ID:   memoryCfg.ID,
		Mode: enginecore.MemoryModeReadWrite,
		Key:  "session:{{ .workflow.input.session_id }}",
	}
	return newAgentWithModel(
		ctx,
		"project-analyst",
		"Draft concise operational summaries grounded in provided knowledge.",
		model,
		sdkagent.WithKnowledge([]enginecore.KnowledgeBinding{binding}),
		sdkagent.WithMemory([]enginecore.MemoryReference{memRef}),
	)
}

// buildCompleteWorkflow creates a two-task workflow: a tool task that captures
// a timestamp, then a final agent task that composes the report from it.
func buildCompleteWorkflow(ctx context.Context, agentCfg *engineagent.Config) (*engineworkflow.Config, error) {
	timeTask, err := sdktask.New(ctx, "capture-time",
		sdktask.WithTool(&enginetool.Config{ID: "report-clock"}),
		sdktask.WithOutputs(inputPtr(map[string]any{"timestamp": "{{ .task.output.timestamp }}"})),
	)
	if err != nil {
		return nil, fmt.Errorf("create time task: %w", err)
	}
	reportTask, err := sdktask.New(
		ctx,
		"compose-report",
		sdktask.WithAgent(agentCfg),
		sdktask.WithPrompt(
			"Use knowledge base notes to summarize '{{ .workflow.input.topic }}' and cite sections. "+
				"Include the timestamp {{ .tasks.capture-time.output.timestamp }} in the first sentence.",
		),
		sdktask.WithOutputs(inputPtr(map[string]any{"report": "{{ .task.output }}"})),
		sdktask.WithFinal(true),
	)
	if err != nil {
		return nil, fmt.Errorf("create report task: %w", err)
	}
	return sdkworkflow.New(ctx, "complete-project",
		sdkworkflow.WithDescription("Generates a launch readiness bulletin"),
		sdkworkflow.WithTasks([]enginetask.Config{*timeTask, *reportTask}),
		sdkworkflow.WithOutputs(outputPtr(map[string]any{"final_report": "{{ .tasks.compose-report.output.report }}"})),
	)
}

// createCompleteProject registers every resource built above on a single
// project configuration so the engine can resolve them by ID at runtime.
func createCompleteProject(
	ctx context.Context,
	workflowCfg *engineworkflow.Config,
	embedderCfg *engineknowledge.EmbedderConfig,
	vectorCfg *engineknowledge.VectorDBConfig,
	kbCfg *engineknowledge.BaseConfig,
	memoryCfg *enginememory.Config,
	toolCfg *enginetool.Config,
	scheduleCfg *projectschedule.Config,
) (*engineproject.Config, error) {
	return sdkproject.New(ctx, "helios-complete",
		sdkproject.WithWorkflows([]*engineproject.WorkflowSourceConfig{{Source: workflowCfg.ID}}),
		sdkproject.WithEmbedders([]engineknowledge.EmbedderConfig{*embedderCfg}),
		sdkproject.WithVectorDBs([]engineknowledge.VectorDBConfig{*vectorCfg}),
		sdkproject.WithKnowledgeBases([]engineknowledge.BaseConfig{*kbCfg}),
		sdkproject.WithMemories([]*enginememory.Config{memoryCfg}),
		sdkproject.WithTools([]enginetool.Config{*toolCfg}),
		sdkproject.WithSchedules([]*projectschedule.Config{scheduleCfg}),
	)
}

// boolPtr returns a pointer to v for optional boolean config fields.
func boolPtr(v bool) *bool {
	return &v
}

// timeNow returns the current UTC time formatted as RFC 3339 with nanoseconds.
func timeNow() string {
	return time.Now().UTC().Format(time.RFC3339Nano)
}
in this directory is runnable with the functional-options SDK. Execute a scenario using

```bash
go run sdk/examples --example <name>
```

## Prerequisites

- Go 1.25.2+
- Bun installed locally (required for `runtime-native-tools`)
- `OPENAI_API_KEY` exported when running `knowledge-rag` or `complete-project`

## Example Catalog

| Example               | Command                                                | What it Demonstrates                                                   |
| --------------------- | ------------------------------------------------------ | ---------------------------------------------------------------------- |
| simple-workflow       | `go run sdk/examples --example simple-workflow`        | Minimal mock-backed agent executed synchronously                       |
| parallel-tasks        | `go run sdk/examples --example parallel-tasks`         | `task.NewParallel` fan-out/fan-in with aggregation                     |
| knowledge-rag         | `go run sdk/examples --example knowledge-rag`          | Markdown ingestion, OpenAI embeddings, retrieval-grounded answer       |
| memory-conversation   | `go run sdk/examples --example memory-conversation`    | Session memory with multi-turn dialogue                                |
| runtime-native-tools  | `go run sdk/examples --example runtime-native-tools`   | Native Go tool + Bun inline script working side-by-side                |
| scheduled-workflow    | `go run sdk/examples --example scheduled-workflow`     | Cron schedule config and deterministic first-tick simulation           |
| signal-communication  | `go run sdk/examples --example signal-communication`   | Signal and wait task coordination with payload hand-off                |
| model-routing         | `go run sdk/examples --example model-routing`          | Per-task model overrides for routing and fallback chains               |
| debugging-and-tracing | `go run sdk/examples --example debugging-and-tracing`  | Capturing exec telemetry for troubleshooting                           |
| complete-project      | `go run sdk/examples --example complete-project`       | Project-level config spanning tools, knowledge, memory, and schedules  |

## Development Notes

- Constructors mirror runtime defaults—no builders or `Build()` calls.
+- Context flows top-down from `main`; the helpers avoid `context.Background()` in execution paths. +- Examples respect `.cursor/rules` (no stray blank lines, functions < 50 lines, named constants for non-trivial values). +- `knowledge-rag` and `complete-project` create temporary markdown content and rely on OpenAI to embed and answer questions. diff --git a/sdk/examples/main.go b/sdk/examples/main.go new file mode 100644 index 00000000..2631a1bb --- /dev/null +++ b/sdk/examples/main.go @@ -0,0 +1,130 @@ +package main + +import ( + "context" + "flag" + "fmt" + "os" + "sort" + "strings" + + "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" +) + +type exampleRunner func(context.Context) error + +var examples = map[string]exampleRunner{ + "simple-workflow": RunSimpleWorkflow, + "parallel-tasks": RunParallelTasks, + "knowledge-rag": RunKnowledgeRag, + "memory-conversation": RunMemoryConversation, + "runtime-native-tools": RunRuntimeNativeTools, + "scheduled-workflow": RunScheduledWorkflow, + "signal-communication": RunSignalCommunication, + "model-routing": RunModelRouting, + "debugging-and-tracing": RunDebuggingAndTracing, + "complete-project": RunCompleteProject, +} + +var descriptions = map[string]string{ + "simple-workflow": "Basic agent workflow executed synchronously", + "parallel-tasks": "Parallel branches with aggregation", + "knowledge-rag": "Retrieval-augmented workflow with OpenAI embeddings", + "memory-conversation": "Conversation that persists state across turns", + "runtime-native-tools": "Hybrid native and inline tool execution", + "scheduled-workflow": "Deterministic scheduled workflow trigger", + "signal-communication": "Signal and wait coordination between tasks", + "model-routing": "Per-task model override and fallback", + "debugging-and-tracing": "Runtime observability and tracing options", + "complete-project": "End-to-end project integrating multiple resources", +} + +func main() { + selected := 
flag.String("example", "", "Example to run") + flag.Usage = showHelp + flag.Parse() + if strings.TrimSpace(*selected) == "" { + showHelp() + os.Exit(1) + } + exit := run(*selected) + os.Exit(exit) +} + +func run(name string) int { + ctx, cleanup, err := initializeContext() + if err != nil { + fmt.Fprintf(os.Stderr, "failed to initialize context: %v\n", err) + return 1 + } + defer cleanup() + if err := runExample(ctx, name); err != nil { + logger.FromContext(ctx).Error("example failed", "example", name, "error", err) + return 1 + } + return 0 +} + +func initializeContext() (context.Context, func(), error) { + baseCtx, cancel := context.WithCancel(context.Background()) + ctx := baseCtx + manager := config.NewManager(ctx, config.NewService()) + if _, err := manager.Load(ctx, config.NewDefaultProvider(), config.NewEnvProvider()); err != nil { + cancel() + _ = manager.Close(ctx) + return nil, nil, fmt.Errorf("load configuration: %w", err) + } + ctx = config.ContextWithManager(ctx, manager) + cfg := config.FromContext(ctx) + level := logger.InfoLevel + addSource := false + if cfg != nil { + if cfg.CLI.Quiet { + level = logger.DisabledLevel + } else if cfg.CLI.Debug { + level = logger.DebugLevel + } + addSource = cfg.CLI.Debug + } + log := logger.SetupLogger(level, false, addSource) + ctx = logger.ContextWithLogger(ctx, log) + cleanup := func() { + if err := manager.Close(ctx); err != nil { + logger.FromContext(ctx).Warn("failed to close configuration manager", "error", err) + } + cancel() + } + return ctx, cleanup, nil +} + +func runExample(ctx context.Context, name string) error { + runner, ok := examples[name] + if !ok { + return fmt.Errorf("unknown example %q (use --help)", name) + } + log := logger.FromContext(ctx) + log.Info("starting example", "example", name) + if err := runner(ctx); err != nil { + return err + } + log.Info("example finished", "example", name) + return nil +} + +func showHelp() { + fmt.Println("Compozy SDK v2 Examples") + fmt.Println("Usage: go 
run sdk/examples --example ") + fmt.Println() + names := make([]string, 0, len(examples)) + for name := range examples { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + desc := descriptions[name] + fmt.Printf(" %-22s %s\n", name, desc) + } + fmt.Println() + fmt.Println("Most examples require OPENAI_API_KEY and Bun when inline tools run.") +} diff --git a/sdk/go.mod b/sdk/go.mod new file mode 100644 index 00000000..6f3e3b1d --- /dev/null +++ b/sdk/go.mod @@ -0,0 +1,301 @@ +module github.com/compozy/compozy/sdk/v2 + +go 1.25.2 + +require ( + github.com/Masterminds/semver/v3 v3.4.0 + github.com/alicebob/miniredis/v2 v2.35.0 + github.com/compozy/compozy v0.0.0-00010101000000-000000000000 + github.com/dave/jennifer v1.7.1 + github.com/go-chi/chi/v5 v5.2.3 + github.com/robfig/cron/v3 v3.0.1 + github.com/stretchr/testify v1.11.1 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + cel.dev/expr v0.24.0 // indirect + cloud.google.com/go v0.121.2 // indirect + cloud.google.com/go/ai v0.12.0 // indirect + cloud.google.com/go/aiplatform v1.85.0 // indirect + cloud.google.com/go/auth v0.16.1 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.7.0 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/longrunning v0.6.7 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect + cloud.google.com/go/storage v1.53.0 // indirect + cloud.google.com/go/vertexai v0.12.0 // indirect + dario.cat/mergo v1.0.2 // indirect + filippo.io/edwards25519 v1.1.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect + 
github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/adrg/strutil v0.3.1 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/apache/thrift v0.21.0 // indirect + github.com/atotto/clipboard v0.1.4 // indirect + github.com/aws/aws-sdk-go v1.55.6 // indirect + github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect + github.com/aws/smithy-go v1.22.3 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/bahlo/generic-list-go v0.2.0 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect + github.com/buger/jsonparser v1.1.1 // indirect + github.com/bytedance/sonic v1.14.0 // indirect + github.com/bytedance/sonic/loader v0.3.0 // indirect + github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c // indirect + github.com/cactus/go-statsd-client/v5 v5.1.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7 // indirect + github.com/charmbracelet/bubbletea v1.3.10 // indirect + github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect + github.com/charmbracelet/lipgloss v1.1.0 // indirect + github.com/charmbracelet/log v0.4.2 // indirect + github.com/charmbracelet/x/ansi v0.10.1 // indirect + github.com/charmbracelet/x/cellbuf v0.0.13 // indirect + github.com/charmbracelet/x/term v0.2.1 // indirect + github.com/cloudwego/base64x v0.1.6 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect + github.com/cohere-ai/cohere-go/v2 v2.14.1 // indirect + github.com/coreos/go-oidc/v3 v3.11.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgraph-io/badger/v4 v4.2.0 // indirect + github.com/dgraph-io/ristretto 
v0.1.1 // indirect + github.com/dgraph-io/ristretto/v2 v2.3.0 // indirect + github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/dlclark/regexp2 v1.11.5 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/emirpasic/gods v1.18.1 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect + github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect + github.com/fatih/structs v1.1.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.10 // indirect + github.com/georgysavva/scany/v2 v2.1.4 // indirect + github.com/gin-contrib/sse v1.1.0 // indirect + github.com/gin-gonic/gin v1.11.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.2 // indirect + github.com/go-json-experiment/json v0.0.0-20250910080747-cc2cfa0554c3 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.28.0 // indirect + github.com/go-resty/resty/v2 v2.16.5 // indirect + github.com/go-sql-driver/mysql v1.9.3 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/goccy/go-json v0.10.5 // indirect + github.com/goccy/go-yaml v1.18.0 // indirect + github.com/gocql/gocql v1.7.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.2 // indirect + github.com/golang/glog v1.2.5 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + 
github.com/golang/mock v1.7.0-rc.1 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/gomarkdown/markdown v0.0.0-20240729212818-a2a9c4f76ef5 // indirect + github.com/google/cel-go v0.26.1 // indirect + github.com/google/flatbuffers v24.3.25+incompatible // indirect + github.com/google/generative-ai-go v0.20.1 // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/gax-go/v2 v2.14.2 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/securecookie v1.1.1 // indirect + github.com/gosimple/slug v1.15.0 // indirect + github.com/gosimple/unidecode v1.0.1 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect + github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/huandu/xstrings v1.5.0 // indirect + github.com/iancoleman/strcase v0.3.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/invopop/jsonschema v0.13.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgx/v5 v5.7.6 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect + github.com/joho/godotenv v1.5.1 // indirect + github.com/jolestar/go-commons-pool/v2 v2.1.2 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kaptinlin/go-i18n v0.2.0 // 
indirect + github.com/kaptinlin/jsonschema v0.5.1 // indirect + github.com/kaptinlin/messageformat-go v0.4.5 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/klippa-app/go-pdfium v1.17.2 // indirect + github.com/knadh/koanf/maps v0.1.2 // indirect + github.com/knadh/koanf/providers/env/v2 v2.0.0 // indirect + github.com/knadh/koanf/providers/structs v1.0.0 // indirect + github.com/knadh/koanf/v2 v2.3.0 // indirect + github.com/labstack/echo/v4 v4.13.4 // indirect + github.com/labstack/gommon v0.4.2 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/looplab/fsm v1.0.3 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mark3labs/mcp-go v0.41.1 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-localereader v0.0.1 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect + github.com/muesli/cancelreader v0.2.2 // indirect + github.com/muesli/termenv v0.16.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect + github.com/nexus-rpc/sdk-go v0.3.0 // 
indirect + github.com/nlpodyssey/cybertron v0.2.1 // indirect + github.com/nlpodyssey/gopickle v0.3.0 // indirect + github.com/nlpodyssey/gotokenizers v0.2.0 // indirect + github.com/nlpodyssey/spago v1.1.0 // indirect + github.com/olivere/elastic/v7 v7.0.32 // indirect + github.com/ollama/ollama v0.12.4 // indirect + github.com/open-and-sustainable/alembica v0.0.8 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pborman/uuid v1.2.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/pgvector/pgvector-go v0.3.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pkoukk/tiktoken-go v0.1.8 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/pressly/goose/v3 v3.26.0 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v1.20.99 // indirect + github.com/prometheus/otlptranslator v0.0.2 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/quic-go/qpack v0.5.1 // indirect + github.com/quic-go/quic-go v0.54.1 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/redis/go-redis/v9 v9.14.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/robfig/cron v1.2.0 // indirect + github.com/romdo/go-debounce v0.1.0 // indirect + github.com/rs/zerolog v1.31.0 // indirect + github.com/sashabaranov/go-openai v1.40.1 // indirect + github.com/segmentio/ksuid v1.0.4 // indirect + github.com/sethvargo/go-retry v0.3.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/slok/goresilience v0.2.0 // indirect + github.com/sony/gobreaker 
v1.0.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/temporalio/ringpop-go v0.0.0-20250130211428-b97329e994f7 // indirect + github.com/temporalio/sqlparser v0.0.0-20231115171017-f4060bcfa6cb // indirect + github.com/temporalio/tchannel-go v1.22.1-0.20240528171429-1db37fdea938 // indirect + github.com/temporalio/ui-server/v2 v2.41.0 // indirect + github.com/tetratelabs/wazero v1.9.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tmc/langchaingo v0.1.13 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/twmb/murmur3 v1.1.8 // indirect + github.com/uber-common/bark v1.3.0 // indirect + github.com/uber-go/tally/v4 v4.1.17 // indirect + github.com/ugorji/go/codec v1.3.0 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasttemplate v1.2.2 // indirect + github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect + github.com/xhit/go-str2duration/v2 v2.1.0 // indirect + github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect + github.com/yosida95/uritemplate/v3 v3.0.2 // indirect + github.com/yuin/gopher-lua v1.1.1 // indirect + github.com/zeebo/errs v1.4.0 // indirect + gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 // indirect + gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82 // indirect + gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a // indirect + gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84 // indirect + gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f // indirect + go.opencensus.io v0.24.0 // indirect 
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.60.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.1 // indirect + go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 // indirect + go.temporal.io/api v1.53.0 // indirect + go.temporal.io/sdk v1.37.0 // indirect + go.temporal.io/server v1.29.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.23.0 // indirect + go.uber.org/mock v0.5.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/arch v0.20.0 // indirect + golang.org/x/crypto v0.43.0 // indirect + golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect + golang.org/x/mod v0.28.0 // indirect + golang.org/x/net v0.46.0 // indirect + golang.org/x/oauth2 v0.32.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/term v0.36.0 // indirect + golang.org/x/text v0.30.0 // indirect + golang.org/x/time v0.14.0 // indirect + golang.org/x/tools v0.37.0 // indirect + google.golang.org/api v0.235.0 // indirect + google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect + 
google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect + google.golang.org/grpc v1.76.0 // indirect + google.golang.org/protobuf v1.36.9 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/validator.v2 v2.0.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + modernc.org/libc v1.66.3 // indirect + modernc.org/mathutil v1.7.1 // indirect + modernc.org/memory v1.11.0 // indirect + modernc.org/sqlite v1.38.2 // indirect +) + +replace github.com/compozy/compozy => .. diff --git a/sdk/go.sum b/sdk/go.sum new file mode 100644 index 00000000..d9696a08 --- /dev/null +++ b/sdk/go.sum @@ -0,0 +1,1056 @@ +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.121.2 h1:v2qQpN6Dx9x2NmwrqlesOt3Ys4ol5/lFZ6Mg1B7OJCg= +cloud.google.com/go v0.121.2/go.mod h1:nRFlrHq39MNVWu+zESP2PosMWA0ryJw8KUBZ2iZpxbw= +cloud.google.com/go/ai v0.12.0 h1:i9k0U14BhejPY+yKTm9VTCjRAA3PwYvf4s/zhSkHof0= +cloud.google.com/go/ai v0.12.0/go.mod h1:SEbNRRerz779yMT0qjDYG245m96WO8Flieiv+/fU9GQ= +cloud.google.com/go/aiplatform v1.85.0 h1:80/GqdP8Tovaaw9Qr6fYZNDvwJeA9rLk8mYkqBJNIJQ= +cloud.google.com/go/aiplatform v1.85.0/go.mod h1:S4DIKz3TFLSt7ooF2aCRdAqsUR4v/YDXUoHqn5P0EFc= +cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU= +cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod 
h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= +cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/storage v1.53.0 h1:gg0ERZwL17pJ+Cz3cD2qS60w1WMDnwcm5YPAIQBHUAw= +cloud.google.com/go/storage v1.53.0/go.mod h1:7/eO2a/srr9ImZW9k5uufcNahT2+fPb8w5it1i5boaA= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= +cloud.google.com/go/vertexai v0.12.0 h1:zTadEo/CtsoyRXNx3uGCncoWAP1H2HakGqwznt+iMo8= +cloud.google.com/go/vertexai v0.12.0/go.mod h1:8u+d0TsvBfAAd2x5R6GMgbYhsLgo3J7lmP4bR8g2ig8= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +entgo.io/ent v0.14.3 h1:wokAV/kIlH9TeklJWGGS7AYJdVckr0DloWjIcO9iIIQ= +entgo.io/ent v0.14.3/go.mod h1:aDPE/OziPEu8+OWbzy4UlvWmD2/kbRuWfK2A40hcxJM= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= 
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= 
+github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/adrg/strutil v0.3.1 h1:OLvSS7CSJO8lBii4YmBt8jiK9QOtB9CzCzwl4Ic/Fz4= +github.com/adrg/strutil v0.3.1/go.mod h1:8h90y18QLrs11IBffcGX3NW/GFBXCMcNg4M7H6MspPA= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alicebob/miniredis/v2 v2.35.0 h1:QwLphYqCEAo1eu1TqPRN2jgVMPBweeQcR21jeqDCONI= +github.com/alicebob/miniredis/v2 v2.35.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= +github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= +github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= +github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= +github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk= +github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= +github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k= 
+github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= +github.com/aymanbagabas/go-udiff v0.3.1 h1:LV+qyBQ2pqe0u42ZsUEtPiCaUoqgA9gYRDs3vj1nolY= +github.com/aymanbagabas/go-udiff v0.3.1/go.mod h1:G0fsKmG+P6ylD0r6N/KgQD/nWzgfnl8ZBcNLgcbrw8E= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= +github.com/benbjohnson/clock v0.0.0-20160125162948-a620c1cc9866/go.mod h1:UMqtWQTnOe4byzwe7Zhwh8f8s+36uszN51sJrSIZlTE= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE= +github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert 
v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b h1:AP/Y7sqYicnjGDfD5VcY4CIfh1hRXBUavxrvELjTiOE= +github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= +github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= +github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= +github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= +github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= +github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c h1:HIGF0r/56+7fuIZw2V4isE22MK6xpxWx7BbV8dJ290w= +github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= +github.com/cactus/go-statsd-client/v4 v4.0.0/go.mod h1:m73kwJp6TN0Ja9P6ycdZhWM1MlfxY/95WZ//IptPQ+Y= +github.com/cactus/go-statsd-client/v5 v5.1.0 h1:sbbdfIl9PgisjEoXzvXI1lwUKWElngsjJKaZeC021P4= +github.com/cactus/go-statsd-client/v5 v5.1.0/go.mod h1:COEvJ1E+/E2L4q6QE5CkjWPi4eeDw9maJBMIuMPBZbY= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= 
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7 h1:JFgG/xnwFfbezlUnFMJy0nusZvytYysV4SCS2cYbvws= +github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7/go.mod h1:ISC1gtLcVilLOf23wvTfoQuYbW2q0JevFxPfUzZ9Ybw= +github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw= +github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= +github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= +github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= +github.com/charmbracelet/log v0.4.2 h1:hYt8Qj6a8yLnvR+h7MwsJv/XvmBJXiueUcI3cIxsyig= +github.com/charmbracelet/log v0.4.2/go.mod h1:qifHGX/tc7eluv2R6pWIpyHDDrrb/AG71Pf2ysQu5nw= +github.com/charmbracelet/x/ansi v0.10.1 h1:rL3Koar5XvX0pHGfovN03f5cxLbCF2YvLeyz7D2jVDQ= +github.com/charmbracelet/x/ansi v0.10.1/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE= +github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k= +github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 
h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ= +github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= +github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cockroachdb/cockroach-go/v2 v2.2.0 h1:/5znzg5n373N/3ESjHF5SMLxiW4RKB05Ql//KWfeTFs= +github.com/cockroachdb/cockroach-go/v2 v2.2.0/go.mod h1:u3MiKYGupPPjkn3ozknpMUpxPaNLTFWAya419/zv6eI= +github.com/cohere-ai/cohere-go/v2 v2.14.1 h1:fXNrV02rfrP9ieI+S6mHV6Nt2Z0uEDPkK3rnc5bJWCM= +github.com/cohere-ai/cohere-go/v2 v2.14.1/go.mod h1:MuiJkCxlR18BDV2qQPbz2Yb/OCVphT1y6nD2zYaKeR0= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod 
h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= +github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ= +github.com/dave/jennifer v1.7.1 h1:B4jJJDHelWcDhlRQxWeo0Npa/pYKBLrirAQoTN45txo= +github.com/dave/jennifer v1.7.1/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= +github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= +github.com/dgraph-io/ristretto v0.1.1 
h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgraph-io/ristretto/v2 v2.3.0 h1:qTQ38m7oIyd4GAed/QkUZyPFNMnvVWyazGXRwvOt5zk= +github.com/dgraph-io/ristretto/v2 v2.3.0/go.mod h1:gpoRV3VzrEY1a9dWAYV6T1U7YzfgttXdd/ZzL1s9OZM= +github.com/dgryski/go-farm v0.0.0-20140601200337-fc41e106ee0e/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= +github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod 
h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw= +github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/erikgeiser/coninput 
v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= +github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= +github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= +github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= +github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/georgysavva/scany/v2 v2.1.4 h1:nrzHEJ4oQVRoiKmocRqA1IyGOmM/GQOEsg9UjMR5Ip4= +github.com/georgysavva/scany/v2 v2.1.4/go.mod h1:fqp9yHZzM/PFVa3/rYEC57VmDx+KDch0LoqrJzkvtos= +github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w= +github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM= 
+github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk= +github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= +github.com/go-faker/faker/v4 v4.6.0 h1:6aOPzNptRiDwD14HuAnEtlTa+D1IfFuEHO8+vEFwjTs= +github.com/go-faker/faker/v4 v4.6.0/go.mod h1:ZmrHuVtTTm2Em9e0Du6CJ9CADaLEzGXW62z1YqFH0m0= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI= +github.com/go-jose/go-jose/v4 v4.1.2/go.mod h1:22cg9HWM1pOlnRiY+9cQYJ9XHmya1bYW8OeDM6Ku6Oo= +github.com/go-json-experiment/json v0.0.0-20250910080747-cc2cfa0554c3 h1:02WINGfSX5w0Mn+F28UyRoSt9uvMhKguwWMlOAh6U/0= +github.com/go-json-experiment/json v0.0.0-20250910080747-cc2cfa0554c3/go.mod h1:uNVvRXArCGbZ508SxYYTC5v1JWoz2voff5pm25jU1Ok= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-pg/pg/v10 v10.11.0 h1:CMKJqLgTrfpE/aOVeLdybezR2om071Vh38OLZjsyMI0= +github.com/go-pg/pg/v10 v10.11.0/go.mod h1:4BpHRoxE61y4Onpof3x1a2SQvi9c+q1dJnrNdMjsroA= 
+github.com/go-pg/zerochecker v0.2.0 h1:pp7f72c3DobMWOb2ErtZsnrPaSvHd2W4o9//8HtF4mU= +github.com/go-pg/zerochecker v0.2.0/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688= +github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU= +github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM= +github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= +github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-yaml 
v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/gocql/gocql v1.7.0 h1:O+7U7/1gSN7QTEAaMEsJc1Oq2QHXvCWoF3DFK9HDHus= +github.com/gocql/gocql v1.7.0/go.mod h1:vnlvXyFZeLBF0Wy+RS8hrOdbn0UWsWtdg07XJnFxZ+4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= +github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.7.0-rc.1 h1:YojYx61/OLFsiv6Rw1Z96LpldJIy31o+UHmwAUMJ6/U= +github.com/golang/mock v1.7.0-rc.1/go.mod 
h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomarkdown/markdown v0.0.0-20240729212818-a2a9c4f76ef5 h1:8QWUW69MXlNdZXnDnD9vEQ1BL8/mm1FTiSesKKHYivk= +github.com/gomarkdown/markdown v0.0.0-20240729212818-a2a9c4f76ef5/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA= +github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ= +github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= +github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI= +github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= 
+github.com/google/generative-ai-go v0.20.1 h1:6dEIujpgN2V0PgLhr6c/M1ynRdc7ARtiIDPFzj45uNQ= +github.com/google/generative-ai-go v0.20.1/go.mod h1:TjOnZJmZKzarWbjUJgy+r3Ee7HGBRVLhOIgupnwR4Bg= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod 
h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= +github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gosimple/slug v1.15.0 h1:wRZHsRrRcs6b0XnxMUBM6WK1U1Vg5B0R7VkIf1Xzobo= +github.com/gosimple/slug v1.15.0/go.mod h1:UiRaFH+GEilHstLUmcBgWcI42viBN7mAb818JrYOeFQ= +github.com/gosimple/unidecode v1.0.1 h1:hZzFTMMqSswvf0LBJZCZgThIZrpDHFXux9KeGmn6T/o= +github.com/gosimple/unidecode v1.0.1/go.mod h1:CP0Cr1Y1kogOtx0bJblKzsVWrqYaqfNOnHzpgWw4Awc= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 h1:sGm2vDRFUrQJO/Veii4h4zG2vvqG6uWNkBHSTqXOZk0= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2/go.mod h1:wd1YpapPLivG6nQgbf7ZkG1hhSOXDhhn4MLTknx2aAc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 
h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= +github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk= +github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 
v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/jolestar/go-commons-pool/v2 v2.1.2 h1:E+XGo58F23t7HtZiC/W6jzO2Ux2IccSH/yx4nD+J1CM= +github.com/jolestar/go-commons-pool/v2 v2.1.2/go.mod h1:r4NYccrkS5UqP1YQI1COyTZ9UjPJAAGTUxzcsK1kqhY= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= 
+github.com/jung-kurt/gofpdf v1.16.2 h1:jgbatWHfRlPYiK85qgevsZTHviWXKwB1TTiKdz5PtRc= +github.com/jung-kurt/gofpdf v1.16.2/go.mod h1:1hl7y57EsiPAkLbOwzpzqgx1A30nQCk/YmFV8S2vmK0= +github.com/kaptinlin/go-i18n v0.2.0 h1:8iwjAERQbCVF78c3HxC4MxUDxDRFvQVQlMDvlsO43hU= +github.com/kaptinlin/go-i18n v0.2.0/go.mod h1:gRHEMrTHtQLsAFwulPbJG71TwHjXxkagn88O8FI8FuA= +github.com/kaptinlin/jsonschema v0.5.1 h1:EAkrS6HpmAqb9zFD9nNpg1MGJzWzmsqrc6DPcwkaLwo= +github.com/kaptinlin/jsonschema v0.5.1/go.mod h1:HuWb90460GwFxRe0i9Ni3Z7YXwkjpqjeccWTB9gTZZE= +github.com/kaptinlin/messageformat-go v0.4.5 h1:Y1CTf38O6lKKXX/UZTwb2Xw7c6DPk7kjQEHPJW6qxTI= +github.com/kaptinlin/messageformat-go v0.4.5/go.mod h1:r0PH7FsxJX8jS/n6LAYZon5w3X+yfCLUrquqYd2H7ks= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klippa-app/go-pdfium v1.17.2 h1:vlaF4b+4Uw7GtpkVzysgfEy00/1v1nFgb7uO3HgaS60= +github.com/klippa-app/go-pdfium v1.17.2/go.mod h1:Esq2YX5JCdA+UHzMNPEmV62rqbgvIiNUj8s+EZfgHpM= +github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo= +github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/env/v2 v2.0.0 h1:Ad5H3eun722u+FvchiIcEIJZsZ2M6oxCkgZfWN5B5KY= +github.com/knadh/koanf/providers/env/v2 v2.0.0/go.mod h1:1g01PE+Ve1gBfWNNw2wmULRP0tc8RJrjn5p2N/jNCIc= +github.com/knadh/koanf/providers/structs v1.0.0 h1:DznjB7NQykhqCar2LvNug3MuxEQsZ5KvfgMbio+23u4= +github.com/knadh/koanf/providers/structs v1.0.0/go.mod 
h1:kjo5TFtgpaZORlpoJqcbeLowM2cINodv8kX+oFAeQ1w= +github.com/knadh/koanf/v2 v2.3.0 h1:Qg076dDRFHvqnKG97ZEsi9TAg2/nFTa9hCdcSa1lvlM= +github.com/knadh/koanf/v2 v2.3.0/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.13.4 h1:oTZZW+T3s9gAu5L8vmzihV7/lkXGZuITzTQkTEhcXEA= +github.com/labstack/echo/v4 v4.13.4/go.mod h1:g63b33BZ5vZzcIUF8AtRH40DrTlXnx4UMC8rBdndmjQ= +github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= +github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= 
+github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/looplab/fsm v1.0.3 h1:qtxBsa2onOs0qFOtkqwf5zE0uP0+Te+wlIvXctPKpcw= +github.com/looplab/fsm v1.0.3/go.mod h1:PmD3fFvQEIsjMEfvZdrCDZ6y8VwKTwWNjlpEr6IKPO4= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 h1:PpXWgLPs+Fqr325bN2FD2ISlRRztXibcX6e8f5FR5Dc= +github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mark3labs/mcp-go v0.41.1 h1:w78eWfiQam2i8ICL7AL0WFiq7KHNJQ6UB53ZVtH4KGA= +github.com/mark3labs/mcp-go v0.41.1/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= +github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= +github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod 
h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= +github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= +github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= 
+github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/nexus-rpc/sdk-go v0.3.0 h1:Y3B0kLYbMhd4C2u00kcYajvmOrfozEtTV/nHSnV57jA= +github.com/nexus-rpc/sdk-go v0.3.0/go.mod h1:TpfkM2Cw0Rlk9drGkoiSMpFqflKTiQLWUNyKJjF8mKQ= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nlpodyssey/cybertron v0.2.1 h1:zBvzmjP6Teq3u8yiHuLoUPxan6ZDRq/32GpV6Ep8X08= +github.com/nlpodyssey/cybertron v0.2.1/go.mod h1:Vg9PeB8EkOTAgSKQ68B3hhKUGmB6Vs734dBdCyE4SVM= +github.com/nlpodyssey/gopickle v0.3.0 h1:BLUE5gxFLyyNOPzlXxt6GoHEMMxD0qhsE4p0CIQyoLw= +github.com/nlpodyssey/gopickle v0.3.0/go.mod h1:f070HJ/yR+eLi5WmM1OXJEGaTpuJEUiib19olXgYha0= +github.com/nlpodyssey/gotokenizers v0.2.0 h1:CWx/sp9s35XMO5lT1kNXCshFGDCfPuuWdx/9JiQBsVc= +github.com/nlpodyssey/gotokenizers v0.2.0/go.mod h1:SBLbuSQhpni9M7U+Ie6O46TXYN73T2Cuw/4eeYHYJ+s= +github.com/nlpodyssey/spago v1.1.0 h1:DGUdGfeGR7TxwkYRdSEzbSvunVWN5heNSksmERmj97w= +github.com/nlpodyssey/spago v1.1.0/go.mod h1:jDWGZwrB4B61U6Tf3/+MVlWOtNsk3EUA7G13UDHlnjQ= +github.com/olivere/elastic/v7 v7.0.32 h1:R7CXvbu8Eq+WlsLgxmKVKPox0oOwAE/2T9Si5BnvK6E= +github.com/olivere/elastic/v7 v7.0.32/go.mod h1:c7PVmLe3Fxq77PIfY/bZmxY/TAamBhCzZ8xDOE09a9k= +github.com/ollama/ollama v0.12.4 h1:VfqVk8qSxREJar0z0EQAU2/h9qi/cqAMIKzo+nT+faI= +github.com/ollama/ollama 
v0.12.4/go.mod h1:9+1//yWPsDE2u+l1a5mpaKrYw4VdnSsRU3ioq5BvMms= +github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw= +github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/open-and-sustainable/alembica v0.0.8 h1:uw5a/Nwn6S1ZAndMEnUEIUSfcpJyHwdWynOBDA9atXY= +github.com/open-and-sustainable/alembica v0.0.8/go.mod h1:Bd2lD/I8bRAz7YrxKi0KLwbeNZeBIRv/cUV3G8lvLoQ= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= +github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= +github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= +github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= +github.com/pashagolub/pgxmock/v4 v4.9.0 h1:itlO8nrVRnzkdMBXLs8pWUyyB2PC3Gku0WGIj/gGl7I= +github.com/pashagolub/pgxmock/v4 v4.9.0/go.mod h1:9L57pC193h2aKRHVyiiE817avasIPZnPwPlw3JczWvM= +github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= +github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= 
+github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pgvector/pgvector-go v0.3.0 h1:Ij+Yt78R//uYqs3Zk35evZFvr+G0blW0OUN+Q2D1RWc= +github.com/pgvector/pgvector-go v0.3.0/go.mod h1:duFy+PXWfW7QQd5ibqutBO4GxLsUZ9RVXhFZGIBsWSA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkoukk/tiktoken-go v0.1.8 h1:85ENo+3FpWgAACBaEUVp+lctuTcYUO7BtmfhlN/QTRo= +github.com/pkoukk/tiktoken-go v0.1.8/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prashantv/protectmem v0.0.0-20171002184600-e20412882b3a h1:AA9vgIBDjMHPC2McaGPojgV2dcI78ZC0TLNhYCXEKH8= +github.com/prashantv/protectmem v0.0.0-20171002184600-e20412882b3a/go.mod h1:lzZQ3Noex5pfAy7mkAeCjcBDteYU85uWWnJ/y6gKU8k= +github.com/pressly/goose/v3 v3.26.0 h1:KJakav68jdH0WDvoAcj8+n61WqOIaPGgH0bJWS6jpmM= +github.com/pressly/goose/v3 v3.26.0/go.mod 
h1:4hC1KrritdCxtuFsqgs1R4AU5bWtTAf+cnWvfhf2DNY= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v1.20.99 h1:vZEybF3CT0t6L0UjsOtHRML7vuIglHocmvJMMH/se4M= +github.com/prometheus/common v1.20.99/go.mod h1:VX44Tebe4qpuTK+MQWg25h4fJGKBqzObSdxuB7y8K/Y= +github.com/prometheus/otlptranslator v0.0.2 h1:+1CdeLVrRQ6Psmhnobldo0kTp96Rj80DRXRd5OSnMEQ= +github.com/prometheus/otlptranslator v0.0.2/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg= +github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= +github.com/rcrowley/go-metrics v0.0.0-20141108142129-dee209f2455f/go.mod 
h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/go-redis/v9 v9.14.0 h1:u4tNCjXOyzfgeLN+vAZaW1xUooqWDqVEsZN0U01jfAE= +github.com/redis/go-redis/v9 v9.14.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/romdo/go-debounce v0.1.0 h1:x5cEkVabfsX+9Orkc6v6sktozqGYRqjvCmAg8P1CgWI= +github.com/romdo/go-debounce v0.1.0/go.mod h1:RnqrKSJ6D0uuawNUYoc2R6RjAh0/MhNkArOhVjc0btg= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= +github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= 
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/samuel/go-thrift v0.0.0-20190219015601-e8b6b52668fe/go.mod h1:Vrkh1pnjV9Bl8c3P9zH0/D4NlOHWP5d4/hF4YTULaec= +github.com/sashabaranov/go-openai v1.40.1 h1:bJ08Iwct5mHBVkuvG6FEcb9MDTfsXdTYPGjYLRdeTEU= +github.com/sashabaranov/go-openai v1.40.1/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg= +github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= +github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= +github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs= +github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.0.2-0.20170726183946-abee6f9b0679/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/slok/goresilience v0.2.0 h1:dagdIiWlhTm7BK/r/LRKz+zvw0SCNk+nHf7obdsbzxQ= +github.com/slok/goresilience v0.2.0/go.mod h1:L6IqqHlxWGTrTyq8WwF8kUY8kOIESZAMWr1xkV0zdZA= +github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ= +github.com/sony/gobreaker v1.0.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spf13/cast v1.10.0 
h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/temporalio/ringpop-go v0.0.0-20250130211428-b97329e994f7 h1:lEebX/hZss+TSH3EBwhztnBavJVj7pWGJOH8UgKHS0w= +github.com/temporalio/ringpop-go v0.0.0-20250130211428-b97329e994f7/go.mod h1:RE+CHmY+kOZQk47AQaVzwrGmxpflnLgTd6EOK0853j4= +github.com/temporalio/sqlparser v0.0.0-20231115171017-f4060bcfa6cb h1:YzHH/U/dN7vMP+glybzcXRTczTrgfdRisNTzAj7La04= +github.com/temporalio/sqlparser v0.0.0-20231115171017-f4060bcfa6cb/go.mod h1:143qKdh3G45IgV9p+gbAwp3ikRDI8mxsijFiXDfuxsw= +github.com/temporalio/tchannel-go v1.22.1-0.20220818200552-1be8d8cffa5b/go.mod h1:c+V9Z/ZgkzAdyGvHrvC5AsXgN+M9Qwey04cBdKYzV7U= +github.com/temporalio/tchannel-go v1.22.1-0.20240528171429-1db37fdea938 h1:sEJGhmDo+0FaPWM6f0v8Tjia0H5pR6/Baj6+kS78B+M= +github.com/temporalio/tchannel-go v1.22.1-0.20240528171429-1db37fdea938/go.mod h1:ezRQRwu9KQXy8Wuuv1aaFFxoCNz5CeNbVOOkh3xctbY= +github.com/temporalio/ui-server/v2 v2.41.0 h1:m9F2jnFJy/dWxjk9d2oS+825r78EX4gUlipoQ1xNO6Y= +github.com/temporalio/ui-server/v2 v2.41.0/go.mod h1:ofEKGV5/NaPbjdmEQRcUDFE6nZPprOemNjLJYLF9IX4= +github.com/testcontainers/testcontainers-go v0.39.0 h1:uCUJ5tA+fcxbFAB0uP3pIK3EJ2IjjDUHFSZ1H1UxAts= +github.com/testcontainers/testcontainers-go v0.39.0/go.mod h1:qmHpkG7H5uPf/EvOORKvS6EuDkBUPE3zpVGaH9NL7f8= +github.com/testcontainers/testcontainers-go/modules/postgres v0.39.0 h1:REJz+XwNpGC/dCgTfYvM4SKqobNqDBfvhq74s2oHTUM= +github.com/testcontainers/testcontainers-go/modules/postgres v0.39.0/go.mod h1:4K2OhtHEeT+JSIFX4V8DkGKsyLa96Y2vLdd3xsxD5HE= +github.com/tetratelabs/wazero v1.9.0 
h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I= +github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= +github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= +github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= +github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= +github.com/tmc/langchaingo v0.1.13 h1:rcpMWBIi2y3B90XxfE4Ao8dhCQPVDMaNPnN5cGB1CaA= +github.com/tmc/langchaingo v0.1.13/go.mod h1:vpQ5NOIhpzxDfTZK9B6tf2GM/MoaHewPWM5KXXGh7hg= +github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo= +github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg= +github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +github.com/uber-common/bark v1.0.0/go.mod h1:g0ZuPcD7XiExKHynr93Q742G/sbrdVQkghrqLGOoFuY= +github.com/uber-common/bark v1.3.0 
h1:DkuZCBaQS9LWuNAPrCO6yQVANckIX3QI0QwLemUnzCo= +github.com/uber-common/bark v1.3.0/go.mod h1:5fDe/YcIVP55XhFF9hUihX2lDsDcpFrTZEAwAVwtPDw= +github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= +github.com/uber-go/tally/v4 v4.1.17 h1:C+U4BKtVDXTszuzU+WH8JVQvRVnaVKxzZrROFyDrvS8= +github.com/uber-go/tally/v4 v4.1.17/go.mod h1:ZdpiHRGSa3z4NIAc1VlEH4SiknR885fOIF08xmS0gaU= +github.com/uber/jaeger-client-go v2.22.1+incompatible h1:NHcubEkVbahf9t3p75TOCR83gdUHXjRJvjoBh1yACsM= +github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA= +github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= +github.com/uptrace/bun v1.1.12 h1:sOjDVHxNTuM6dNGaba0wUuz7KvDE1BmNu9Gqs2gJSXQ= +github.com/uptrace/bun v1.1.12/go.mod h1:NPG6JGULBeQ9IU6yHp7YGELRa5Agmd7ATZdz4tGZ6z0= +github.com/uptrace/bun/dialect/pgdialect v1.1.12 h1:m/CM1UfOkoBTglGO5CUTKnIKKOApOYxkcP2qn0F9tJk= +github.com/uptrace/bun/dialect/pgdialect v1.1.12/go.mod h1:Ij6WIxQILxLlL2frUBxUBOZJtLElD2QQNDcu/PWDHTc= +github.com/uptrace/bun/driver/pgdriver v1.1.12 h1:3rRWB1GK0psTJrHwxzNfEij2MLibggiLdTqjTtfHc1w= +github.com/uptrace/bun/driver/pgdriver v1.1.12/go.mod h1:ssYUP+qwSEgeDDS1xm2XBip9el1y9Mi5mTAvLoiADLM= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/vmihailenco/bufpool v0.1.11 
h1:gOq2WmBrq0i2yW5QJ16ykccQ4wH9UyEsgLm6czKAd94= +github.com/vmihailenco/bufpool v0.1.11/go.mod h1:AFf/MOy3l2CFTKbxwt0mp2MwnqjNEs5H/UxrkA5jxTQ= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= +github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= +github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181 h1:K+bMSIx9A7mLES1rtG+qKduLIXq40DAzYHtb0XuCukA= +gitlab.com/golang-commonmark/html v0.0.0-20191124015941-a22733972181/go.mod h1:dzYhVIwWCtzPAa4QP98wfB9+mzt33MSmM8wsKiMi2ow= +gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82 h1:oYrL81N608MLZhma3ruL8qTM4xcpYECGut8KSxRY59g= +gitlab.com/golang-commonmark/linkify v0.0.0-20191026162114-a0c2df6c8f82/go.mod h1:Gn+LZmCrhPECMD3SOKlE+BOHwhOYD9j7WT9NUtkCrC8= +gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a h1:O85GKETcmnCNAfv4Aym9tepU8OE0NmcZNqPlXcsBKBs= +gitlab.com/golang-commonmark/markdown v0.0.0-20211110145824-bf3e522c626a/go.mod h1:LaSIs30YPGs1H5jwGgPhLzc8vkNc/k0rDX/fEZqiU/M= +gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84 h1:qqjvoVXdWIcZCLPMlzgA7P9FZWdPGPvP/l3ef8GzV6o= +gitlab.com/golang-commonmark/mdurl v0.0.0-20191124015652-932350d1cb84/go.mod h1:IJZ+fdMvbW2qW6htJx7sLJ04FEs4Ldl/MDsJtMKywfw= +gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f h1:Wku8eEdeJqIOFHtrfkYUByc4bCaTeA6fL0UJgfEiFMI= +gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f/go.mod h1:Tiuhl+njh/JIg0uS/sOJVYi0x2HEa5rc1OAaVsb5tAs= +gitlab.com/opennota/wd v0.0.0-20180912061657-c5d65f63c638 h1:uPZaMiz6Sz0PZs3IZJWpU5qHKGNy///1pacZC9txiUI= +gitlab.com/opennota/wd v0.0.0-20180912061657-c5d65f63c638/go.mod 
h1:EGRJaqe2eO9XGmFtQCvV3Lm9NLico3UhFwUpCG/+mVU= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/pdata v1.34.0 h1:2vwYftckXe7pWxI9mfSo+tw3wqdGNrYpMbDx/5q6rw8= +go.opentelemetry.io/collector/pdata v1.34.0/go.mod h1:StPHMFkhLBellRWrULq0DNjv4znCDJZP6La4UuC+JHI= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 h1:ajl4QczuJVA2TU9W9AGw++86Xga/RKt//16z/yxPgdk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0/go.mod h1:Vn3/rlOJ3ntf/Q3zAI0V5lDnTbHGaUsNUeF6nZmm7pA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0 h1:cGtQxGvZbnrWdC2GyjZi0PDKVSLWP/Jocix3QWfXtbo= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0/go.mod h1:hkd1EekxNo69PTV4OWFGZcKQiIqg0RfuWExcPKFvepk= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 h1:PB3Zrjs1sG1GBX51SXyTSoOTqcDglmsk7nT6tkKPb/k= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0/go.mod h1:U2R3XyVPzn0WX7wOIypPuptulsMcPDPs/oiSVOMVnHY= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +go.starlark.net v0.0.0-20230302034142-4b1e35fe2254 h1:Ss6D3hLXTM0KobyBYEAygXzFfGcjnmfEJOBgSbemCtg= +go.starlark.net v0.0.0-20230302034142-4b1e35fe2254/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.temporal.io/api v1.53.0 h1:6vAFpXaC584AIELa6pONV56MTpkm4Ha7gPWL2acNAjo= +go.temporal.io/api v1.53.0/go.mod h1:iaxoP/9OXMJcQkETTECfwYq4cw/bj4nwov8b3ZLVnXM= +go.temporal.io/sdk 
v1.37.0 h1:RbwCkUQuqY4rfCzdrDZF9lgT7QWG/pHlxfZFq0NPpDQ= +go.temporal.io/sdk v1.37.0/go.mod h1:tOy6vGonfAjrpCl6Bbw/8slTgQMiqvoyegRv2ZHPm5M= +go.temporal.io/server v1.29.0 h1:BGBCvI7vcPokCjuDsfitLx2eS+8ow+yQ4frLJZcn2nQ= +go.temporal.io/server v1.29.0/go.mod h1:pc0n6DRcN06V4WNhaxdxE3KaZIS3KSDNKdca6uu6RuU= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg= +go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.27.0 
h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= +golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= 
+golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= +golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term 
v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= +golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot 
v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.235.0 h1:C3MkpQSRxS1Jy6AkzTGKKrpSCOd2WOGrezZ+icKSkKo= +google.golang.org/api v0.235.0/go.mod h1:QpeJkemzkFKe5VCE/PMv7GsUfn9ZF+u+q1Q7w6ckxTg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b h1:ULiyYQ0FdsJhwwZUwbaXpZF5yUE3h+RA+gxvBu37ucc= +google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:oDOGiMSXHL4sDTJvFvIB9nRQCGdLP1o/iVaqQK8zB+M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc 
v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= +google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod 
h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY= +gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/postgres v1.5.4 h1:Iyrp9Meh3GmbSuyIAGyjkN+n9K+GHX9b9MqsTL4EJCo= +gorm.io/driver/postgres v1.5.4/go.mod h1:Bgo89+h0CRcdA33Y6frlaHHVuTdOf87pmyzwW9C/BH0= +gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= +gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +mellium.im/sasl v0.3.1 h1:wE0LW6g7U83vhvxjC1IY8DnXM+EU095yeo8XClvCdfo= +mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw= +modernc.org/cc/v4 v4.26.2 h1:991HMkLjJzYBIfha6ECZdjrIYz2/1ayr+FL8GN+CNzM= +modernc.org/cc/v4 v4.26.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU= +modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE= 
+modernc.org/fileutil v1.3.8 h1:qtzNm7ED75pd1C7WgAGcK4edm4fvhtBsEiI/0NQ54YM= +modernc.org/fileutil v1.3.8/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= +modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= +modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= +modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks= +modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI= +modernc.org/libc v1.66.3 h1:cfCbjTUcdsKyyZZfEUKfoHcP3S0Wkvz3jgSzByEWVCQ= +modernc.org/libc v1.66.3/go.mod h1:XD9zO8kt59cANKvHPXpx7yS2ELPheAey0vjIuZOhOU8= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= +modernc.org/sqlite v1.38.2 h1:Aclu7+tgjgcQVShZqim41Bbw9Cho0y/7WzYptXqkEek= +modernc.org/sqlite v1.38.2/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git 
a/sdk/internal/codegen/cmd/optionsgen/main.go b/sdk/internal/codegen/cmd/optionsgen/main.go new file mode 100644 index 00000000..574bf95e --- /dev/null +++ b/sdk/internal/codegen/cmd/optionsgen/main.go @@ -0,0 +1,59 @@ +package main + +import ( + "flag" + "fmt" + "os" + + "github.com/compozy/compozy/sdk/v2/internal/codegen" +) + +func main() { + var ( + engineFile = flag.String("engine", "", "Path to engine struct file (e.g., ../../../../engine/agent/config.go)") + structName = flag.String("struct", "Config", "Name of struct to generate options for") + output = flag.String("output", "", "Output file path (e.g., ../../agent/options_generated.go)") + pkgName = flag.String( + "package", + "", + "Package name for generated code (optional, defaults to engine package name)", + ) + ) + flag.Parse() + if *engineFile == "" || *output == "" { + fmt.Fprintf(os.Stderr, "Usage: optionsgen -engine -struct -output [-package ]\n\n") + fmt.Fprintf(os.Stderr, "Example:\n") + fmt.Fprintf(os.Stderr, " optionsgen -engine ../../../../engine/agent/config.go ") + fmt.Fprintf(os.Stderr, "-struct Config -output ../../agent/options_generated.go\n\n") + flag.Usage() + os.Exit(1) + } + info, err := codegen.ParseStruct(*engineFile, *structName) + if err != nil { + fmt.Fprintf(os.Stderr, "❌ Failed to parse struct: %v\n", err) + os.Exit(1) + } + fmt.Printf("✅ Discovered %d fields from %s.%s\n", len(info.Fields), info.PackageName, info.StructName) + for _, field := range info.Fields { + typeInfo := field.Type + if field.IsSlice { + typeInfo = "[]" + field.ValueType + } + if field.IsMap { + typeInfo = fmt.Sprintf("map[%s]%s", field.KeyType, field.ValueType) + } + if field.IsPtr { + typeInfo = "*" + typeInfo + } + fmt.Printf(" - %s: %s\n", field.Name, typeInfo) + } + targetPkg := info.PackageName + if *pkgName != "" { + targetPkg = *pkgName + } + if err := codegen.GenerateOptions(info, *output, targetPkg); err != nil { + fmt.Fprintf(os.Stderr, "❌ Failed to generate options: %v\n", err) + os.Exit(1) 
+ } + fmt.Printf("\n✅ Generated %s with %d option functions\n", *output, len(info.Fields)) +} diff --git a/sdk/internal/codegen/generator.go b/sdk/internal/codegen/generator.go new file mode 100644 index 00000000..73981b86 --- /dev/null +++ b/sdk/internal/codegen/generator.go @@ -0,0 +1,168 @@ +package codegen + +import ( + "fmt" + "strings" + + "github.com/dave/jennifer/jen" +) + +const engineBaseImport = "github.com/compozy/compozy/engine" + +// GenerateOptions creates functional options for all discovered fields +func GenerateOptions(info *StructInfo, outputPath string, targetPackage string) error { + f := jen.NewFile(targetPackage) + f.HeaderComment("Code generated by optionsgen. DO NOT EDIT.") + f.Line() + enginePkg := fmt.Sprintf("%s/%s", engineBaseImport, info.PackageName) + f.Type().Id("Option").Func().Params( + jen.Op("*").Qual(enginePkg, info.StructName), + ) + f.Line() + for i := range info.Fields { + if shouldSkipField(&info.Fields[i]) { + continue + } + generateOptionFunc(f, info, &info.Fields[i], enginePkg) + } + return f.Save(outputPath) +} + +func shouldSkipField(field *FieldInfo) bool { + unexported := []string{"filePath", "CWD"} + for _, skip := range unexported { + if field.Name == skip { + return true + } + } + return false +} + +func generateOptionFunc(f *jen.File, info *StructInfo, field *FieldInfo, enginePkg string) { + funcName := "With" + field.Name + paramName := toParamName(field.Name) + if field.Comment != "" { + commentLines := strings.Split(field.Comment, "\n") + f.Comment(fmt.Sprintf("%s sets the %s field", funcName, field.Name)) + if len(commentLines) > 0 && commentLines[0] != "" { + f.Comment("") + for _, line := range commentLines { + if line = strings.TrimSpace(line); line != "" { + f.Comment(line) + } + } + } + } else { + f.Comment(fmt.Sprintf("%s sets the %s field", funcName, field.Name)) + } + paramType := buildParameterType(field, info.PackageName) + f.Func().Id(funcName).Params( + jen.Id(paramName).Add(paramType), + 
).Id("Option").Block( + jen.Return(jen.Func().Params( + jen.Id("cfg").Op("*").Qual(enginePkg, info.StructName), + ).Block( + jen.Id("cfg").Dot(field.Name).Op("=").Id(paramName), + )), + ) + f.Line() +} + +func buildParameterType(field *FieldInfo, currentPkg string) *jen.Statement { + var stmt *jen.Statement + switch { + case field.IsMap: + keyType := buildSimpleType(field.KeyType, field.PackagePath, currentPkg) + valueType := buildSimpleType(field.ValueType, field.PackagePath, currentPkg) + stmt = jen.Map(keyType).Add(valueType) + case field.IsSlice: + elemType := buildSimpleType(field.ValueType, field.PackagePath, currentPkg) + stmt = jen.Index().Add(elemType) + default: + stmt = buildSimpleType(field.Type, field.PackagePath, currentPkg) + } + if field.IsPtr && !field.IsSlice { + stmt = jen.Op("*").Add(stmt) + } + return stmt +} + +func buildSimpleType(typeName string, packagePath string, currentPkg string) *jen.Statement { + if strings.HasPrefix(typeName, "*") { + inner := buildSimpleType(strings.TrimPrefix(typeName, "*"), packagePath, currentPkg) + return jen.Op("*").Add(inner) + } + if strings.HasPrefix(typeName, "[]") { + inner := buildSimpleType(strings.TrimPrefix(typeName, "[]"), packagePath, currentPkg) + return jen.Index().Add(inner) + } + if isBuiltinType(typeName) { + return jen.Id(typeName) + } + if strings.Contains(typeName, ".") { + parts := strings.Split(typeName, ".") + pkgAlias := parts[0] + typeIdent := parts[len(parts)-1] + importPath := packagePath + if importPath == "" { + importPath = fmt.Sprintf("%s/%s", engineBaseImport, pkgAlias) + } + return jen.Qual(importPath, typeIdent) + } + if packagePath == "" || packagePath == currentPkg { + enginePkg := fmt.Sprintf("%s/%s", engineBaseImport, currentPkg) + return jen.Qual(enginePkg, typeName) + } + enginePkg := fmt.Sprintf("%s/%s", engineBaseImport, packagePath) + return jen.Qual(enginePkg, typeName) +} + +func isBuiltinType(typeName string) bool { + builtins := map[string]bool{ + "string": true, 
"int": true, "int8": true, "int16": true, "int32": true, "int64": true, + "uint": true, "uint8": true, "uint16": true, "uint32": true, "uint64": true, + "float32": true, "float64": true, "bool": true, "byte": true, "rune": true, + "error": true, "interface{}": true, "any": true, + } + return builtins[typeName] +} + +func toParamName(fieldName string) string { + if fieldName == "" { + return fieldName + } + if len(fieldName) == 1 { + return strings.ToLower(fieldName) + } + if fieldName == "ID" { + return "id" + } + if len(fieldName) > 1 && isAllCaps(fieldName) { + return strings.ToLower(fieldName) + } + paramName := strings.ToLower(fieldName[:1]) + fieldName[1:] + if isGoKeyword(paramName) { + return paramName + "Value" + } + return paramName +} + +func isGoKeyword(name string) bool { + keywords := map[string]bool{ + "break": true, "case": true, "chan": true, "const": true, "continue": true, + "default": true, "defer": true, "else": true, "fallthrough": true, "for": true, + "func": true, "go": true, "goto": true, "if": true, "import": true, + "interface": true, "map": true, "package": true, "range": true, "return": true, + "select": true, "struct": true, "switch": true, "type": true, "var": true, + } + return keywords[name] +} + +func isAllCaps(s string) bool { + for _, r := range s { + if r >= 'a' && r <= 'z' { + return false + } + } + return true +} diff --git a/sdk/internal/codegen/parser.go b/sdk/internal/codegen/parser.go new file mode 100644 index 00000000..d6b27ec5 --- /dev/null +++ b/sdk/internal/codegen/parser.go @@ -0,0 +1,203 @@ +package codegen + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "strings" +) + +// ParseStruct automatically discovers all fields from a struct definition +func ParseStruct(filePath string, structName string) (*StructInfo, error) { + fset := token.NewFileSet() + node, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments) + if err != nil { + return nil, fmt.Errorf("failed to parse file %s: %w", filePath, 
err) + } + info := &StructInfo{ + PackageName: node.Name.Name, + StructName: structName, + Fields: make([]FieldInfo, 0), + } + handler := func(field *ast.Field) error { + if len(field.Names) == 0 { + embeddedType := getEmbeddedTypeName(field.Type) + if embeddedType == "" { + return nil + } + embeddedFields, embeddedErr := parseEmbeddedStruct(fset, filePath, embeddedType) + if embeddedErr != nil { + return embeddedErr + } + info.Fields = append(info.Fields, embeddedFields...) + return nil + } + fieldName := field.Names[0].Name + if !ast.IsExported(fieldName) { + return nil + } + fieldInfo := analyzeFieldType(field) + fieldInfo.Name = fieldName + if field.Doc != nil { + fieldInfo.Comment = strings.TrimSpace(field.Doc.Text()) + } + info.Fields = append(info.Fields, fieldInfo) + return nil + } + found, walkErr := walkStructFields(node, structName, handler) + if walkErr != nil { + return nil, walkErr + } + if !found { + return nil, fmt.Errorf("struct %s not found in %s", structName, filePath) + } + return info, nil +} + +func walkStructFields(node ast.Node, structName string, fn func(*ast.Field) error) (bool, error) { + found := false + var walkErr error + ast.Inspect(node, func(n ast.Node) bool { + if walkErr != nil { + return false + } + typeSpec, ok := n.(*ast.TypeSpec) + if !ok || typeSpec.Name.Name != structName { + return true + } + found = true + structType, ok := typeSpec.Type.(*ast.StructType) + if !ok { + return true + } + for _, field := range structType.Fields.List { + if err := fn(field); err != nil { + walkErr = err + return false + } + } + return false + }) + return found, walkErr +} + +func analyzeFieldType(field *ast.Field) FieldInfo { + return analyzeType(field.Type) +} + +func analyzeType(expr ast.Expr) FieldInfo { + switch t := expr.(type) { + case *ast.Ident: + return FieldInfo{Type: t.Name} + case *ast.SelectorExpr: + info := FieldInfo{Type: exprToString(t)} + if x, ok := t.X.(*ast.Ident); ok { + info.PackagePath = x.Name + } + return info + case 
*ast.StarExpr: + inner := analyzeType(t.X) + return FieldInfo{ + Type: inner.Type, + IsPtr: true, + IsSlice: inner.IsSlice, + IsMap: inner.IsMap, + KeyType: inner.KeyType, + ValueType: inner.ValueType, + PackagePath: inner.PackagePath, + } + case *ast.ArrayType: + inner := analyzeType(t.Elt) + valueType := inner.Type + if inner.IsPtr { + valueType = "*" + inner.Type + } + return FieldInfo{ + Type: inner.Type, + IsSlice: true, + IsMap: inner.IsMap, + KeyType: inner.KeyType, + ValueType: valueType, + PackagePath: inner.PackagePath, + } + case *ast.MapType: + keyInfo := analyzeType(t.Key) + valueInfo := analyzeType(t.Value) + valueType := valueInfo.Type + if valueInfo.IsPtr { + valueType = "*" + valueInfo.Type + } + return FieldInfo{ + Type: fmt.Sprintf("map[%s]%s", keyInfo.Type, valueType), + IsMap: true, + KeyType: keyInfo.Type, + ValueType: valueType, + } + default: + return FieldInfo{Type: exprToString(expr)} + } +} + +func exprToString(expr ast.Expr) string { + switch t := expr.(type) { + case *ast.Ident: + return t.Name + case *ast.SelectorExpr: + return exprToString(t.X) + "." 
+ t.Sel.Name + case *ast.StarExpr: + return "*" + exprToString(t.X) + case *ast.ArrayType: + return "[]" + exprToString(t.Elt) + case *ast.MapType: + return "map[" + exprToString(t.Key) + "]" + exprToString(t.Value) + default: + return "interface{}" + } +} + +func getEmbeddedTypeName(expr ast.Expr) string { + switch t := expr.(type) { + case *ast.Ident: + return t.Name + case *ast.SelectorExpr: + return "" + default: + return "" + } +} + +func parseEmbeddedStruct(fset *token.FileSet, filePath string, typeName string) ([]FieldInfo, error) { + node, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments) + if err != nil { + return nil, fmt.Errorf("parse embedded struct %s in %s: %w", typeName, filePath, err) + } + var fields []FieldInfo + ast.Inspect(node, func(n ast.Node) bool { + typeSpec, ok := n.(*ast.TypeSpec) + if !ok || typeSpec.Name.Name != typeName { + return true + } + structType, ok := typeSpec.Type.(*ast.StructType) + if !ok { + return true + } + for _, field := range structType.Fields.List { + if len(field.Names) == 0 { + continue + } + fieldName := field.Names[0].Name + if !ast.IsExported(fieldName) { + continue + } + fieldInfo := analyzeFieldType(field) + fieldInfo.Name = fieldName + if field.Doc != nil { + fieldInfo.Comment = strings.TrimSpace(field.Doc.Text()) + } + fields = append(fields, fieldInfo) + } + return false + }) + return fields, nil +} diff --git a/sdk/internal/codegen/types.go b/sdk/internal/codegen/types.go new file mode 100644 index 00000000..2baa7f2f --- /dev/null +++ b/sdk/internal/codegen/types.go @@ -0,0 +1,23 @@ +// Package codegen provides tools for automatically generating functional options +// from engine struct definitions using Go's AST parser and Jennifer code generator. 
+package codegen + +// StructInfo holds metadata about a discovered struct +type StructInfo struct { + PackageName string + StructName string + Fields []FieldInfo +} + +// FieldInfo represents a single struct field with type information +type FieldInfo struct { + Name string + Type string + IsSlice bool + IsPtr bool + IsMap bool + KeyType string // For map types + ValueType string // For map types or slice element types + PackagePath string // Full import path if from another package + Comment string // Documentation comment +} diff --git a/sdk/internal/errors/build_error.go b/sdk/internal/errors/build_error.go new file mode 100644 index 00000000..9c9a3407 --- /dev/null +++ b/sdk/internal/errors/build_error.go @@ -0,0 +1,102 @@ +package errors + +import ( + "errors" + "fmt" + "strings" +) + +// BuildError aggregates configuration errors gathered during fluent builder +// invocations and reports them in a single return value from Build calls. +type BuildError struct { + Errors []error +} + +// Error renders the aggregated error list using a concise, developer-friendly +// message so callers understand why a build failed. +func (e *BuildError) Error() string { + if e == nil { + return "build failed" + } + + errs := e.nonNilErrors() + if len(errs) == 0 { + return "build failed" + } + + if len(errs) == 1 { + return fmt.Sprintf("build failed: %v", errs[0]) + } + + var builder strings.Builder + builder.WriteString(fmt.Sprintf("build failed with %d errors:\n", len(errs))) + for idx, err := range errs { + builder.WriteString(fmt.Sprintf(" %d. %v", idx+1, err)) + if idx < len(errs)-1 { + builder.WriteByte('\n') + } + } + + return builder.String() +} + +// Unwrap exposes the first aggregated error so standard helpers can unwrap the +// chain while preserving compatibility with errors.Is and errors.As. 
+func (e *BuildError) Unwrap() error { + if e == nil { + return nil + } + + errs := e.nonNilErrors() + if len(errs) == 0 { + return nil + } + + return errs[0] +} + +// Is allows errors.Is to match against any aggregated error. +func (e *BuildError) Is(target error) bool { + if e == nil { + return false + } + + for _, err := range e.nonNilErrors() { + if errors.Is(err, target) { + return true + } + } + + return false +} + +// As allows errors.As to project any aggregated error into the provided +// target. +func (e *BuildError) As(target any) bool { + if e == nil { + return false + } + + for _, err := range e.nonNilErrors() { + if errors.As(err, target) { + return true + } + } + + return false +} + +func (e *BuildError) nonNilErrors() []error { + if e == nil { + return nil + } + + filtered := make([]error, 0, len(e.Errors)) + for _, err := range e.Errors { + if err != nil { + filtered = append(filtered, err) + } + } + + return filtered +} diff --git a/sdk/internal/errors/build_error_test.go b/sdk/internal/errors/build_error_test.go new file mode 100644 index 00000000..080107e8 --- /dev/null +++ b/sdk/internal/errors/build_error_test.go @@ -0,0 +1,194 @@ +package errors + +import ( + stderrors "errors" + "fmt" + "testing" +) + +type typedError struct { + code string +} + +func (e *typedError) Error() string { + return fmt.Sprintf("error code: %s", e.code) +} + +func TestBuildErrorErrorSingle(t *testing.T) { + target := stderrors.New("missing workflow id") + buildErr := &BuildError{Errors: []error{target}} + + got := buildErr.Error() + want := "build failed: missing workflow id" + + if got != want { + t.Fatalf("unexpected error message\nwant: %q\n got: %q", want, got) + } +} + +func TestBuildErrorErrorMultiple(t *testing.T) { + buildErr := &BuildError{Errors: []error{ + fmt.Errorf("missing workflow id"), + fmt.Errorf("agent must define at least one action"), + fmt.Errorf("runtime is required"), + }} + + got := buildErr.Error() + want := "build failed with 3 errors:\n 
1. missing workflow id\n 2. agent must define at least one action\n 3. runtime is required" + + if got != want { + t.Fatalf("unexpected error message\nwant: %q\n got: %q", want, got) + } +} + +func TestBuildErrorErrorEmpty(t *testing.T) { + t.Run("nil receiver", func(t *testing.T) { + var buildErr *BuildError + + if msg := buildErr.Error(); msg != "build failed" { + t.Fatalf("expected default message, got %q", msg) + } + }) + + t.Run("empty slice", func(t *testing.T) { + buildErr := &BuildError{} + + if msg := buildErr.Error(); msg != "build failed" { + t.Fatalf("expected default message, got %q", msg) + } + }) +} + +func TestBuildErrorUnwrapReturnsFirstError(t *testing.T) { + first := fmt.Errorf("first failure") + second := fmt.Errorf("second failure") + buildErr := &BuildError{Errors: []error{first, second}} + + unwrapped := stderrors.Unwrap(buildErr) + if unwrapped != first { + t.Fatalf("expected first error, got %#v", unwrapped) + } +} + +func TestBuildErrorUnwrapNilCases(t *testing.T) { + t.Run("nil receiver", func(t *testing.T) { + var buildErr *BuildError + if stderrors.Unwrap(buildErr) != nil { + t.Fatal("expected unwrap of nil receiver to return nil") + } + }) + + t.Run("empty", func(t *testing.T) { + buildErr := &BuildError{} + if stderrors.Unwrap(buildErr) != nil { + t.Fatal("expected unwrap of empty BuildError to return nil") + } + }) +} + +func TestBuildErrorErrorsIs(t *testing.T) { + sentinel := stderrors.New("sentinel") + wrapped := fmt.Errorf("wrap: %w", sentinel) + other := stderrors.New("other") + buildErr := &BuildError{Errors: []error{wrapped, other}} + + if !stderrors.Is(buildErr, sentinel) { + t.Fatal("expected errors.Is to match sentinel error") + } + + if !stderrors.Is(buildErr, other) { + t.Fatal("expected errors.Is to match other error") + } +} + +func TestBuildErrorErrorsIsNilReceiver(t *testing.T) { + var buildErr *BuildError + if stderrors.Is(buildErr, fmt.Errorf("anything")) { + t.Fatal("expected nil BuildError to not match target") + 
} +} + +func TestBuildErrorErrorsIsNoMatch(t *testing.T) { + buildErr := &BuildError{Errors: []error{nil}} + if stderrors.Is(buildErr, fmt.Errorf("missing")) { + t.Fatal("expected errors.Is to return false when no matches exist") + } +} + +func TestBuildErrorErrorsAs(t *testing.T) { + custom := &typedError{code: "INVALID_AGENT"} + wrapped := fmt.Errorf("wrap: %w", custom) + buildErr := &BuildError{Errors: []error{wrapped}} + + var target *typedError + if !stderrors.As(buildErr, &target) { + t.Fatal("expected errors.As to project custom error") + } + + if target != custom { + t.Fatalf("expected target to be %v, got %v", custom, target) + } +} + +func TestBuildErrorErrorsAsNilReceiver(t *testing.T) { + var buildErr *BuildError + var target *typedError + if stderrors.As(buildErr, &target) { + t.Fatal("expected errors.As to return false for nil BuildError") + } +} + +func TestBuildErrorErrorsAsNoMatch(t *testing.T) { + buildErr := &BuildError{Errors: []error{fmt.Errorf("wrapped")}} + var target *typedError + if stderrors.As(buildErr, &target) { + t.Fatal("expected errors.As to return false when no target matches") + } +} + +func TestBuildErrorNonNilErrors(t *testing.T) { + t.Run("nil receiver", func(t *testing.T) { + var buildErr *BuildError + if buildErr.nonNilErrors() != nil { + t.Fatal("expected nil receiver to return nil slice") + } + }) + + t.Run("filters nils", func(t *testing.T) { + first := fmt.Errorf("first") + buildErr := &BuildError{Errors: []error{nil, first, nil}} + filtered := buildErr.nonNilErrors() + if len(filtered) != 1 { + t.Fatalf("expected one error, got %d", len(filtered)) + } + if filtered[0] != first { + t.Fatalf("expected first error, got %v", filtered[0]) + } + }) +} + +func TestBuildErrorMessageClarity(t *testing.T) { + buildErr := &BuildError{Errors: []error{nil, fmt.Errorf("workflow identifier missing")}} + + msg := buildErr.Error() + if msg != "build failed: workflow identifier missing" { + t.Fatalf("unexpected message: %q", msg) + } + + 
if stderrors.Is(buildErr, nil) { + t.Fatal("errors.Is should not match nil") + } +} + +func ExampleBuildError() { + buildErr := &BuildError{Errors: []error{ + fmt.Errorf("workflow id is required"), + fmt.Errorf("agent must include at least one action"), + }} + + fmt.Println(buildErr.Error()) + // Output: + // build failed with 2 errors: + // 1. workflow id is required + // 2. agent must include at least one action +} diff --git a/sdk/internal/errors/doc.go b/sdk/internal/errors/doc.go new file mode 100644 index 00000000..e83e82d2 --- /dev/null +++ b/sdk/internal/errors/doc.go @@ -0,0 +1,3 @@ +// Package errors implements internal error aggregation helpers that back the +// public SDK builder contracts while remaining hidden from consumers. +package errors diff --git a/sdk/internal/sdkcodegen/cmd/sdkcodegen/main.go b/sdk/internal/sdkcodegen/cmd/sdkcodegen/main.go new file mode 100644 index 00000000..83e7e27c --- /dev/null +++ b/sdk/internal/sdkcodegen/cmd/sdkcodegen/main.go @@ -0,0 +1,34 @@ +package main + +import ( + "context" + "flag" + "fmt" + "log" + "os" + "os/signal" + "path/filepath" + "strings" + "syscall" + + "github.com/compozy/compozy/sdk/v2/internal/sdkcodegen" +) + +func main() { + outDir := flag.String("out", "", "output directory for generated files") + flag.Parse() + if strings.TrimSpace(*outDir) == "" { + log.Fatal("sdkcodegen: -out directory is required") + } + absolute, err := filepath.Abs(*outDir) + if err != nil { + log.Fatalf("sdkcodegen: resolve output path: %v", err) + } + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + if err := sdkcodegen.Generate(ctx, absolute); err != nil { + cancel() + log.Fatalf("sdkcodegen: %v", err) + } + cancel() + fmt.Printf("sdkcodegen: generated files in %s\n", absolute) +} diff --git a/sdk/internal/sdkcodegen/execution_generator.go b/sdk/internal/sdkcodegen/execution_generator.go new file mode 100644 index 00000000..453c0521 --- /dev/null +++ 
b/sdk/internal/sdkcodegen/execution_generator.go @@ -0,0 +1,528 @@ +package sdkcodegen + +import ( + "fmt" + "strings" + + "github.com/dave/jennifer/jen" +) + +func buildExecutionFile() *jen.File { + f := jen.NewFile(packageName) + addExecutionImports(f) + emitExecutionFunctions(f) + addExecutionHelpers(f) + return f +} + +func addExecutionImports(f *jen.File) { + f.ImportName("context", "context") + f.ImportName("fmt", "fmt") + f.ImportName("strconv", "strconv") + f.ImportName("strings", "strings") + f.ImportName("time", "time") + f.ImportAlias("github.com/compozy/compozy/engine/core", "core") + f.ImportAlias("github.com/compozy/compozy/sdk/v2/client", "client") +} + +type execSpec struct { + Resource string + IDParam string + BuildRequestFunc string + BuildSyncFunc string + ClientExecute string + ClientSync string + ClientStream string +} + +func emitExecutionFunctions(f *jen.File) { + specs := []execSpec{ + { + Resource: "Workflow", + IDParam: "workflowID", + BuildRequestFunc: "buildWorkflowExecuteRequest", + BuildSyncFunc: "buildWorkflowSyncRequest", + ClientExecute: "ExecuteWorkflow", + ClientSync: "ExecuteWorkflowSync", + ClientStream: "ExecuteWorkflowStream", + }, + { + Resource: "Task", + IDParam: "taskID", + BuildRequestFunc: "buildTaskExecuteRequest", + BuildSyncFunc: "buildTaskSyncRequest", + ClientExecute: "ExecuteTask", + ClientSync: "ExecuteTaskSync", + ClientStream: "ExecuteTaskStream", + }, + { + Resource: "Agent", + IDParam: "agentID", + BuildRequestFunc: "buildAgentExecuteRequest", + BuildSyncFunc: "buildAgentSyncRequest", + ClientExecute: "ExecuteAgent", + ClientSync: "ExecuteAgentSync", + ClientStream: "ExecuteAgentStream", + }, + } + for i := range specs { + spec := &specs[i] + addExecuteFunction(f, spec) + addExecuteSyncFunction(f, spec) + addExecuteStreamFunction(f, spec) + } +} + +func addExecuteFunction(f *jen.File, spec *execSpec) { + f.Comment( + fmt.Sprintf( + "Execute%s triggers asynchronous %s execution via the client.", + 
spec.Resource, + strings.ToLower(spec.Resource), + ), + ) + f.Func(). + Params(jen.Id("e").Op("*").Id("Engine")). + Id(fmt.Sprintf("Execute%s", spec.Resource)). + Params( + jen.Id("ctx").Qual("context", "Context"), + jen.Id(spec.IDParam).String(), + jen.Id("req").Op("*").Id("ExecuteRequest"), + ). + Params( + jen.Op("*").Id("ExecuteResponse"), + jen.Error(), + ). + BlockFunc(func(g *jen.Group) { + g.List(jen.Id("cli"), jen.Err()).Op(":=").Id("ensureClient").Call(jen.Id("e")) + g.If(jen.Err().Op("!=").Nil()).Block( + jen.Return(jen.Nil(), jen.Err()), + ) + g.Id("payload").Op(":=").Id(spec.BuildRequestFunc).Call(jen.Id("req")) + g.List(jen.Id("resp"), jen.Err()).Op(":=").Id("cli").Dot(spec.ClientExecute).Call( + jen.Id("ctx"), + jen.Id(spec.IDParam), + jen.Id("payload"), + ) + g.If(jen.Err().Op("!=").Nil()).Block( + jen.Return(jen.Nil(), jen.Err()), + ) + g.Return( + jen.Id("newExecuteResponse").Call( + jen.Id("resp").Dot("ExecID"), + jen.Id("resp").Dot("ExecURL"), + ), + jen.Nil(), + ) + }) +} + +func addExecuteSyncFunction(f *jen.File, spec *execSpec) { + f.Comment( + fmt.Sprintf( + "Execute%sSync performs synchronous %s execution and returns the result.", + spec.Resource, + strings.ToLower(spec.Resource), + ), + ) + f.Func(). + Params(jen.Id("e").Op("*").Id("Engine")). + Id(fmt.Sprintf("Execute%sSync", spec.Resource)). + Params( + jen.Id("ctx").Qual("context", "Context"), + jen.Id(spec.IDParam).String(), + jen.Id("req").Op("*").Id("ExecuteSyncRequest"), + ). + Params( + jen.Op("*").Id("ExecuteSyncResponse"), + jen.Error(), + ). 
+ BlockFunc(func(g *jen.Group) { + g.List(jen.Id("cli"), jen.Err()).Op(":=").Id("ensureClient").Call(jen.Id("e")) + g.If(jen.Err().Op("!=").Nil()).Block( + jen.Return(jen.Nil(), jen.Err()), + ) + g.Id("payload").Op(":=").Id(spec.BuildSyncFunc).Call(jen.Id("req")) + g.List(jen.Id("resp"), jen.Err()).Op(":=").Id("cli").Dot(spec.ClientSync).Call( + jen.Id("ctx"), + jen.Id(spec.IDParam), + jen.Id("payload"), + ) + g.If(jen.Err().Op("!=").Nil()).Block( + jen.Return(jen.Nil(), jen.Err()), + ) + g.Return( + jen.Id("buildSyncResponse").Call( + jen.Id("resp").Dot("ExecID"), + jen.Id("resp").Dot("Output"), + ), + jen.Nil(), + ) + }) +} + +func addExecuteStreamFunction(f *jen.File, spec *execSpec) { + f.Comment( + fmt.Sprintf( + "Execute%sStream starts %s execution and returns a stream session.", + spec.Resource, + strings.ToLower(spec.Resource), + ), + ) + f.Func(). + Params(jen.Id("e").Op("*").Id("Engine")). + Id(fmt.Sprintf("Execute%sStream", spec.Resource)). + Params( + jen.Id("ctx").Qual("context", "Context"), + jen.Id(spec.IDParam).String(), + jen.Id("req").Op("*").Id("ExecuteRequest"), + jen.Id("opts").Op("*").Qual("github.com/compozy/compozy/sdk/v2/client", "StreamOptions"), + ). + Params( + jen.Op("*").Qual("github.com/compozy/compozy/sdk/v2/client", "StreamSession"), + jen.Error(), + ). 
+ BlockFunc(func(g *jen.Group) { + g.List(jen.Id("cli"), jen.Err()).Op(":=").Id("ensureClient").Call(jen.Id("e")) + g.If(jen.Err().Op("!=").Nil()).Block( + jen.Return(jen.Nil(), jen.Err()), + ) + g.Id("payload").Op(":=").Id(spec.BuildRequestFunc).Call(jen.Id("req")) + g.Return( + jen.Id("cli").Dot(spec.ClientStream).Call( + jen.Id("ctx"), + jen.Id(spec.IDParam), + jen.Id("payload"), + jen.Id("opts"), + ), + ) + }) +} + +func addExecutionHelpers(f *jen.File) { + addEnsureClientHelper(f) + addNewExecuteResponseHelper(f) + addBuildSyncResponseHelper(f) + addCopyInputHelper(f) + addCopyOutputHelper(f) + addStringFromOptionsHelper(f) + addIntFromOptionsHelper(f) + addDurationSecondsHelper(f) + addWorkflowRequestHelpers(f) + addTaskRequestHelpers(f) + addAgentRequestHelpers(f) +} + +func addEnsureClientHelper(f *jen.File) { + f.Func(). + Id("ensureClient"). + Params(jen.Id("e").Op("*").Id("Engine")). + Params( + jen.Op("*").Qual("github.com/compozy/compozy/sdk/v2/client", "Client"), + jen.Error(), + ). + Block( + jen.If(jen.Id("e").Op("==").Nil()).Block( + jen.Return(jen.Nil(), jen.Qual("fmt", "Errorf").Call(jen.Lit("engine is nil"))), + ), + jen.If(jen.Id("e").Dot("client").Op("==").Nil()).Block( + jen.Return(jen.Nil(), jen.Qual("fmt", "Errorf").Call(jen.Lit("engine client is not initialized"))), + ), + jen.Return(jen.Id("e").Dot("client"), jen.Nil()), + ) +} + +func addNewExecuteResponseHelper(f *jen.File) { + f.Func(). + Id("newExecuteResponse"). + Params( + jen.Id("execID").String(), + jen.Id("execURL").String(), + ). + Op("*").Id("ExecuteResponse"). + Block( + jen.Return(jen.Op("&").Id("ExecuteResponse").Values(jen.Dict{ + jen.Id("ExecID"): jen.Id("execID"), + jen.Id("ExecURL"): jen.Id("execURL"), + })), + ) +} + +func addBuildSyncResponseHelper(f *jen.File) { + f.Func(). + Id("buildSyncResponse"). + Params( + jen.Id("execID").String(), + jen.Id("output").Op("*").Qual("github.com/compozy/compozy/engine/core", "Output"), + ). + Op("*").Id("ExecuteSyncResponse"). 
+ Block( + jen.Return(jen.Op("&").Id("ExecuteSyncResponse").Values(jen.Dict{ + jen.Id("ExecID"): jen.Id("execID"), + jen.Id("Output"): jen.Id("copyOutput").Call(jen.Id("output")), + })), + ) +} + +func addCopyInputHelper(f *jen.File) { + f.Func(). + Id("copyInput"). + Params(jen.Id("values").Map(jen.String()).Any()). + Qual("github.com/compozy/compozy/engine/core", "Input"). + Block( + jen.If(jen.Len(jen.Id("values")).Op("==").Lit(0)).Block( + jen.Return(jen.Nil()), + ), + jen.Id("cloned").Op(":=").Qual("github.com/compozy/compozy/engine/core", "CopyMaps").Call(jen.Id("values")), + jen.If(jen.Len(jen.Id("cloned")).Op("==").Lit(0)).Block( + jen.Return(jen.Nil()), + ), + jen.Return(jen.Qual("github.com/compozy/compozy/engine/core", "Input").Call(jen.Id("cloned"))), + ) +} + +func addCopyOutputHelper(f *jen.File) { + f.Func(). + Id("copyOutput"). + Params(jen.Id("output").Op("*").Qual("github.com/compozy/compozy/engine/core", "Output")). + Map(jen.String()).Any(). + Block( + jen.If(jen.Id("output").Op("==").Nil()).Block( + jen.Return(jen.Nil()), + ), + jen.Return(jen.Id("output").Dot("AsMap").Call()), + ) +} + +func addStringFromOptionsHelper(f *jen.File) { + f.Func(). + Id("stringFromOptions"). + Params( + jen.Id("options").Map(jen.String()).Any(), + jen.Id("key").String(), + ). + String(). 
+ BlockFunc(func(g *jen.Group) { + g.If(jen.Id("options").Op("==").Nil()).Block(jen.Return(jen.Lit(""))) + g.Id("raw").Op(",").Id("ok").Op(":=").Id("options").Index(jen.Id("key")) + g.If(jen.Op("!").Id("ok")).Block(jen.Return(jen.Lit(""))) + g.Id("str").Op(",").Id("isString").Op(":=").Id("raw").Assert(jen.String()) + g.If(jen.Id("isString")).Block( + jen.Return(jen.Qual("strings", "TrimSpace").Call(jen.Id("str"))), + ) + g.Id("stringer").Op(",").Id("isStringer").Op(":=").Id("raw").Assert(jen.Qual("fmt", "Stringer")) + g.If(jen.Id("isStringer")).Block( + jen.Return(jen.Qual("strings", "TrimSpace").Call(jen.Id("stringer").Dot("String").Call())), + ) + g.Return(jen.Lit("")) + }) +} + +func addIntFromOptionsHelper(f *jen.File) { + f.Func(). + Id("intFromOptions"). + Params( + jen.Id("options").Map(jen.String()).Any(), + jen.Id("key").String(), + ). + Op("*").Int(). + BlockFunc(func(g *jen.Group) { + g.If(jen.Id("options").Op("==").Nil()).Block(jen.Return(jen.Nil())) + g.Id("raw").Op(",").Id("ok").Op(":=").Id("options").Index(jen.Id("key")) + g.If(jen.Op("!").Id("ok")).Block(jen.Return(jen.Nil())) + g.Var().Id("value").Int() + g.Id("intValue").Op(",").Id("isInt").Op(":=").Id("raw").Assert(jen.Int()) + g.If(jen.Id("isInt")).Block( + jen.Id("value").Op("=").Id("intValue"), + ).Else().BlockFunc(func(b *jen.Group) { + b.Id("strValue").Op(",").Id("isString").Op(":=").Id("raw").Assert(jen.String()) + b.If(jen.Op("!").Id("isString")).Block(jen.Return(jen.Nil())) + b.Id("parsed"). + Op(","). + Id("err"). + Op(":="). + Qual("strconv", "Atoi"). + Call(jen.Qual("strings", "TrimSpace").Call(jen.Id("strValue"))) + b.If(jen.Err().Op("!=").Nil()).Block(jen.Return(jen.Nil())) + b.Id("value").Op("=").Id("parsed") + }) + g.If(jen.Id("value").Op("<=").Lit(0)).Block(jen.Return(jen.Nil())) + g.Return(jen.Op("&").Id("value")) + }) +} + +func addDurationSecondsHelper(f *jen.File) { + f.Func(). + Id("durationSeconds"). + Params(jen.Id("value").Op("*").Qual("time", "Duration")). 
+ Op("*").Int(). + Block( + jen.If(jen.Id("value").Op("==").Nil()).Block(jen.Return(jen.Nil())), + jen.Id("secs").Op(":=").Id("int").Call(jen.Id("value").Dot("Seconds").Call()), + jen.If(jen.Id("secs").Op("<=").Lit(0)).Block(jen.Return(jen.Nil())), + jen.Return(jen.Op("&").Id("secs")), + ) +} + +func addWorkflowRequestHelpers(f *jen.File) { + f.Func(). + Id("buildWorkflowExecuteRequest"). + Params(jen.Id("req").Op("*").Id("ExecuteRequest")). + Op("*").Qual("github.com/compozy/compozy/sdk/v2/client", "WorkflowExecuteRequest"). + BlockFunc(func(g *jen.Group) { + g.Id("payload"). + Op(":="). + Op("&"). + Qual("github.com/compozy/compozy/sdk/v2/client", "WorkflowExecuteRequest"). + Values() + g.If(jen.Id("req").Op("==").Nil()).Block(jen.Return(jen.Id("payload"))) + g.Id("inputCopy").Op(":=").Id("copyInput").Call(jen.Id("req").Dot("Input")) + g.If(jen.Id("inputCopy").Op("!=").Nil()).Block( + jen.Id("payload").Dot("Input").Op("=").Id("inputCopy"), + ) + g.Id("task").Op(":=").Id("stringFromOptions").Call(jen.Id("req").Dot("Options"), jen.Lit("task_id")) + g.If(jen.Id("task").Op("!=").Lit("")).Block( + jen.Id("payload").Dot("TaskID").Op("=").Id("task"), + ) + g.Return(jen.Id("payload")) + }) + + f.Func(). + Id("buildWorkflowSyncRequest"). + Params(jen.Id("req").Op("*").Id("ExecuteSyncRequest")). + Op("*").Qual("github.com/compozy/compozy/sdk/v2/client", "WorkflowSyncRequest"). + BlockFunc(func(g *jen.Group) { + g.Id("payload"). + Op(":="). + Op("&"). + Qual("github.com/compozy/compozy/sdk/v2/client", "WorkflowSyncRequest"). 
+ Values() + g.If(jen.Id("req").Op("==").Nil()).Block(jen.Return(jen.Id("payload"))) + g.Id("inputCopy").Op(":=").Id("copyInput").Call(jen.Id("req").Dot("Input")) + g.If(jen.Id("inputCopy").Op("!=").Nil()).Block( + jen.Id("payload").Dot("Input").Op("=").Id("inputCopy"), + ) + g.Id("secs").Op(":=").Id("durationSeconds").Call(jen.Id("req").Dot("Timeout")) + g.If(jen.Id("secs").Op("!=").Nil()).Block( + jen.Id("payload").Dot("Timeout").Op("=").Op("*").Id("secs"), + ) + g.Id("task").Op(":=").Id("stringFromOptions").Call(jen.Id("req").Dot("Options"), jen.Lit("task_id")) + g.If(jen.Id("task").Op("!=").Lit("")).Block( + jen.Id("payload").Dot("TaskID").Op("=").Id("task"), + ) + g.Return(jen.Id("payload")) + }) +} + +func addTaskRequestHelpers(f *jen.File) { + f.Func(). + Id("buildTaskExecuteRequest"). + Params(jen.Id("req").Op("*").Id("ExecuteRequest")). + Op("*").Qual("github.com/compozy/compozy/sdk/v2/client", "TaskExecuteRequest"). + BlockFunc(func(g *jen.Group) { + g.Id("payload"). + Op(":="). + Op("&"). + Qual("github.com/compozy/compozy/sdk/v2/client", "TaskExecuteRequest"). + Values() + g.If(jen.Id("req").Op("==").Nil()).Block(jen.Return(jen.Id("payload"))) + g.Id("payload").Dot("With").Op("=").Id("copyInput").Call(jen.Id("req").Dot("Input")) + g.Id("timeoutOpt").Op(":=").Id("intFromOptions").Call(jen.Id("req").Dot("Options"), jen.Lit("timeout")) + g.If(jen.Id("timeoutOpt").Op("!=").Nil()).Block( + jen.Id("payload").Dot("Timeout").Op("=").Id("timeoutOpt"), + ) + g.Return(jen.Id("payload")) + }) + + f.Func(). + Id("buildTaskSyncRequest"). + Params(jen.Id("req").Op("*").Id("ExecuteSyncRequest")). + Op("*").Qual("github.com/compozy/compozy/sdk/v2/client", "TaskExecuteRequest"). + BlockFunc(func(g *jen.Group) { + g.Id("payload"). + Op(":="). + Op("&"). + Qual("github.com/compozy/compozy/sdk/v2/client", "TaskExecuteRequest"). 
+ Values() + g.If(jen.Id("req").Op("==").Nil()).Block(jen.Return(jen.Id("payload"))) + g.Id("payload").Dot("With").Op("=").Id("copyInput").Call(jen.Id("req").Dot("Input")) + g.Id("secs").Op(":=").Id("durationSeconds").Call(jen.Id("req").Dot("Timeout")) + g.If(jen.Id("secs").Op("!=").Nil()).Block( + jen.Id("payload").Dot("Timeout").Op("=").Id("secs"), + jen.Return(jen.Id("payload")), + ) + g.Id("timeoutOpt").Op(":=").Id("intFromOptions").Call(jen.Id("req").Dot("Options"), jen.Lit("timeout")) + g.If(jen.Id("timeoutOpt").Op("!=").Nil()).Block( + jen.Id("payload").Dot("Timeout").Op("=").Id("timeoutOpt"), + ) + g.Return(jen.Id("payload")) + }) +} + +func addAgentRequestHelpers(f *jen.File) { + addAgentExecuteRequestBuilder(f) + addAgentSyncRequestBuilder(f) +} + +func addAgentExecuteRequestBuilder(f *jen.File) { + f.Func(). + Id("buildAgentExecuteRequest"). + Params(jen.Id("req").Op("*").Id("ExecuteRequest")). + Op("*").Qual("github.com/compozy/compozy/sdk/v2/client", "AgentExecuteRequest"). + BlockFunc(func(g *jen.Group) { + g.Id("payload"). + Op(":="). + Op("&"). + Qual("github.com/compozy/compozy/sdk/v2/client", "AgentExecuteRequest"). 
+ Values() + g.If(jen.Id("req").Op("==").Nil()).Block(jen.Return(jen.Id("payload"))) + g.Id("payload").Dot("With").Op("=").Id("copyInput").Call(jen.Id("req").Dot("Input")) + g.Id("action").Op(":=").Id("stringFromOptions").Call(jen.Id("req").Dot("Options"), jen.Lit("action")) + g.If(jen.Id("action").Op("!=").Lit("")).Block( + jen.Id("payload").Dot("Action").Op("=").Id("action"), + ) + g.Id("prompt").Op(":=").Id("stringFromOptions").Call(jen.Id("req").Dot("Options"), jen.Lit("prompt")) + g.If(jen.Id("prompt").Op("!=").Lit("")).Block( + jen.Id("payload").Dot("Prompt").Op("=").Id("prompt"), + ) + g.Id("timeoutOpt").Op(":=").Id("intFromOptions").Call(jen.Id("req").Dot("Options"), jen.Lit("timeout")) + g.If(jen.Id("timeoutOpt").Op("!=").Nil()).Block( + jen.Id("payload").Dot("Timeout").Op("=").Op("*").Id("timeoutOpt"), + ) + g.Return(jen.Id("payload")) + }) +} + +func addAgentSyncRequestBuilder(f *jen.File) { + f.Func(). + Id("buildAgentSyncRequest"). + Params(jen.Id("req").Op("*").Id("ExecuteSyncRequest")). + Op("*").Qual("github.com/compozy/compozy/sdk/v2/client", "AgentExecuteRequest"). + BlockFunc(func(g *jen.Group) { + g.Id("payload"). + Op(":="). + Op("&"). + Qual("github.com/compozy/compozy/sdk/v2/client", "AgentExecuteRequest"). 
+ Values() + g.If(jen.Id("req").Op("==").Nil()).Block(jen.Return(jen.Id("payload"))) + g.Id("payload").Dot("With").Op("=").Id("copyInput").Call(jen.Id("req").Dot("Input")) + g.Id("action").Op(":=").Id("stringFromOptions").Call(jen.Id("req").Dot("Options"), jen.Lit("action")) + g.If(jen.Id("action").Op("!=").Lit("")).Block( + jen.Id("payload").Dot("Action").Op("=").Id("action"), + ) + g.Id("prompt").Op(":=").Id("stringFromOptions").Call(jen.Id("req").Dot("Options"), jen.Lit("prompt")) + g.If(jen.Id("prompt").Op("!=").Lit("")).Block( + jen.Id("payload").Dot("Prompt").Op("=").Id("prompt"), + ) + g.Id("secs").Op(":=").Id("durationSeconds").Call(jen.Id("req").Dot("Timeout")) + g.If(jen.Id("secs").Op("!=").Nil()).Block( + jen.Id("payload").Dot("Timeout").Op("=").Op("*").Id("secs"), + jen.Return(jen.Id("payload")), + ) + g.Id("timeoutOpt").Op(":=").Id("intFromOptions").Call(jen.Id("req").Dot("Options"), jen.Lit("timeout")) + g.If(jen.Id("timeoutOpt").Op("!=").Nil()).Block( + jen.Id("payload").Dot("Timeout").Op("=").Op("*").Id("timeoutOpt"), + ) + g.Return(jen.Id("payload")) + }) +} diff --git a/sdk/internal/sdkcodegen/generator.go b/sdk/internal/sdkcodegen/generator.go new file mode 100644 index 00000000..b61af2de --- /dev/null +++ b/sdk/internal/sdkcodegen/generator.go @@ -0,0 +1,92 @@ +package sdkcodegen + +import ( + "bytes" + "context" + "fmt" + "go/format" + "os" + "path/filepath" + "strings" + + "github.com/dave/jennifer/jen" +) + +const ( + packageName = "compozy" + generatedPreamble = "// Code generated by compozygen. DO NOT EDIT.\n" +) + +// Generate produces every Compozy SDK helper file under dstDir. 
+func Generate(ctx context.Context, dstDir string) error { + if ctx == nil { + return fmt.Errorf("context is required") + } + if err := ensureContext(ctx); err != nil { + return err + } + if strings.TrimSpace(dstDir) == "" { + return fmt.Errorf("destination directory is required") + } + files := []struct { + Name string + File *jen.File + Err error + }{ + { + Name: "options_generated.go", + File: buildOptionsFile(), + }, + { + Name: "engine_registration.go", + File: buildRegistrationFile(), + }, + { + Name: "engine_loading.go", + File: buildLoadingFile(), + }, + { + Name: "engine_execution.go", + File: buildExecutionFile(), + }, + } + for _, item := range files { + if err := ensureContext(ctx); err != nil { + return err + } + if item.File == nil { + return fmt.Errorf("failed to build %s", item.Name) + } + if err := writeFormattedFile(filepath.Join(dstDir, item.Name), item.File); err != nil { + return err + } + } + return nil +} + +func ensureContext(ctx context.Context) error { + select { + case <-ctx.Done(): + return fmt.Errorf("generation canceled: %w", ctx.Err()) + default: + return nil + } +} + +func writeFormattedFile(path string, file *jen.File) error { + var buf bytes.Buffer + if _, err := buf.WriteString(generatedPreamble); err != nil { + return fmt.Errorf("write header: %w", err) + } + if err := file.Render(&buf); err != nil { + return fmt.Errorf("render jennifer file: %w", err) + } + formatted, err := format.Source(buf.Bytes()) + if err != nil { + return fmt.Errorf("format generated source: %w", err) + } + if err := os.WriteFile(path, formatted, 0o600); err != nil { + return fmt.Errorf("write %s: %w", path, err) + } + return nil +} diff --git a/sdk/internal/sdkcodegen/loading_generator.go b/sdk/internal/sdkcodegen/loading_generator.go new file mode 100644 index 00000000..5078dc7c --- /dev/null +++ b/sdk/internal/sdkcodegen/loading_generator.go @@ -0,0 +1,117 @@ +package sdkcodegen + +import ( + "fmt" + "strings" + + "github.com/dave/jennifer/jen" +) + 
+func buildLoadingFile() *jen.File { + f := jen.NewFile(packageName) + addLoadingImports(f) + for i := range ResourceSpecs { + spec := &ResourceSpecs[i] + f.ImportAlias(spec.PackagePath, spec.ImportAlias) + declareLoadFunctions(f, spec) + } + return f +} + +func addLoadingImports(f *jen.File) { + f.ImportName("context", "context") + f.ImportName("fmt", "fmt") + f.ImportName("os", "os") + f.ImportAlias("path/filepath", "filepath") + f.ImportName("sort", "sort") + f.ImportName("strings", "strings") +} + +func declareLoadFunctions(f *jen.File, spec *ResourceSpec) { + addLoadFunction(f, spec) + addLoadDirFunction(f, spec) +} + +func addLoadFunction(f *jen.File, spec *ResourceSpec) { + f.Comment(fmt.Sprintf("Load%s loads a %s configuration from disk.", spec.Name, loadingSubject(spec.Name))) + f.Func(). + Params(jen.Id("e").Op("*").Id("Engine")). + Id(fmt.Sprintf("Load%s", spec.Name)). + Params(jen.Id("ctx").Qual("context", "Context"), jen.Id("path").String()). + Error(). + Block( + jen.If(jen.Id("e").Op("==").Nil()).Block( + jen.Return(jen.Qual("fmt", "Errorf").Call(jen.Lit("engine is nil"))), + ), + jen.List(jen.Id("cfg"), jen.Err()). + Op(":="). + Id("loadYAML"). + Types(jen.Op("*").Qual(spec.PackagePath, spec.TypeName)). 
+ Call(jen.Id("ctx"), jen.Id("e"), jen.Id("path")), + jen.If(jen.Err().Op("!=").Nil()).Block( + jen.Return(jen.Qual("fmt", "Errorf").Call( + jen.Lit(fmt.Sprintf("load %s config: %%w", strings.ToLower(spec.Name))), + jen.Err(), + )), + ), + jen.If(jen.Id("ctx").Dot("Err").Call().Op("!=").Nil()).Block( + jen.Return(jen.Qual("fmt", "Errorf").Call( + jen.Lit(fmt.Sprintf("load %s config: %%w", strings.ToLower(spec.Name))), + jen.Id("ctx").Dot("Err").Call(), + )), + ), + jen.Return(jen.Id("e").Dot(fmt.Sprintf("Register%s", spec.Name)).Call(jen.Id("cfg"))), + ) +} + +func addLoadDirFunction(f *jen.File, spec *ResourceSpec) { + values := make([]jen.Code, 0, len(spec.FileExtensions)) + for _, ext := range spec.FileExtensions { + values = append(values, jen.Lit(ext)) + } + + f.Comment( + fmt.Sprintf( + "Load%sFromDir loads %s configurations from a directory.", + spec.PluralName, + pluralSubject(spec.Name), + ), + ) + f.Func(). + Params(jen.Id("e").Op("*").Id("Engine")). + Id(fmt.Sprintf("Load%sFromDir", spec.PluralName)). + Params(jen.Id("ctx").Qual("context", "Context"), jen.Id("dir").String()). + Error(). + Block( + jen.If(jen.Id("e").Op("==").Nil()).Block( + jen.Return(jen.Qual("fmt", "Errorf").Call(jen.Lit("engine is nil"))), + ), + jen.Return( + jen.Id("e").Dot("loadFromDir").Call( + jen.Id("ctx"), + jen.Id("dir"), + jen.Index().String().Values(values...), + jen.Func(). + Params( + jen.Id("loaderCtx").Qual("context", "Context"), + jen.Id("path").String(), + ). + Error(). 
+ Block( + jen.Return(jen.Id("e").Dot(fmt.Sprintf("Load%s", spec.Name)).Call(jen.Id("loaderCtx"), jen.Id("path"))), + ), + ), + ), + ) +} + +func loadingSubject(name string) string { + if strings.Contains(strings.ToLower(name), "knowledge") { + return "knowledge base" + } + return strings.ToLower(name) +} + +func pluralSubject(name string) string { + return strings.ToLower(name) + "s" +} diff --git a/sdk/internal/sdkcodegen/options_generator.go b/sdk/internal/sdkcodegen/options_generator.go new file mode 100644 index 00000000..796c60c3 --- /dev/null +++ b/sdk/internal/sdkcodegen/options_generator.go @@ -0,0 +1,54 @@ +package sdkcodegen + +import ( + "fmt" + "strings" + + "github.com/dave/jennifer/jen" +) + +func buildOptionsFile() *jen.File { + f := jen.NewFile(packageName) + for i := range ResourceSpecs { + spec := &ResourceSpecs[i] + f.ImportAlias(spec.PackagePath, spec.ImportAlias) + } + for i := range ResourceSpecs { + spec := &ResourceSpecs[i] + f.Comment(optionComment(spec)) + f.Func(). + Id(fmt.Sprintf("With%s", spec.Name)). + Params(jen.Id("cfg").Op("*").Qual(spec.PackagePath, spec.TypeName)). + Id("Option"). + Block( + jen.Return( + jen.Func(). + Params(jen.Id("c").Op("*").Id("config")). + BlockFunc(func(g *jen.Group) { + g.If( + jen.Id("c").Op("==").Nil().Op("||").Id("cfg").Op("==").Nil(), + ).Block( + jen.Return(), + ) + if spec.IsSlice { + g.Id("c"). + Dot(spec.BuilderField). + Op("="). 
+ Append(jen.Id("c").Dot(spec.BuilderField), jen.Id("cfg")) + } else { + g.Id("c").Dot(spec.BuilderField).Op("=").Id("cfg") + } + }), + ), + ) + } + return f +} + +func optionComment(spec *ResourceSpec) string { + resource := strings.ToLower(spec.Name) + if strings.Contains(resource, "knowledge") { + resource = "knowledge base" + } + return fmt.Sprintf("With%s registers a %s configuration for the engine.", spec.Name, resource) +} diff --git a/sdk/internal/sdkcodegen/registration_generator.go b/sdk/internal/sdkcodegen/registration_generator.go new file mode 100644 index 00000000..c9c07388 --- /dev/null +++ b/sdk/internal/sdkcodegen/registration_generator.go @@ -0,0 +1,50 @@ +package sdkcodegen + +import ( + "fmt" + "strings" + + "github.com/dave/jennifer/jen" +) + +func buildRegistrationFile() *jen.File { + f := jen.NewFile(packageName) + f.ImportName("fmt", "fmt") + for i := range ResourceSpecs { + spec := &ResourceSpecs[i] + f.ImportAlias(spec.PackagePath, spec.ImportAlias) + } + for i := range ResourceSpecs { + spec := &ResourceSpecs[i] + f.Comment(fmt.Sprintf("Register%s registers a %s for runtime execution.", spec.Name, registrationSubject(spec))) + f.Func(). + Params(jen.Id("e").Op("*").Id("Engine")). + Id(fmt.Sprintf("Register%s", spec.Name)). + Params(jen.Id("cfg").Op("*").Qual(spec.PackagePath, spec.TypeName)). + Params(jen.Error()). 
+ BlockFunc(func(g *jen.Group) { + requiredMessage := fmt.Sprintf("%s config is required", strings.ToLower(spec.Name)) + g.If(jen.Id("e").Op("==").Nil()).Block( + jen.Return(jen.Qual("fmt", "Errorf").Call(jen.Lit("engine is nil"))), + ) + g.If(jen.Id("cfg").Op("==").Nil()).Block( + jen.Return(jen.Qual("fmt", "Errorf").Call(jen.Lit(requiredMessage))), + ) + if spec.IsSlice { + g.Id("e").Dot(spec.BuilderField).Op("=").Append(jen.Id("e").Dot(spec.BuilderField), jen.Id("cfg")) + } else { + g.Id("e").Dot(spec.BuilderField).Op("=").Id("cfg") + } + g.Return(jen.Nil()) + }) + } + return f +} + +func registrationSubject(spec *ResourceSpec) string { + name := strings.ToLower(spec.Name) + if strings.Contains(name, "knowledge") { + return "knowledge base configuration" + } + return fmt.Sprintf("%s configuration", name) +} diff --git a/sdk/internal/sdkcodegen/spec.go b/sdk/internal/sdkcodegen/spec.go new file mode 100644 index 00000000..ee707ca0 --- /dev/null +++ b/sdk/internal/sdkcodegen/spec.go @@ -0,0 +1,139 @@ +package sdkcodegen + +// ResourceSpec declares metadata required to generate Compozy SDK helpers. +type ResourceSpec struct { + Name string + PluralName string + PackagePath string + ImportAlias string + SDK2Package string + TypeName string + BuilderField string + IsSlice bool + FileExtensions []string +} + +// ResourceSpecs contains every Compozy resource supported by the generator. 
+var ResourceSpecs = []ResourceSpec{ + { + Name: "Project", + PluralName: "Projects", + PackagePath: "github.com/compozy/compozy/engine/project", + ImportAlias: "engineproject", + SDK2Package: "github.com/compozy/sdk/project", + TypeName: "Config", + BuilderField: "project", + IsSlice: false, + FileExtensions: []string{".yaml", ".yml"}, + }, + { + Name: "Workflow", + PluralName: "Workflows", + PackagePath: "github.com/compozy/compozy/engine/workflow", + ImportAlias: "engineworkflow", + SDK2Package: "github.com/compozy/sdk/workflow", + TypeName: "Config", + BuilderField: "workflows", + IsSlice: true, + FileExtensions: []string{".yaml", ".yml"}, + }, + { + Name: "Agent", + PluralName: "Agents", + PackagePath: "github.com/compozy/compozy/engine/agent", + ImportAlias: "engineagent", + SDK2Package: "github.com/compozy/sdk/agent", + TypeName: "Config", + BuilderField: "agents", + IsSlice: true, + FileExtensions: []string{".yaml", ".yml"}, + }, + { + Name: "Tool", + PluralName: "Tools", + PackagePath: "github.com/compozy/compozy/engine/tool", + ImportAlias: "enginetool", + SDK2Package: "github.com/compozy/sdk/tool", + TypeName: "Config", + BuilderField: "tools", + IsSlice: true, + FileExtensions: []string{".yaml", ".yml"}, + }, + { + Name: "Knowledge", + PluralName: "KnowledgeBases", + PackagePath: "github.com/compozy/compozy/engine/knowledge", + ImportAlias: "engineknowledge", + SDK2Package: "github.com/compozy/sdk/knowledge", + TypeName: "BaseConfig", + BuilderField: "knowledgeBases", + IsSlice: true, + FileExtensions: []string{".yaml", ".yml"}, + }, + { + Name: "Memory", + PluralName: "Memories", + PackagePath: "github.com/compozy/compozy/engine/memory", + ImportAlias: "enginememory", + SDK2Package: "github.com/compozy/sdk/memory", + TypeName: "Config", + BuilderField: "memories", + IsSlice: true, + FileExtensions: []string{".yaml", ".yml"}, + }, + { + Name: "MCP", + PluralName: "MCPs", + PackagePath: "github.com/compozy/compozy/engine/mcp", + ImportAlias: "enginemcp", 
+ SDK2Package: "github.com/compozy/sdk/mcp", + TypeName: "Config", + BuilderField: "mcps", + IsSlice: true, + FileExtensions: []string{".yaml", ".yml"}, + }, + { + Name: "Schema", + PluralName: "Schemas", + PackagePath: "github.com/compozy/compozy/engine/schema", + ImportAlias: "engineschema", + SDK2Package: "github.com/compozy/sdk/schema", + TypeName: "Schema", + BuilderField: "schemas", + IsSlice: true, + FileExtensions: []string{".yaml", ".yml"}, + }, + { + Name: "Model", + PluralName: "Models", + PackagePath: "github.com/compozy/compozy/engine/core", + ImportAlias: "enginecore", + SDK2Package: "github.com/compozy/sdk/model", + TypeName: "ProviderConfig", + BuilderField: "models", + IsSlice: true, + FileExtensions: []string{".yaml", ".yml"}, + }, + { + Name: "Schedule", + PluralName: "Schedules", + PackagePath: "github.com/compozy/compozy/engine/project/schedule", + ImportAlias: "projectschedule", + SDK2Package: "github.com/compozy/sdk/schedule", + TypeName: "Config", + BuilderField: "schedules", + IsSlice: true, + FileExtensions: []string{".yaml", ".yml"}, + }, + { + Name: "Webhook", + PluralName: "Webhooks", + PackagePath: "github.com/compozy/compozy/engine/webhook", + ImportAlias: "enginewebhook", + SDK2Package: "github.com/compozy/sdk/webhook", + TypeName: "Config", + BuilderField: "webhooks", + IsSlice: true, + FileExtensions: []string{".yaml", ".yml"}, + }, +} diff --git a/sdk/internal/sse/decoder.go b/sdk/internal/sse/decoder.go new file mode 100644 index 00000000..e29639cc --- /dev/null +++ b/sdk/internal/sse/decoder.go @@ -0,0 +1,136 @@ +package sse + +import ( + "bufio" + "bytes" + "context" + "errors" + "io" + "strconv" + "strings" +) + +// Event represents a single Server-Sent Event frame. +type Event struct { + ID int64 + Type string + Data []byte +} + +// Decoder parses Server-Sent Events from a stream. +type Decoder struct { + reader *bufio.Reader +} + +// NewDecoder constructs a Decoder for the provided stream. 
+func NewDecoder(r io.Reader) *Decoder {
+	if r == nil {
+		return &Decoder{reader: bufio.NewReader(bytes.NewReader(nil))}
+	}
+	return &Decoder{reader: bufio.NewReader(r)}
+}
+
+// Next reads the next event from the stream, skipping heartbeat frames transparently.
+// On io.EOF the final (possibly partial) event is returned alongside the error, mirroring
+// the io.Reader convention; callers should consume Event before inspecting err.
+func (d *Decoder) Next(ctx context.Context) (Event, error) {
+	if d == nil || d.reader == nil {
+		return Event{}, io.EOF
+	}
+	for {
+		if ctx != nil {
+			if err := ctx.Err(); err != nil {
+				return Event{}, err
+			}
+		}
+		event, more, err := d.readEvent(ctx)
+		if err != nil {
+			return event, err
+		}
+		if !more {
+			return event, nil
+		}
+		if event.Type == "" && len(event.Data) == 0 {
+			continue
+		}
+		return event, nil
+	}
+}
+
+func (d *Decoder) readEvent(ctx context.Context) (Event, bool, error) {
+	var event Event
+	var data bytes.Buffer
+	for {
+		if err := contextErr(ctx); err != nil {
+			return Event{}, false, err
+		}
+		line, err := d.reader.ReadString('\n')
+		if err != nil {
+			return handleReadError(err, event, &data)
+		}
+		trimmed := strings.TrimRight(line, "\r\n")
+		if trimmed == "" {
+			return finalizeEvent(event, &data)
+		}
+		if strings.HasPrefix(trimmed, ":") {
+			continue
+		}
+		updateEvent(trimmed, &event, &data)
+	}
+}
+
+func contextErr(ctx context.Context) error {
+	if ctx == nil {
+		return nil
+	}
+	return ctx.Err()
+}
+
+func handleReadError(err error, event Event, data *bytes.Buffer) (Event, bool, error) {
+	if !errors.Is(err, io.EOF) {
+		return Event{}, false, err
+	}
+	if data.Len() == 0 && event.Type == "" && event.ID == 0 {
+		return Event{}, false, io.EOF
+	}
+	event.Data = data.Bytes()
+	return event, false, io.EOF
+}
+
+func finalizeEvent(event Event, data *bytes.Buffer) (Event, bool, error) {
+	if data.Len() == 0 && event.Type == "" && event.ID == 0 {
+		return Event{}, true, nil
+	}
+	event.Data = append(event.Data, data.Bytes()...)
+	return event, false, nil
+}
+
+func updateEvent(line string, event *Event, data *bytes.Buffer) {
+	switch {
+	case strings.HasPrefix(line, "id:"):
+		event.ID = parseID(line[3:])
+	case strings.HasPrefix(line, "event:"):
+		event.Type = strings.TrimSpace(line[6:])
+	case strings.HasPrefix(line, "data:"):
+		// Per the SSE spec, strip only a single leading space; any other
+		// whitespace in the data payload is significant and must be preserved.
+		appendData(data, strings.TrimPrefix(line[5:], " "))
+	default:
+		appendData(data, line)
+	}
+}
+
+func appendData(buffer *bytes.Buffer, value string) {
+	if buffer.Len() > 0 {
+		buffer.WriteByte('\n')
+	}
+	buffer.WriteString(value)
+}
+
+func parseID(raw string) int64 {
+	value := strings.TrimSpace(raw)
+	if value == "" {
+		return 0
+	}
+	parsed, err := strconv.ParseInt(value, 10, 64)
+	if err != nil {
+		return 0
+	}
+	return parsed
+}
diff --git a/sdk/internal/testutil/bench.go b/sdk/internal/testutil/bench.go
new file mode 100644
index 00000000..1c93fceb
--- /dev/null
+++ b/sdk/internal/testutil/bench.go
@@ -0,0 +1,77 @@
+package testutil
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync/atomic"
+	"testing"
+)
+
+// BenchmarkProfile describes the scale of a benchmark scenario for SDK builders.
+type BenchmarkProfile struct {
+	Name      string
+	Agents    int
+	Tasks     int
+	Workflows int
+	Memories  int
+	Knowledge int
+}
+
+// Predefined benchmark profiles used across builder benchmarks.
+var (
+	BenchmarkSimple  = BenchmarkProfile{Name: "simple", Agents: 1, Tasks: 1, Workflows: 1, Memories: 1, Knowledge: 1}
+	BenchmarkMedium  = BenchmarkProfile{Name: "medium", Agents: 4, Tasks: 8, Workflows: 4, Memories: 2, Knowledge: 3}
+	BenchmarkComplex = BenchmarkProfile{Name: "complex", Agents: 10, Tasks: 50, Workflows: 6, Memories: 4, Knowledge: 6}
+)
+
+var benchmarkSink atomic.Value
+
+// RunBuilderBenchmark executes fn using b.N iterations while reporting allocations and capturing failures.
+func RunBuilderBenchmark(b *testing.B, fn func(ctx context.Context) (any, error)) {
+	b.Helper()
+	ctx := NewBenchmarkContext(b)
+	b.ReportAllocs()
+	b.ResetTimer()
+	var last any
+	for i := 0; i < b.N; i++ {
+		result, err := fn(ctx)
+		if err != nil {
+			b.Fatalf("build failed: %v", err)
+		}
+		if result != nil {
+			last = result
+		}
+	}
+	if last != nil {
+		benchmarkSink.Store(last)
+	}
+}
+
+// RunParallelBuilderBenchmark executes fn concurrently using RunParallel to surface contention issues.
+func RunParallelBuilderBenchmark(b *testing.B, fn func(ctx context.Context) (any, error)) {
+	b.Helper()
+	ctx := NewBenchmarkContext(b)
+	b.ReportAllocs()
+	b.ResetTimer()
+	var last atomic.Value
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			result, err := fn(ctx)
+			if err != nil {
+				// Errorf is safe from worker goroutines; Fatalf/FailNow must not
+				// be called outside the goroutine running the benchmark function.
+				b.Errorf("build failed: %v", err)
+			}
+			if result != nil {
+				last.Store(result)
+			}
+		}
+	})
+	if value := last.Load(); value != nil {
+		benchmarkSink.Store(value)
+	}
+}
+
+// BenchmarkID produces deterministic benchmark identifiers.
+func BenchmarkID(prefix string, idx int) string {
+	return fmt.Sprintf("%s-%03d", strings.TrimSpace(prefix), idx)
+}
diff --git a/sdk/internal/testutil/compare.go b/sdk/internal/testutil/compare.go
new file mode 100644
index 00000000..a5791921
--- /dev/null
+++ b/sdk/internal/testutil/compare.go
@@ -0,0 +1,27 @@
+package testutil
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"testing"
+)
+
+// AssertConfigEqual fails the test when want and got differ, emitting a stable JSON diff for easier debugging.
+func AssertConfigEqual(t *testing.T, want, got any) { + t.Helper() + if reflect.DeepEqual(want, got) { + return + } + serializedWant := mustMarshalConfig(want) + serializedGot := mustMarshalConfig(got) + t.Fatalf("config mismatch\nwant: %s\n got: %s", serializedWant, serializedGot) +} + +func mustMarshalConfig(v any) string { + data, err := json.MarshalIndent(v, "", " ") + if err != nil { + return fmt.Sprintf("%+v", v) + } + return string(data) +} diff --git a/sdk/internal/testutil/context.go b/sdk/internal/testutil/context.go new file mode 100644 index 00000000..69642aae --- /dev/null +++ b/sdk/internal/testutil/context.go @@ -0,0 +1,56 @@ +package testutil + +import ( + "context" + "testing" + + "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" +) + +// NewTestContext returns a context derived from tb.Context() with test logger and configuration manager attached. +func NewTestContext(tb testing.TB) context.Context { + tb.Helper() + ctx := contextFromTB(tb) + ctx = WithTestLogger(ctx, tb) + ctx = WithTestConfig(ctx, tb) + return ctx +} + +// WithTestLogger returns a copy of ctx containing a logger configured for tests. +func WithTestLogger(ctx context.Context, tb testing.TB) context.Context { + tb.Helper() + log := logger.NewForTests() + return logger.ContextWithLogger(ctx, log) +} + +// WithTestConfig returns a copy of ctx containing a configuration manager loaded with defaults suitable for tests. 
+func WithTestConfig(ctx context.Context, tb testing.TB) context.Context { + tb.Helper() + manager := config.NewManager(ctx, config.NewService()) + if _, err := manager.Load(ctx, config.NewDefaultProvider()); err != nil { + tb.Fatalf("failed to load test configuration: %v", err) + } + cleanup, ok := tb.(interface{ Cleanup(func()) }) + if !ok { + tb.Fatalf("testing object does not support Cleanup") + } + cleanup.Cleanup(func() { + _ = manager.Close(context.WithoutCancel(ctx)) + }) + return config.ContextWithManager(ctx, manager) +} + +// NewBenchmarkContext returns a context derived from b.Context() with logger and configuration attached. +func NewBenchmarkContext(b *testing.B) context.Context { + b.Helper() + return NewTestContext(b) +} + +func contextFromTB(tb testing.TB) context.Context { + provider, ok := tb.(interface{ Context() context.Context }) + if !ok { + tb.Fatalf("testing object does not expose Context()") + } + return provider.Context() +} diff --git a/sdk/internal/testutil/doc.go b/sdk/internal/testutil/doc.go new file mode 100644 index 00000000..820abe2a --- /dev/null +++ b/sdk/internal/testutil/doc.go @@ -0,0 +1,3 @@ +// Package testutil centralizes reusable helpers for SDK unit tests, including +// context construction, validation assertions, fixtures, and table-driven test utilities. +package testutil diff --git a/sdk/internal/testutil/fixtures.go b/sdk/internal/testutil/fixtures.go new file mode 100644 index 00000000..cf576e74 --- /dev/null +++ b/sdk/internal/testutil/fixtures.go @@ -0,0 +1,80 @@ +package testutil + +import ( + "fmt" + "strings" + + "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/engine/task" + "github.com/compozy/compozy/engine/workflow" +) + +// NewTestModel returns a minimal provider configuration suitable for use in tests. 
+func NewTestModel(provider, model string) *core.ProviderConfig { + trimmedProvider := strings.TrimSpace(provider) + if trimmedProvider == "" { + trimmedProvider = string(core.ProviderOpenAI) + } + trimmedModel := strings.TrimSpace(model) + if trimmedModel == "" { + trimmedModel = "gpt-4o-mini" + } + return &core.ProviderConfig{ + Provider: core.ProviderName(strings.ToLower(trimmedProvider)), + Model: trimmedModel, + APIKey: "{{ .env.TEST_API_KEY }}", + } +} + +// NewTestAgent creates an agent configuration with default instructions and inline model config. +func NewTestAgent(id string) *agent.Config { + trimmedID := strings.TrimSpace(id) + if trimmedID == "" { + trimmedID = "test-agent" + } + model := NewTestModel(string(core.ProviderOpenAI), "gpt-4o-mini") + cfg := &agent.Config{ + ID: trimmedID, + Instructions: "You are a reliable assistant used for automated tests.", + Model: agent.Model{ + Config: *model, + }, + } + if err := cfg.SetCWD(repoRoot); err != nil { + panic(fmt.Errorf("failed to set agent cwd: %w", err)) + } + return cfg +} + +// NewTestWorkflow constructs a workflow configuration with a single basic task referencing a test agent. 
+func NewTestWorkflow(id string) *workflow.Config { + trimmedID := strings.TrimSpace(id) + if trimmedID == "" { + trimmedID = "test-workflow" + } + agentCfg := NewTestAgent(trimmedID + "-agent") + withInput := make(core.Input) + taskCfg := task.Config{ + BaseConfig: task.BaseConfig{ + ID: trimmedID + "-task", + Type: task.TaskTypeBasic, + Agent: agentCfg, + With: &withInput, + }, + BasicTask: task.BasicTask{Prompt: "Summarize the workflow input."}, + } + if cwd, err := core.CWDFromPath(repoRoot); err == nil { + taskCfg.CWD = cwd + } + wf := &workflow.Config{ + ID: trimmedID, + Version: "1.0.0", + Agents: []agent.Config{*agentCfg}, + Tasks: []task.Config{taskCfg}, + } + if cwd, err := core.CWDFromPath(repoRoot); err == nil { + wf.CWD = cwd + } + return wf +} diff --git a/sdk/internal/testutil/table.go b/sdk/internal/testutil/table.go new file mode 100644 index 00000000..0d67a2e1 --- /dev/null +++ b/sdk/internal/testutil/table.go @@ -0,0 +1,42 @@ +package testutil + +import ( + "context" + "strings" + "testing" +) + +// TableTest describes a table-driven builder test case executed via RunTableTests. +type TableTest struct { + Name string + BuildFunc func(context.Context) (any, error) + WantErr bool + ErrContains string + Validate func(*testing.T, any) +} + +// RunTableTests executes each table-driven test with a consistent test context. 
+func RunTableTests(t *testing.T, tests []TableTest) { + t.Helper() + for _, tc := range tests { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + t.Helper() + ctx := NewTestContext(t) + result, err := tc.BuildFunc(ctx) + if tc.WantErr { + if err == nil { + t.Fatalf("expected error") + } + if tc.ErrContains != "" && !strings.Contains(err.Error(), tc.ErrContains) { + t.Fatalf("expected error containing %q, got %v", tc.ErrContains, err) + } + return + } + RequireNoError(t, err) + if tc.Validate != nil { + tc.Validate(t, result) + } + }) + } +} diff --git a/sdk/internal/testutil/testdata.go b/sdk/internal/testutil/testdata.go new file mode 100644 index 00000000..fac2bf3a --- /dev/null +++ b/sdk/internal/testutil/testdata.go @@ -0,0 +1,46 @@ +package testutil + +import ( + "os" + "path/filepath" + "runtime" + "testing" +) + +var repoRoot = detectRepoRoot() + +func detectRepoRoot() string { + _, file, _, ok := runtime.Caller(0) + if !ok { + panic("testutil: failed to detect repository root via runtime.Caller") + } + dir := filepath.Dir(file) + for { + if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil { + return filepath.Clean(dir) + } + parent := filepath.Dir(dir) + if parent == dir { + panic("testutil: repository root not found") + } + dir = parent + } +} + +// TestDataPath returns an absolute path rooted at sdk/testdata for the provided segments. +func TestDataPath(t *testing.T, segments ...string) string { + t.Helper() + parts := append([]string{repoRoot, "sdk", "testdata"}, segments...) + return filepath.Join(parts...) +} + +// ReadTestData reads a file from sdk/testdata using the provided path segments and fails the test on error. +func ReadTestData(t *testing.T, segments ...string) []byte { + t.Helper() + path := TestDataPath(t, segments...) 
+ data, err := os.ReadFile(path) + if err != nil { + t.Fatalf("failed to read testdata %s: %v", path, err) + } + return data +} diff --git a/sdk/internal/testutil/testutil_test.go b/sdk/internal/testutil/testutil_test.go new file mode 100644 index 00000000..3220e86f --- /dev/null +++ b/sdk/internal/testutil/testutil_test.go @@ -0,0 +1,164 @@ +package testutil + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + enginecore "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewTestContextProvidesLoggerAndConfig(t *testing.T) { + t.Parallel() + t.Run("Should provide logger and config", func(t *testing.T) { + t.Parallel() + ctx := NewTestContext(t) + require.NotNil(t, ctx.Done(), "expected context with cancellation support") + require.NotNil(t, logger.FromContext(ctx), "expected logger in context") + require.NotNil(t, config.FromContext(ctx), "expected configuration in context") + }) +} + +func TestRequireNoError(t *testing.T) { + t.Run("Should succeed when error is nil", func(t *testing.T) { + RequireNoError(t, nil) + }) + t.Run("Should report failure when error is present", func(t *testing.T) { + prev := reportFailure + called := false + var message string + reportFailure = func(_ *testing.T, format string, args ...any) { + called = true + message = fmt.Sprintf(format, args...) 
+ } + t.Cleanup(func() { + reportFailure = prev + }) + RequireNoError(t, fmt.Errorf("boom")) + require.True(t, called, "expected failure handler to be invoked") + assert.Contains(t, message, "unexpected error") + }) +} + +func TestRequireValidationError(t *testing.T) { + t.Run("Should verify build error contains target", func(t *testing.T) { + inner := fmt.Errorf("invalid value for field") + be := &sdkerrors.BuildError{Errors: []error{inner}} + RequireValidationError(t, be, "field") + }) + t.Run("Should report failure when validation error missing", func(t *testing.T) { + prev := reportFailure + called := false + var message string + reportFailure = func(_ *testing.T, format string, args ...any) { + called = true + message = fmt.Sprintf(format, args...) + } + t.Cleanup(func() { + reportFailure = prev + }) + RequireValidationError(t, nil, "") + require.True(t, called, "expected validation failure handler to run") + assert.Contains(t, message, "expected validation error") + }) +} + +func TestAssertBuildError(t *testing.T) { + t.Parallel() + t.Run("Should assert expected substrings", func(t *testing.T) { + t.Parallel() + be := &sdkerrors.BuildError{Errors: []error{fmt.Errorf("missing id"), fmt.Errorf("invalid name")}} + AssertBuildError(t, be, []string{"missing", "invalid"}) + }) +} + +func TestNewTestModelDefaults(t *testing.T) { + t.Parallel() + t.Run("Should set defaults for provider and model", func(t *testing.T) { + t.Parallel() + model := NewTestModel("", "") + require.Equal(t, enginecore.ProviderOpenAI, model.Provider) + require.NotEmpty(t, model.Model) + assert.Contains(t, model.APIKey, "TEST_API_KEY") + }) +} + +func TestNewTestAgent(t *testing.T) { + t.Parallel() + t.Run("Should build agent with defaults", func(t *testing.T) { + t.Parallel() + agent := NewTestAgent("example-agent") + require.Equal(t, "example-agent", agent.ID) + require.NotEmpty(t, strings.TrimSpace(agent.Instructions)) + require.NotEmpty(t, agent.Model.Config.Model) + }) +} + +func 
TestNewTestWorkflow(t *testing.T) { + t.Parallel() + t.Run("Should create workflow with defaults", func(t *testing.T) { + t.Parallel() + wf := NewTestWorkflow("workflow") + require.Equal(t, "workflow", wf.ID) + require.Len(t, wf.Agents, 1) + require.Len(t, wf.Tasks, 1) + require.NotNil(t, wf.Tasks[0].Agent) + require.NotEmpty(t, wf.Tasks[0].Agent.ID) + ctx := NewTestContext(t) + require.NoError(t, wf.Validate(ctx)) + assert.Equal(t, wf.Agents[0].ID, wf.Tasks[0].Agent.ID) + }) +} + +func TestRunTableTests(t *testing.T) { + t.Parallel() + t.Run("Should execute builders and validators", func(t *testing.T) { + t.Parallel() + executions := make([]string, 0, 2) + table := []TableTest{ + { + Name: "ok", + BuildFunc: func(ctx context.Context) (any, error) { + if logger.FromContext(ctx) == nil { + return nil, errors.New("logger missing") + } + executions = append(executions, "ok") + return "value", nil + }, + Validate: func(t *testing.T, v any) { + t.Helper() + require.IsType(t, "", v) + assert.Equal(t, "value", v) + }, + }, + { + Name: "err", + WantErr: true, + ErrContains: "boom", + BuildFunc: func(_ context.Context) (any, error) { + executions = append(executions, "err") + return nil, fmt.Errorf("boom failure") + }, + }, + } + RunTableTests(t, table) + require.Len(t, executions, 2) + }) +} + +func TestAssertConfigEqual(t *testing.T) { + t.Parallel() + t.Run("Should compare maps without diff", func(t *testing.T) { + t.Parallel() + want := map[string]any{"k": "v"} + got := map[string]any{"k": "v"} + AssertConfigEqual(t, want, got) + }) +} diff --git a/sdk/internal/testutil/validation.go b/sdk/internal/testutil/validation.go new file mode 100644 index 00000000..651c15aa --- /dev/null +++ b/sdk/internal/testutil/validation.go @@ -0,0 +1,74 @@ +package testutil + +import ( + "errors" + "fmt" + "strings" + "testing" + + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" +) + +var reportFailure = func(t *testing.T, format string, args ...any) { + t.Fatalf(format, 
args...) +} + +// RequireNoError fails the test if err is non-nil. Optional msgAndArgs provide context in the failure message. +func RequireNoError(t *testing.T, err error, msgAndArgs ...any) { + t.Helper() + if err == nil { + return + } + if len(msgAndArgs) > 0 { + reportFailure(t, "%s: %v", fmt.Sprint(msgAndArgs...), err) + return + } + reportFailure(t, "unexpected error: %v", err) +} + +// RequireValidationError fails when err is nil or does not wrap a BuildError. It optionally enforces that the +// rendered error message contains the provided substring. +func RequireValidationError(t *testing.T, err error, contains string) { + t.Helper() + if err == nil { + reportFailure(t, "expected validation error") + return + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + reportFailure(t, "expected build error, got %T", err) + return + } + if contains != "" && !strings.Contains(err.Error(), contains) { + reportFailure(t, "expected error containing %q, got %v", contains, err) + } +} + +// AssertBuildError verifies that err is a BuildError containing messages for each expected entry. 
+func AssertBuildError(t *testing.T, err error, expectedErrors []string) { + t.Helper() + if err == nil { + reportFailure(t, "expected build error") + return + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + reportFailure(t, "expected build error, got %T", err) + return + } + for _, want := range expectedErrors { + matched := false + for _, inner := range buildErr.Errors { + if inner == nil { + continue + } + if strings.Contains(inner.Error(), want) { + matched = true + break + } + } + if !matched { + reportFailure(t, "expected build error containing %q, got %v", want, err) + } + } +} diff --git a/sdk/internal/validate/doc.go b/sdk/internal/validate/doc.go new file mode 100644 index 00000000..815c70a5 --- /dev/null +++ b/sdk/internal/validate/doc.go @@ -0,0 +1,3 @@ +// Package validate contains shared validation utilities used by SDK builders to +// keep configuration checks consistent with the engine. +package validate diff --git a/sdk/internal/validate/validate.go b/sdk/internal/validate/validate.go new file mode 100644 index 00000000..808153d7 --- /dev/null +++ b/sdk/internal/validate/validate.go @@ -0,0 +1,176 @@ +// Package validate provides helper functions for validating SDK builder inputs. 
+package validate + +import ( + "context" + "fmt" + "net/url" + "reflect" + "regexp" + "strings" + "time" + + "github.com/robfig/cron/v3" +) + +var idPattern = regexp.MustCompile(`^[A-Za-z0-9-]+$`) + +func ensureContext(ctx context.Context) error { + if ctx == nil { + return fmt.Errorf("context is required") + } + return nil +} + +func ensureFieldName(name string) error { + if strings.TrimSpace(name) == "" { + return fmt.Errorf("field name is required") + } + return nil +} + +func isEmptyCollection(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice: + return v.Len() == 0 + } + return false +} + +func isStringValue(v reflect.Value) (string, bool) { + if v.Kind() == reflect.String { + return v.String(), true + } + return "", false +} + +// Required checks that a required field is present and not empty. +func Required(ctx context.Context, name string, value any) error { + if err := ensureContext(ctx); err != nil { + return err + } + if err := ensureFieldName(name); err != nil { + return err + } + if value == nil { + return fmt.Errorf("%s is required", name) + } + + rv := reflect.ValueOf(value) + for rv.Kind() == reflect.Interface || rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("%s is required", name) + } + rv = rv.Elem() + } + + if !rv.IsValid() { + return fmt.Errorf("%s is required", name) + } + + if isEmptyCollection(rv) { + return fmt.Errorf("%s cannot be empty", name) + } + + if str, ok := isStringValue(rv); ok { + if strings.TrimSpace(str) == "" { + return fmt.Errorf("%s cannot be empty", name) + } + } + + return nil +} + +// ID ensures that an identifier contains only alphanumeric characters and hyphens. 
+func ID(ctx context.Context, id string) error { + if err := ensureContext(ctx); err != nil { + return err + } + trimmed := strings.TrimSpace(id) + if trimmed == "" { + return fmt.Errorf("id is required") + } + if !idPattern.MatchString(trimmed) { + return fmt.Errorf("id must contain only letters, numbers, or hyphens") + } + return nil +} + +// NonEmpty ensures a string field is not empty or whitespace. +func NonEmpty(ctx context.Context, name, value string) error { + if err := ensureContext(ctx); err != nil { + return err + } + if err := ensureFieldName(name); err != nil { + return err + } + if strings.TrimSpace(value) == "" { + return fmt.Errorf("%s cannot be empty", name) + } + return nil +} + +// URL ensures the provided string is a well-formed URL with scheme and host. +func URL(ctx context.Context, rawURL string) error { + if err := ensureContext(ctx); err != nil { + return err + } + value := strings.TrimSpace(rawURL) + if value == "" { + return fmt.Errorf("url is required") + } + parsed, err := url.Parse(value) + if err != nil { + return fmt.Errorf("url must be valid: %w", err) + } + if parsed.Scheme == "" { + return fmt.Errorf("url must include a scheme such as http or https") + } + if parsed.Host == "" { + return fmt.Errorf("url must include a host") + } + return nil +} + +// Duration ensures a duration is strictly positive. +func Duration(ctx context.Context, d time.Duration) error { + if err := ensureContext(ctx); err != nil { + return err + } + if d <= 0 { + return fmt.Errorf("duration must be positive: got %s", d) + } + return nil +} + +// Range ensures an integer value lies within the inclusive range. 
+func Range(ctx context.Context, name string, val, minVal, maxVal int) error { + if err := ensureContext(ctx); err != nil { + return err + } + if err := ensureFieldName(name); err != nil { + return err + } + if minVal > maxVal { + return fmt.Errorf("%s range is invalid: min %d is greater than max %d", name, minVal, maxVal) + } + if val < minVal || val > maxVal { + return fmt.Errorf("%s must be between %d and %d inclusive: got %d", name, minVal, maxVal, val) + } + return nil +} + +// Cron ensures the provided cron expression is valid according to cron/v3 standard parsing rules. +func Cron(ctx context.Context, expr string) error { + if err := ensureContext(ctx); err != nil { + return err + } + trimmed := strings.TrimSpace(expr) + if trimmed == "" { + return fmt.Errorf("cron expression is required") + } + if _, err := cron.ParseStandard(trimmed); err != nil { + return fmt.Errorf("cron expression is invalid: %w", err) + } + return nil +} diff --git a/sdk/internal/validate/validate_test.go b/sdk/internal/validate/validate_test.go new file mode 100644 index 00000000..4a27deca --- /dev/null +++ b/sdk/internal/validate/validate_test.go @@ -0,0 +1,125 @@ +package validate + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidateRequired(t *testing.T) { + t.Run("Should return error when value is nil", func(t *testing.T) { + err := Required(t.Context(), "name", nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "name is required") + }) + t.Run("Should return error when string is empty", func(t *testing.T) { + err := Required(t.Context(), "title", " ") + require.Error(t, err) + assert.Contains(t, err.Error(), "title cannot be empty") + }) + t.Run("Should return error when slice is empty", func(t *testing.T) { + values := []string{} + err := Required(t.Context(), "items", values) + require.Error(t, err) + assert.Contains(t, err.Error(), "items cannot be empty") + }) + 
t.Run("Should return error when pointer dereferences to empty value", func(t *testing.T) { + value := " " + err := Required(t.Context(), "pointer", &value) + require.Error(t, err) + assert.Contains(t, err.Error(), "pointer cannot be empty") + }) + t.Run("Should succeed for valid value", func(t *testing.T) { + err := Required(t.Context(), "description", "value") + require.NoError(t, err) + }) +} + +func TestValidateID(t *testing.T) { + t.Run("Should return error when ID is empty", func(t *testing.T) { + err := ID(t.Context(), "") + require.Error(t, err) + assert.Contains(t, err.Error(), "id is required") + }) + t.Run("Should return error when ID contains invalid characters", func(t *testing.T) { + err := ID(t.Context(), "invalid_id") + require.Error(t, err) + assert.Contains(t, err.Error(), "letters, numbers, or hyphens") + }) + t.Run("Should succeed for valid ID", func(t *testing.T) { + err := ID(t.Context(), "abc-123") + require.NoError(t, err) + }) + t.Run("Should return error when context is nil", func(t *testing.T) { + var missingCtx context.Context + err := ID(missingCtx, "abc-123") + require.Error(t, err) + assert.Contains(t, err.Error(), "context is required") + }) +} + +func TestValidateNonEmpty(t *testing.T) { + t.Run("Should return error when value is empty", func(t *testing.T) { + err := NonEmpty(t.Context(), "name", "\t") + require.Error(t, err) + assert.Contains(t, err.Error(), "name cannot be empty") + }) + t.Run("Should succeed for non empty value", func(t *testing.T) { + err := NonEmpty(t.Context(), "name", "value") + require.NoError(t, err) + }) +} + +func TestValidateURL(t *testing.T) { + t.Run("Should return error when URL is empty", func(t *testing.T) { + err := URL(t.Context(), "") + require.Error(t, err) + assert.Contains(t, err.Error(), "url is required") + }) + t.Run("Should return error when scheme is missing", func(t *testing.T) { + err := URL(t.Context(), "example.com/path") + require.Error(t, err) + assert.Contains(t, err.Error(), "must 
include a scheme") + }) + t.Run("Should return error when host is missing", func(t *testing.T) { + err := URL(t.Context(), "mailto:user@example.com") + require.Error(t, err) + assert.Contains(t, err.Error(), "must include a host") + }) + t.Run("Should succeed for valid URL", func(t *testing.T) { + err := URL(t.Context(), "https://example.com/path") + require.NoError(t, err) + }) +} + +func TestValidateDuration(t *testing.T) { + t.Run("Should return error when duration is non positive", func(t *testing.T) { + err := Duration(t.Context(), 0) + require.Error(t, err) + assert.Contains(t, err.Error(), "must be positive") + }) + t.Run("Should succeed for positive duration", func(t *testing.T) { + err := Duration(t.Context(), time.Second) + require.NoError(t, err) + }) +} + +func TestValidateRange(t *testing.T) { + t.Run("Should return error when bounds are invalid", func(t *testing.T) { + err := Range(t.Context(), "score", 5, 10, 1) + require.Error(t, err) + assert.Contains(t, err.Error(), "range is invalid") + }) + t.Run("Should return error when value is outside bounds", func(t *testing.T) { + err := Range(t.Context(), "score", 11, 1, 10) + require.Error(t, err) + assert.Contains(t, err.Error(), "must be between 1 and 10") + }) + t.Run("Should succeed when value is within range", func(t *testing.T) { + err := Range(t.Context(), "score", 5, 1, 10) + require.NoError(t, err) + }) +} diff --git a/sdk/knowledge/README.md b/sdk/knowledge/README.md new file mode 100644 index 00000000..27092eff --- /dev/null +++ b/sdk/knowledge/README.md @@ -0,0 +1,226 @@ +# SDK Knowledge Package + +Functional options implementation for knowledge base configuration in Compozy SDK v2. + +## Overview + +This package provides a type-safe, functional options pattern for configuring knowledge bases, embedders, vector databases, and knowledge bindings. + +## Constructors + +### NewBase + +Creates a knowledge base configuration with functional options. 
+ +```go +cfg, err := knowledge.NewBase( + ctx, + "my-knowledge-base", + knowledge.WithEmbedder("my-embedder"), + knowledge.WithVectorDB("my-vectordb"), + knowledge.WithSources([]engineknowledge.SourceConfig{ + {Type: "file", Path: "/path/to/docs"}, + }), +) +``` + +**Required options:** + +- `WithEmbedder(id)` - Embedder configuration ID +- `WithVectorDB(id)` - Vector database configuration ID +- `WithSources(sources)` - At least one source must be provided + +**Optional options:** + +- `WithDescription(desc)` - Knowledge base description +- `WithIngest(mode)` - Ingest mode (manual or on_start) +- `WithChunking(config)` - Chunking configuration +- `WithPreprocess(config)` - Preprocessing configuration +- `WithRetrieval(config)` - Retrieval configuration +- `WithMetadata(config)` - Metadata configuration + +### NewBinding + +Creates a knowledge binding for attaching knowledge bases to agents. + +```go +binding, err := knowledge.NewBinding( + ctx, + "my-knowledge-base", + knowledge.WithBindingTopK(&topK), + knowledge.WithBindingMinScore(&minScore), +) +``` + +**Optional options:** + +- `WithBindingTopK(*int)` - Override retrieval top-k +- `WithBindingMinScore(*float64)` - Override minimum score threshold +- `WithBindingMaxTokens(*int)` - Override maximum tokens +- `WithBindingInjectAs(string)` - How to inject retrieved context +- `WithBindingFallback(string)` - Fallback message when no results +- `WithBindingFilters(map[string]string)` - Metadata filters + +### NewEmbedder + +Creates an embedder configuration for converting text to vectors. 
+ +```go +cfg, err := knowledge.NewEmbedder( + ctx, + "my-embedder", + "openai", + "text-embedding-ada-002", + knowledge.WithDimension(1536), + knowledge.WithAPIKey(os.Getenv("OPENAI_API_KEY")), +) +``` + +**Supported providers:** + +- `openai` - OpenAI embeddings +- `google` - Google embeddings +- `azure` - Azure OpenAI embeddings +- `cohere` - Cohere embeddings +- `ollama` - Ollama embeddings + +**Required options:** + +- `WithDimension(int)` - Vector dimension + +**Optional options:** + +- `WithAPIKey(string)` - API key for provider +- `WithBatchSize(int)` - Batch size for embedding operations +- `WithMaxConcurrentWorkers(int)` - Maximum concurrent workers + +### NewVectorDB + +Creates a vector database configuration for storing embeddings. + +```go +cfg, err := knowledge.NewVectorDB( + ctx, + "my-vectordb", + "pgvector", + knowledge.WithDSN("postgres://localhost/mydb"), + knowledge.WithVectorDBDimension(1536), +) +``` + +**Supported types:** + +- `pgvector` - PostgreSQL with pgvector extension +- `qdrant` - Qdrant vector database +- `redis` - Redis with vector search +- `filesystem` - Local filesystem storage + +**Type-specific requirements:** + +**PGVector:** + +- `WithDSN(string)` - PostgreSQL connection string (required) +- `WithVectorDBDimension(int)` - Vector dimension (required) +- `WithPGVector(*PGVectorConfig)` - Advanced PGVector configuration (optional) + +**Qdrant:** + +- `WithDSN(string)` - Qdrant URL (required) +- `WithCollection(string)` - Collection name (required) +- `WithVectorDBDimension(int)` - Vector dimension (required) + +**Redis:** + +- `WithDSN(string)` - Redis connection string (optional) +- `WithVectorDBDimension(int)` - Vector dimension (required) + +**Filesystem:** + +- `WithPath(string)` - Storage path (optional) +- `WithVectorDBDimension(int)` - Vector dimension (required) + +## Design Notes + +### Multi-Type Package Structure + +Unlike other SDK packages that have a single config type, the knowledge package manages **4 different 
config types**: + +1. `BaseConfig` - Knowledge base configuration +2. `KnowledgeBinding` - Agent binding configuration +3. `EmbedderConfig` - Text embedding configuration +4. `VectorDBConfig` - Vector storage configuration + +To avoid type collisions when all types live in the same package, we use: + +- Distinct option type names: `BaseOption`, `BindingOption`, `EmbedderOption`, `VectorDBOption` +- Prefixed function names where needed: `WithBindingTopK`, `WithVectorDBDimension` +- Manual option files (not code-generated) for better control + +### Validation Strategy + +All constructors follow a comprehensive validation pattern: + +1. Context validation (nil check) +2. ID validation (empty check + format) +3. Apply functional options +4. Type-specific validation (provider, db type, etc.) +5. Apply defaults from global config +6. Range validation (chunk size, top-k, scores, etc.) +7. Cross-field validation (overlap < size, etc.) +8. Deep copy before return (immutability) + +Errors are collected and returned as a `BuildError` containing all validation failures, not fail-fast. + +### Testing Coverage + +All constructors have comprehensive test coverage including: + +- Minimal configuration (happy path) +- Input normalization (trim, lowercase) +- Nil context validation +- Empty/invalid ID validation +- Required field validation +- Type-specific validation (provider, db type) +- Range validation (dimensions, top-k, scores) +- Default value application + +## Migration from Builder Pattern + +The knowledge package was migrated from the old builder pattern to functional options: + +**Old pattern (builder):** + +```go +kb := knowledge.NewBuilder("my-kb"). + SetEmbedder("embedder-id"). + SetVectorDB("vectordb-id"). + AddSource(source). 
+ Build(ctx) +``` + +**New pattern (functional options):** + +```go +kb, err := knowledge.NewBase( + ctx, + "my-kb", + knowledge.WithEmbedder("embedder-id"), + knowledge.WithVectorDB("vectordb-id"), + knowledge.WithSources([]Source{source}), +) +``` + +Key improvements: + +- Context-first API (required for all constructors) +- Immutable configs (deep copy on return) +- Better error aggregation (all errors reported at once) +- Type safety (compile-time option validation) +- Consistent patterns across SDK + +## Related Packages + +- `engine/knowledge` - Core knowledge base engine types +- `engine/core` - Core types including `KnowledgeBinding` +- `sdk/v2/internal/errors` - Build error aggregation +- `sdk/v2/internal/validate` - Validation utilities diff --git a/sdk/knowledge/base_options.go b/sdk/knowledge/base_options.go new file mode 100644 index 00000000..10b4d7ac --- /dev/null +++ b/sdk/knowledge/base_options.go @@ -0,0 +1,71 @@ +package knowledge + +import engineknowledge "github.com/compozy/compozy/engine/knowledge" + +// BaseOption is a functional option for configuring BaseConfig +type BaseOption func(*engineknowledge.BaseConfig) + +// WithDescription sets the Description field +func WithDescription(description string) BaseOption { + return func(cfg *engineknowledge.BaseConfig) { + cfg.Description = description + } +} + +// WithEmbedder sets the Embedder field +func WithEmbedder(embedder string) BaseOption { + return func(cfg *engineknowledge.BaseConfig) { + cfg.Embedder = embedder + } +} + +// WithVectorDB sets the VectorDB field +func WithVectorDB(vectorDB string) BaseOption { + return func(cfg *engineknowledge.BaseConfig) { + cfg.VectorDB = vectorDB + } +} + +// WithIngest sets the Ingest field +func WithIngest(ingest engineknowledge.IngestMode) BaseOption { + return func(cfg *engineknowledge.BaseConfig) { + cfg.Ingest = ingest + } +} + +// WithSources sets the Sources field +func WithSources(sources []engineknowledge.SourceConfig) BaseOption { + return 
func(cfg *engineknowledge.BaseConfig) { + cfg.Sources = sources + } +} + +// WithChunking sets the Chunking field +func WithChunking(chunking engineknowledge.ChunkingConfig) BaseOption { + return func(cfg *engineknowledge.BaseConfig) { + cfg.Chunking = chunking + } +} + +// WithPreprocess sets the Preprocess field +func WithPreprocess(preprocess engineknowledge.PreprocessConfig) BaseOption { + return func(cfg *engineknowledge.BaseConfig) { + cfg.Preprocess = preprocess + } +} + +// WithRetrieval sets the Retrieval field +func WithRetrieval(retrieval *engineknowledge.RetrievalConfig) BaseOption { + return func(cfg *engineknowledge.BaseConfig) { + if retrieval != nil { + cfg.Retrieval = *retrieval + } + } +} + +// WithMetadata sets the Metadata field +func WithMetadata(metadata engineknowledge.MetadataConfig) BaseOption { + return func(cfg *engineknowledge.BaseConfig) { + cfg.Metadata = metadata + } +} diff --git a/sdk/knowledge/binding_options.go b/sdk/knowledge/binding_options.go new file mode 100644 index 00000000..dca013dc --- /dev/null +++ b/sdk/knowledge/binding_options.go @@ -0,0 +1,48 @@ +package knowledge + +import "github.com/compozy/compozy/engine/core" + +// BindingOption is a functional option for configuring KnowledgeBinding +type BindingOption func(*core.KnowledgeBinding) + +// WithBindingTopK sets the TopK field for knowledge binding +func WithBindingTopK(topK *int) BindingOption { + return func(cfg *core.KnowledgeBinding) { + cfg.TopK = topK + } +} + +// WithBindingMinScore sets the MinScore field for knowledge binding +func WithBindingMinScore(minScore *float64) BindingOption { + return func(cfg *core.KnowledgeBinding) { + cfg.MinScore = minScore + } +} + +// WithBindingMaxTokens sets the MaxTokens field for knowledge binding +func WithBindingMaxTokens(maxTokens *int) BindingOption { + return func(cfg *core.KnowledgeBinding) { + cfg.MaxTokens = maxTokens + } +} + +// WithBindingInjectAs sets the InjectAs field for knowledge binding +func 
WithBindingInjectAs(injectAs string) BindingOption { + return func(cfg *core.KnowledgeBinding) { + cfg.InjectAs = injectAs + } +} + +// WithBindingFallback sets the Fallback field for knowledge binding +func WithBindingFallback(fallback string) BindingOption { + return func(cfg *core.KnowledgeBinding) { + cfg.Fallback = fallback + } +} + +// WithBindingFilters sets the Filters field for knowledge binding +func WithBindingFilters(filters map[string]string) BindingOption { + return func(cfg *core.KnowledgeBinding) { + cfg.Filters = filters + } +} diff --git a/sdk/knowledge/constructors.go b/sdk/knowledge/constructors.go new file mode 100644 index 00000000..a8cc0af9 --- /dev/null +++ b/sdk/knowledge/constructors.go @@ -0,0 +1,466 @@ +package knowledge + +import ( + "context" + "fmt" + "strings" + + "github.com/compozy/compozy/engine/core" + engineknowledge "github.com/compozy/compozy/engine/knowledge" + appconfig "github.com/compozy/compozy/pkg/config" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +//nolint:gocyclo,funlen // Complex validation logic for multiple config types +func NewBase(ctx context.Context, id string, opts ...BaseOption) (*engineknowledge.BaseConfig, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + cfg := appconfig.FromContext(ctx) + defaults := engineknowledge.DefaultsFromConfig(cfg) + + config := &engineknowledge.BaseConfig{ + ID: strings.TrimSpace(id), + Sources: make([]engineknowledge.SourceConfig, 0), + } + + for _, opt := range opts { + opt(config) + } + + log.Debug("creating knowledge base configuration", "knowledge_base", config.ID, "sources", len(config.Sources)) + + collected := make([]error, 0, 12) + + // Validate ID + config.ID = strings.TrimSpace(config.ID) + if err := validate.ID(ctx, config.ID); err != nil { + collected = append(collected, 
fmt.Errorf("knowledge_base id is invalid: %w", err)) + } + + // Validate embedder + config.Embedder = strings.TrimSpace(config.Embedder) + if err := validate.NonEmpty(ctx, "embedder", config.Embedder); err != nil { + collected = append(collected, fmt.Errorf("embedder id is required: %w", err)) + } else if err := validate.ID(ctx, config.Embedder); err != nil { + collected = append(collected, fmt.Errorf("embedder id is invalid: %w", err)) + } + + // Validate vector DB + config.VectorDB = strings.TrimSpace(config.VectorDB) + if err := validate.NonEmpty(ctx, "vector_db", config.VectorDB); err != nil { + collected = append(collected, fmt.Errorf("vector_db id is required: %w", err)) + } else if err := validate.ID(ctx, config.VectorDB); err != nil { + collected = append(collected, fmt.Errorf("vector_db id is invalid: %w", err)) + } + + // Apply defaults and normalize + if config.Ingest == "" { + config.Ingest = engineknowledge.IngestManual + } + config.Ingest = engineknowledge.IngestMode(strings.ToLower(strings.TrimSpace(string(config.Ingest)))) + + // Validate ingest mode + if config.Ingest != engineknowledge.IngestManual && config.Ingest != engineknowledge.IngestOnStart { + collected = append(collected, fmt.Errorf("ingest mode %q is not supported", config.Ingest)) + } + + // Validate sources + if len(config.Sources) == 0 { + collected = append(collected, fmt.Errorf("at least one source must be added")) + } + + // Normalize and validate chunking + if config.Chunking.Strategy == "" { + config.Chunking.Strategy = engineknowledge.ChunkStrategyRecursiveTextSplitter + } + if config.Chunking.Size == 0 { + config.Chunking.Size = defaults.ChunkSize + } + if config.Chunking.Overlap == nil { + overlap := defaults.ChunkOverlap + config.Chunking.Overlap = &overlap + } + + if err := validate.Range( + ctx, "chunking.size", config.Chunking.Size, + engineknowledge.MinChunkSize, engineknowledge.MaxChunkSize, + ); err != nil { + collected = append(collected, err) + } + overlap := 
config.Chunking.OverlapValue() + maxOverlap := config.Chunking.Size - 1 + if maxOverlap < 0 { + maxOverlap = 0 + } + if err := validate.Range(ctx, "chunking.overlap", overlap, 0, maxOverlap); err != nil { + collected = append(collected, err) + } + if config.Chunking.Size <= overlap { + collected = append( + collected, + fmt.Errorf( + "chunking.overlap must be less than chunking.size: overlap %d, size %d", + overlap, + config.Chunking.Size, + ), + ) + } + + // Normalize preprocess + if config.Preprocess.Deduplicate == nil { + val := true + config.Preprocess.Deduplicate = &val + } + + // Normalize and validate retrieval + if config.Retrieval.TopK <= 0 { + config.Retrieval.TopK = defaults.RetrievalTopK + } + if config.Retrieval.MinScore == nil { + score := defaults.RetrievalMinScore + config.Retrieval.MinScore = &score + } + if config.Retrieval.MaxTokens <= 0 { + config.Retrieval.MaxTokens = 1200 // default from old builder + } + + if config.Retrieval.TopK <= 0 || config.Retrieval.TopK > 50 { + collected = append( + collected, + fmt.Errorf("retrieval.top_k must be between 1 and 50: got %d", config.Retrieval.TopK), + ) + } + minScore := config.Retrieval.MinScoreValue() + if minScore < engineknowledge.MinScoreFloor || minScore > engineknowledge.MaxScoreCeiling { + collected = append( + collected, + fmt.Errorf( + "retrieval.min_score must be between %.2f and %.2f: got %.4f", + engineknowledge.MinScoreFloor, + engineknowledge.MaxScoreCeiling, + minScore, + ), + ) + } + if config.Retrieval.MaxTokens <= 0 { + collected = append( + collected, + fmt.Errorf("retrieval.max_tokens must be greater than zero: got %d", config.Retrieval.MaxTokens), + ) + } + + if len(collected) > 0 { + return nil, &sdkerrors.BuildError{Errors: collected} + } + + cloned, err := core.DeepCopy(config) + if err != nil { + return nil, fmt.Errorf("failed to clone knowledge base config: %w", err) + } + return cloned, nil +} + +// NewBinding creates a knowledge binding configuration using functional options 
+func NewBinding(ctx context.Context, id string, opts ...BindingOption) (*core.KnowledgeBinding, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + + config := &core.KnowledgeBinding{ + ID: strings.TrimSpace(id), + } + + for _, opt := range opts { + opt(config) + } + + log.Debug("creating knowledge binding", "knowledge_base", config.ID) + + collected := make([]error, 0, 5) + + // Validate ID + config.ID = strings.TrimSpace(config.ID) + if err := validate.ID(ctx, config.ID); err != nil { + collected = append(collected, fmt.Errorf("knowledge binding id is invalid: %w", err)) + } + + // Validate optional overrides + if config.TopK != nil && *config.TopK <= 0 { + collected = append(collected, fmt.Errorf("top_k override must be greater than zero: got %d", *config.TopK)) + } + if config.MinScore != nil { + score := *config.MinScore + if score < 0.0 || score > 1.0 { + collected = append( + collected, + fmt.Errorf("min_score override must be between 0.0 and 1.0 inclusive: got %.4f", score), + ) + } + } + if config.MaxTokens != nil && *config.MaxTokens <= 0 { + collected = append( + collected, + fmt.Errorf("max_tokens override must be greater than zero: got %d", *config.MaxTokens), + ) + } + + if len(collected) > 0 { + return nil, &sdkerrors.BuildError{Errors: collected} + } + + // Clone using the built-in Clone method + cloned := config.Clone() + return &cloned, nil +} + +//nolint:funlen // Comprehensive validation requires extended function +func NewEmbedder( + ctx context.Context, + id string, + provider string, + model string, + opts ...EmbedderOption, +) (*engineknowledge.EmbedderConfig, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + cfg := appconfig.FromContext(ctx) + defaults := engineknowledge.DefaultsFromConfig(cfg) + + config := &engineknowledge.EmbedderConfig{ + ID: strings.TrimSpace(id), + Provider: 
strings.ToLower(strings.TrimSpace(provider)), + Model: strings.TrimSpace(model), + Config: engineknowledge.EmbedderRuntimeConfig{}, + } + + for _, opt := range opts { + opt(config) + } + + log.Debug("creating embedder configuration", "embedder", config.ID, "provider", config.Provider) + + collected := make([]error, 0, 8) + + // Validate ID + config.ID = strings.TrimSpace(config.ID) + if err := validate.ID(ctx, config.ID); err != nil { + collected = append(collected, fmt.Errorf("embedder id is invalid: %w", err)) + } + + // Validate provider + config.Provider = strings.ToLower(strings.TrimSpace(config.Provider)) + if err := validate.NonEmpty(ctx, "provider", config.Provider); err != nil { + collected = append(collected, err) + } else { + supportedProviders := map[string]struct{}{ + "openai": {}, + "google": {}, + "azure": {}, + "cohere": {}, + "ollama": {}, + } + if _, ok := supportedProviders[config.Provider]; !ok { + collected = append( + collected, + fmt.Errorf( + "provider %q is not supported; must be one of openai, google, azure, cohere, ollama", + config.Provider, + ), + ) + } + } + + // Validate model + config.Model = strings.TrimSpace(config.Model) + if err := validate.NonEmpty(ctx, "model", config.Model); err != nil { + collected = append(collected, err) + } + + // Validate dimension + if config.Config.Dimension <= 0 { + collected = append( + collected, + fmt.Errorf("config.dimension must be greater than zero: got %d", config.Config.Dimension), + ) + } + + // Apply defaults + if config.Config.BatchSize <= 0 { + config.Config.BatchSize = defaults.EmbedderBatchSize + } + if config.Config.MaxConcurrentWorkers <= 0 { + config.Config.MaxConcurrentWorkers = 4 // default from old builder + } + + // Validate after applying defaults + if config.Config.BatchSize <= 0 { + collected = append( + collected, + fmt.Errorf("config.batch_size must be greater than zero: got %d", config.Config.BatchSize), + ) + } + if config.Config.MaxConcurrentWorkers <= 0 { + collected = 
append( + collected, + fmt.Errorf( + "config.max_concurrent_workers must be greater than zero: got %d", + config.Config.MaxConcurrentWorkers, + ), + ) + } + + config.APIKey = strings.TrimSpace(config.APIKey) + + if len(collected) > 0 { + return nil, &sdkerrors.BuildError{Errors: collected} + } + + cloned, err := core.DeepCopy(config) + if err != nil { + return nil, fmt.Errorf("failed to clone embedder config: %w", err) + } + return cloned, nil +} + +//nolint:gocyclo,funlen // Complex validation logic for multiple vector DB types +func NewVectorDB( + ctx context.Context, + id string, + dbType string, + opts ...VectorDBOption, +) (*engineknowledge.VectorDBConfig, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + + normalizedType := engineknowledge.VectorDBType(strings.ToLower(strings.TrimSpace(dbType))) + + config := &engineknowledge.VectorDBConfig{ + ID: strings.TrimSpace(id), + Type: normalizedType, + Config: engineknowledge.VectorDBConnConfig{ + PGVector: nil, + }, + } + + for _, opt := range opts { + opt(config) + } + + log.Debug("creating vector db configuration", "vector_db", config.ID, "type", config.Type) + + collected := make([]error, 0, 10) + + // Validate ID + config.ID = strings.TrimSpace(config.ID) + if err := validate.ID(ctx, config.ID); err != nil { + collected = append(collected, fmt.Errorf("vector_db id is invalid: %w", err)) + } + + // Validate type + config.Type = engineknowledge.VectorDBType(strings.ToLower(strings.TrimSpace(string(config.Type)))) + supportedTypes := map[engineknowledge.VectorDBType]struct{}{ + engineknowledge.VectorDBTypePGVector: {}, + engineknowledge.VectorDBTypeQdrant: {}, + engineknowledge.VectorDBTypeRedis: {}, + engineknowledge.VectorDBTypeFilesystem: {}, + } + if _, ok := supportedTypes[config.Type]; !ok { + collected = append( + collected, + fmt.Errorf( + "vector_db type %q is not supported; must be one of pgvector, qdrant, redis, filesystem", + config.Type, 
+ ), + ) + } + + // Type-specific validation + switch config.Type { + case engineknowledge.VectorDBTypePGVector: + config.Config.DSN = strings.TrimSpace(config.Config.DSN) + if err := validate.NonEmpty(ctx, "dsn", config.Config.DSN); err != nil { + collected = append(collected, fmt.Errorf("pgvector requires config.dsn: %w", err)) + } + if config.Config.Dimension <= 0 { + collected = append( + collected, + fmt.Errorf("config.dimension must be greater than zero: got %d", config.Config.Dimension), + ) + } + // Validate PGVector-specific config + if pg := config.Config.PGVector; pg != nil { + if idx := pg.Index; idx != nil { + idx.Type = strings.ToLower(strings.TrimSpace(idx.Type)) + if idx.Type == "" { + collected = append(collected, fmt.Errorf("pgvector.index.type cannot be empty")) + } + if idx.Lists <= 0 { + collected = append(collected, fmt.Errorf("pgvector.index.lists must be greater than zero")) + } + } + if pool := pg.Pool; pool != nil { + if pool.MinConns < 0 { + collected = append(collected, fmt.Errorf("pgvector.pool.min_conns must be >= 0")) + } + if pool.MaxConns < 0 { + collected = append(collected, fmt.Errorf("pgvector.pool.max_conns must be >= 0")) + } + if pool.MaxConns > 0 && pool.MinConns > pool.MaxConns { + collected = append(collected, fmt.Errorf("pgvector.pool.min_conns cannot exceed max_conns")) + } + } + } + case engineknowledge.VectorDBTypeQdrant: + config.Config.DSN = strings.TrimSpace(config.Config.DSN) + if err := validate.NonEmpty(ctx, "dsn", config.Config.DSN); err != nil { + collected = append(collected, fmt.Errorf("qdrant requires config.dsn: %w", err)) + } else if err := validate.URL(ctx, config.Config.DSN); err != nil { + collected = append(collected, fmt.Errorf("qdrant config.dsn must be a valid url: %w", err)) + } + config.Config.Collection = strings.TrimSpace(config.Config.Collection) + if err := validate.NonEmpty(ctx, "collection", config.Config.Collection); err != nil { + collected = append(collected, fmt.Errorf("qdrant requires 
config.collection: %w", err)) + } + if config.Config.Dimension <= 0 { + collected = append( + collected, + fmt.Errorf("config.dimension must be greater than zero: got %d", config.Config.Dimension), + ) + } + case engineknowledge.VectorDBTypeRedis: + config.Config.DSN = strings.TrimSpace(config.Config.DSN) + // DSN is optional for Redis + if config.Config.Dimension <= 0 { + collected = append( + collected, + fmt.Errorf("config.dimension must be greater than zero: got %d", config.Config.Dimension), + ) + } + case engineknowledge.VectorDBTypeFilesystem: + if config.Config.Dimension <= 0 { + collected = append( + collected, + fmt.Errorf("config.dimension must be greater than zero: got %d", config.Config.Dimension), + ) + } + } + + if len(collected) > 0 { + return nil, &sdkerrors.BuildError{Errors: collected} + } + + cloned, err := core.DeepCopy(config) + if err != nil { + return nil, fmt.Errorf("failed to clone vector db config: %w", err) + } + return cloned, nil +} diff --git a/sdk/knowledge/constructors_test.go b/sdk/knowledge/constructors_test.go new file mode 100644 index 00000000..cf99acfc --- /dev/null +++ b/sdk/knowledge/constructors_test.go @@ -0,0 +1,363 @@ +package knowledge + +import ( + "context" + "testing" + + engineknowledge "github.com/compozy/compozy/engine/knowledge" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewBase(t *testing.T) { + t.Run("Should create knowledge base with minimal configuration", func(t *testing.T) { + ctx := t.Context() + sources := []engineknowledge.SourceConfig{{Type: "file", Path: "/tmp/test.txt"}} + cfg, err := NewBase( + ctx, + "test-kb", + WithEmbedder("test-embedder"), + WithVectorDB("test-vectordb"), + WithSources(sources), + ) + require.NoError(t, err) + require.NotNil(t, cfg) + assert.Equal(t, "test-kb", cfg.ID) + assert.Equal(t, "test-embedder", cfg.Embedder) + assert.Equal(t, "test-vectordb", cfg.VectorDB) 
+ }) + t.Run("Should trim whitespace from ID", func(t *testing.T) { + ctx := t.Context() + sources := []engineknowledge.SourceConfig{{Type: "file", Path: "/tmp/test.txt"}} + cfg, err := NewBase( + ctx, + " test-kb ", + WithEmbedder("test-embedder"), + WithVectorDB("test-vectordb"), + WithSources(sources), + ) + require.NoError(t, err) + assert.Equal(t, "test-kb", cfg.ID) + }) + t.Run("Should fail when context is nil", func(t *testing.T) { + var nilCtx context.Context + _, err := NewBase(nilCtx, "test-kb") + require.Error(t, err, "expected error for nil context") + assert.Equal(t, "context is required", err.Error()) + }) + t.Run("Should fail when ID is empty", func(t *testing.T) { + ctx := t.Context() + _, err := NewBase(ctx, "", WithEmbedder("test-embedder"), WithVectorDB("test-vectordb")) + require.Error(t, err, "expected error for empty ID") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should fail when embedder is missing", func(t *testing.T) { + ctx := t.Context() + _, err := NewBase(ctx, "test-kb", WithVectorDB("test-vectordb")) + require.Error(t, err, "expected error for missing embedder") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should fail when vectordb is missing", func(t *testing.T) { + ctx := t.Context() + _, err := NewBase(ctx, "test-kb", WithEmbedder("test-embedder")) + require.Error(t, err, "expected error for missing vectordb") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should fail when no sources provided", func(t *testing.T) { + ctx := t.Context() + _, err := NewBase(ctx, "test-kb", WithEmbedder("test-embedder"), WithVectorDB("test-vectordb")) + require.Error(t, err, "expected error for no sources") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should apply defaults for optional fields", func(t *testing.T) { + ctx := t.Context() + sources := 
[]engineknowledge.SourceConfig{{Type: "file", Path: "/tmp/test.txt"}} + cfg, err := NewBase( + ctx, + "test-kb", + WithEmbedder("test-embedder"), + WithVectorDB("test-vectordb"), + WithSources(sources), + ) + require.NoError(t, err) + assert.Equal(t, engineknowledge.IngestManual, cfg.Ingest) + assert.Equal(t, engineknowledge.ChunkStrategyRecursiveTextSplitter, cfg.Chunking.Strategy) + assert.NotZero(t, cfg.Chunking.Size) + }) + t.Run("Should validate chunk size range", func(t *testing.T) { + ctx := t.Context() + sources := []engineknowledge.SourceConfig{{Type: "file", Path: "/tmp/test.txt"}} + chunking := engineknowledge.ChunkingConfig{Size: 10} + _, err := NewBase( + ctx, + "test-kb", + WithEmbedder("test-embedder"), + WithVectorDB("test-vectordb"), + WithSources(sources), + WithChunking(chunking), + ) + require.Error(t, err, "expected error for chunk size below minimum") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should validate retrieval topk range", func(t *testing.T) { + ctx := t.Context() + sources := []engineknowledge.SourceConfig{{Type: "file", Path: "/tmp/test.txt"}} + retrieval := engineknowledge.RetrievalConfig{TopK: 100} + _, err := NewBase( + ctx, + "test-kb", + WithEmbedder("test-embedder"), + WithVectorDB("test-vectordb"), + WithSources(sources), + WithRetrieval(&retrieval), + ) + require.Error(t, err, "expected error for topk above maximum") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) +} + +func TestNewBinding(t *testing.T) { + t.Run("Should create binding with minimal configuration", func(t *testing.T) { + ctx := t.Context() + binding, err := NewBinding(ctx, "test-kb") + require.NoError(t, err) + require.NotNil(t, binding) + assert.Equal(t, "test-kb", binding.ID) + }) + t.Run("Should trim whitespace from ID", func(t *testing.T) { + ctx := t.Context() + binding, err := NewBinding(ctx, " test-kb ") + require.NoError(t, err) + assert.Equal(t, "test-kb", binding.ID) + }) + 
t.Run("Should fail when context is nil", func(t *testing.T) { + var nilCtx context.Context + _, err := NewBinding(nilCtx, "test-kb") + require.Error(t, err, "expected error for nil context") + assert.Equal(t, "context is required", err.Error()) + }) + t.Run("Should fail when ID is empty", func(t *testing.T) { + ctx := t.Context() + _, err := NewBinding(ctx, "") + require.Error(t, err, "expected error for empty ID") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should accept optional topk override", func(t *testing.T) { + ctx := t.Context() + topk := 10 + binding, err := NewBinding(ctx, "test-kb", WithBindingTopK(&topk)) + require.NoError(t, err) + require.NotNil(t, binding.TopK) + assert.Equal(t, 10, *binding.TopK) + }) + t.Run("Should fail when topk override is invalid", func(t *testing.T) { + ctx := t.Context() + topk := -1 + _, err := NewBinding(ctx, "test-kb", WithBindingTopK(&topk)) + require.Error(t, err, "expected error for invalid topk") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should validate min score range", func(t *testing.T) { + ctx := t.Context() + minScore := 1.5 + _, err := NewBinding(ctx, "test-kb", WithBindingMinScore(&minScore)) + require.Error(t, err, "expected error for min score out of range") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) +} + +func TestNewEmbedder(t *testing.T) { + t.Run("Should create embedder with minimal configuration", func(t *testing.T) { + ctx := t.Context() + cfg, err := NewEmbedder(ctx, "test-embedder", "openai", "text-embedding-ada-002", WithDimension(1536)) + require.NoError(t, err) + require.NotNil(t, cfg) + assert.Equal(t, "test-embedder", cfg.ID) + assert.Equal(t, "openai", cfg.Provider) + assert.Equal(t, "text-embedding-ada-002", cfg.Model) + }) + t.Run("Should trim and normalize provider", func(t *testing.T) { + ctx := t.Context() + cfg, err := NewEmbedder(ctx, "test-embedder", " OpenAI ", 
"text-embedding-ada-002", WithDimension(1536)) + require.NoError(t, err) + assert.Equal(t, "openai", cfg.Provider) + }) + t.Run("Should fail when context is nil", func(t *testing.T) { + var nilCtx context.Context + _, err := NewEmbedder(nilCtx, "test-embedder", "openai", "text-embedding-ada-002", WithDimension(1536)) + require.Error(t, err, "expected error for nil context") + assert.Equal(t, "context is required", err.Error()) + }) + t.Run("Should fail when ID is empty", func(t *testing.T) { + ctx := t.Context() + _, err := NewEmbedder(ctx, "", "openai", "text-embedding-ada-002", WithDimension(1536)) + require.Error(t, err, "expected error for empty ID") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should fail when provider is invalid", func(t *testing.T) { + ctx := t.Context() + _, err := NewEmbedder(ctx, "test-embedder", "invalid-provider", "model", WithDimension(1536)) + require.Error(t, err, "expected error for invalid provider") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should fail when model is empty", func(t *testing.T) { + ctx := t.Context() + _, err := NewEmbedder(ctx, "test-embedder", "openai", "", WithDimension(1536)) + require.Error(t, err, "expected error for empty model") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should fail when dimension is invalid", func(t *testing.T) { + ctx := t.Context() + _, err := NewEmbedder(ctx, "test-embedder", "openai", "text-embedding-ada-002", WithDimension(0)) + require.Error(t, err, "expected error for invalid dimension") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should apply defaults for batch size and workers", func(t *testing.T) { + ctx := t.Context() + cfg, err := NewEmbedder(ctx, "test-embedder", "openai", "text-embedding-ada-002", WithDimension(1536)) + require.NoError(t, err) + assert.NotZero(t, cfg.Config.BatchSize) + 
assert.NotZero(t, cfg.Config.MaxConcurrentWorkers) + }) +} + +func TestNewVectorDB(t *testing.T) { + t.Run("Should create pgvector with minimal configuration", func(t *testing.T) { + ctx := t.Context() + cfg, err := NewVectorDB( + ctx, + "test-vectordb", + "pgvector", + WithDSN("postgres://localhost/test"), + WithVectorDBDimension(1536), + ) + require.NoError(t, err) + require.NotNil(t, cfg) + assert.Equal(t, "test-vectordb", cfg.ID) + assert.Equal(t, engineknowledge.VectorDBTypePGVector, cfg.Type) + }) + t.Run("Should normalize database type", func(t *testing.T) { + ctx := t.Context() + cfg, err := NewVectorDB( + ctx, + "test-vectordb", + " PGVector ", + WithDSN("postgres://localhost/test"), + WithVectorDBDimension(1536), + ) + require.NoError(t, err) + assert.Equal(t, engineknowledge.VectorDBTypePGVector, cfg.Type) + }) + t.Run("Should fail when context is nil", func(t *testing.T) { + var nilCtx context.Context + _, err := NewVectorDB( + nilCtx, + "test-vectordb", + "pgvector", + WithDSN("postgres://localhost/test"), + WithVectorDBDimension(1536), + ) + require.Error(t, err, "expected error for nil context") + assert.Equal(t, "context is required", err.Error()) + }) + t.Run("Should fail when ID is empty", func(t *testing.T) { + ctx := t.Context() + _, err := NewVectorDB(ctx, "", "pgvector", WithDSN("postgres://localhost/test"), WithVectorDBDimension(1536)) + require.Error(t, err, "expected error for empty ID") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should fail when type is invalid", func(t *testing.T) { + ctx := t.Context() + _, err := NewVectorDB( + ctx, + "test-vectordb", + "invalid-type", + WithDSN("postgres://localhost/test"), + WithVectorDBDimension(1536), + ) + require.Error(t, err, "expected error for invalid type") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should fail when pgvector DSN is missing", func(t *testing.T) { + ctx := t.Context() + _, err := 
NewVectorDB(ctx, "test-vectordb", "pgvector", WithVectorDBDimension(1536)) + require.Error(t, err, "expected error for missing DSN") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should fail when dimension is invalid", func(t *testing.T) { + ctx := t.Context() + _, err := NewVectorDB( + ctx, + "test-vectordb", + "pgvector", + WithDSN("postgres://localhost/test"), + WithVectorDBDimension(0), + ) + require.Error(t, err, "expected error for invalid dimension") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should create qdrant with collection", func(t *testing.T) { + ctx := t.Context() + cfg, err := NewVectorDB( + ctx, + "test-vectordb", + "qdrant", + WithDSN("http://localhost:6333"), + WithCollection("test-collection"), + WithVectorDBDimension(1536), + ) + require.NoError(t, err) + assert.Equal(t, engineknowledge.VectorDBTypeQdrant, cfg.Type) + assert.Equal(t, "test-collection", cfg.Config.Collection) + }) + t.Run("Should fail when qdrant DSN is invalid URL", func(t *testing.T) { + ctx := t.Context() + _, err := NewVectorDB( + ctx, + "test-vectordb", + "qdrant", + WithDSN("not-a-valid-url"), + WithCollection("test-collection"), + WithVectorDBDimension(1536), + ) + require.Error(t, err, "expected error for invalid qdrant URL") + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) + t.Run("Should create filesystem vectordb", func(t *testing.T) { + ctx := t.Context() + cfg, err := NewVectorDB( + ctx, + "test-vectordb", + "filesystem", + WithPath("/tmp/vectors"), + WithVectorDBDimension(1536), + ) + require.NoError(t, err) + assert.Equal(t, engineknowledge.VectorDBTypeFilesystem, cfg.Type) + }) + t.Run("Should create redis vectordb", func(t *testing.T) { + ctx := t.Context() + cfg, err := NewVectorDB(ctx, "test-vectordb", "redis", WithVectorDBDimension(1536)) + require.NoError(t, err) + assert.Equal(t, engineknowledge.VectorDBTypeRedis, cfg.Type) + }) +} diff --git 
a/sdk/knowledge/embedder_options.go b/sdk/knowledge/embedder_options.go new file mode 100644 index 00000000..34c966de --- /dev/null +++ b/sdk/knowledge/embedder_options.go @@ -0,0 +1,41 @@ +package knowledge + +import engineknowledge "github.com/compozy/compozy/engine/knowledge" + +// EmbedderOption is a functional option for configuring EmbedderConfig +type EmbedderOption func(*engineknowledge.EmbedderConfig) + +// WithAPIKey sets the APIKey field +func WithAPIKey(apiKey string) EmbedderOption { + return func(cfg *engineknowledge.EmbedderConfig) { + cfg.APIKey = apiKey + } +} + +// WithEmbedderConfig sets the Config field +func WithEmbedderConfig(config engineknowledge.EmbedderRuntimeConfig) EmbedderOption { + return func(cfg *engineknowledge.EmbedderConfig) { + cfg.Config = config + } +} + +// WithDimension sets the dimension in the embedder config +func WithDimension(dimension int) EmbedderOption { + return func(cfg *engineknowledge.EmbedderConfig) { + cfg.Config.Dimension = dimension + } +} + +// WithBatchSize sets the batch size in the embedder config +func WithBatchSize(batchSize int) EmbedderOption { + return func(cfg *engineknowledge.EmbedderConfig) { + cfg.Config.BatchSize = batchSize + } +} + +// WithMaxConcurrentWorkers sets the max concurrent workers in the embedder config +func WithMaxConcurrentWorkers(maxWorkers int) EmbedderOption { + return func(cfg *engineknowledge.EmbedderConfig) { + cfg.Config.MaxConcurrentWorkers = maxWorkers + } +} diff --git a/sdk/knowledge/generate_base.go b/sdk/knowledge/generate_base.go new file mode 100644 index 00000000..40d2111e --- /dev/null +++ b/sdk/knowledge/generate_base.go @@ -0,0 +1,3 @@ +package knowledge + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/knowledge/config.go -struct BaseConfig -output base_options_generated.go -package knowledge diff --git a/sdk/knowledge/generate_binding.go b/sdk/knowledge/generate_binding.go new file mode 100644 index 00000000..f426ab0c --- 
/dev/null +++ b/sdk/knowledge/generate_binding.go @@ -0,0 +1,3 @@ +package knowledge + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/core/knowledge.go -struct KnowledgeBinding -output binding_options_generated.go -package knowledge diff --git a/sdk/knowledge/generate_embedder.go b/sdk/knowledge/generate_embedder.go new file mode 100644 index 00000000..e00cdf75 --- /dev/null +++ b/sdk/knowledge/generate_embedder.go @@ -0,0 +1,3 @@ +package knowledge + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/knowledge/config.go -struct EmbedderConfig -output embedder_options_generated.go -package knowledge diff --git a/sdk/knowledge/generate_vectordb.go b/sdk/knowledge/generate_vectordb.go new file mode 100644 index 00000000..e5f4bc9b --- /dev/null +++ b/sdk/knowledge/generate_vectordb.go @@ -0,0 +1,3 @@ +package knowledge + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/knowledge/config.go -struct VectorDBConfig -output vectordb_options_generated.go -package knowledge diff --git a/sdk/knowledge/vectordb_options.go b/sdk/knowledge/vectordb_options.go new file mode 100644 index 00000000..1a86f0fb --- /dev/null +++ b/sdk/knowledge/vectordb_options.go @@ -0,0 +1,52 @@ +package knowledge + +import engineknowledge "github.com/compozy/compozy/engine/knowledge" + +// VectorDBOption is a functional option for configuring VectorDBConfig +type VectorDBOption func(*engineknowledge.VectorDBConfig) + +// WithVectorDBConfig sets the Config field +func WithVectorDBConfig(config *engineknowledge.VectorDBConnConfig) VectorDBOption { + return func(cfg *engineknowledge.VectorDBConfig) { + if config != nil { + cfg.Config = *config + } + } +} + +// WithDSN sets the DSN in the vector DB config +func WithDSN(dsn string) VectorDBOption { + return func(cfg *engineknowledge.VectorDBConfig) { + cfg.Config.DSN = dsn + } +} + +// WithPath sets the Path in the vector DB config +func 
WithPath(path string) VectorDBOption { + return func(cfg *engineknowledge.VectorDBConfig) { + cfg.Config.Path = path + } +} + +// WithCollection sets the Collection in the vector DB config +func WithCollection(collection string) VectorDBOption { + return func(cfg *engineknowledge.VectorDBConfig) { + cfg.Config.Collection = collection + } +} + +// WithVectorDBDimension sets the Dimension in the vector DB config +func WithVectorDBDimension(dimension int) VectorDBOption { + return func(cfg *engineknowledge.VectorDBConfig) { + cfg.Config.Dimension = dimension + } +} + +// WithPGVector sets the PGVector config +func WithPGVector(pgVector *engineknowledge.PGVectorConfig) VectorDBOption { + return func(cfg *engineknowledge.VectorDBConfig) { + if pgVector != nil { + cfg.Config.PGVector = pgVector + } + } +} diff --git a/sdk/mcp/README.md b/sdk/mcp/README.md new file mode 100644 index 00000000..4aff9954 --- /dev/null +++ b/sdk/mcp/README.md @@ -0,0 +1,206 @@ +# MCP Package + +Model Context Protocol (MCP) configuration using functional options pattern. 
+ +## Installation + +```go +import "github.com/compozy/compozy/sdk/mcp" +``` + +## Usage + +### Basic Stdio MCP + +```go +cfg, err := mcp.New(ctx, "filesystem", + mcp.WithCommand("mcp-server-filesystem"), +) +``` + +### Basic HTTP MCP + +```go +cfg, err := mcp.New(ctx, "github", + mcp.WithURL("https://api.github.com/mcp"), +) +``` + +### Full Stdio Configuration + +```go +cfg, err := mcp.New(ctx, "filesystem", + mcp.WithCommand("mcp-server-filesystem"), + mcp.WithArgs([]string{"--root", "/data"}), + mcp.WithEnv(map[string]string{ + "LOG_LEVEL": "debug", + "ROOT_DIR": "/workspace", + }), + mcp.WithStartTimeout(30*time.Second), + mcp.WithMaxSessions(5), + mcp.WithProto("2025-03-26"), +) +``` + +### Full HTTP Configuration + +```go +cfg, err := mcp.New(ctx, "github", + mcp.WithURL("https://api.github.com/mcp"), + mcp.WithHeaders(map[string]string{ + "Authorization": "Bearer token123", + }), + mcp.WithTransport(mcpproxy.TransportStreamableHTTP), + mcp.WithMaxSessions(10), +) +``` + +## API Reference + +### Constructor + +```go +func New(ctx context.Context, id string, opts ...Option) (*enginemcp.Config, error) +``` + +Creates a new MCP configuration with the given ID and options. + +**Parameters:** + +- `ctx` - Context for validation and logging +- `id` - Unique identifier for the MCP server (required, non-empty) +- `opts` - Variadic functional options + +**Returns:** + +- `*enginemcp.Config` - Deep copied configuration +- `error` - Validation errors + +### Options + +#### WithResource + +```go +func WithResource(resource string) Option +``` + +Sets the resource identifier (defaults to ID). + +#### WithCommand + +```go +func WithCommand(command string) Option +``` + +Sets the command for stdio transport. + +#### WithArgs + +```go +func WithArgs(args []string) Option +``` + +Sets command arguments for stdio transport. + +#### WithURL + +```go +func WithURL(url string) Option +``` + +Sets the URL for HTTP-based transports. 
+ +#### WithHeaders + +```go +func WithHeaders(headers map[string]string) Option +``` + +Sets HTTP headers for URL-based MCPs. + +#### WithEnv + +```go +func WithEnv(env map[string]string) Option +``` + +Sets environment variables for command-based MCPs. + +#### WithTransport + +```go +func WithTransport(transport mcpproxy.TransportType) Option +``` + +Sets the transport type (stdio, sse, streamable-http). + +#### WithProto + +```go +func WithProto(proto string) Option +``` + +Sets the MCP protocol version (format: YYYY-MM-DD). + +#### WithStartTimeout + +```go +func WithStartTimeout(startTimeout time.Duration) Option +``` + +Sets startup timeout for command-based MCPs. + +#### WithMaxSessions + +```go +func WithMaxSessions(maxSessions int) Option +``` + +Sets maximum concurrent sessions (0 = unlimited). + +## Migration Guide + +### Before (Old SDK) + +```go +cfg, err := mcp.New("filesystem"). + WithCommand("mcp-server-filesystem", "--root", "/data"). + WithEnvVar("LOG_LEVEL", "debug"). + WithStartTimeout(30 * time.Second). + Build(ctx) +``` + +### After (New SDK) + +```go +cfg, err := mcp.New(ctx, "filesystem", + mcp.WithCommand("mcp-server-filesystem"), + mcp.WithArgs([]string{"--root", "/data"}), + mcp.WithEnv(map[string]string{ + "LOG_LEVEL": "debug", + }), + mcp.WithStartTimeout(30*time.Second), +) +``` + +### Key Changes + +1. **Context First**: `ctx` moved to first parameter +2. **No Build()**: Configuration validated immediately +3. **Separate Args**: Command and args are separate options +4. **Map-Based**: Env and Headers use maps instead of individual methods +5. 
**Variadic Options**: All options passed as variadic arguments + +## Validation + +The constructor validates: + +- ID is non-empty and valid format +- Either command OR url is configured (not both) +- Transport type is valid (stdio, sse, streamable-http) +- Command is required for stdio transport +- URL is required for HTTP transports + +## Examples + +See `constructor_test.go` for comprehensive usage examples. diff --git a/sdk/mcp/constructor.go b/sdk/mcp/constructor.go new file mode 100644 index 00000000..6763d8e5 --- /dev/null +++ b/sdk/mcp/constructor.go @@ -0,0 +1,60 @@ +package mcp + +import ( + "context" + "fmt" + "strings" + + "github.com/compozy/compozy/engine/core" + enginemcp "github.com/compozy/compozy/engine/mcp" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +func New(ctx context.Context, id string, opts ...Option) (*enginemcp.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + cfg := &enginemcp.Config{ + ID: strings.TrimSpace(id), + } + for _, opt := range opts { + opt(cfg) + } + if err := validateConfig(ctx, cfg); err != nil { + return nil, err + } + cfg.SetDefaults() + clone, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone mcp config: %w", err) + } + return clone, nil +} + +func validateConfig(ctx context.Context, cfg *enginemcp.Config) error { + collected := make([]error, 0) + if err := validate.ID(ctx, cfg.ID); err != nil { + collected = append(collected, fmt.Errorf("mcp id is invalid: %w", err)) + } + if err := validateTransportSelection(cfg); err != nil { + collected = append(collected, err) + } + if len(collected) > 0 { + return &sdkerrors.BuildError{Errors: collected} + } + return nil +} + +func validateTransportSelection(cfg *enginemcp.Config) error { + hasCommand := strings.TrimSpace(cfg.Command) != "" + hasURL := strings.TrimSpace(cfg.URL) != "" + switch { + case hasCommand && hasURL: + 
return fmt.Errorf("configure either command or url, not both") + case !hasCommand && !hasURL: + return fmt.Errorf("either command or url must be configured") + default: + return nil + } +} diff --git a/sdk/mcp/constructor_test.go b/sdk/mcp/constructor_test.go new file mode 100644 index 00000000..48ada041 --- /dev/null +++ b/sdk/mcp/constructor_test.go @@ -0,0 +1,213 @@ +package mcp + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + enginemcp "github.com/compozy/compozy/engine/mcp" + mcpproxy "github.com/compozy/compozy/pkg/mcp-proxy" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" +) + +func TestNew_MinimalStdioConfig(t *testing.T) { + t.Run("Should create valid stdio MCP with command", func(t *testing.T) { + cfg, err := New(t.Context(), "filesystem", + WithCommand("mcp-server-filesystem"), + ) + require.NoError(t, err) + require.NotNil(t, cfg) + assert.Equal(t, "filesystem", cfg.ID) + assert.Equal(t, "mcp-server-filesystem", cfg.Command) + assert.Equal(t, "filesystem", cfg.Resource) + assert.Equal(t, mcpproxy.TransportStdio, cfg.Transport) + }) +} + +func TestNew_MinimalHTTPConfig(t *testing.T) { + t.Run("Should create valid HTTP MCP with URL", func(t *testing.T) { + cfg, err := New(t.Context(), "github", + WithURL("https://api.github.com/mcp"), + ) + require.NoError(t, err) + require.NotNil(t, cfg) + assert.Equal(t, "github", cfg.ID) + assert.Equal(t, "https://api.github.com/mcp", cfg.URL) + assert.Equal(t, mcpproxy.TransportSSE, cfg.Transport) + }) +} + +func TestNew_FullStdioConfig(t *testing.T) { + t.Run("Should create fully configured stdio MCP", func(t *testing.T) { + cfg, err := New(t.Context(), "filesystem", + WithCommand("mcp-server-filesystem"), + WithArgs([]string{"--root", "/data"}), + WithEnv(map[string]string{ + "LOG_LEVEL": "debug", + "ROOT_DIR": "/workspace", + }), + WithStartTimeout(30*time.Second), + WithMaxSessions(5), + 
WithProto("2025-03-26"), + ) + require.NoError(t, err) + require.NotNil(t, cfg) + assert.Equal(t, "filesystem", cfg.ID) + assert.Equal(t, "mcp-server-filesystem", cfg.Command) + assert.Equal(t, []string{"--root", "/data"}, cfg.Args) + assert.Equal(t, "debug", cfg.Env["LOG_LEVEL"]) + assert.Equal(t, "/workspace", cfg.Env["ROOT_DIR"]) + assert.Equal(t, 30*time.Second, cfg.StartTimeout) + assert.Equal(t, 5, cfg.MaxSessions) + assert.Equal(t, "2025-03-26", cfg.Proto) + }) +} + +func TestNew_FullHTTPConfig(t *testing.T) { + t.Run("Should create fully configured HTTP MCP", func(t *testing.T) { + cfg, err := New(t.Context(), "github", + WithURL("https://api.github.com/mcp"), + WithHeaders(map[string]string{ + "Authorization": "Bearer token123", + "X-Custom": "value", + }), + WithTransport(mcpproxy.TransportStreamableHTTP), + WithMaxSessions(10), + WithProto("2025-03-26"), + ) + require.NoError(t, err) + require.NotNil(t, cfg) + assert.Equal(t, "github", cfg.ID) + assert.Equal(t, "https://api.github.com/mcp", cfg.URL) + assert.Equal(t, "Bearer token123", cfg.Headers["Authorization"]) + assert.Equal(t, "value", cfg.Headers["X-Custom"]) + assert.Equal(t, mcpproxy.TransportStreamableHTTP, cfg.Transport) + assert.Equal(t, 10, cfg.MaxSessions) + }) +} + +func TestNew_ValidationErrors(t *testing.T) { + tests := []struct { + name string + id string + opts []Option + wantErr string + }{ + { + name: "Should fail with empty ID", + id: "", + opts: []Option{WithCommand("mcp-server")}, + wantErr: "id is invalid", + }, + { + name: "Should fail with invalid ID characters", + id: "bad id", + opts: []Option{WithCommand("mcp-server")}, + wantErr: "id is invalid", + }, + { + name: "Should fail without command or URL", + id: "test-mcp", + opts: []Option{}, + wantErr: "either command or url must be configured", + }, + { + name: "Should fail with both command and URL", + id: "test-mcp", + opts: []Option{ + WithCommand("mcp-server"), + WithURL("https://example.com"), + }, + wantErr: "configure 
either command or url", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg, err := New(t.Context(), tt.id, tt.opts...) + require.Error(t, err) + assert.Nil(t, cfg) + assert.Contains(t, err.Error(), tt.wantErr) + }) + } +} + +func TestNew_NilContext(t *testing.T) { + t.Run("Should fail with nil context", func(t *testing.T) { + var nilCtx context.Context + cfg, err := New(nilCtx, "test-mcp", WithCommand("mcp-server")) + require.Error(t, err) + assert.Nil(t, cfg) + assert.Contains(t, err.Error(), "context is required") + }) +} + +func TestNew_DeepCopy(t *testing.T) { + t.Run("Should return deep copy of config", func(t *testing.T) { + cfg1, err := New(t.Context(), "filesystem", + WithCommand("mcp-server"), + WithArgs([]string{"--root", "/data"}), + WithEnv(map[string]string{"KEY": "value"}), + ) + require.NoError(t, err) + cfg1.Args[0] = "modified" + cfg1.Env["KEY"] = "modified" + cfg2, err := New(t.Context(), "filesystem", + WithCommand("mcp-server"), + WithArgs([]string{"--root", "/data"}), + WithEnv(map[string]string{"KEY": "value"}), + ) + require.NoError(t, err) + assert.NotEqual(t, cfg1.Args[0], cfg2.Args[0]) + assert.NotEqual(t, cfg1.Env["KEY"], cfg2.Env["KEY"]) + }) +} + +func TestNew_WhitespaceTrimming(t *testing.T) { + t.Run("Should trim whitespace from ID", func(t *testing.T) { + cfg, err := New(t.Context(), " filesystem ", + WithCommand("mcp-server"), + ) + require.NoError(t, err) + assert.Equal(t, "filesystem", cfg.ID) + }) +} + +func TestNew_SetDefaultsApplied(t *testing.T) { + t.Run("Should apply defaults for stdio transport", func(t *testing.T) { + cfg, err := New(t.Context(), "filesystem", + WithCommand("mcp-server"), + ) + require.NoError(t, err) + assert.Equal(t, "filesystem", cfg.Resource) + assert.Equal(t, enginemcp.DefaultProtocolVersion, cfg.Proto) + assert.Equal(t, mcpproxy.TransportStdio, cfg.Transport) + }) + t.Run("Should apply defaults for HTTP transport", func(t *testing.T) { + cfg, err := New(t.Context(), 
"github", + WithURL("https://api.github.com"), + ) + require.NoError(t, err) + assert.Equal(t, "github", cfg.Resource) + assert.Equal(t, enginemcp.DefaultProtocolVersion, cfg.Proto) + assert.Equal(t, mcpproxy.TransportSSE, cfg.Transport) + }) +} + +func TestNew_BuildErrorAggregation(t *testing.T) { + t.Run("Should aggregate multiple validation errors", func(t *testing.T) { + cfg, err := New(t.Context(), "", + WithCommand(""), + WithURL(""), + ) + require.Error(t, err) + assert.Nil(t, cfg) + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + require.True(t, errors.Is(err, buildErr)) + assert.GreaterOrEqual(t, len(buildErr.Errors), 2) + }) +} diff --git a/sdk/mcp/generate.go b/sdk/mcp/generate.go new file mode 100644 index 00000000..e535d23b --- /dev/null +++ b/sdk/mcp/generate.go @@ -0,0 +1,3 @@ +package mcp + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/mcp/config.go -struct Config -output options_generated.go diff --git a/sdk/mcp/options_generated.go b/sdk/mcp/options_generated.go new file mode 100644 index 00000000..d481341d --- /dev/null +++ b/sdk/mcp/options_generated.go @@ -0,0 +1,211 @@ +// Code generated by optionsgen. DO NOT EDIT. + +package mcp + +import ( + "time" + + mcp "github.com/compozy/compozy/engine/mcp" + mcpproxy "github.com/compozy/compozy/pkg/mcp-proxy" +) + +type Option func(*mcp.Config) + +// WithResource sets the Resource field +// +// Resource reference for the MCP server (optional) +// If not specified, defaults to the value of ID. +// Used for resource identification and referencing in Compozy's resource system. +func WithResource(resource string) Option { + return func(cfg *mcp.Config) { + cfg.Resource = resource + } +} + +// WithID sets the ID field +// +// ID is the **unique identifier** for this MCP server configuration. +// This identifier is used throughout the system to reference this specific MCP server. +// Choose descriptive IDs that reflect the server's purpose. 
+// - **Examples**: +// - `filesystem` - for file system operations +// - `postgres-db` - for PostgreSQL database access +// - `github-api` - for GitHub integration +// - `python-runtime` - for Python code execution +func WithID(id string) Option { + return func(cfg *mcp.Config) { + cfg.ID = id + } +} + +// WithURL sets the URL field +// +// URL is the **endpoint for remote MCP servers**. +// Required for HTTP-based transports (SSE, streamable-http). +// Must be a valid HTTP or HTTPS URL pointing to an MCP-compatible endpoint. +// **Format**: `http[s]://host[:port]/path` +// - **Examples**: +// ```yaml +// url: "http://localhost:3000/mcp" +// url: "https://api.example.com/v1/mcp" +// url: "http://mcp-proxy:6001/filesystem" +// ``` +// **Note**: Mutually exclusive with `command` - use either URL or Command, not both. +func WithURL(url string) Option { + return func(cfg *mcp.Config) { + cfg.URL = url + } +} + +// WithCommand sets the Command field +// +// Command is the **executable command** to spawn a local MCP server process. +// Used for stdio transport to run MCP servers as child processes. +// Supports both direct executables and complex commands with arguments. +// - **Examples**: +// ```yaml +// # Simple executable +// command: "mcp-server-filesystem" +// # Command with arguments +// command: "python /app/mcp_server.py --mode production" +// # Docker container +// command: "docker run --rm -i mcp/postgres:latest" +// ``` +// **Security Note**: Commands are parsed using shell lexing for safety. +// Avoid user-provided input in commands. +func WithCommand(command string) Option { + return func(cfg *mcp.Config) { + cfg.Command = command + } +} + +// WithArgs sets the Args field +// +// Args supplies additional arguments passed to the command when spawning local MCP processes. +// Only used when `command` is provided (stdio transport). Ignored when `url` is configured. +// Runtime validation enforces that `command` and `url` are mutually exclusive. 
+// Use this to provide flags or subcommands while keeping Command focused on the executable. +// Example: +// command: "uvx" +// args: ["mcp-server-fetch", "--port", "9000"] +func WithArgs(args []string) Option { + return func(cfg *mcp.Config) { + cfg.Args = args + } +} + +// WithHeaders sets the Headers field +// +// Headers contains HTTP headers to include when connecting to remote MCP servers (SSE/HTTP). +// Useful for passing Authorization tokens, custom auth headers, or version negotiation. +// Example: +// headers: +// Authorization: "Bearer {{ .env.GITHUB_MCP_OAUTH_TOKEN }}" +func WithHeaders(headers map[string]string) Option { + return func(cfg *mcp.Config) { + cfg.Headers = headers + } +} + +// WithEnv sets the Env field +// +// Env contains **environment variables** to pass to the MCP server process. +// Only used when `command` is specified for spawning local processes. +// Useful for passing configuration, secrets, or runtime parameters. +// - **Examples**: +// ```yaml +// env: +// DATABASE_URL: "postgres://user:pass@localhost/db" +// API_KEY: "{{ .env.GITHUB_TOKEN }}" +// LOG_LEVEL: "debug" +// WORKSPACE_DIR: "/data/workspace" +// ``` +// **Template Support**: Values can use Go template syntax to reference +// environment variables from the host system. +func WithEnv(env map[string]string) Option { + return func(cfg *mcp.Config) { + cfg.Env = env + } +} + +// WithProto sets the Proto field +// +// Proto specifies the **MCP protocol version** to use. +// Different protocol versions may support different features, message formats, +// or capabilities. Always use the version compatible with your MCP server. 
+// **Format**: `YYYY-MM-DD` (e.g., "2025-03-26") +// **Default**: `DefaultProtocolVersion` ("2025-03-26") +// **Version History**: +// - `2025-03-26` - Latest version with streaming support +// - `2024-12-01` - Initial protocol release +func WithProto(proto string) Option { + return func(cfg *mcp.Config) { + cfg.Proto = proto + } +} + +// WithTransport sets the Transport field +// +// Transport defines the **communication transport mechanism**. +// Choose the transport based on your MCP server's capabilities and deployment model. +// **Supported Values**: +// | Transport | Description | Use Case | +// |-----------|-------------|----------| +// | `sse` | Server-Sent Events | HTTP servers with real-time streaming | +// | `streamable-http` | HTTP with streaming | Large responses, file transfers | +// | `stdio` | Standard I/O | Local processes, Docker containers | +// **Default**: `sse` +// - **Examples**: +// ```yaml +// # Remote server with SSE +// transport: sse +// # Local process with stdio +// transport: stdio +// # HTTP server with large file support +// transport: streamable-http +// ``` +func WithTransport(transport mcpproxy.TransportType) Option { + return func(cfg *mcp.Config) { + cfg.Transport = transport + } +} + +// WithStartTimeout sets the StartTimeout field +// +// StartTimeout is the **maximum time to wait** for the MCP server to start. +// Only applicable when using `command` to spawn local processes. +// Helps detect and handle startup failures gracefully. +// **Format**: Go duration string (e.g., "30s", "1m", "500ms") +// **Default**: No timeout (waits indefinitely) +// - **Examples**: +// ```yaml +// start_timeout: 30s # Wait up to 30 seconds +// start_timeout: 2m # Wait up to 2 minutes +// start_timeout: 500ms # Wait up to 500 milliseconds +// ``` +// **Recommendation**: Set to at least 10-30s for Docker-based servers. 
+func WithStartTimeout(startTimeout time.Duration) Option { + return func(cfg *mcp.Config) { + cfg.StartTimeout = startTimeout + } +} + +// WithMaxSessions sets the MaxSessions field +// +// MaxSessions defines the **maximum number of concurrent sessions** allowed. +// Helps manage resource usage and prevent server overload. +// Each agent connection typically creates one session. +// **Values**: +// - `0`: Unlimited sessions (default) +// - Positive number: Maximum concurrent sessions +// - **Examples**: +// ```yaml +// max_sessions: 10 # Allow up to 10 concurrent connections +// max_sessions: 1 # Single session only (useful for stateful servers) +// max_sessions: 0 # Unlimited sessions +// ``` +func WithMaxSessions(maxSessions int) Option { + return func(cfg *mcp.Config) { + cfg.MaxSessions = maxSessions + } +} diff --git a/sdk/memory/README.md b/sdk/memory/README.md new file mode 100644 index 00000000..133a2624 --- /dev/null +++ b/sdk/memory/README.md @@ -0,0 +1,233 @@ +# Package memory + +## Overview + +The memory package provides a clean, type-safe API for configuring memory resources in Compozy. Memory resources enable persistent context management for AI agents, allowing them to retain and share information across multiple interactions. + +## Installation + +```go +import memory "github.com/compozy/compozy/sdk/v2/memory" +``` + +## Usage + +### Basic Example (Redis Persistence) + +```go +package main + +import ( + "context" + "log" + + memory "github.com/compozy/compozy/sdk/v2/memory" + memorycore "github.com/compozy/compozy/engine/memory/core" +) + +func main() { + ctx := context.Background() + + cfg, err := memory.New(ctx, "conversation-memory", "token_based", + memory.WithMaxTokens(4000), + memory.WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.RedisPersistence, + TTL: "24h", + }), + ) + if err != nil { + log.Fatal(err) + } + + // Use cfg with Compozy engine... 
+} +``` + +### Full Configuration Example + +```go +cfg, err := memory.New(ctx, "advanced-memory", "token_based", + memory.WithDescription("Advanced conversation memory with summarization"), + memory.WithVersion("1.0.0"), + memory.WithMaxTokens(4000), + memory.WithMaxMessages(100), + memory.WithExpiration("48h"), + memory.WithTokenAllocation(&memorycore.TokenAllocation{ + ShortTerm: 0.6, + LongTerm: 0.3, + System: 0.1, + }), + memory.WithFlushing(&memorycore.FlushingStrategyConfig{ + Type: memorycore.HybridSummaryFlushing, + SummarizeThreshold: 0.8, + SummaryTokens: 500, + }), + memory.WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.RedisPersistence, + TTL: "24h", + }), +) +``` + +### In-Memory Persistence (Development/Testing) + +```go +cfg, err := memory.New(ctx, "test-memory", "buffer", + memory.WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.InMemoryPersistence, + }), +) +``` + +## API Reference + +### Constructor + +```go +func New(ctx context.Context, id string, memType string, opts ...Option) (*enginememory.Config, error) +``` + +Creates a new memory configuration with the specified ID and type. 
+ +**Parameters:** + +- `ctx`: Context (required, cannot be nil) +- `id`: Unique identifier for the memory resource (required, non-empty) +- `memType`: Memory management strategy (required, one of: `"token_based"`, `"message_count_based"`, `"buffer"`) +- `opts`: Variadic functional options for configuration + +**Returns:** + +- `*enginememory.Config`: Deep-copied memory configuration +- `error`: Validation errors if any + +### Memory Types + +- **`"token_based"`**: Manages memory based on token count limits (recommended for LLM contexts) + - Requires at least one limit: `max_tokens`, `max_context_ratio`, or `max_messages` +- **`"message_count_based"`**: Manages memory based on message count limits +- **`"buffer"`**: Simple buffer that stores messages up to a limit + +### Options + +#### Core Configuration + +- `WithResource(resource string)` - Sets resource type (auto-set to "memory") +- `WithID(id string)` - Sets unique identifier +- `WithDescription(description string)` - Sets human-readable description +- `WithVersion(version string)` - Sets version for tracking changes + +#### Memory Limits + +- `WithType(typeValue memorycore.Type)` - Sets memory management strategy +- `WithMaxTokens(maxTokens int)` - Sets hard limit on token count (must be non-negative) +- `WithMaxMessages(maxMessages int)` - Sets hard limit on message count (must be non-negative) +- `WithMaxContextRatio(maxContextRatio float64)` - Sets maximum portion of LLM context window (0-1) +- `WithExpiration(expiration string)` - Sets data retention period (e.g., "24h", "7d") + +#### Advanced Configuration + +- `WithTokenAllocation(tokenAllocation *memorycore.TokenAllocation)` - Sets token budget distribution +- `WithFlushing(flushing *memorycore.FlushingStrategyConfig)` - Sets memory management strategy +- `WithPersistence(persistence memorycore.PersistenceConfig)` - **Required** - Sets storage backend +- `WithPrivacyPolicy(privacyPolicy *memorycore.PrivacyPolicyConfig)` - Sets data protection rules +- 
`WithPrivacyScope(privacyScope PrivacyScope)` - Sets sharing scope across tenants +- `WithLocking(locking *memorycore.LockConfig)` - Sets distributed lock timeouts +- `WithTokenProvider(tokenProvider *memorycore.TokenProviderConfig)` - Sets token counting provider +- `WithDefaultKeyTemplate(defaultKeyTemplate string)` - Sets fallback key template +- `WithCWD(cwd *core.PathCWD)` - Sets current working directory + +### Persistence Types + +- **`memorycore.RedisPersistence`**: Production-grade persistence with TTL support + - Requires `TTL` field (e.g., "24h", "7d") +- **`memorycore.InMemoryPersistence`**: Testing/development only (data lost on restart) + - Does not require `TTL` field + +## Migration Guide + +### Before (Builder Pattern - Old SDK) + +```go +// Old SDK (not available for memory package) +``` + +### After (Functional Options - New SDK) + +```go +cfg, err := memory.New(ctx, "conversation-memory", "token_based", + memory.WithMaxTokens(4000), + memory.WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.RedisPersistence, + TTL: "24h", + }), +) +``` + +### Key Changes + +1. ✅ `ctx` is now the first parameter +2. ✅ `id` and `memType` are required constructor parameters +3. ✅ No `.Build()` call needed - validation happens in constructor +4. ✅ Options use `With` prefix and take typed values +5. 
✅ Persistence configuration is required + +## Validation Rules + +### ID Validation + +- Must be non-empty +- Must be valid identifier format + +### Type Validation + +- Must be one of: `"token_based"`, `"message_count_based"`, `"buffer"` +- Case-insensitive (normalized to lowercase) + +### Persistence Validation + +- `type` is required +- Must be one of: `"redis"`, `"in_memory"` +- Redis persistence requires `ttl` field +- TTL must be non-negative duration + +### Limit Validation + +- `max_tokens` must be non-negative (if specified) +- `max_messages` must be non-negative (if specified) +- `max_context_ratio` must be between 0 and 1 (if specified) +- `expiration` must be valid duration format (if specified) +- `token_based` type requires at least one limit configured +- `max_context_ratio` requires `token_provider` configuration + +## Error Handling + +The constructor returns `*sdkerrors.BuildError` containing all validation errors at once: + +```go +cfg, err := memory.New(ctx, "", "invalid-type", + memory.WithMaxTokens(-1), +) +if err != nil { + var buildErr *sdkerrors.BuildError + if errors.As(err, &buildErr) { + for _, e := range buildErr.Errors { + fmt.Println("Validation error:", e) + } + } +} +``` + +## Testing + +```bash +# Run tests +gotestsum --format pkgname -- -race -parallel=4 ./sdk/memory + +# Run linter +golangci-lint run --fix --allow-parallel-runners ./sdk/memory/... +``` + +## Examples + +See example files in `sdk/cmd/` directory for complete usage examples. 
diff --git a/sdk/memory/constructor.go b/sdk/memory/constructor.go new file mode 100644 index 00000000..1fe55453 --- /dev/null +++ b/sdk/memory/constructor.go @@ -0,0 +1,168 @@ +package memory + +import ( + "context" + "fmt" + "strings" + + "github.com/compozy/compozy/engine/core" + enginememory "github.com/compozy/compozy/engine/memory" + memorycore "github.com/compozy/compozy/engine/memory/core" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +var supportedTypes = map[string]memorycore.Type{ + "token_based": memorycore.TokenBasedMemory, + "message_count_based": memorycore.MessageCountBasedMemory, + "buffer": memorycore.BufferMemory, +} + +var typeList = []string{ + "token_based", + "message_count_based", + "buffer", +} + +var supportedPersistence = map[string]memorycore.PersistenceType{ + "redis": memorycore.RedisPersistence, + "in_memory": memorycore.InMemoryPersistence, +} + +var persistenceList = []string{ + "redis", + "in_memory", +} + +// New creates a memory configuration using functional options +func New(ctx context.Context, id string, memType string, opts ...Option) (*enginememory.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating memory configuration", "id", id, "type", memType) + normalizedType := strings.ToLower(strings.TrimSpace(memType)) + typeVal, ok := supportedTypes[normalizedType] + if !ok { + return nil, fmt.Errorf( + "type %q is not supported; must be one of %s", + normalizedType, + strings.Join(typeList, ", "), + ) + } + cfg := &enginememory.Config{ + Resource: string(core.ConfigMemory), + ID: strings.TrimSpace(id), + Type: typeVal, + } + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0, 8) + if err := validate.ID(ctx, cfg.ID); err != nil { + collected = append(collected, fmt.Errorf("id is invalid: %w", err)) + } 
+ if err := validateResource(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateType(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validatePersistence(ctx, cfg); err != nil { + collected = append(collected, err) + } + if errs := validateLimits(cfg); len(errs) > 0 { + collected = append(collected, errs...) + } + if len(collected) > 0 { + return nil, &sdkerrors.BuildError{Errors: collected} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone memory config: %w", err) + } + return cloned, nil +} + +func validateResource(_ context.Context, cfg *enginememory.Config) error { + if cfg.Resource != string(core.ConfigMemory) { + return fmt.Errorf("resource field must be %q, got %q", core.ConfigMemory, cfg.Resource) + } + return nil +} + +func validateType(_ context.Context, cfg *enginememory.Config) error { + typeStr := strings.ToLower(string(cfg.Type)) + if _, ok := supportedTypes[typeStr]; !ok { + return fmt.Errorf("type %q is not supported; must be one of %s", cfg.Type, strings.Join(typeList, ", ")) + } + return nil +} + +func validatePersistence(_ context.Context, cfg *enginememory.Config) error { + persistenceType := strings.ToLower(string(cfg.Persistence.Type)) + if persistenceType == "" { + return fmt.Errorf("persistence.type is required") + } + mapped, ok := supportedPersistence[persistenceType] + if !ok { + return fmt.Errorf( + "persistence.type %q is not supported; must be one of %s", + persistenceType, + strings.Join(persistenceList, ", "), + ) + } + cfg.Persistence.Type = mapped + if cfg.Persistence.Type != memorycore.InMemoryPersistence && cfg.Persistence.TTL == "" { + return fmt.Errorf("persistence.ttl is required for persistence type %q", cfg.Persistence.Type) + } + if cfg.Persistence.TTL != "" { + parsedTTL, err := core.ParseHumanDuration(cfg.Persistence.TTL) + if err != nil { + return fmt.Errorf("invalid persistence.ttl duration format %q: %w", 
cfg.Persistence.TTL, err) + } + if parsedTTL < 0 { + return fmt.Errorf("persistence.ttl must be non-negative, got %q", cfg.Persistence.TTL) + } + } + return nil +} + +func validateLimits(cfg *enginememory.Config) []error { + errs := make([]error, 0, 5) + if cfg.MaxTokens < 0 { + errs = append(errs, fmt.Errorf("max_tokens must be non-negative: got %d", cfg.MaxTokens)) + } + if cfg.MaxMessages < 0 { + errs = append(errs, fmt.Errorf("max_messages must be non-negative: got %d", cfg.MaxMessages)) + } + if cfg.MaxContextRatio < 0 || cfg.MaxContextRatio > 1 { + errs = append( + errs, + fmt.Errorf("max_context_ratio must be between 0 and 1 inclusive: got %v", cfg.MaxContextRatio), + ) + } + if cfg.Type == memorycore.TokenBasedMemory { + if cfg.MaxTokens <= 0 && cfg.MaxContextRatio <= 0 && cfg.MaxMessages <= 0 { + errs = append( + errs, + fmt.Errorf( + "token_based memory must have at least one limit configured (max_tokens, max_context_ratio, or max_messages)", + ), + ) + } + if cfg.MaxContextRatio > 0 && cfg.TokenProvider == nil { + errs = append(errs, fmt.Errorf("max_context_ratio requires token_provider configuration")) + } + } + if cfg.Expiration != "" { + duration, err := core.ParseHumanDuration(cfg.Expiration) + if err != nil { + errs = append(errs, fmt.Errorf("invalid expiration duration %q: %w", cfg.Expiration, err)) + } else if duration < 0 { + errs = append(errs, fmt.Errorf("expiration duration must be non-negative, got %q", cfg.Expiration)) + } + } + return errs +} diff --git a/sdk/memory/constructor_test.go b/sdk/memory/constructor_test.go new file mode 100644 index 00000000..373089b9 --- /dev/null +++ b/sdk/memory/constructor_test.go @@ -0,0 +1,340 @@ +package memory + +import ( + "context" + "errors" + "testing" + + "github.com/compozy/compozy/engine/core" + memorycore "github.com/compozy/compozy/engine/memory/core" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" +) + +func TestNew(t *testing.T) { + t.Run("Should create memory with minimal 
redis configuration", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test-memory", "token_based", + WithMaxTokens(1000), + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.RedisPersistence, + TTL: "24h", + }), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg == nil { + t.Fatal("expected config, got nil") + } + if cfg.ID != "test-memory" { + t.Errorf("expected id 'test-memory', got '%s'", cfg.ID) + } + if cfg.Type != memorycore.TokenBasedMemory { + t.Errorf("expected type 'token_based', got '%s'", cfg.Type) + } + if cfg.Resource != string(core.ConfigMemory) { + t.Errorf("expected resource 'memory', got '%s'", cfg.Resource) + } + }) + t.Run("Should create memory with in_memory persistence", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test-memory", "buffer", + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.InMemoryPersistence, + }), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Persistence.Type != memorycore.InMemoryPersistence { + t.Errorf("expected in_memory persistence, got '%s'", cfg.Persistence.Type) + } + }) + t.Run("Should trim whitespace from id and type", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, " test-memory ", " token_based ", + WithMaxTokens(1000), + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.RedisPersistence, + TTL: "24h", + }), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ID != "test-memory" { + t.Errorf("expected trimmed id 'test-memory', got '%s'", cfg.ID) + } + if cfg.Type != memorycore.TokenBasedMemory { + t.Errorf("expected type 'token_based', got '%s'", cfg.Type) + } + }) + t.Run("Should normalize type to lowercase", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test-memory", "TOKEN_BASED", + WithMaxTokens(1000), + WithPersistence(memorycore.PersistenceConfig{ + Type: 
memorycore.RedisPersistence, + TTL: "24h", + }), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Type != memorycore.TokenBasedMemory { + t.Errorf("expected type 'token_based', got '%s'", cfg.Type) + } + }) + t.Run("Should fail when context is nil", func(t *testing.T) { + var nilCtx context.Context + _, err := New(nilCtx, "test-memory", "token_based") + if err == nil { + t.Fatal("expected error for nil context") + } + if err.Error() != "context is required" { + t.Errorf("unexpected error message: %v", err) + } + }) + t.Run("Should fail when id is empty", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "", "token_based", + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.RedisPersistence, + TTL: "24h", + }), + ) + if err == nil { + t.Fatal("expected error for empty id") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should fail when type is invalid", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test-memory", "invalid-type", + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.RedisPersistence, + TTL: "24h", + }), + ) + if err == nil { + t.Fatal("expected error for invalid type") + } + }) + t.Run("Should fail when persistence type is missing", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test-memory", "token_based", + WithMaxTokens(1000), + ) + if err == nil { + t.Fatal("expected error for missing persistence type") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should fail when redis persistence has no ttl", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test-memory", "token_based", + WithMaxTokens(1000), + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.RedisPersistence, + }), + ) + if err == nil { + 
t.Fatal("expected error for missing TTL with redis") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should fail when token_based has no limits", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test-memory", "token_based", + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.InMemoryPersistence, + }), + ) + if err == nil { + t.Fatal("expected error for token_based without limits") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should create memory with all options", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test-memory", "token_based", + WithDescription("Test memory"), + WithVersion("1.0.0"), + WithMaxTokens(4000), + WithMaxMessages(100), + WithExpiration("48h"), + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.RedisPersistence, + TTL: "24h", + }), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Description != "Test memory" { + t.Errorf("expected description 'Test memory', got '%s'", cfg.Description) + } + if cfg.Version != "1.0.0" { + t.Errorf("expected version '1.0.0', got '%s'", cfg.Version) + } + if cfg.MaxTokens != 4000 { + t.Errorf("expected max_tokens 4000, got %d", cfg.MaxTokens) + } + if cfg.MaxMessages != 100 { + t.Errorf("expected max_messages 100, got %d", cfg.MaxMessages) + } + if cfg.Expiration != "48h" { + t.Errorf("expected expiration '48h', got '%s'", cfg.Expiration) + } + }) + t.Run("Should support all memory types", func(t *testing.T) { + ctx := context.Background() + types := []struct { + name string + expected memorycore.Type + }{ + {"token_based", memorycore.TokenBasedMemory}, + {"message_count_based", memorycore.MessageCountBasedMemory}, + {"buffer", memorycore.BufferMemory}, + } + for _, tt := range types { + opts := []Option{ + 
WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.InMemoryPersistence, + }), + } + if tt.name == "token_based" { + opts = append(opts, WithMaxTokens(1000)) + } + cfg, err := New(ctx, "test-memory", tt.name, opts...) + if err != nil { + t.Errorf("type %s: unexpected error: %v", tt.name, err) + continue + } + if cfg.Type != tt.expected { + t.Errorf("type %s: expected %s, got %s", tt.name, tt.expected, cfg.Type) + } + } + }) + t.Run("Should validate negative max_tokens", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test-memory", "token_based", + WithMaxTokens(-1), + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.InMemoryPersistence, + }), + ) + if err == nil { + t.Fatal("expected error for negative max_tokens") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should validate negative max_messages", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test-memory", "buffer", + WithMaxMessages(-1), + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.InMemoryPersistence, + }), + ) + if err == nil { + t.Fatal("expected error for negative max_messages") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should validate max_context_ratio range", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test-memory", "token_based", + WithMaxTokens(1000), + WithMaxContextRatio(1.5), + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.InMemoryPersistence, + }), + ) + if err == nil { + t.Fatal("expected error for invalid max_context_ratio") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should validate expiration duration format", func(t *testing.T) { + ctx := context.Background() + _, 
err := New(ctx, "test-memory", "buffer", + WithExpiration("invalid-duration"), + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.InMemoryPersistence, + }), + ) + if err == nil { + t.Fatal("expected error for invalid expiration") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should perform deep copy", func(t *testing.T) { + ctx := context.Background() + cfg1, err := New(ctx, "test-memory", "token_based", + WithMaxTokens(1000), + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.RedisPersistence, + TTL: "24h", + }), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + cfg1.Description = "modified" + cfg2, err := New(ctx, "test-memory", "token_based", + WithMaxTokens(1000), + WithPersistence(memorycore.PersistenceConfig{ + Type: memorycore.RedisPersistence, + TTL: "24h", + }), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg2.Description == "modified" { + t.Error("deep copy failed: configuration was modified") + } + }) + t.Run("Should accumulate multiple validation errors", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "", "token_based", + WithMaxTokens(-1), + WithMaxContextRatio(2.0), + ) + if err == nil { + t.Fatal("expected error for multiple validation failures") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Fatalf("expected BuildError, got %T", err) + } + if len(buildErr.Errors) < 2 { + t.Errorf("expected multiple errors, got %d", len(buildErr.Errors)) + } + }) +} diff --git a/sdk/memory/generate.go b/sdk/memory/generate.go new file mode 100644 index 00000000..026de21f --- /dev/null +++ b/sdk/memory/generate.go @@ -0,0 +1,3 @@ +package memory + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/memory/config.go -struct Config -output options_generated.go -package memory diff --git 
a/sdk/memory/options_generated.go b/sdk/memory/options_generated.go new file mode 100644 index 00000000..a32750a5 --- /dev/null +++ b/sdk/memory/options_generated.go @@ -0,0 +1,217 @@ +// Code generated by optionsgen. DO NOT EDIT. + +package memory + +import ( + memcore "github.com/compozy/compozy/engine/memory/core" + memory "github.com/compozy/compozy/engine/memory" +) + +type Option func(*memory.Config) + +// WithResource sets the Resource field +// +// Resource type identifier, **must be "memory"**. +// This field is used by the autoloader system to identify and properly +// register this configuration as a memory resource. +func WithResource(resource string) Option { + return func(cfg *memory.Config) { + cfg.Resource = resource + } +} + +// WithID sets the ID field +// +// ID is the **unique identifier** for this memory resource within the project. +// This ID is used by agents to reference the memory in their configuration. +// - **Examples**: `"user_conversation"`, `"session_context"`, `"agent_workspace"` +func WithID(id string) Option { + return func(cfg *memory.Config) { + cfg.ID = id + } +} + +// WithDescription sets the Description field +// +// Description provides a **human-readable explanation** of the memory resource's purpose. +// This helps developers understand what kind of data this memory stores and +// how it should be used within workflows. +func WithDescription(description string) Option { + return func(cfg *memory.Config) { + cfg.Description = description + } +} + +// WithVersion sets the Version field +// +// Version allows **tracking changes** to the memory resource definition. +// Can be used for migration strategies when memory schema evolves. 
+// **Format**: semantic versioning (e.g., `"1.0.0"`, `"2.1.0-beta"`) +func WithVersion(version string) Option { + return func(cfg *memory.Config) { + cfg.Version = version + } +} + +// WithType sets the Type field +// +// Type indicates the **primary memory management strategy**: +// - **`"token_based"`**: Manages memory based on token count limits (recommended for LLM contexts) +// - **`"message_count_based"`**: Manages memory based on message count limits +// - **`"buffer"`**: Simple buffer that stores messages up to a limit without sophisticated eviction +func WithType(typeValue memcore.Type) Option { + return func(cfg *memory.Config) { + cfg.Type = typeValue + } +} + +// WithMaxTokens sets the MaxTokens field +// +// MaxTokens is the **hard limit** on the number of tokens this memory can hold. +// Only applicable when Type is `"token_based"`. When this limit is reached, +// the flushing strategy determines how to make room for new content. +// - **Example**: `4000` (roughly equivalent to ~3000 words) +func WithMaxTokens(maxTokens int) Option { + return func(cfg *memory.Config) { + cfg.MaxTokens = maxTokens + } +} + +// WithMaxMessages sets the MaxMessages field +// +// MaxMessages is the **hard limit** on the number of messages this memory can store. +// Applicable for `"message_count_based"` type or as a secondary limit for `"token_based"`. +// - **Example**: `100` (keeps last 100 messages in conversation) +func WithMaxMessages(maxMessages int) Option { + return func(cfg *memory.Config) { + cfg.MaxMessages = maxMessages + } +} + +// WithMaxContextRatio sets the MaxContextRatio field +// +// MaxContextRatio specifies the **maximum portion** of an LLM's context window this memory should use. +// Value between 0 and 1. Dynamically calculates MaxTokens based on the model's context window. +// - **Example**: `0.5` means use at most 50% of the model's context window for memory, +// leaving the rest for system prompts and current task context. 
+func WithMaxContextRatio(maxContextRatio float64) Option { + return func(cfg *memory.Config) { + cfg.MaxContextRatio = maxContextRatio + } +} + +// WithTokenAllocation sets the TokenAllocation field +// +// TokenAllocation defines how the **token budget is distributed** across different categories. +// Only applicable for `token_based` memory type. All percentages **must sum to 1.0**. +// ```yaml +// token_allocation: +// short_term: 0.6 # 60% for recent messages +// long_term: 0.3 # 30% for summarized context +// system: 0.1 # 10% for system prompts +// ``` +func WithTokenAllocation(tokenAllocation *memcore.TokenAllocation) Option { + return func(cfg *memory.Config) { + cfg.TokenAllocation = tokenAllocation + } +} + +// WithFlushing sets the Flushing field +// +// Flushing defines **how memory is managed** when limits are approached or reached. +// **Available strategies**: +// - **`"simple_fifo"`**: Removes oldest messages first (fastest, no LLM required) +// - **`"lru"`**: Removes least recently used messages (tracks access patterns) +// - **`"hybrid_summary"`**: Summarizes old messages before removal (requires LLM, preserves context) +// - **`"token_aware_lru"`**: LRU that considers token cost of messages (optimizes token usage) +func WithFlushing(flushing *memcore.FlushingStrategyConfig) Option { + return func(cfg *memory.Config) { + cfg.Flushing = flushing + } +} + +// WithPersistence sets the Persistence field +// +// Persistence defines **how memory instances are persisted** beyond process lifetime. +// **Required field** that specifies storage backend and retention policy. 
+// **Supported backends**: +// - **`"redis"`**: Production-grade persistence with distributed locking and TTL support +// - **`"in_memory"`**: Testing/development only, data lost on restart +func WithPersistence(persistence memcore.PersistenceConfig) Option { + return func(cfg *memory.Config) { + cfg.Persistence = persistence + } +} + +// WithPrivacyPolicy sets the PrivacyPolicy field +// +// PrivacyPolicy defines **rules for handling sensitive data** within this memory. +// Can specify redaction patterns, non-persistable message types, and +// custom redaction strings for **compliance with data protection regulations**. +// ```yaml +// privacy_policy: +// redact_patterns: ["\\b\\d{3}-\\d{2}-\\d{4}\\b"] # SSN pattern +// non_persistable_message_types: ["payment_info"] +// default_redaction_string: "[REDACTED]" +// ``` +func WithPrivacyPolicy(privacyPolicy *memcore.PrivacyPolicyConfig) Option { + return func(cfg *memory.Config) { + cfg.PrivacyPolicy = privacyPolicy + } +} + +// WithPrivacyScope sets the PrivacyScope field +// +// PrivacyScope controls how memory is shared across tenants/users/sessions. +func WithPrivacyScope(privacyScope memory.PrivacyScope) Option { + return func(cfg *memory.Config) { + cfg.PrivacyScope = privacyScope + } +} + +// WithExpiration sets the Expiration field +// +// Expiration defines how long memory data is retained before cleanup. +func WithExpiration(expiration string) Option { + return func(cfg *memory.Config) { + cfg.Expiration = expiration + } +} + +// WithLocking sets the Locking field +// +// Locking configures **distributed lock timeouts** for concurrent memory operations. +// **Critical for preventing race conditions** when multiple agents access the same memory. 
+// Timeouts can be configured per operation type: +// - **`append_ttl`**: Timeout for adding new messages (default: `30s`) +// - **`clear_ttl`**: Timeout for clearing memory (default: `10s`) +// - **`flush_ttl`**: Timeout for flush operations (default: `5m`) +func WithLocking(locking *memcore.LockConfig) Option { + return func(cfg *memory.Config) { + cfg.Locking = locking + } +} + +// WithTokenProvider sets the TokenProvider field +// +// TokenProvider configures **provider-specific token counting** for accurate limits. +// Supports OpenAI, Anthropic, and other providers with their specific tokenizers. +// Can specify API keys for **real-time token counting** or fallback strategies. +func WithTokenProvider(tokenProvider *memcore.TokenProviderConfig) Option { + return func(cfg *memory.Config) { + cfg.TokenProvider = tokenProvider + } +} + +// WithDefaultKeyTemplate sets the DefaultKeyTemplate field +// +// DefaultKeyTemplate provides a fallback key template used when an +// agent's memory reference omits the `key` field and supplies only the +// memory ID. The template supports the same variables available to +// agent-level key templates and will be rendered at runtime. +// Example: "session:{{.workflow.input.session_id}}" +func WithDefaultKeyTemplate(defaultKeyTemplate string) Option { + return func(cfg *memory.Config) { + cfg.DefaultKeyTemplate = defaultKeyTemplate + } +} diff --git a/sdk/model/README.md b/sdk/model/README.md new file mode 100644 index 00000000..96118aa0 --- /dev/null +++ b/sdk/model/README.md @@ -0,0 +1,268 @@ +# Model SDK Package + +Auto-generated functional options for LLM provider configuration. + +## Overview + +The `model` package provides a clean, type-safe API for configuring LLM providers in Compozy. It uses auto-generated functional options to reduce boilerplate and ensure consistency with the engine layer. 
+ +## Usage + +### Basic Configuration + +```go +import ( + "context" + "github.com/compozy/compozy/sdk/v2/model" +) + +func main() { + ctx := context.Background() + + // Minimal configuration + cfg, err := model.New(ctx, "openai", "gpt-4") + if err != nil { + panic(err) + } +} +``` + +### Full Configuration + +```go +import ( + "context" + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/sdk/v2/model" +) + +func main() { + ctx := context.Background() + + // Configure parameters + params := core.PromptParams{} + params.SetTemperature(0.7) + params.SetMaxTokens(2000) + params.SetTopP(0.9) + + cfg, err := model.New(ctx, "openai", "gpt-4-turbo", + model.WithAPIKey("{{ .env.OPENAI_API_KEY }}"), + model.WithAPIURL("https://api.openai.com/v1"), + model.WithParams(params), + model.WithOrganization("org-123456"), + model.WithDefault(true), + model.WithMaxToolIterations(10), + model.WithContextWindow(128000), + ) + if err != nil { + panic(err) + } +} +``` + +## Supported Providers + +- `openai` - OpenAI (GPT-4, GPT-3.5, etc.) +- `anthropic` - Anthropic (Claude models) +- `google` - Google (Gemini models) +- `groq` - Groq (fast inference) +- `ollama` - Ollama (local models) +- `deepseek` - DeepSeek AI models +- `xai` - xAI (Grok models) +- `cerebras` - Cerebras (fast inference) +- `openrouter` - OpenRouter (multi-model gateway) + +## Available Options + +### WithProvider + +Sets the LLM provider. Automatically normalized to lowercase. + +```go +model.New(ctx, "OpenAI", "gpt-4") // Normalized to "openai" +``` + +### WithModel + +Sets the specific model identifier. + +```go +model.New(ctx, "openai", "gpt-4-turbo") +``` + +### WithAPIKey + +Sets the authentication key. Use environment variable templates for security. + +```go +model.WithAPIKey("{{ .env.OPENAI_API_KEY }}") +``` + +### WithAPIURL + +Sets a custom API endpoint for local hosting or proxies. 
+ +```go +model.WithAPIURL("http://localhost:11434") // Ollama +model.WithAPIURL("https://api.openai.com/v1") // OpenAI +``` + +### WithParams + +Configures generation parameters (temperature, max_tokens, etc.). + +```go +params := core.PromptParams{} +params.SetTemperature(0.7) +params.SetMaxTokens(2000) +model.WithParams(params) +``` + +### WithOrganization + +Sets the organization ID (primarily for OpenAI). + +```go +model.WithOrganization("org-123456789") +``` + +### WithDefault + +Marks this model as the default fallback. + +```go +model.WithDefault(true) +``` + +### WithMaxToolIterations + +Limits the number of tool-call iterations. + +```go +model.WithMaxToolIterations(10) +``` + +### WithContextWindow + +Overrides the provider's default context window size. + +```go +model.WithContextWindow(200000) // Claude 3.5 Sonnet via OpenRouter +``` + +## Parameter Validation + +### Temperature + +- **Range:** 0.0 to 2.0 +- **Default:** Provider-specific +- Controls randomness and creativity + +### MaxTokens + +- **Range:** Positive integers +- **Default:** Provider-specific +- Limits response length + +### TopP + +- **Range:** 0.0 to 1.0 +- **Default:** Provider-specific +- Nucleus sampling threshold + +### FrequencyPenalty + +- **Range:** -2.0 to 2.0 +- **Default:** 0.0 +- Penalizes token frequency + +### PresencePenalty + +- **Range:** -2.0 to 2.0 +- **Default:** 0.0 +- Encourages topic diversity + +## Error Handling + +The constructor returns a `BuildError` containing all validation errors: + +```go +cfg, err := model.New(ctx, "invalid", "") +if err != nil { + var buildErr *sdkerrors.BuildError + if errors.As(err, &buildErr) { + for _, e := range buildErr.Errors { + fmt.Println(e) + } + } +} +``` + +## Auto-Generation + +Options are auto-generated from `engine/core/provider.go`: + +```bash +cd sdk/model +go generate +``` + +This generates `options_generated.go` with all field options. **Never edit this file manually.** + +## Validation Rules + +1. 
**Provider:** Must be one of the supported providers +2. **Model:** Cannot be empty +3. **API URL:** Must be a valid URL (if provided) +4. **Temperature:** 0.0 to 2.0 +5. **MaxTokens:** Must be positive +6. **TopP:** 0.0 to 1.0 +7. **Frequency/Presence Penalty:** -2.0 to 2.0 + +## Migration from Builder Pattern + +### Before (Old Builder) + +```go +cfg, err := model.New("openai", "gpt-4"). + WithTemperature(0.7). + WithMaxTokens(2000). + Build(ctx) +``` + +### After (Functional Options) + +```go +params := core.PromptParams{} +params.SetTemperature(0.7) +params.SetMaxTokens(2000) + +cfg, err := model.New(ctx, "openai", "gpt-4", + model.WithParams(params), +) +``` + +## Key Changes + +1. **Context First:** `ctx` is now the first parameter +2. **No Build():** Validation happens immediately +3. **Params Object:** Use `core.PromptParams` with setters +4. **Provider/Model Required:** Both are constructor parameters + +## Benefits + +- ✅ **70% Less Boilerplate:** Auto-generated options +- ✅ **Zero Maintenance:** `go generate` syncs with engine +- ✅ **Type Safety:** Compile-time validation +- ✅ **Centralized Validation:** All checks in one place +- ✅ **Idiomatic Go:** Standard functional options pattern + +## Testing + +Run tests with: + +```bash +gotestsum --format pkgname -- -race -parallel=4 ./sdk/model +``` + +Current coverage: >90% of business logic diff --git a/sdk/model/constructor.go b/sdk/model/constructor.go new file mode 100644 index 00000000..76b3dad6 --- /dev/null +++ b/sdk/model/constructor.go @@ -0,0 +1,140 @@ +package model + +import ( + "context" + "fmt" + "strings" + + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +var supportedProviders = map[string]core.ProviderName{ + "openai": core.ProviderOpenAI, + "anthropic": core.ProviderAnthropic, + "google": core.ProviderGoogle, + "groq": 
core.ProviderGroq, + "ollama": core.ProviderOllama, + "deepseek": core.ProviderDeepSeek, + "xai": core.ProviderXAI, + "cerebras": core.ProviderCerebras, + "openrouter": core.ProviderOpenRouter, +} + +var providerList = []string{ + "openai", + "anthropic", + "google", + "groq", + "ollama", + "deepseek", + "xai", + "cerebras", + "openrouter", +} + +// New creates a provider configuration using functional options +func New(ctx context.Context, provider string, model string, opts ...Option) (*core.ProviderConfig, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating provider configuration", "provider", provider, "model", model) + normalizedProvider := strings.ToLower(strings.TrimSpace(provider)) + cfg := &core.ProviderConfig{ + Provider: core.ProviderName(normalizedProvider), + Model: strings.TrimSpace(model), + } + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0, 8) + if err := validateProvider(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateModel(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateAPIURL(ctx, cfg); err != nil { + collected = append(collected, err) + } + if errs := validateParams(cfg); len(errs) > 0 { + collected = append(collected, errs...) 
+ } + if len(collected) > 0 { + return nil, &sdkerrors.BuildError{Errors: collected} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone provider config: %w", err) + } + return cloned, nil +} + +func validateProvider(ctx context.Context, cfg *core.ProviderConfig) error { + provider := strings.ToLower(strings.TrimSpace(string(cfg.Provider))) + if err := validate.NonEmpty(ctx, "provider", provider); err != nil { + return err + } + mapped, ok := supportedProviders[provider] + if !ok { + return fmt.Errorf("provider %q is not supported; must be one of %s", provider, strings.Join(providerList, ", ")) + } + cfg.Provider = mapped + return nil +} + +func validateModel(ctx context.Context, cfg *core.ProviderConfig) error { + model := strings.TrimSpace(cfg.Model) + if err := validate.NonEmpty(ctx, "model", model); err != nil { + return err + } + cfg.Model = model + return nil +} + +func validateAPIURL(ctx context.Context, cfg *core.ProviderConfig) error { + apiURL := strings.TrimSpace(cfg.APIURL) + cfg.APIURL = apiURL + if apiURL == "" { + return nil + } + if err := validate.URL(ctx, apiURL); err != nil { + return err + } + return nil +} + +func validateParams(cfg *core.ProviderConfig) []error { + errs := make([]error, 0, 5) + if cfg.Params.IsSetMaxTokens() && cfg.Params.MaxTokens <= 0 { + errs = append(errs, fmt.Errorf("max tokens must be positive: got %d", cfg.Params.MaxTokens)) + } + if cfg.Params.IsSetTemperature() { + temp := cfg.Params.Temperature + if temp < 0 || temp > 2 { + errs = append(errs, fmt.Errorf("temperature must be between 0 and 2 inclusive: got %v", temp)) + } + } + if cfg.Params.IsSetTopP() { + topP := cfg.Params.TopP + if topP < 0 || topP > 1 { + errs = append(errs, fmt.Errorf("top_p must be between 0 and 1 inclusive: got %v", topP)) + } + } + if cfg.Params.IsSetFrequencyPenalty() { + penalty := cfg.Params.FrequencyPenalty + if penalty < -2 || penalty > 2 { + errs = append(errs, fmt.Errorf("frequency 
penalty must be between -2 and 2 inclusive: got %v", penalty)) + } + } + if cfg.Params.IsSetPresencePenalty() { + penalty := cfg.Params.PresencePenalty + if penalty < -2 || penalty > 2 { + errs = append(errs, fmt.Errorf("presence penalty must be between -2 and 2 inclusive: got %v", penalty)) + } + } + return errs +} diff --git a/sdk/model/constructor_test.go b/sdk/model/constructor_test.go new file mode 100644 index 00000000..131f9069 --- /dev/null +++ b/sdk/model/constructor_test.go @@ -0,0 +1,284 @@ +package model + +import ( + "context" + "errors" + "testing" + + "github.com/compozy/compozy/engine/core" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" +) + +func TestNew(t *testing.T) { + t.Run("Should create model with minimal configuration", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "openai", "gpt-4") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg == nil { + t.Fatal("expected config, got nil") + } + if cfg.Provider != core.ProviderOpenAI { + t.Errorf("expected provider 'openai', got '%s'", cfg.Provider) + } + if cfg.Model != "gpt-4" { + t.Errorf("expected model 'gpt-4', got '%s'", cfg.Model) + } + }) + t.Run("Should trim whitespace from provider and model", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, " openai ", " gpt-4 ") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Provider != core.ProviderOpenAI { + t.Errorf("expected trimmed provider 'openai', got '%s'", cfg.Provider) + } + if cfg.Model != "gpt-4" { + t.Errorf("expected trimmed model 'gpt-4', got '%s'", cfg.Model) + } + }) + t.Run("Should normalize provider to lowercase", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "OpenAI", "gpt-4") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Provider != core.ProviderOpenAI { + t.Errorf("expected provider 'openai', got '%s'", cfg.Provider) + } + }) + t.Run("Should fail when context is nil", 
func(t *testing.T) { + var nilCtx context.Context + _, err := New(nilCtx, "openai", "gpt-4") + if err == nil { + t.Fatal("expected error for nil context") + } + if err.Error() != "context is required" { + t.Errorf("unexpected error message: %v", err) + } + }) + t.Run("Should fail when provider is empty", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "", "gpt-4") + if err == nil { + t.Fatal("expected error for empty provider") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should fail when provider is invalid", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "invalid-provider", "gpt-4") + if err == nil { + t.Fatal("expected error for invalid provider") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should fail when model is empty", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "openai", "") + if err == nil { + t.Fatal("expected error for empty model") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should create model with all options", func(t *testing.T) { + ctx := context.Background() + params := core.PromptParams{} + params.SetTemperature(0.7) + params.SetMaxTokens(1000) + cfg, err := New(ctx, "openai", "gpt-4", + WithAPIKey("test-key"), + WithAPIURL("https://api.openai.com/v1"), + WithParams(params), + WithOrganization("org-123"), + WithDefault(true), + WithMaxToolIterations(10), + WithContextWindow(8000), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.APIKey != "test-key" { + t.Errorf("expected api_key 'test-key', got '%s'", cfg.APIKey) + } + if cfg.APIURL != "https://api.openai.com/v1" { + t.Errorf("expected api_url, got '%s'", cfg.APIURL) + } + if cfg.Organization != "org-123" { + 
t.Errorf("expected organization 'org-123', got '%s'", cfg.Organization) + } + if !cfg.Default { + t.Error("expected default to be true") + } + if cfg.MaxToolIterations != 10 { + t.Errorf("expected max_tool_iterations 10, got %d", cfg.MaxToolIterations) + } + if cfg.ContextWindow != 8000 { + t.Errorf("expected context_window 8000, got %d", cfg.ContextWindow) + } + if cfg.Params.Temperature != 0.7 { + t.Errorf("expected temperature 0.7, got %v", cfg.Params.Temperature) + } + if cfg.Params.MaxTokens != 1000 { + t.Errorf("expected max_tokens 1000, got %d", cfg.Params.MaxTokens) + } + }) + t.Run("Should validate temperature range", func(t *testing.T) { + ctx := context.Background() + params := core.PromptParams{} + params.SetTemperature(3.0) + _, err := New(ctx, "openai", "gpt-4", WithParams(params)) + if err == nil { + t.Fatal("expected error for invalid temperature") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should validate max_tokens positive", func(t *testing.T) { + ctx := context.Background() + params := core.PromptParams{} + params.SetMaxTokens(-1) + _, err := New(ctx, "openai", "gpt-4", WithParams(params)) + if err == nil { + t.Fatal("expected error for negative max_tokens") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should validate top_p range", func(t *testing.T) { + ctx := context.Background() + params := core.PromptParams{} + params.SetTopP(1.5) + _, err := New(ctx, "openai", "gpt-4", WithParams(params)) + if err == nil { + t.Fatal("expected error for invalid top_p") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should validate frequency_penalty range", func(t *testing.T) { + ctx := context.Background() + params := core.PromptParams{} + params.SetFrequencyPenalty(3.0) + _, 
err := New(ctx, "openai", "gpt-4", WithParams(params)) + if err == nil { + t.Fatal("expected error for invalid frequency_penalty") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should validate presence_penalty range", func(t *testing.T) { + ctx := context.Background() + params := core.PromptParams{} + params.SetPresencePenalty(-3.0) + _, err := New(ctx, "openai", "gpt-4", WithParams(params)) + if err == nil { + t.Fatal("expected error for invalid presence_penalty") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should validate API URL format", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "openai", "gpt-4", WithAPIURL("not-a-valid-url")) + if err == nil { + t.Fatal("expected error for invalid API URL") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should accept valid API URL", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "openai", "gpt-4", WithAPIURL("https://api.openai.com/v1")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.APIURL != "https://api.openai.com/v1" { + t.Errorf("expected api_url, got '%s'", cfg.APIURL) + } + }) + t.Run("Should support all provider types", func(t *testing.T) { + ctx := context.Background() + providers := []struct { + name string + expected core.ProviderName + }{ + {"openai", core.ProviderOpenAI}, + {"anthropic", core.ProviderAnthropic}, + {"google", core.ProviderGoogle}, + {"groq", core.ProviderGroq}, + {"ollama", core.ProviderOllama}, + {"deepseek", core.ProviderDeepSeek}, + {"xai", core.ProviderXAI}, + {"cerebras", core.ProviderCerebras}, + {"openrouter", core.ProviderOpenRouter}, + } + for _, p := range providers { + cfg, err := New(ctx, p.name, "test-model") + if err != nil { + 
t.Errorf("provider %s: unexpected error: %v", p.name, err) + continue + } + if cfg.Provider != p.expected { + t.Errorf("provider %s: expected %s, got %s", p.name, p.expected, cfg.Provider) + } + } + }) + t.Run("Should perform deep copy", func(t *testing.T) { + ctx := context.Background() + params := core.PromptParams{} + params.SetTemperature(0.5) + cfg1, err := New(ctx, "openai", "gpt-4", WithParams(params)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + cfg1.Model = "modified" + cfg2, err := New(ctx, "openai", "gpt-4", WithParams(params)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg2.Model == "modified" { + t.Error("deep copy failed: configuration was modified") + } + }) + t.Run("Should accumulate multiple validation errors", func(t *testing.T) { + ctx := context.Background() + params := core.PromptParams{} + params.SetTemperature(3.0) + params.SetTopP(2.0) + _, err := New(ctx, "", "", WithParams(params)) + if err == nil { + t.Fatal("expected error for multiple validation failures") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Fatalf("expected BuildError, got %T", err) + } + if len(buildErr.Errors) < 2 { + t.Errorf("expected multiple errors, got %d", len(buildErr.Errors)) + } + }) +} diff --git a/sdk/model/generate.go b/sdk/model/generate.go new file mode 100644 index 00000000..69877775 --- /dev/null +++ b/sdk/model/generate.go @@ -0,0 +1,3 @@ +package model + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/core/provider.go -struct ProviderConfig -output options_generated.go -package model diff --git a/sdk/model/options_generated.go b/sdk/model/options_generated.go new file mode 100644 index 00000000..54211074 --- /dev/null +++ b/sdk/model/options_generated.go @@ -0,0 +1,138 @@ +// Code generated by optionsgen. DO NOT EDIT. 
+ +package model + +import core "github.com/compozy/compozy/engine/core" + +type Option func(*core.ProviderConfig) + +// WithProvider sets the Provider field +// +// Provider specifies which AI service to use for LLM operations. +// Must match one of the supported ProviderName constants. +// - **Examples**: `"openai"`, `"anthropic"`, `"google"`, `"ollama"` +func WithProvider(provider core.ProviderName) Option { + return func(cfg *core.ProviderConfig) { + cfg.Provider = provider + } +} + +// WithModel sets the Model field +// +// Model defines the specific model identifier to use with the provider. +// Model names are provider-specific and determine capabilities and pricing. +// - **Examples**: +// - OpenAI: `"gpt-4-turbo"`, `"gpt-3.5-turbo"` +// - Anthropic: `"claude-4-opus"`, `"claude-3-5-haiku-latest"` +// - Google: `"gemini-pro"`, `"gemini-pro-vision"` +// - Ollama: `"llama2:13b"`, `"mistral:7b"` +func WithModel(model string) Option { + return func(cfg *core.ProviderConfig) { + cfg.Model = model + } +} + +// WithAPIKey sets the APIKey field +// +// APIKey contains the authentication key for the AI provider. +// - **Security**: Use template references to environment variables. +// - **Examples**: `"{{ .env.OPENAI_API_KEY }}"`, `"{{ .secrets.ANTHROPIC_KEY }}"` +// > **Note:**: Required for most cloud providers, optional for local providers +func WithAPIKey(aPIKey string) Option { + return func(cfg *core.ProviderConfig) { + cfg.APIKey = aPIKey + } +} + +// WithAPIURL sets the APIURL field +// +// APIURL specifies a custom API endpoint for the provider. 
+// **Use Cases**: +// - Local model hosting (Ollama, OpenAI-compatible servers) +// - Enterprise API gateways +// - Regional API endpoints +// - Custom proxy servers +// **Examples**: `"http://localhost:11434"`, `"https://api.openai.com/v1"` +func WithAPIURL(apiurl string) Option { + return func(cfg *core.ProviderConfig) { + cfg.APIURL = apiurl + } +} + +// WithParams sets the Params field +// +// Params contains the generation parameters that control LLM behavior. +// These parameters are applied to all requests using this provider configuration. +// Can be overridden at the task or action level for specific requirements. +func WithParams(params core.PromptParams) Option { + return func(cfg *core.ProviderConfig) { + cfg.Params = params + } +} + +// WithOrganization sets the Organization field +// +// Organization specifies the organization ID for providers that support it. +// - **Primary Use**: OpenAI organization management for billing and access control +// - **Example**: `"org-123456789abcdef"` +// > **Note:**: Optional for most providers +func WithOrganization(organization string) Option { + return func(cfg *core.ProviderConfig) { + cfg.Organization = organization + } +} + +// WithDefault sets the Default field +// +// Default indicates that this model should be used as the fallback when no explicit +// model configuration is provided at the task or agent level. 
+// **Behavior**: +// - Only one model per project can be marked as default +// - When set to true, this model will be used for tasks/agents without explicit model config +// - Validation ensures at most one default model per project +// **Example**: +// ```yaml +// models: +// - provider: openai +// model: gpt-4 +// default: true # This will be used by default +// ``` +func WithDefault(defaultValue bool) Option { + return func(cfg *core.ProviderConfig) { + cfg.Default = defaultValue + } +} + +// WithMaxToolIterations sets the MaxToolIterations field +// +// MaxToolIterations optionally caps the maximum number of tool-call iterations +// during a single LLM request when tools are available. +// When > 0, overrides the global default for this model; 0 uses the global default. +func WithMaxToolIterations(maxToolIterations int) Option { + return func(cfg *core.ProviderConfig) { + cfg.MaxToolIterations = maxToolIterations + } +} + +// WithRateLimit sets the RateLimit field +// +// RateLimit overrides concurrency limits and queue size for this provider. +// When omitted the orchestrator applies the global defaults. +func WithRateLimit(rateLimit *core.ProviderRateLimitConfig) Option { + return func(cfg *core.ProviderConfig) { + cfg.RateLimit = rateLimit + } +} + +// WithContextWindow sets the ContextWindow field +// +// ContextWindow optionally overrides the provider's default context window size. +// When > 0, this value replaces the provider's default ContextWindowTokens capability. +// Useful for providers like OpenRouter that support multiple models with varying limits. 
+// - **Example**: 200000 for Claude 3.5 Sonnet via OpenRouter +// - **Default**: Uses provider's default when not specified or <= 0 +func WithContextWindow(contextWindow int) Option { + return func(cfg *core.ProviderConfig) { + cfg.ContextWindow = contextWindow + } +} diff --git a/sdk/project/README.md b/sdk/project/README.md new file mode 100644 index 00000000..3d0adf1e --- /dev/null +++ b/sdk/project/README.md @@ -0,0 +1,282 @@ +# Project Package + +SDK for creating Compozy project configurations using functional options. + +## Overview + +The project package provides a type-safe, ergonomic API for building project configurations - the top-level orchestrator that integrates agents, workflows, tasks, tools, memory, knowledge, and schedules. + +## Installation + +```go +import "github.com/compozy/compozy/sdk/project" +``` + +## Usage + +### Minimal Project + +```go +cfg, err := project.New(ctx, "my-project", + project.WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), +) +``` + +### Full Project Configuration + +```go +cfg, err := project.New(ctx, "enterprise-ai-system", + project.WithVersion("2.1.0"), + project.WithDescription("Multi-agent system for enterprise automation"), + project.WithAuthor(core.Author{ + Name: "AI Team", + Email: "ai@company.com", + Organization: "ACME Corp", + }), + project.WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflows/customer-support.yaml"}, + {Source: "./workflows/data-pipeline.yaml"}, + }), + project.WithModels([]*core.ProviderConfig{ + { + Provider: core.ProviderOpenAI, + Model: "gpt-4", + APIKey: "{{.env.OPENAI_API_KEY}}", + Default: true, + }, + { + Provider: core.ProviderAnthropic, + Model: "claude-3-opus", + APIKey: "{{.env.ANTHROPIC_API_KEY}}", + }, + }), + project.WithTools([]tool.Config{ + {ID: "code-analyzer", Description: "Analyzes code quality"}, + {ID: "data-processor", Description: "Processes data"}, + }), + project.WithMemories([]*memory.Config{ + {ID: 
"conversation", Type: memory.TypeBuffer}, + }), + project.WithEmbedders([]knowledge.EmbedderConfig{ + {ID: "openai-embedder", Provider: "openai"}, + }), + project.WithVectorDBs([]knowledge.VectorDBConfig{ + {ID: "pinecone-db", Type: "pinecone"}, + }), + project.WithKnowledgeBases([]knowledge.BaseConfig{ + {ID: "company-docs"}, + }), + project.WithSchedules([]*projectschedule.Config{ + { + ID: "daily-report", + WorkflowID: "./workflows/data-pipeline.yaml", + Cron: "0 9 * * *", + }, + }), +) +``` + +## API Reference + +### Constructor + +```go +func New(ctx context.Context, name string, opts ...Option) (*engineproject.Config, error) +``` + +Creates a new project configuration with the given name and options. + +**Parameters:** + +- `ctx` - Context for cancellation and logging +- `name` - Project name (alphanumeric and hyphens only) +- `opts` - Functional options for configuration + +**Returns:** + +- `*engineproject.Config` - Deep copied configuration +- `error` - Validation errors (may be `*sdkerrors.BuildError` with multiple errors) + +### Options + +#### Core Options + +- `WithVersion(version string)` - Sets semantic version (e.g., "1.0.0") +- `WithDescription(description string)` - Sets project description +- `WithAuthor(author core.Author)` - Sets author information + +#### Resource Options + +- `WithWorkflows(workflows []*WorkflowSourceConfig)` - Registers workflow files (required) +- `WithModels(models []*core.ProviderConfig)` - Registers LLM providers +- `WithTools(tools []tool.Config)` - Registers shared tools +- `WithMemories(memories []*memory.Config)` - Registers memory resources +- `WithSchedules(schedules []*projectschedule.Config)` - Registers scheduled workflows + +#### Knowledge Options + +- `WithEmbedders(embedders []knowledge.EmbedderConfig)` - Registers embedders +- `WithVectorDBs(vectorDBs []knowledge.VectorDBConfig)` - Registers vector databases +- `WithKnowledgeBases(bases []knowledge.BaseConfig)` - Registers knowledge bases +- 
`WithKnowledge(bindings []core.KnowledgeBinding)` - Sets knowledge binding (max 1) + +#### Advanced Options + +- `WithSchemas(schemas []schema.Schema)` - Registers data schemas +- `WithOpts(opts Opts)` - Sets project configuration options +- `WithRuntime(runtime RuntimeConfig)` - Sets runtime configuration +- `WithAutoLoad(autoload *autoload.Config)` - Sets autoload configuration +- `WithMCPs(mcps []mcp.Config)` - Registers MCP servers +- `WithMonitoringConfig(config *monitoring.Config)` - Sets monitoring +- `WithCWD(cwd *core.PathCWD)` - Sets working directory + +### Cross-Reference Validation + +```go +func ValidateCrossReferences( + cfg *engineproject.Config, + agents []agent.Config, + workflows []workflow.Config, +) error +``` + +Validates that agents reference valid tools, memory, and knowledge bases defined in the project config. + +**Note:** This performs basic structural validation. Full semantic validation (e.g., verifying agent references in tasks) happens during workflow execution. 
+
+## Validation Rules
+
+### Required Fields
+
+- **Name**: Non-empty, alphanumeric with hyphens, max 63 characters
+- **Workflows**: At least one workflow source must be registered
+
+### Version
+
+- Must be valid semantic version if specified (e.g., "1.0.0", "2.1.0-alpha.1")
+
+### Author Email
+
+- Must be valid email format if specified
+
+### Models
+
+- Only one model can be marked as default
+
+### Workflows
+
+- Source path cannot be empty
+- Source path is relative to project root
+
+### Schedules
+
+- Schedule IDs must be unique
+- Workflow IDs must not be empty
+- Schedule must reference existing workflow source
+
+### Tools
+
+- Tool IDs must be unique and non-empty
+
+### Memories
+
+- Memory IDs must be unique and non-empty
+- Resource field defaults to "memory" if empty
+
+### Knowledge
+
+- Only one knowledge binding supported (MVP)
+- Embedder IDs must be unique
+- VectorDB IDs must be unique
+- Knowledge base IDs must be unique
+
+## Migration Guide
+
+### From Old SDK (Builder Pattern)
+
+**Before:**
+
+```go
+cfg, err := project.New("my-project").
+    WithVersion("1.0.0").
+    WithDescription("My project").
+    AddWorkflow(wf).
+    AddModel(model).
+    Build(ctx)
+```
+
+**After:**
+
+```go
+cfg, err := project.New(ctx, "my-project",
+    project.WithVersion("1.0.0"),
+    project.WithDescription("My project"),
+    project.WithWorkflows([]*engineproject.WorkflowSourceConfig{
+        {Source: "./workflow.yaml"},
+    }),
+    project.WithModels([]*core.ProviderConfig{model}),
+)
+```
+
+### Key Changes
+
+1. **Context First**: `ctx` is now the first parameter
+2. **No Build()**: Configuration is created and validated immediately
+3. **Slices for Collections**: Use `WithWorkflows()`, `WithModels()`, etc. with slices
+4. **Immediate Validation**: Errors are returned from `New()`, not from `Build()`
+5. **Deep Copy**: Returned config is automatically deep copied for safety
+
+## Examples
+
+See `sdk/cmd/` for complete working examples of project configurations. 
+ +## Testing + +```bash +# Run tests +gotestsum -- -race -parallel=4 ./sdk/project + +# Run with coverage +go test -race -coverprofile=coverage.out ./sdk/project +go tool cover -html=coverage.out +``` + +## Error Handling + +The constructor returns `*sdkerrors.BuildError` when multiple validation errors occur: + +```go +cfg, err := project.New(ctx, "", // Empty name + project.WithVersion("invalid"), // Invalid version +) +if err != nil { + var buildErr *sdkerrors.BuildError + if errors.As(err, &buildErr) { + for _, e := range buildErr.Errors { + fmt.Println("Error:", e) + } + } +} +``` + +## Best Practices + +1. **Use Context**: Always pass a valid context with logger +2. **Validate Early**: Run `New()` during application initialization +3. **Organize Workflows**: Group related workflows in subdirectories +4. **Reference by ID**: Use consistent IDs for tools, memory, and knowledge +5. **Single Default Model**: Mark only one model as default +6. **Schedule Wisely**: Use cron expressions for workflow schedules +7. **Document Resources**: Provide clear descriptions for tools and workflows + +## Performance + +- **Constructor Time**: < 1µs for minimal config, < 10µs for full config +- **Memory Allocation**: < 5KB per constructor call +- **Deep Copy Overhead**: Negligible (~100ns) using efficient `core.DeepCopy()` + +## Thread Safety + +The `New()` constructor is thread-safe. The returned configuration is a deep copy and safe for concurrent read access. Modifications to the returned config do not affect the internal state. 
diff --git a/sdk/project/constructor.go b/sdk/project/constructor.go new file mode 100644 index 00000000..2a6e5041 --- /dev/null +++ b/sdk/project/constructor.go @@ -0,0 +1,408 @@ +package project + +import ( + "context" + "fmt" + "net/mail" + "strings" + + "github.com/Masterminds/semver/v3" + "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/engine/knowledge" + "github.com/compozy/compozy/engine/mcp" + "github.com/compozy/compozy/engine/memory" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + "github.com/compozy/compozy/engine/tool" + "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +// New creates a project configuration using functional options +func New(ctx context.Context, name string, opts ...Option) (*engineproject.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating project configuration", "project", name) + cfg := initializeConfig(name) + for _, opt := range opts { + opt(cfg) + } + collected := collectValidationErrors(ctx, cfg) + if len(collected) > 0 { + return nil, &sdkerrors.BuildError{Errors: collected} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone project config: %w", err) + } + return cloned, nil +} + +func initializeConfig(name string) *engineproject.Config { + return &engineproject.Config{ + Name: strings.TrimSpace(name), + Models: make([]*core.ProviderConfig, 0), + Workflows: make([]*engineproject.WorkflowSourceConfig, 0), + Schedules: make([]*projectschedule.Config, 0), + Tools: make([]tool.Config, 0), + Embedders: make([]knowledge.EmbedderConfig, 0), + VectorDBs: make([]knowledge.VectorDBConfig, 0), 
+ KnowledgeBases: make([]knowledge.BaseConfig, 0), + Knowledge: make([]core.KnowledgeBinding, 0), + MCPs: make([]mcp.Config, 0), + Memories: make([]*memory.Config, 0), + } +} + +func collectValidationErrors(ctx context.Context, cfg *engineproject.Config) []error { + collected := make([]error, 0) + appendIfError(&collected, validateName(ctx, cfg)) + appendIfError(&collected, validateVersion(cfg)) + validateDescription(cfg) + appendIfError(&collected, validateAuthor(cfg)) + appendIfError(&collected, validateWorkflows(cfg)) + appendIfError(&collected, validateModels(cfg)) + appendIfError(&collected, validateSchedules(cfg)) + appendIfError(&collected, validateTools(cfg)) + appendIfError(&collected, validateMemories(cfg)) + appendIfError(&collected, validateKnowledge(cfg)) + return collected +} + +func appendIfError(collected *[]error, err error) { + if err != nil { + *collected = append(*collected, err) + } +} + +func validateName(ctx context.Context, cfg *engineproject.Config) error { + cfg.Name = strings.TrimSpace(cfg.Name) + if err := validate.Required(ctx, "project name", cfg.Name); err != nil { + return err + } + if err := validate.ID(ctx, cfg.Name); err != nil { + return fmt.Errorf("project name must be alphanumeric or hyphenated: %w", err) + } + return nil +} + +func validateVersion(cfg *engineproject.Config) error { + version := strings.TrimSpace(cfg.Version) + if version == "" { + return nil + } + if _, err := semver.NewVersion(version); err != nil { + return fmt.Errorf("version must be valid semver: %w", err) + } + cfg.Version = version + return nil +} + +func validateDescription(cfg *engineproject.Config) { + cfg.Description = strings.TrimSpace(cfg.Description) +} + +func validateAuthor(cfg *engineproject.Config) error { + cfg.Author.Name = strings.TrimSpace(cfg.Author.Name) + cfg.Author.Email = strings.TrimSpace(cfg.Author.Email) + cfg.Author.Organization = strings.TrimSpace(cfg.Author.Organization) + if cfg.Author.Email != "" { + if _, err := 
mail.ParseAddress(cfg.Author.Email); err != nil { + return fmt.Errorf("author email must be valid: %w", err) + } + } + return nil +} + +func validateWorkflows(cfg *engineproject.Config) error { + if len(cfg.Workflows) == 0 { + return fmt.Errorf("at least one workflow must be registered") + } + for i, wf := range cfg.Workflows { + if wf == nil { + return fmt.Errorf("workflow[%d] cannot be nil", i) + } + wf.Source = strings.TrimSpace(wf.Source) + if wf.Source == "" { + return fmt.Errorf("workflow[%d] source cannot be empty", i) + } + } + return nil +} + +func validateModels(cfg *engineproject.Config) error { + if len(cfg.Models) == 0 { + return nil + } + firstDefaultIdx := -1 + for i, model := range cfg.Models { + if model != nil && model.Default { + if firstDefaultIdx == -1 { + firstDefaultIdx = i + } else { + return fmt.Errorf( + "only one model can be marked as default, found multiple at indices %d and %d", + firstDefaultIdx, + i, + ) + } + } + } + return nil +} + +func validateSchedules(cfg *engineproject.Config) error { + if len(cfg.Schedules) == 0 { + return nil + } + scheduleIDs := make(map[string]bool, len(cfg.Schedules)) + workflowIDSet := buildWorkflowIDSet(cfg) + for i, sched := range cfg.Schedules { + if sched == nil { + continue + } + sched.ID = strings.TrimSpace(sched.ID) + if sched.ID == "" { + return fmt.Errorf("schedule[%d] id cannot be empty", i) + } + if scheduleIDs[sched.ID] { + return fmt.Errorf("duplicate schedule id '%s' found", sched.ID) + } + scheduleIDs[sched.ID] = true + sched.WorkflowID = strings.TrimSpace(sched.WorkflowID) + if sched.WorkflowID == "" { + return fmt.Errorf("schedule[%d] workflow id cannot be empty", i) + } + if _, exists := workflowIDSet[sched.WorkflowID]; !exists { + return fmt.Errorf("schedule '%s' references unknown workflow '%s'", sched.ID, sched.WorkflowID) + } + } + return nil +} + +func buildWorkflowIDSet(cfg *engineproject.Config) map[string]struct{} { + ids := make(map[string]struct{}, len(cfg.Workflows)) + for _, 
wf := range cfg.Workflows { + if wf == nil { + continue + } + source := strings.TrimSpace(wf.Source) + if source != "" { + ids[source] = struct{}{} + } + } + return ids +} + +func validateTools(cfg *engineproject.Config) error { + if len(cfg.Tools) == 0 { + return nil + } + toolIDs := make(map[string]bool, len(cfg.Tools)) + for i := range cfg.Tools { + toolCfg := &cfg.Tools[i] + toolCfg.ID = strings.TrimSpace(toolCfg.ID) + if toolCfg.ID == "" { + return fmt.Errorf("tool[%d] id cannot be empty", i) + } + if toolIDs[toolCfg.ID] { + return fmt.Errorf("duplicate tool id '%s' found", toolCfg.ID) + } + toolIDs[toolCfg.ID] = true + } + return nil +} + +func validateMemories(cfg *engineproject.Config) error { + if len(cfg.Memories) == 0 { + return nil + } + memoryIDs := make(map[string]bool, len(cfg.Memories)) + for i, mem := range cfg.Memories { + if mem == nil { + return fmt.Errorf("memory[%d] cannot be nil", i) + } + if strings.TrimSpace(mem.Resource) == "" { + mem.Resource = string(core.ConfigMemory) + } + mem.ID = strings.TrimSpace(mem.ID) + if mem.ID == "" { + return fmt.Errorf("memory[%d] id cannot be empty", i) + } + if memoryIDs[mem.ID] { + return fmt.Errorf("duplicate memory id '%s' found", mem.ID) + } + memoryIDs[mem.ID] = true + } + return nil +} + +func validateKnowledge(cfg *engineproject.Config) error { + if len(cfg.Knowledge) > 1 { + return fmt.Errorf("only one knowledge binding is supported") + } + if len(cfg.Knowledge) == 1 { + binding := &cfg.Knowledge[0] + binding.ID = strings.TrimSpace(binding.ID) + if binding.ID == "" { + return fmt.Errorf("knowledge binding requires an id reference") + } + } + embedderIDs := make(map[string]bool, len(cfg.Embedders)) + for i := range cfg.Embedders { + embedder := &cfg.Embedders[i] + embedder.ID = strings.TrimSpace(embedder.ID) + if embedder.ID == "" { + return fmt.Errorf("embedder[%d] id cannot be empty", i) + } + if embedderIDs[embedder.ID] { + return fmt.Errorf("duplicate embedder id '%s' found", embedder.ID) + } + 
embedderIDs[embedder.ID] = true + } + vectorDBIDs := make(map[string]bool, len(cfg.VectorDBs)) + for i := range cfg.VectorDBs { + vectorDB := &cfg.VectorDBs[i] + vectorDB.ID = strings.TrimSpace(vectorDB.ID) + if vectorDB.ID == "" { + return fmt.Errorf("vector_db[%d] id cannot be empty", i) + } + if vectorDBIDs[vectorDB.ID] { + return fmt.Errorf("duplicate vector_db id '%s' found", vectorDB.ID) + } + vectorDBIDs[vectorDB.ID] = true + } + knowledgeBaseIDs := make(map[string]bool, len(cfg.KnowledgeBases)) + for i := range cfg.KnowledgeBases { + kb := &cfg.KnowledgeBases[i] + kb.ID = strings.TrimSpace(kb.ID) + if kb.ID == "" { + return fmt.Errorf("knowledge_base[%d] id cannot be empty", i) + } + if knowledgeBaseIDs[kb.ID] { + return fmt.Errorf("duplicate knowledge_base id '%s' found", kb.ID) + } + knowledgeBaseIDs[kb.ID] = true + } + return nil +} + +// ValidateCrossReferences validates agent, workflow, tool, and other references +// This function is exported for use in project loading scenarios where +// agents and workflows are provided separately from the project config. +// +// Note: This performs basic structural validation. Full semantic validation +// (e.g., verifying agent references in tasks) happens during workflow execution +// when the actual workflow configs are loaded and parsed. 
+func ValidateCrossReferences( + cfg *engineproject.Config, + agents []agent.Config, + _ []workflow.Config, +) error { + collected := make([]error, 0) + toolIDSet := buildToolIDSet(cfg) + memoryIDSet := buildMemoryIDSet(cfg) + knowledgeIDSet := buildKnowledgeBaseIDSet(cfg) + for i := range agents { + validateAgentReferences(&agents[i], toolIDSet, memoryIDSet, knowledgeIDSet, &collected) + } + if len(collected) > 0 { + return &sdkerrors.BuildError{Errors: collected} + } + return nil +} + +func validateAgentReferences( + agentCfg *agent.Config, + toolIDSet, memoryIDSet, knowledgeIDSet map[string]struct{}, + collected *[]error, +) { + validateAgentTools(agentCfg, toolIDSet, collected) + validateAgentMemory(agentCfg, memoryIDSet, collected) + validateAgentKnowledge(agentCfg, knowledgeIDSet, collected) +} + +func validateAgentTools(agentCfg *agent.Config, toolIDSet map[string]struct{}, collected *[]error) { + for j := range agentCfg.Tools { + toolID := strings.TrimSpace(agentCfg.Tools[j].ID) + if toolID == "" { + continue + } + if _, exists := toolIDSet[toolID]; !exists { + *collected = append( + *collected, + fmt.Errorf("agent '%s' references unknown tool '%s'", agentCfg.ID, toolID), + ) + } + } +} + +func validateAgentMemory(agentCfg *agent.Config, memoryIDSet map[string]struct{}, collected *[]error) { + for j := range agentCfg.Memory { + memID := strings.TrimSpace(agentCfg.Memory[j].ID) + if memID == "" { + continue + } + if _, exists := memoryIDSet[memID]; !exists { + *collected = append( + *collected, + fmt.Errorf("agent '%s' references unknown memory '%s'", agentCfg.ID, memID), + ) + } + } +} + +func validateAgentKnowledge(agentCfg *agent.Config, knowledgeIDSet map[string]struct{}, collected *[]error) { + for j := range agentCfg.Knowledge { + kbID := strings.TrimSpace(agentCfg.Knowledge[j].ID) + if kbID == "" { + continue + } + if _, exists := knowledgeIDSet[kbID]; !exists { + *collected = append( + *collected, + fmt.Errorf("agent '%s' references unknown knowledge 
base '%s'", agentCfg.ID, kbID), + ) + } + } +} + +func buildToolIDSet(cfg *engineproject.Config) map[string]struct{} { + ids := make(map[string]struct{}, len(cfg.Tools)) + for i := range cfg.Tools { + id := strings.TrimSpace(cfg.Tools[i].ID) + if id != "" { + ids[id] = struct{}{} + } + } + return ids +} + +func buildMemoryIDSet(cfg *engineproject.Config) map[string]struct{} { + ids := make(map[string]struct{}, len(cfg.Memories)) + for i := range cfg.Memories { + if cfg.Memories[i] != nil { + id := strings.TrimSpace(cfg.Memories[i].ID) + if id != "" { + ids[id] = struct{}{} + } + } + } + return ids +} + +func buildKnowledgeBaseIDSet(cfg *engineproject.Config) map[string]struct{} { + ids := make(map[string]struct{}, len(cfg.KnowledgeBases)) + for i := range cfg.KnowledgeBases { + id := strings.TrimSpace(cfg.KnowledgeBases[i].ID) + if id != "" { + ids[id] = struct{}{} + } + } + return ids +} diff --git a/sdk/project/constructor_test.go b/sdk/project/constructor_test.go new file mode 100644 index 00000000..ba7e654b --- /dev/null +++ b/sdk/project/constructor_test.go @@ -0,0 +1,489 @@ +package project + +import ( + "context" + "testing" + + "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/engine/knowledge" + "github.com/compozy/compozy/engine/memory" + engineproject "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + "github.com/compozy/compozy/engine/tool" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew_MinimalConfig(t *testing.T) { + cfg, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + ) + require.NoError(t, err) + assert.Equal(t, "test-project", cfg.Name) + assert.Len(t, cfg.Workflows, 1) + assert.Equal(t, "./workflow.yaml", 
cfg.Workflows[0].Source) +} + +func TestNew_FullConfig(t *testing.T) { + cfg, err := New(t.Context(), "test-project", + WithVersion("1.0.0"), + WithDescription("Test project"), + WithAuthor(core.Author{ + Name: "Test Author", + Email: "test@example.com", + Organization: "Test Org", + }), + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + WithModels([]*core.ProviderConfig{ + { + Provider: core.ProviderOpenAI, + Model: "gpt-4", + Default: true, + }, + }), + WithTools([]tool.Config{ + {ID: "tool1"}, + }), + WithMemories([]*memory.Config{ + {ID: "memory1", Resource: string(core.ConfigMemory)}, + }), + WithEmbedders([]knowledge.EmbedderConfig{ + {ID: "embedder1", Provider: "openai"}, + }), + WithVectorDBs([]knowledge.VectorDBConfig{ + {ID: "vectordb1", Type: "pinecone"}, + }), + WithKnowledgeBases([]knowledge.BaseConfig{ + {ID: "kb1"}, + }), + ) + require.NoError(t, err) + assert.Equal(t, "test-project", cfg.Name) + assert.Equal(t, "1.0.0", cfg.Version) + assert.Equal(t, "Test project", cfg.Description) + assert.Equal(t, "Test Author", cfg.Author.Name) + assert.Equal(t, "test@example.com", cfg.Author.Email) + assert.Equal(t, "Test Org", cfg.Author.Organization) + assert.Len(t, cfg.Workflows, 1) + assert.Len(t, cfg.Models, 1) + assert.True(t, cfg.Models[0].Default) + assert.Len(t, cfg.Tools, 1) + assert.Len(t, cfg.Memories, 1) + assert.Len(t, cfg.Embedders, 1) + assert.Len(t, cfg.VectorDBs, 1) + assert.Len(t, cfg.KnowledgeBases, 1) +} + +func TestNew_NilContext(t *testing.T) { + var nilCtx context.Context + _, err := New(nilCtx, "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "context is required") +} + +func TestNew_EmptyName(t *testing.T) { + _, err := New(t.Context(), "", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + ) + require.Error(t, err) + 
assert.Contains(t, err.Error(), "project name") +} + +func TestNew_InvalidName(t *testing.T) { + _, err := New(t.Context(), "test project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "alphanumeric or hyphenated") +} + +func TestNew_InvalidVersion(t *testing.T) { + _, err := New(t.Context(), "test-project", + WithVersion("invalid-version"), + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "version must be valid semver") +} + +func TestNew_InvalidAuthorEmail(t *testing.T) { + _, err := New(t.Context(), "test-project", + WithAuthor(core.Author{ + Name: "Test", + Email: "invalid-email", + }), + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "author email must be valid") +} + +func TestNew_NoWorkflows(t *testing.T) { + _, err := New(t.Context(), "test-project") + require.Error(t, err) + assert.Contains(t, err.Error(), "at least one workflow must be registered") +} + +func TestNew_EmptyWorkflowSource(t *testing.T) { + _, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: ""}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "workflow[0] source cannot be empty") +} + +func TestNew_MultipleDefaultModels(t *testing.T) { + _, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + WithModels([]*core.ProviderConfig{ + {Provider: core.ProviderOpenAI, Model: "gpt-4", Default: true}, + {Provider: core.ProviderAnthropic, Model: "claude-3", Default: true}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "only one model can be marked as default") +} + +func TestNew_DuplicateScheduleIDs(t 
*testing.T) { + _, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow1.yaml"}, + }), + WithSchedules([]*projectschedule.Config{ + {ID: "schedule1", WorkflowID: "./workflow1.yaml"}, + {ID: "schedule1", WorkflowID: "./workflow1.yaml"}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "duplicate schedule id 'schedule1'") +} + +func TestNew_ScheduleReferencesUnknownWorkflow(t *testing.T) { + _, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow1.yaml"}, + }), + WithSchedules([]*projectschedule.Config{ + {ID: "schedule1", WorkflowID: "./unknown.yaml"}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "references unknown workflow") +} + +func TestNew_DuplicateToolIDs(t *testing.T) { + _, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + WithTools([]tool.Config{ + {ID: "tool1"}, + {ID: "tool1"}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "duplicate tool id 'tool1'") +} + +func TestNew_DuplicateMemoryIDs(t *testing.T) { + _, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + WithMemories([]*memory.Config{ + {ID: "mem1"}, + {ID: "mem1"}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "duplicate memory id 'mem1'") +} + +func TestNew_MemoryResourceDefaulting(t *testing.T) { + cfg, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + WithMemories([]*memory.Config{ + {ID: "mem1"}, + }), + ) + require.NoError(t, err) + assert.Equal(t, string(core.ConfigMemory), cfg.Memories[0].Resource) +} + +func TestNew_MultipleKnowledgeBindings(t *testing.T) { + _, err := New(t.Context(), "test-project", + 
WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + WithKnowledge([]core.KnowledgeBinding{ + {ID: "kb1"}, + {ID: "kb2"}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "only one knowledge binding is supported") +} + +func TestNew_EmptyKnowledgeBindingID(t *testing.T) { + _, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + WithKnowledge([]core.KnowledgeBinding{ + {ID: ""}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "knowledge binding requires an id reference") +} + +func TestNew_DuplicateEmbedderIDs(t *testing.T) { + _, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + WithEmbedders([]knowledge.EmbedderConfig{ + {ID: "emb1", Provider: "openai"}, + {ID: "emb1", Provider: "cohere"}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "duplicate embedder id 'emb1'") +} + +func TestNew_DuplicateVectorDBIDs(t *testing.T) { + _, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + WithVectorDBs([]knowledge.VectorDBConfig{ + {ID: "vdb1", Type: "pinecone"}, + {ID: "vdb1", Type: "qdrant"}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "duplicate vector_db id 'vdb1'") +} + +func TestNew_DuplicateKnowledgeBaseIDs(t *testing.T) { + _, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + WithKnowledgeBases([]knowledge.BaseConfig{ + {ID: "kb1"}, + {ID: "kb1"}, + }), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "duplicate knowledge_base id 'kb1'") +} + +func TestNew_WhitespaceTrimming(t *testing.T) { + cfg, err := New(t.Context(), " test-project ", + WithVersion(" 1.0.0 "), + WithDescription(" Test description "), + 
WithAuthor(core.Author{ + Name: " Test Author ", + Email: " test@example.com ", + Organization: " Test Org ", + }), + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: " ./workflow.yaml "}, + }), + ) + require.NoError(t, err) + assert.Equal(t, "test-project", cfg.Name) + assert.Equal(t, "1.0.0", cfg.Version) + assert.Equal(t, "Test description", cfg.Description) + assert.Equal(t, "Test Author", cfg.Author.Name) + assert.Equal(t, "test@example.com", cfg.Author.Email) + assert.Equal(t, "Test Org", cfg.Author.Organization) + assert.Equal(t, "./workflow.yaml", cfg.Workflows[0].Source) +} + +func TestNew_DeepCopy(t *testing.T) { + tools := []tool.Config{{ID: "tool1"}} + cfg1, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + WithTools(tools), + ) + require.NoError(t, err) + tools[0].ID = "modified" + cfg2, err := New(t.Context(), "test-project", + WithWorkflows([]*engineproject.WorkflowSourceConfig{ + {Source: "./workflow.yaml"}, + }), + WithTools([]tool.Config{{ID: "tool1"}}), + ) + require.NoError(t, err) + assert.Equal(t, "tool1", cfg1.Tools[0].ID) + assert.Equal(t, "tool1", cfg2.Tools[0].ID) +} + +func TestNew_MultipleValidationErrors(t *testing.T) { + _, err := New(t.Context(), "", + WithVersion("invalid"), + WithAuthor(core.Author{Email: "invalid"}), + ) + require.Error(t, err) + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + assert.GreaterOrEqual(t, len(buildErr.Errors), 3) +} + +func TestValidateCrossReferences_AgentToolReference(t *testing.T) { + cfg := &engineproject.Config{ + Tools: []tool.Config{ + {ID: "tool1"}, + }, + } + agents := []agent.Config{ + { + ID: "agent1", + LLMProperties: agent.LLMProperties{ + Tools: []tool.Config{{ID: "tool1"}}, + }, + }, + } + err := ValidateCrossReferences(cfg, agents, nil) + require.NoError(t, err) +} + +func TestValidateCrossReferences_AgentToolReferenceUnknown(t *testing.T) { + cfg := 
&engineproject.Config{ + Tools: []tool.Config{ + {ID: "tool1"}, + }, + } + agents := []agent.Config{ + { + ID: "agent1", + LLMProperties: agent.LLMProperties{ + Tools: []tool.Config{{ID: "unknown-tool"}}, + }, + }, + } + err := ValidateCrossReferences(cfg, agents, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "references unknown tool 'unknown-tool'") +} + +func TestValidateCrossReferences_AgentMemoryReference(t *testing.T) { + cfg := &engineproject.Config{ + Memories: []*memory.Config{ + {ID: "mem1"}, + }, + } + agents := []agent.Config{ + { + ID: "agent1", + LLMProperties: agent.LLMProperties{ + Memory: []core.MemoryReference{{ID: "mem1"}}, + }, + }, + } + err := ValidateCrossReferences(cfg, agents, nil) + require.NoError(t, err) +} + +func TestValidateCrossReferences_AgentMemoryReferenceUnknown(t *testing.T) { + cfg := &engineproject.Config{ + Memories: []*memory.Config{ + {ID: "mem1"}, + }, + } + agents := []agent.Config{ + { + ID: "agent1", + LLMProperties: agent.LLMProperties{ + Memory: []core.MemoryReference{{ID: "unknown-mem"}}, + }, + }, + } + err := ValidateCrossReferences(cfg, agents, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "references unknown memory 'unknown-mem'") +} + +func TestValidateCrossReferences_AgentKnowledgeReference(t *testing.T) { + cfg := &engineproject.Config{ + KnowledgeBases: []knowledge.BaseConfig{ + {ID: "kb1"}, + }, + } + agents := []agent.Config{ + { + ID: "agent1", + Knowledge: []core.KnowledgeBinding{{ID: "kb1"}}, + }, + } + err := ValidateCrossReferences(cfg, agents, nil) + require.NoError(t, err) +} + +func TestValidateCrossReferences_AgentKnowledgeReferenceUnknown(t *testing.T) { + cfg := &engineproject.Config{ + KnowledgeBases: []knowledge.BaseConfig{ + {ID: "kb1"}, + }, + } + agents := []agent.Config{ + { + ID: "agent1", + Knowledge: []core.KnowledgeBinding{{ID: "unknown-kb"}}, + }, + } + err := ValidateCrossReferences(cfg, agents, nil) + require.Error(t, err) + assert.Contains(t, 
err.Error(), "references unknown knowledge base 'unknown-kb'") +} + +func TestValidateCrossReferences_MultipleErrors(t *testing.T) { + cfg := &engineproject.Config{ + Tools: []tool.Config{{ID: "tool1"}}, + Memories: []*memory.Config{{ID: "mem1"}}, + KnowledgeBases: []knowledge.BaseConfig{{ID: "kb1"}}, + } + agents := []agent.Config{ + { + ID: "agent1", + LLMProperties: agent.LLMProperties{ + Tools: []tool.Config{{ID: "unknown-tool"}}, + Memory: []core.MemoryReference{{ID: "unknown-mem"}}, + }, + Knowledge: []core.KnowledgeBinding{{ID: "unknown-kb"}}, + }, + } + err := ValidateCrossReferences(cfg, agents, nil) + require.Error(t, err) + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + assert.Len(t, buildErr.Errors, 3) +} diff --git a/sdk/project/generate.go b/sdk/project/generate.go new file mode 100644 index 00000000..4a4fae90 --- /dev/null +++ b/sdk/project/generate.go @@ -0,0 +1,3 @@ +package project + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/project/config.go -struct Config -output options_generated.go diff --git a/sdk/project/options_generated.go b/sdk/project/options_generated.go new file mode 100644 index 00000000..8406c5a7 --- /dev/null +++ b/sdk/project/options_generated.go @@ -0,0 +1,309 @@ +// Code generated by optionsgen. DO NOT EDIT. 
+ +package project + +import ( + autoload "github.com/compozy/compozy/engine/autoload" + core "github.com/compozy/compozy/engine/core" + monitoring "github.com/compozy/compozy/engine/infra/monitoring" + knowledge "github.com/compozy/compozy/engine/knowledge" + mcp "github.com/compozy/compozy/engine/mcp" + memory "github.com/compozy/compozy/engine/memory" + project "github.com/compozy/compozy/engine/project" + projectschedule "github.com/compozy/compozy/engine/project/schedule" + schema "github.com/compozy/compozy/engine/schema" + tool "github.com/compozy/compozy/engine/tool" +) + +type Option func(*project.Config) + +// WithName sets the Name field +// +// Name is the unique identifier for this Compozy project. +// **Requirements**: +// - Must be unique within your Compozy installation +// - Alphanumeric characters, hyphens, and underscores only +// - Cannot start with a number +// - Maximum 63 characters +// - **Examples**: `"customer-support-ai"`, `"data-pipeline"`, `"content-generator"` +func WithName(name string) Option { + return func(cfg *project.Config) { + cfg.Name = name + } +} + +// WithVersion sets the Version field +// +// Version specifies the semantic version of this project configuration. +// **Format**: Follows [Semantic Versioning 2.0.0](https://semver.org/) +// - `MAJOR.MINOR.PATCH` (e.g., `1.2.3`) +// - Optional pre-release: `1.0.0-alpha.1` +// - Optional build metadata: `1.0.0+20230615` +func WithVersion(version string) Option { + return func(cfg *project.Config) { + cfg.Version = version + } +} + +// WithDescription sets the Description field +// +// Description provides a human-readable explanation of the project's purpose and capabilities. 
+// **Guidelines**: +// - Be specific about what the project does +// - Include primary use cases and benefits +// - Keep it concise (1-3 sentences) +// - Avoid technical jargon for broader understanding +// **Example**: `"Multi-agent customer support system with automated ticket routing"` +func WithDescription(description string) Option { + return func(cfg *project.Config) { + cfg.Description = description + } +} + +// WithAuthor sets the Author field +// +// Author information for the project. +// $ref: inline:#author +func WithAuthor(author core.Author) Option { + return func(cfg *project.Config) { + cfg.Author = author + } +} + +// WithWorkflows sets the Workflows field +// +// Workflows defines the list of workflow files that compose this project's AI capabilities. +func WithWorkflows(workflows []*project.WorkflowSourceConfig) Option { + return func(cfg *project.Config) { + cfg.Workflows = workflows + } +} + +// WithSchedules sets the Schedules field +// +// Schedules defines automated workflow executions managed by the SDK. +// Each schedule references a workflow by identifier and applies cron-based execution semantics. +func WithSchedules(schedules []*projectschedule.Config) Option { + return func(cfg *project.Config) { + cfg.Schedules = schedules + } +} + +// WithModels sets the Models field +// +// Models configures the LLM providers and model settings available to this project. +// $ref: schema://provider +// **Multi-Model Support**: +// - Configure multiple providers for redundancy +// - Different models for different tasks (cost/performance optimization) +// - Fallback chains for high availability +// **Supported Providers**: +// - OpenAI (GPT-4, GPT-3.5, etc.) 
+// - Anthropic (Claude models) +// - Google (Gemini models) +// - Groq (Fast inference) +// - Ollama (Local models) +// - Custom providers via API compatibility +// **Example**: +// ```yaml +// models: +// # Primary model for complex reasoning +// - provider: openai +// model: gpt-4-turbo +// api_key: "{{ .env.OPENAI_API_KEY }}" +// temperature: 0.7 +// max_tokens: 4000 +// # Fallback for cost optimization +// - provider: anthropic +// model: claude-3-haiku +// api_key: "{{ .env.ANTHROPIC_API_KEY }}" +// # Local model for sensitive data +// - provider: ollama +// model: llama2:13b +// api_url: http://localhost:11434 +// ``` +func WithModels(models []*core.ProviderConfig) Option { + return func(cfg *project.Config) { + cfg.Models = models + } +} + +// WithSchemas sets the Schemas field +// +// Schemas defines the data validation schemas used throughout the project workflows. +// **Schema Benefits**: +// - Type safety for workflow inputs/outputs +// - Early error detection and validation +// - Self-documenting data contracts +// - IDE autocomplete support +// **Example**: +// ```yaml +// schemas: +// - id: user-input +// schema: +// type: object +// properties: +// name: +// type: string +// minLength: 1 +// age: +// type: integer +// minimum: 0 +// required: ["name"] +// ``` +func WithSchemas(schemas []schema.Schema) Option { + return func(cfg *project.Config) { + cfg.Schemas = schemas + } +} + +// WithOpts sets the Opts field +// +// Opts contains project-wide configuration options for performance tuning and behavior control. +// $ref: inline:#project-options +func WithOpts(opts project.Opts) Option { + return func(cfg *project.Config) { + cfg.Opts = opts + } +} + +// WithRuntime sets the Runtime field +// +// Runtime specifies the JavaScript/TypeScript execution environment for custom tools. +// NOTE: Runtime configuration has been moved to global config (pkg/config.RuntimeConfig) +// This field is kept for backwards compatibility and project-specific overrides. 
+// $ref: schema://application#runtime +func WithRuntime(runtime project.RuntimeConfig) Option { + return func(cfg *project.Config) { + cfg.Runtime = runtime + } +} + +// WithAutoLoad sets the AutoLoad field +// +// AutoLoad configures automatic loading and reloading of project resources during development. +// $ref: inline:#autoload +// **Development Benefits**: +// - Hot-reload agents and workflows without restart +// - Automatic discovery of new resources +// - Faster iteration cycles +// - Validation on file changes +// **Example**: +// ```yaml +// autoload: +// enabled: true +// strict: true # Fail on validation errors +// watch_interval: 2s # Check for changes every 2 seconds +// include: +// - "agents/**/*.yaml" +// - "workflows/**/*.yaml" +// - "memory/**/*.yaml" +// exclude: +// - "**/*.tmp" +// - "**/*~" +// ``` +func WithAutoLoad(autoLoad *autoload.Config) Option { + return func(cfg *project.Config) { + cfg.AutoLoad = autoLoad + } +} + +// WithTools sets the Tools field +// +// Tools defines shared tool definitions available to all workflows and agents +// within this project. These tools are inherited unless explicitly overridden. 
+// **Inheritance Rules**: +// - Agent tools completely override inheritance when present +// - Workflow tools override project tools by ID +// - Tool ID collisions resolved by precedence: Agent > Workflow > Project +// **Location & autoload**: +// - Place reusable tool configuration files under the `tools/` directory (e.g., `tools/*.yaml`) +// - If autoload is enabled, files in `tools/` will be discovered and validated automatically +// **Example**: +// ```yaml +// tools: +// - id: code-analyzer +// description: Analyzes code quality and patterns +// timeout: 30s +// - id: data-processor +// description: Processes and transforms data +// ``` +func WithTools(tools []tool.Config) Option { + return func(cfg *project.Config) { + cfg.Tools = tools + } +} + +// WithEmbedders sets the Embedders field +// +// Embedders declares project-level embedding providers that can be reused across workflows. +func WithEmbedders(embedders []knowledge.EmbedderConfig) Option { + return func(cfg *project.Config) { + cfg.Embedders = embedders + } +} + +// WithVectorDBs sets the VectorDBs field +// +// VectorDBs declares project-level vector database connections that knowledge bases can reference. +func WithVectorDBs(vectorDBs []knowledge.VectorDBConfig) Option { + return func(cfg *project.Config) { + cfg.VectorDBs = vectorDBs + } +} + +// WithKnowledgeBases sets the KnowledgeBases field +// +// KnowledgeBases declares reusable knowledge base definitions scoped to the project. +func WithKnowledgeBases(knowledgeBases []knowledge.BaseConfig) Option { + return func(cfg *project.Config) { + cfg.KnowledgeBases = knowledgeBases + } +} + +// WithKnowledge sets the Knowledge field +// +// Knowledge defines the default binding for tasks or agents within the project scope (MVP single binding). 
+func WithKnowledge(knowledge []core.KnowledgeBinding) Option { + return func(cfg *project.Config) { + cfg.Knowledge = knowledge + } +} + +// WithMCPs sets the MCPs field +// +// MCPs declares project-scoped MCP server definitions accessible to workflows and agents. +func WithMCPs(mCPs []mcp.Config) Option { + return func(cfg *project.Config) { + cfg.MCPs = mCPs + } +} + +// WithMemories sets the Memories field +// +// Memories declares project-scoped memory resources that agents and tasks can reference +// by ID. These are indexed into the ResourceStore under the current project and can be +// used across workflows for conversation and state sharing. +// Example: +// memories: +// - id: conversation +// type: buffer +// persistence: +// type: in_memory +// The Resource field on memory.Config is optional in project-level definitions and will +// default to "memory" during validation. +func WithMemories(memories []*memory.Config) Option { + return func(cfg *project.Config) { + cfg.Memories = memories + } +} + +// WithMonitoringConfig sets the MonitoringConfig field +// +// MonitoringConfig enables observability and metrics collection for performance tracking. +// $ref: inline:#monitoring +func WithMonitoringConfig(monitoringConfig *monitoring.Config) Option { + return func(cfg *project.Config) { + cfg.MonitoringConfig = monitoringConfig + } +} diff --git a/sdk/runtime/README.md b/sdk/runtime/README.md new file mode 100644 index 00000000..bcc2e26c --- /dev/null +++ b/sdk/runtime/README.md @@ -0,0 +1,347 @@ +# Runtime Package + +## Overview + +The runtime package provides a clean, functional options API for configuring JavaScript/TypeScript runtime environments (Bun or Node.js) for workflow execution in Compozy. 
+ +## Installation + +```go +import runtime "github.com/compozy/compozy/sdk/v2/runtime" +``` + +## Usage + +### Basic Example - Bun Runtime + +```go +cfg, err := runtime.New(ctx, "bun", + runtime.WithEntrypointPath("./tools/main.ts"), +) +if err != nil { + log.Fatal(err) +} +``` + +### Basic Example - Node.js Runtime + +```go +cfg, err := runtime.New(ctx, "node", + runtime.WithEntrypointPath("./tools/main.js"), + runtime.WithNodeOptions([]string{"--max-old-space-size=4096"}), +) +if err != nil { + log.Fatal(err) +} +``` + +### Full Configuration + +```go +nativeTools := &engineruntime.NativeToolsConfig{ + CallAgents: true, + CallWorkflows: true, +} + +cfg, err := runtime.New(ctx, "bun", + // Required: entrypoint script path + runtime.WithEntrypointPath("./tools/main.ts"), + + // Bun-specific permissions + runtime.WithBunPermissions([]string{ + "--allow-read", + "--allow-net=api.example.com", + "--allow-env=API_KEY,API_SECRET", + }), + + // Memory and performance + runtime.WithMaxMemoryMB(1024), + runtime.WithToolExecutionTimeout(30 * time.Second), + + // Native tools integration + runtime.WithNativeTools(nativeTools), + + // Environment + runtime.WithEnvironment("production"), +) +if err != nil { + log.Fatal(err) +} +``` + +## API Reference + +### Constructor + +```go +func New(ctx context.Context, runtimeType string, opts ...Option) (*engineruntime.Config, error) +``` + +Creates a new runtime configuration with the specified runtime type and optional configuration. 
+ +**Parameters:** + +- `ctx`: Context for logging and cancellation +- `runtimeType`: Runtime type ("bun" or "node") +- `opts`: Variadic functional options + +**Returns:** + +- `*engineruntime.Config`: Deep-copied runtime configuration +- `error`: Validation errors if any + +**Supported Runtime Types:** + +- `"bun"` - Bun JavaScript runtime (engine default config; `New` still requires the type argument) +- `"node"` - Node.js runtime + +### Options + +#### Core Configuration + +**`WithEntrypointPath(path string)`** +Sets the path to the runtime entrypoint script. + +```go +runtime.WithEntrypointPath("./tools/main.ts") +``` + +**`WithEnvironment(env string)`** +Sets the deployment environment (development, staging, production). + +```go +runtime.WithEnvironment("production") +``` + +#### Bun-Specific Options + +**`WithBunPermissions(permissions []string)`** +Configures Bun permission flags for security. + +```go +runtime.WithBunPermissions([]string{ + "--allow-read", // Allow all read operations + "--allow-write", // Allow all write operations + "--allow-net", // Allow all network access + "--allow-env", // Allow all environment variables + "--allow-net=example.com", // Scoped network access + "--allow-env=API_KEY", // Scoped environment access + "--allow-all", // Allow all operations (use with caution) +}) +``` + +#### Node.js-Specific Options + +**`WithNodeOptions(options []string)`** +Configures Node.js command-line options. + +```go +runtime.WithNodeOptions([]string{ + "--max-old-space-size=4096", + "--experimental-modules", +}) +``` + +#### Performance and Limits + +**`WithMaxMemoryMB(mb int)`** +Sets the maximum memory allocation in megabytes. Default: 2048 MB (2 GB). + +```go +runtime.WithMaxMemoryMB(1024) // 1 GB limit +``` + +**`WithToolExecutionTimeout(timeout time.Duration)`** +Sets the maximum duration for tool execution. Default: 60 seconds. + +```go +runtime.WithToolExecutionTimeout(30 * time.Second) +``` + +**`WithMaxStderrCaptureSize(size int)`** +Sets the maximum stderr buffer size in bytes. 
Default: 1 MB. + +```go +runtime.WithMaxStderrCaptureSize(2 * 1024 * 1024) // 2 MB +``` + +#### Backoff Configuration + +**`WithBackoffInitialInterval(duration time.Duration)`** +Sets the initial backoff interval for retries. + +```go +runtime.WithBackoffInitialInterval(100 * time.Millisecond) +``` + +**`WithBackoffMaxInterval(duration time.Duration)`** +Sets the maximum backoff interval. + +```go +runtime.WithBackoffMaxInterval(5 * time.Second) +``` + +**`WithBackoffMaxElapsedTime(duration time.Duration)`** +Sets the maximum total elapsed time for backoff retries. + +```go +runtime.WithBackoffMaxElapsedTime(30 * time.Second) +``` + +#### Advanced Options + +**`WithNativeTools(tools *engineruntime.NativeToolsConfig)`** +Enables builtin native tools provided by the engine. + +```go +nativeTools := &engineruntime.NativeToolsConfig{ + CallAgents: true, + CallWorkflows: true, +} +runtime.WithNativeTools(nativeTools) +``` + +**`WithWorkerFilePerm(perm os.FileMode)`** +Sets file permissions for worker files. Default: 0600. + +```go +runtime.WithWorkerFilePerm(0600) // Owner read/write only +``` + +**`WithRuntimeType(runtimeType string)`** +Overrides the runtime type (generally set via constructor). + +```go +runtime.WithRuntimeType("node") +``` + +## Migration Guide + +### Before (Old SDK - Builder Pattern) + +```go +cfg, err := runtime.NewBun(). + WithEntrypoint("./tools/main.ts"). + WithBunPermissions("--allow-read", "--allow-net"). + WithToolTimeout(30 * time.Second). + WithMaxMemoryMB(512). + Build(ctx) +``` + +### After (New SDK - Functional Options) + +```go +cfg, err := runtime.New(ctx, "bun", + runtime.WithEntrypointPath("./tools/main.ts"), + runtime.WithBunPermissions([]string{"--allow-read", "--allow-net"}), + runtime.WithToolExecutionTimeout(30 * time.Second), + runtime.WithMaxMemoryMB(512), +) +``` + +### Key Changes + +1. **Context First**: `ctx` is now the first parameter instead of passed to `Build()` +2. 
**Runtime Type**: Specified in constructor instead of separate `NewBun()` function +3. **No Build() Call**: Configuration is validated and created immediately +4. **Slice Arguments**: Collections like permissions take slices instead of variadic args +5. **Renamed Methods**: Some methods renamed for clarity (e.g., `WithToolTimeout` → `WithToolExecutionTimeout`) + +## Validation Rules + +The constructor validates: + +1. **Runtime Type**: Must be "bun" or "node" (case-insensitive) +2. **Context**: Cannot be nil +3. **Entrypoint Path**: Trimmed of whitespace (empty is allowed) + +All other fields use engine defaults and are validated at runtime. + +## Error Handling + +The constructor returns a `*sdkerrors.BuildError` containing all validation errors: + +```go +cfg, err := runtime.New(ctx, "invalid", + runtime.WithEntrypointPath(""), +) +if err != nil { + var buildErr *sdkerrors.BuildError + if errors.As(err, &buildErr) { + for _, e := range buildErr.Errors { + log.Printf("Validation error: %v", e) + } + } +} +``` + +## Examples + +### Development Environment + +```go +cfg, err := runtime.New(ctx, "bun", + runtime.WithEntrypointPath("./dev/tools.ts"), + runtime.WithEnvironment("development"), + runtime.WithMaxMemoryMB(512), +) +``` + +### Production Environment with Security + +```go +cfg, err := runtime.New(ctx, "bun", + runtime.WithEntrypointPath("./dist/tools.js"), + runtime.WithBunPermissions([]string{ + "--allow-net=api.production.com", + "--allow-env=API_KEY,DB_URL", + "--allow-read", + }), + runtime.WithEnvironment("production"), + runtime.WithMaxMemoryMB(2048), + runtime.WithToolExecutionTimeout(60 * time.Second), +) +``` + +### Node.js Runtime + +```go +cfg, err := runtime.New(ctx, "node", + runtime.WithEntrypointPath("./tools/index.js"), + runtime.WithNodeOptions([]string{ + "--max-old-space-size=4096", + "--enable-source-maps", + }), + runtime.WithMaxMemoryMB(4096), +) +``` + +## Testing + +Use the test config for faster tests: + +```go +func TestMyFeature(t 
*testing.T) { + cfg, err := runtime.New(t.Context(), "bun", + runtime.WithEntrypointPath("./test/tools.ts"), + runtime.WithMaxMemoryMB(256), + runtime.WithToolExecutionTimeout(5 * time.Second), + ) + require.NoError(t, err) + // ... test with cfg +} +``` + +## Best Practices + +1. **Security**: Always use minimal Bun permissions required for your use case +2. **Memory**: Set appropriate memory limits based on your tool requirements +3. **Timeouts**: Configure realistic timeouts to prevent hanging operations +4. **Environment**: Use different configurations for dev/staging/production +5. **Deep Copy**: The returned config is deep-copied, preventing accidental mutations + +## Performance + +- Constructor execution: < 1µs for minimal config +- Memory allocation: < 5KB per constructor call +- No performance regression vs. builder pattern diff --git a/sdk/runtime/constructor.go b/sdk/runtime/constructor.go new file mode 100644 index 00000000..72b86292 --- /dev/null +++ b/sdk/runtime/constructor.go @@ -0,0 +1,71 @@ +package runtime + +import ( + "context" + "fmt" + "strings" + + "github.com/compozy/compozy/engine/core" + engineruntime "github.com/compozy/compozy/engine/runtime" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +// New creates a runtime configuration using functional options +func New(ctx context.Context, runtimeType string, opts ...Option) (*engineruntime.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating runtime configuration", "runtime_type", runtimeType) + normalizedType := strings.ToLower(strings.TrimSpace(runtimeType)) + cfg := engineruntime.DefaultConfig() + cfg.RuntimeType = normalizedType + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0, 3) + if err := validateRuntimeType(ctx, cfg); err != nil { + collected = 
append(collected, err) + } + if err := validateEntrypoint(ctx, cfg); err != nil { + collected = append(collected, err) + } + if len(collected) > 0 { + return nil, &sdkerrors.BuildError{Errors: collected} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone runtime config: %w", err) + } + return cloned, nil +} + +func validateRuntimeType(ctx context.Context, cfg *engineruntime.Config) error { + runtimeType := strings.ToLower(strings.TrimSpace(cfg.RuntimeType)) + if err := validate.NonEmpty(ctx, "runtime type", runtimeType); err != nil { + return err + } + if !engineruntime.IsValidRuntimeType(runtimeType) { + return fmt.Errorf( + "runtime type %q is not supported; must be one of %v", + runtimeType, + engineruntime.SupportedRuntimeTypes, + ) + } + cfg.RuntimeType = runtimeType + return nil +} + +func validateEntrypoint(ctx context.Context, cfg *engineruntime.Config) error { + entrypoint := strings.TrimSpace(cfg.EntrypointPath) + cfg.EntrypointPath = entrypoint + if entrypoint == "" { + return nil + } + if err := validate.NonEmpty(ctx, "entrypoint path", entrypoint); err != nil { + return err + } + return nil +} diff --git a/sdk/runtime/constructor_test.go b/sdk/runtime/constructor_test.go new file mode 100644 index 00000000..2d006f00 --- /dev/null +++ b/sdk/runtime/constructor_test.go @@ -0,0 +1,190 @@ +package runtime + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + engineruntime "github.com/compozy/compozy/engine/runtime" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" +) + +func TestNew_MinimalConfig(t *testing.T) { + t.Run("Should create runtime config with bun type", func(t *testing.T) { + cfg, err := New(t.Context(), "bun") + require.NoError(t, err) + require.NotNil(t, cfg) + assert.Equal(t, engineruntime.RuntimeTypeBun, cfg.RuntimeType) + }) + + t.Run("Should create runtime config with node type", func(t *testing.T) { + cfg, err 
:= New(t.Context(), "node") + require.NoError(t, err) + require.NotNil(t, cfg) + assert.Equal(t, engineruntime.RuntimeTypeNode, cfg.RuntimeType) + }) + + t.Run("Should normalize runtime type to lowercase", func(t *testing.T) { + cfg, err := New(t.Context(), "BUN") + require.NoError(t, err) + assert.Equal(t, engineruntime.RuntimeTypeBun, cfg.RuntimeType) + }) + + t.Run("Should trim whitespace from runtime type", func(t *testing.T) { + cfg, err := New(t.Context(), " node ") + require.NoError(t, err) + assert.Equal(t, engineruntime.RuntimeTypeNode, cfg.RuntimeType) + }) +} + +func TestNew_FullConfig(t *testing.T) { + t.Run("Should apply all options correctly", func(t *testing.T) { + nativeTools := &engineruntime.NativeToolsConfig{ + CallAgents: true, + CallWorkflows: true, + } + cfg, err := New(t.Context(), "bun", + WithEntrypointPath("./tools/main.ts"), + WithBunPermissions([]string{"--allow-read", "--allow-net"}), + WithMaxMemoryMB(512), + WithNativeTools(nativeTools), + WithEnvironment("production"), + ) + require.NoError(t, err) + require.NotNil(t, cfg) + assert.Equal(t, engineruntime.RuntimeTypeBun, cfg.RuntimeType) + assert.Equal(t, "./tools/main.ts", cfg.EntrypointPath) + assert.Equal(t, []string{"--allow-read", "--allow-net"}, cfg.BunPermissions) + assert.Equal(t, 512, cfg.MaxMemoryMB) + assert.Equal(t, "production", cfg.Environment) + assert.NotNil(t, cfg.NativeTools) + assert.True(t, cfg.NativeTools.CallAgents) + assert.True(t, cfg.NativeTools.CallWorkflows) + }) +} + +func TestNew_ValidationErrors(t *testing.T) { + t.Run("Should fail with nil context", func(t *testing.T) { + var nilCtx context.Context + cfg, err := New(nilCtx, "bun") + require.Error(t, err) + assert.Nil(t, cfg) + assert.Contains(t, err.Error(), "context is required") + }) + + t.Run("Should fail with empty runtime type", func(t *testing.T) { + cfg, err := New(t.Context(), "") + require.Error(t, err) + assert.Nil(t, cfg) + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, 
&buildErr) + assert.Contains(t, err.Error(), "runtime type") + }) + + t.Run("Should fail with invalid runtime type", func(t *testing.T) { + cfg, err := New(t.Context(), "python") + require.Error(t, err) + assert.Nil(t, cfg) + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + assert.Contains(t, err.Error(), "not supported") + }) + + t.Run("Should fail with whitespace-only runtime type", func(t *testing.T) { + cfg, err := New(t.Context(), " ") + require.Error(t, err) + assert.Nil(t, cfg) + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + }) +} + +func TestNew_DeepCopy(t *testing.T) { + t.Run("Should return independent copy", func(t *testing.T) { + permissions := []string{"--allow-read"} + cfg1, err := New(t.Context(), "bun", + WithBunPermissions(permissions), + ) + require.NoError(t, err) + + cfg2, err := New(t.Context(), "bun", + WithBunPermissions(permissions), + ) + require.NoError(t, err) + + cfg1.BunPermissions[0] = "--allow-write" + + assert.Equal(t, "--allow-read", cfg2.BunPermissions[0]) + assert.NotEqual(t, cfg1.BunPermissions[0], cfg2.BunPermissions[0]) + }) + + t.Run("Should deep copy native tools config", func(t *testing.T) { + tools := &engineruntime.NativeToolsConfig{ + CallAgents: true, + } + cfg, err := New(t.Context(), "bun", + WithNativeTools(tools), + ) + require.NoError(t, err) + + tools.CallAgents = false + + assert.True(t, cfg.NativeTools.CallAgents) + }) +} + +func TestNew_EntrypointValidation(t *testing.T) { + t.Run("Should allow empty entrypoint", func(t *testing.T) { + cfg, err := New(t.Context(), "bun") + require.NoError(t, err) + assert.Empty(t, cfg.EntrypointPath) + }) + + t.Run("Should trim entrypoint whitespace", func(t *testing.T) { + cfg, err := New(t.Context(), "bun", + WithEntrypointPath(" ./main.ts "), + ) + require.NoError(t, err) + assert.Equal(t, "./main.ts", cfg.EntrypointPath) + }) + + t.Run("Should accept valid entrypoint path", func(t *testing.T) { + cfg, err := 
New(t.Context(), "bun", + WithEntrypointPath("./tools/runtime.ts"), + ) + require.NoError(t, err) + assert.Equal(t, "./tools/runtime.ts", cfg.EntrypointPath) + }) +} + +func TestNew_DefaultValues(t *testing.T) { + t.Run("Should apply default config values", func(t *testing.T) { + cfg, err := New(t.Context(), "bun") + require.NoError(t, err) + require.NotNil(t, cfg) + + defaultCfg := engineruntime.DefaultConfig() + assert.Equal(t, defaultCfg.BackoffInitialInterval, cfg.BackoffInitialInterval) + assert.Equal(t, defaultCfg.BackoffMaxInterval, cfg.BackoffMaxInterval) + assert.Equal(t, defaultCfg.BackoffMaxElapsedTime, cfg.BackoffMaxElapsedTime) + assert.Equal(t, defaultCfg.ToolExecutionTimeout, cfg.ToolExecutionTimeout) + assert.Equal(t, defaultCfg.MaxMemoryMB, cfg.MaxMemoryMB) + assert.Equal(t, defaultCfg.MaxStderrCaptureSize, cfg.MaxStderrCaptureSize) + }) +} + +func TestNew_NodeRuntime(t *testing.T) { + t.Run("Should create node runtime config", func(t *testing.T) { + cfg, err := New(t.Context(), "node", + WithNodeOptions([]string{"--max-old-space-size=4096"}), + WithEntrypointPath("./main.js"), + ) + require.NoError(t, err) + require.NotNil(t, cfg) + assert.Equal(t, engineruntime.RuntimeTypeNode, cfg.RuntimeType) + assert.Equal(t, []string{"--max-old-space-size=4096"}, cfg.NodeOptions) + assert.Equal(t, "./main.js", cfg.EntrypointPath) + }) +} diff --git a/sdk/runtime/generate.go b/sdk/runtime/generate.go new file mode 100644 index 00000000..3b5adb75 --- /dev/null +++ b/sdk/runtime/generate.go @@ -0,0 +1,3 @@ +package runtime + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/runtime/config.go -struct Config -output options_generated.go -package runtime diff --git a/sdk/runtime/options_generated.go b/sdk/runtime/options_generated.go new file mode 100644 index 00000000..b723d29c --- /dev/null +++ b/sdk/runtime/options_generated.go @@ -0,0 +1,109 @@ +// Code generated by optionsgen. DO NOT EDIT. 
+ +package runtime + +import ( + "os" + "time" + + runtime "github.com/compozy/compozy/engine/runtime" +) + +type Option func(*runtime.Config) + +// WithBackoffInitialInterval sets the BackoffInitialInterval field +func WithBackoffInitialInterval(backoffInitialInterval time.Duration) Option { + return func(cfg *runtime.Config) { + cfg.BackoffInitialInterval = backoffInitialInterval + } +} + +// WithBackoffMaxInterval sets the BackoffMaxInterval field +func WithBackoffMaxInterval(backoffMaxInterval time.Duration) Option { + return func(cfg *runtime.Config) { + cfg.BackoffMaxInterval = backoffMaxInterval + } +} + +// WithBackoffMaxElapsedTime sets the BackoffMaxElapsedTime field +func WithBackoffMaxElapsedTime(backoffMaxElapsedTime time.Duration) Option { + return func(cfg *runtime.Config) { + cfg.BackoffMaxElapsedTime = backoffMaxElapsedTime + } +} + +// WithWorkerFilePerm sets the WorkerFilePerm field +func WithWorkerFilePerm(workerFilePerm os.FileMode) Option { + return func(cfg *runtime.Config) { + cfg.WorkerFilePerm = workerFilePerm + } +} + +// WithToolExecutionTimeout sets the ToolExecutionTimeout field +func WithToolExecutionTimeout(toolExecutionTimeout time.Duration) Option { + return func(cfg *runtime.Config) { + cfg.ToolExecutionTimeout = toolExecutionTimeout + } +} + +// WithRuntimeType sets the RuntimeType field +// +// Runtime selection fields +func WithRuntimeType(runtimeType string) Option { + return func(cfg *runtime.Config) { + cfg.RuntimeType = runtimeType + } +} + +// WithEntrypointPath sets the EntrypointPath field +func WithEntrypointPath(entrypointPath string) Option { + return func(cfg *runtime.Config) { + cfg.EntrypointPath = entrypointPath + } +} + +// WithBunPermissions sets the BunPermissions field +func WithBunPermissions(bunPermissions []string) Option { + return func(cfg *runtime.Config) { + cfg.BunPermissions = bunPermissions + } +} + +// WithNodeOptions sets the NodeOptions field +func WithNodeOptions(nodeOptions []string) Option { + 
return func(cfg *runtime.Config) { + cfg.NodeOptions = nodeOptions + } +} + +// WithNativeTools sets the NativeTools field +func WithNativeTools(nativeTools *runtime.NativeToolsConfig) Option { + return func(cfg *runtime.Config) { + cfg.NativeTools = nativeTools + } +} + +// WithEnvironment sets the Environment field +// +// Application config integration fields +func WithEnvironment(environment string) Option { + return func(cfg *runtime.Config) { + cfg.Environment = environment + } +} + +// WithMaxMemoryMB sets the MaxMemoryMB field +// +// Memory management +func WithMaxMemoryMB(maxMemoryMB int) Option { + return func(cfg *runtime.Config) { + cfg.MaxMemoryMB = maxMemoryMB + } +} + +// WithMaxStderrCaptureSize sets the MaxStderrCaptureSize field +func WithMaxStderrCaptureSize(maxStderrCaptureSize int) Option { + return func(cfg *runtime.Config) { + cfg.MaxStderrCaptureSize = maxStderrCaptureSize + } +} diff --git a/sdk/schedule/README.md b/sdk/schedule/README.md new file mode 100644 index 00000000..1ffdcbb4 --- /dev/null +++ b/sdk/schedule/README.md @@ -0,0 +1,300 @@ +# Schedule Package + +The `schedule` package provides a functional options API for creating workflow schedule configurations in Compozy. Schedules define when workflows should be automatically triggered based on cron expressions. 
+ +## Features + +- **Functional Options Pattern**: Clean, extensible API using functional options +- **Auto-Generated Options**: Options are automatically generated from engine structs +- **Comprehensive Validation**: Validates cron expressions, timezones, and retry policies +- **Type Safety**: Compile-time safety with full Go type checking +- **Deep Copy**: Returns immutable configuration copies + +## Installation + +```go +import ( + "github.com/compozy/compozy/sdk/schedule" + engineschedule "github.com/compozy/compozy/engine/project/schedule" +) +``` + +## Basic Usage + +### Minimal Schedule + +```go +cfg, err := schedule.New(ctx, "daily-report", + schedule.WithWorkflowID("generate-report"), + schedule.WithCron("0 9 * * *"), // Daily at 9 AM +) +if err != nil { + log.Fatal(err) +} +``` + +### Schedule with Timezone + +```go +cfg, err := schedule.New(ctx, "ny-morning-task", + schedule.WithWorkflowID("morning-workflow"), + schedule.WithCron("0 8 * * 1-5"), // Weekdays at 8 AM + schedule.WithTimezone("America/New_York"), +) +``` + +### Schedule with Retry Policy + +```go +cfg, err := schedule.New(ctx, "critical-task", + schedule.WithWorkflowID("critical-workflow"), + schedule.WithCron("0 */4 * * *"), // Every 4 hours + schedule.WithRetry(&engineschedule.RetryPolicy{ + MaxAttempts: 3, + Backoff: time.Minute * 5, + }), +) +``` + +### Complete Configuration + +```go +enabled := true +cfg, err := schedule.New(ctx, "full-schedule", + schedule.WithWorkflowID("data-sync"), + schedule.WithCron("0 2 * * *"), // Daily at 2 AM + schedule.WithTimezone("UTC"), + schedule.WithInput(map[string]any{ + "source": "production", + "target": "warehouse", + }), + schedule.WithRetry(&engineschedule.RetryPolicy{ + MaxAttempts: 5, + Backoff: time.Minute * 10, + }), + schedule.WithEnabled(&enabled), + schedule.WithDescription("Daily data synchronization"), +) +``` + +## Available Options + +### WithWorkflowID(workflowID string) + +Sets the workflow identifier that will be executed when the 
schedule triggers. + +**Required**: Yes + +```go +schedule.WithWorkflowID("my-workflow") +``` + +### WithCron(cron string) + +Sets the cron expression that determines when the schedule triggers. Uses standard 5-field cron format. + +**Required**: Yes +**Format**: `minute hour day month weekday` + +```go +schedule.WithCron("0 9 * * *") // Daily at 9 AM +schedule.WithCron("*/15 * * * *") // Every 15 minutes +schedule.WithCron("0 0 * * 0") // Weekly on Sunday at midnight +schedule.WithCron("0 9-17 * * 1-5") // Weekdays 9 AM to 5 PM +``` + +### WithTimezone(timezone string) + +Sets the IANA timezone name used when evaluating the cron expression. + +**Optional**: Defaults to server timezone if not specified +**Format**: IANA timezone (e.g., "America/New_York", "Europe/London", "Asia/Tokyo") + +```go +schedule.WithTimezone("America/New_York") +schedule.WithTimezone("Europe/London") +schedule.WithTimezone("UTC") +``` + +### WithInput(input map[string]any) + +Provides default input values supplied to the workflow when triggered. + +**Optional** + +```go +schedule.WithInput(map[string]any{ + "environment": "production", + "batchSize": 1000, +}) +``` + +### WithRetry(retry \*engineschedule.RetryPolicy) + +Configures retry behavior for failed scheduled executions. + +**Optional** +**MaxAttempts**: 1-100 +**Backoff**: Positive duration + +```go +schedule.WithRetry(&engineschedule.RetryPolicy{ + MaxAttempts: 3, + Backoff: time.Minute * 5, +}) +``` + +### WithEnabled(enabled \*bool) + +Toggles whether the schedule is active. Use a pointer to distinguish between explicitly disabled and unset. + +**Optional** + +```go +enabled := true +schedule.WithEnabled(&enabled) + +disabled := false +schedule.WithEnabled(&disabled) +``` + +### WithDescription(description string) + +Sets a human-readable description explaining the schedule's purpose. 
+ +**Optional** + +```go +schedule.WithDescription("Daily report generation for finance team") +``` + +## Validation Rules + +### Schedule ID + +- Required +- Must contain only letters, numbers, or hyphens +- Automatically trimmed of whitespace + +### Workflow ID + +- Required +- Must contain only letters, numbers, or hyphens +- Automatically trimmed of whitespace + +### Cron Expression + +- Required +- Must be valid 5-field cron expression +- Validated using robfig/cron/v3 standard parser +- Examples: + - `0 * * * *` - Every hour + - `*/15 * * * *` - Every 15 minutes + - `0 9-17 * * 1-5` - Weekdays 9 AM to 5 PM + +### Timezone + +- Optional (empty string allowed) +- Must be valid IANA timezone if specified +- Validated using time.LoadLocation() +- Common timezones: UTC, America/New_York, Europe/London, Asia/Tokyo + +### Retry Policy + +- Optional (nil allowed) +- MaxAttempts: Must be between 1 and 100 +- Backoff: Must be positive duration + +## Error Handling + +The constructor returns `*sdkerrors.BuildError` which aggregates all validation errors: + +```go +cfg, err := schedule.New(ctx, "", + schedule.WithWorkflowID("bad id"), // Invalid: contains space + schedule.WithCron("invalid"), // Invalid: bad cron syntax + schedule.WithTimezone("Bad/TZ"), // Invalid: unknown timezone +) +if err != nil { + var buildErr *sdkerrors.BuildError + if errors.As(err, &buildErr) { + for _, e := range buildErr.Errors { + log.Printf("Validation error: %v", e) + } + } +} +``` + +## Cron Expression Examples + +```go +// Minute-based +"*/5 * * * *" // Every 5 minutes +"0,30 * * * *" // Every hour at :00 and :30 + +// Hour-based +"0 * * * *" // Every hour +"0 */4 * * *" // Every 4 hours +"0 9-17 * * *" // Every hour from 9 AM to 5 PM + +// Daily +"0 9 * * *" // Daily at 9 AM +"0 0 * * *" // Daily at midnight + +// Weekly +"0 9 * * 1" // Every Monday at 9 AM +"0 0 * * 0" // Every Sunday at midnight +"0 9 * * 1-5" // Weekdays at 9 AM + +// Monthly +"0 0 1 * *" // First day of month at 
midnight +"0 9 15 * *" // 15th of each month at 9 AM +``` + +## Code Generation + +This package uses code generation to maintain options in sync with engine structs: + +```bash +cd sdk/schedule +go generate +``` + +This regenerates `options_generated.go` from `engine/project/schedule/config.go`. + +## Testing + +Run tests with: + +```bash +gotestsum --format pkgname -- -race -parallel=4 ./sdk/schedule +``` + +## Migration from Builder Pattern + +**Old (Builder Pattern):** + +```go +cfg, err := schedule.New("daily-task"). + WithWorkflowID("my-workflow"). + WithCron("0 9 * * *"). + WithTimezone("UTC"). + Build(ctx) +``` + +**New (Functional Options):** + +```go +cfg, err := schedule.New(ctx, "daily-task", + schedule.WithWorkflowID("my-workflow"), + schedule.WithCron("0 9 * * *"), + schedule.WithTimezone("UTC"), +) +``` + +**Key Changes:** + +- `ctx` is now first parameter +- No `.Build(ctx)` call needed +- Options use `schedule.WithX()` prefix +- Validation happens immediately in constructor diff --git a/sdk/schedule/constructor.go b/sdk/schedule/constructor.go new file mode 100644 index 00000000..e684cf82 --- /dev/null +++ b/sdk/schedule/constructor.go @@ -0,0 +1,115 @@ +package schedule + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/compozy/compozy/engine/core" + engineschedule "github.com/compozy/compozy/engine/project/schedule" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +// New creates a schedule configuration using functional options +func New(ctx context.Context, id string, opts ...Option) (*engineschedule.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating schedule configuration", "schedule", id) + cfg := &engineschedule.Config{ + ID: strings.TrimSpace(id), + } + for _, opt := range opts { + opt(cfg) + } + if err := 
validateScheduleConfig(ctx, cfg); err != nil { + return nil, err + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone schedule config: %w", err) + } + return cloned, nil +} + +func validateScheduleConfig(ctx context.Context, cfg *engineschedule.Config) error { + collected := make([]error, 0, 8) + cfg.ID = strings.TrimSpace(cfg.ID) + if err := validate.ID(ctx, cfg.ID); err != nil { + collected = append(collected, fmt.Errorf("schedule id is invalid: %w", err)) + } + if err := validateWorkflowID(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateCronExpression(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateTimezone(cfg); err != nil { + collected = append(collected, err) + } + if err := validateRetry(ctx, cfg); err != nil { + collected = append(collected, err) + } + cfg.Description = strings.TrimSpace(cfg.Description) + return buildValidationError(collected) +} + +func validateWorkflowID(ctx context.Context, cfg *engineschedule.Config) error { + cfg.WorkflowID = strings.TrimSpace(cfg.WorkflowID) + if err := validate.NonEmpty(ctx, "workflow id", cfg.WorkflowID); err != nil { + return err + } + if err := validate.ID(ctx, cfg.WorkflowID); err != nil { + return fmt.Errorf("workflow id is invalid: %w", err) + } + return nil +} + +func validateCronExpression(ctx context.Context, cfg *engineschedule.Config) error { + cfg.Cron = strings.TrimSpace(cfg.Cron) + return validate.Cron(ctx, cfg.Cron) +} + +func validateTimezone(cfg *engineschedule.Config) error { + if cfg.Timezone == "" { + return nil + } + cfg.Timezone = strings.TrimSpace(cfg.Timezone) + if _, err := time.LoadLocation(cfg.Timezone); err != nil { + return fmt.Errorf("timezone is invalid: %w", err) + } + return nil +} + +func validateRetry(ctx context.Context, cfg *engineschedule.Config) error { + if cfg.Retry == nil { + return nil + } + if cfg.Retry.MaxAttempts <= 0 { + return fmt.Errorf("retry max 
attempts must be positive: got %d", cfg.Retry.MaxAttempts) + } + if cfg.Retry.MaxAttempts > 100 { + return fmt.Errorf("retry max attempts must not exceed 100: got %d", cfg.Retry.MaxAttempts) + } + if err := validate.Duration(ctx, cfg.Retry.Backoff); err != nil { + return fmt.Errorf("retry backoff %w", err) + } + return nil +} + +func buildValidationError(collected []error) error { + filtered := make([]error, 0, len(collected)) + for _, err := range collected { + if err != nil { + filtered = append(filtered, err) + } + } + if len(filtered) > 0 { + return &sdkerrors.BuildError{Errors: filtered} + } + return nil +} diff --git a/sdk/schedule/constructor_test.go b/sdk/schedule/constructor_test.go new file mode 100644 index 00000000..42d9053e --- /dev/null +++ b/sdk/schedule/constructor_test.go @@ -0,0 +1,450 @@ +package schedule + +import ( + "context" + "errors" + "testing" + "time" + + engineschedule "github.com/compozy/compozy/engine/project/schedule" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" +) + +func TestNew(t *testing.T) { + t.Run("Should create schedule with minimal configuration", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test-schedule", + WithWorkflowID("test-workflow"), + WithCron("0 * * * *"), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg == nil { + t.Fatal("expected config, got nil") + } + if cfg.ID != "test-schedule" { + t.Errorf("expected ID 'test-schedule', got '%s'", cfg.ID) + } + if cfg.WorkflowID != "test-workflow" { + t.Errorf("expected workflow ID 'test-workflow', got '%s'", cfg.WorkflowID) + } + if cfg.Cron != "0 * * * *" { + t.Errorf("expected cron '0 * * * *', got '%s'", cfg.Cron) + } + }) + t.Run("Should trim whitespace from ID", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, " test-schedule ", + WithWorkflowID("test-workflow"), + WithCron("0 * * * *"), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ID != 
"test-schedule" { + t.Errorf("expected trimmed ID 'test-schedule', got '%s'", cfg.ID) + } + }) + t.Run("Should fail when context is nil", func(t *testing.T) { + var nilCtx context.Context + _, err := New(nilCtx, "test-schedule", + WithWorkflowID("test-workflow"), + WithCron("0 * * * *"), + ) + if err == nil { + t.Fatal("expected error for nil context") + } + if err.Error() != "context is required" { + t.Errorf("unexpected error message: %v", err) + } + }) + t.Run("Should fail when ID is empty", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "", + WithWorkflowID("test-workflow"), + WithCron("0 * * * *"), + ) + if err == nil { + t.Fatal("expected error for empty ID") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should fail when workflow ID is empty", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test-schedule", + WithCron("0 * * * *"), + ) + if err == nil { + t.Fatal("expected error for empty workflow ID") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should fail when cron expression is empty", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test-schedule", + WithWorkflowID("test-workflow"), + ) + if err == nil { + t.Fatal("expected error for empty cron expression") + } + var buildErr *sdkerrors.BuildError + if !errors.As(err, &buildErr) { + t.Errorf("expected BuildError, got %T", err) + } + }) + t.Run("Should create schedule with all options", func(t *testing.T) { + ctx := context.Background() + enabled := true + cfg, err := New(ctx, "full-schedule", + WithWorkflowID("test-workflow"), + WithCron("0 0 * * *"), + WithTimezone("America/New_York"), + WithInput(map[string]any{"key": "value"}), + WithRetry(&engineschedule.RetryPolicy{ + MaxAttempts: 3, + Backoff: time.Minute, + }), + WithEnabled(&enabled), + 
WithDescription("Test schedule"), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Timezone != "America/New_York" { + t.Errorf("expected timezone 'America/New_York', got '%s'", cfg.Timezone) + } + if cfg.Input == nil || cfg.Input["key"] != "value" { + t.Error("expected input to be set") + } + if cfg.Retry == nil || cfg.Retry.MaxAttempts != 3 { + t.Error("expected retry policy to be set") + } + if cfg.Enabled == nil || !*cfg.Enabled { + t.Error("expected enabled to be true") + } + if cfg.Description != "Test schedule" { + t.Errorf("expected description 'Test schedule', got '%s'", cfg.Description) + } + }) + t.Run("Should create deep copy of configuration", func(t *testing.T) { + ctx := context.Background() + cfg1, err := New(ctx, "test-schedule", + WithWorkflowID("test-workflow"), + WithCron("0 * * * *"), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + cfg2 := &engineschedule.Config{} + *cfg2 = *cfg1 + cfg2.WorkflowID = "modified" + if cfg1.WorkflowID == "modified" { + t.Error("configuration was not deep copied") + } + }) +} + +func TestCronValidation(t *testing.T) { + t.Run("Should accept valid standard cron expression", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + ) + if err != nil { + t.Errorf("expected valid cron, got error: %v", err) + } + }) + t.Run("Should accept cron with multiple values", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 0,12 * * *"), + ) + if err != nil { + t.Errorf("expected valid cron, got error: %v", err) + } + }) + t.Run("Should accept cron with ranges", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 9-17 * * 1-5"), + ) + if err != nil { + t.Errorf("expected valid cron, got error: %v", err) + } + }) + t.Run("Should fail with invalid cron expression", func(t *testing.T) 
{ + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("invalid"), + ) + if err == nil { + t.Fatal("expected error for invalid cron") + } + }) + t.Run("Should fail with too many fields", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 0 0 0 0 0"), + ) + if err == nil { + t.Fatal("expected error for invalid cron format") + } + }) +} + +func TestTimezoneValidation(t *testing.T) { + t.Run("Should accept UTC timezone", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + WithTimezone("UTC"), + ) + if err != nil { + t.Errorf("expected valid timezone, got error: %v", err) + } + if cfg.Timezone != "UTC" { + t.Errorf("expected timezone 'UTC', got '%s'", cfg.Timezone) + } + }) + t.Run("Should accept America/New_York timezone", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + WithTimezone("America/New_York"), + ) + if err != nil { + t.Errorf("expected valid timezone, got error: %v", err) + } + if cfg.Timezone != "America/New_York" { + t.Errorf("expected timezone 'America/New_York', got '%s'", cfg.Timezone) + } + }) + t.Run("Should accept Europe/London timezone", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + WithTimezone("Europe/London"), + ) + if err != nil { + t.Errorf("expected valid timezone, got error: %v", err) + } + if cfg.Timezone != "Europe/London" { + t.Errorf("expected timezone 'Europe/London', got '%s'", cfg.Timezone) + } + }) + t.Run("Should accept Asia/Tokyo timezone", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + WithTimezone("Asia/Tokyo"), + ) + if err != nil { + t.Errorf("expected valid timezone, got error: %v", err) + } 
+ }) + t.Run("Should fail with invalid timezone", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + WithTimezone("Invalid/Timezone"), + ) + if err == nil { + t.Fatal("expected error for invalid timezone") + } + }) + t.Run("Should allow empty timezone", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + ) + if err != nil { + t.Errorf("expected success with empty timezone, got error: %v", err) + } + if cfg.Timezone != "" { + t.Errorf("expected empty timezone, got '%s'", cfg.Timezone) + } + }) +} + +func TestRetryValidation(t *testing.T) { + t.Run("Should accept valid retry policy", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + WithRetry(&engineschedule.RetryPolicy{ + MaxAttempts: 3, + Backoff: time.Minute, + }), + ) + if err != nil { + t.Errorf("expected valid retry, got error: %v", err) + } + if cfg.Retry == nil { + t.Fatal("expected retry policy to be set") + } + if cfg.Retry.MaxAttempts != 3 { + t.Errorf("expected max attempts 3, got %d", cfg.Retry.MaxAttempts) + } + }) + t.Run("Should fail with negative max attempts", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + WithRetry(&engineschedule.RetryPolicy{ + MaxAttempts: -1, + Backoff: time.Minute, + }), + ) + if err == nil { + t.Fatal("expected error for negative max attempts") + } + }) + t.Run("Should fail with zero max attempts", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + WithRetry(&engineschedule.RetryPolicy{ + MaxAttempts: 0, + Backoff: time.Minute, + }), + ) + if err == nil { + t.Fatal("expected error for zero max attempts") + } + }) + t.Run("Should fail with max attempts greater than 100", func(t 
*testing.T) { + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + WithRetry(&engineschedule.RetryPolicy{ + MaxAttempts: 101, + Backoff: time.Minute, + }), + ) + if err == nil { + t.Fatal("expected error for max attempts > 100") + } + }) + t.Run("Should fail with negative backoff", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + WithRetry(&engineschedule.RetryPolicy{ + MaxAttempts: 3, + Backoff: -time.Minute, + }), + ) + if err == nil { + t.Fatal("expected error for negative backoff") + } + }) + t.Run("Should fail with zero backoff", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + WithRetry(&engineschedule.RetryPolicy{ + MaxAttempts: 3, + Backoff: 0, + }), + ) + if err == nil { + t.Fatal("expected error for zero backoff") + } + }) + t.Run("Should allow nil retry policy", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + ) + if err != nil { + t.Errorf("expected success with nil retry, got error: %v", err) + } + if cfg.Retry != nil { + t.Error("expected nil retry policy") + } + }) +} + +func TestWorkflowIDValidation(t *testing.T) { + t.Run("Should trim whitespace from workflow ID", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test", + WithWorkflowID(" test-workflow "), + WithCron("0 * * * *"), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.WorkflowID != "test-workflow" { + t.Errorf("expected trimmed workflow ID, got '%s'", cfg.WorkflowID) + } + }) + t.Run("Should fail with invalid workflow ID characters", func(t *testing.T) { + ctx := context.Background() + _, err := New(ctx, "test", + WithWorkflowID("bad workflow"), + WithCron("0 * * * *"), + ) + if err == nil { + t.Fatal("expected error for invalid workflow 
ID") + } + }) +} + +func TestInputHandling(t *testing.T) { + t.Run("Should accept input map", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + WithInput(map[string]any{"key": "value"}), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Input == nil { + t.Fatal("expected input to be set") + } + if cfg.Input["key"] != "value" { + t.Error("expected input value to match") + } + }) + t.Run("Should accept nil input", func(t *testing.T) { + ctx := context.Background() + cfg, err := New(ctx, "test", + WithWorkflowID("wf"), + WithCron("0 * * * *"), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Input != nil { + t.Error("expected nil input") + } + }) +} diff --git a/sdk/schedule/generate.go b/sdk/schedule/generate.go new file mode 100644 index 00000000..9d0a7b1e --- /dev/null +++ b/sdk/schedule/generate.go @@ -0,0 +1,3 @@ +package schedule + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/project/schedule/config.go -struct Config -output options_generated.go diff --git a/sdk/schedule/options_generated.go b/sdk/schedule/options_generated.go new file mode 100644 index 00000000..7279a769 --- /dev/null +++ b/sdk/schedule/options_generated.go @@ -0,0 +1,79 @@ +// Code generated by optionsgen. DO NOT EDIT. + +package schedule + +import schedule "github.com/compozy/compozy/engine/project/schedule" + +type Option func(*schedule.Config) + +// WithID sets the ID field +// +// ID uniquely identifies the schedule within the project. +func WithID(id string) Option { + return func(cfg *schedule.Config) { + cfg.ID = id + } +} + +// WithWorkflowID sets the WorkflowID field +// +// WorkflowID references the workflow that should be executed when the schedule fires. 
+func WithWorkflowID(workflowID string) Option { + return func(cfg *schedule.Config) { + cfg.WorkflowID = workflowID + } +} + +// WithCron sets the Cron field +// +// Cron is the cron expression that determines when the schedule triggers. +func WithCron(cron string) Option { + return func(cfg *schedule.Config) { + cfg.Cron = cron + } +} + +// WithTimezone sets the Timezone field +// +// Timezone provides the IANA timezone name used when evaluating the cron expression. +func WithTimezone(timezone string) Option { + return func(cfg *schedule.Config) { + cfg.Timezone = timezone + } +} + +// WithInput sets the Input field +// +// Input contains default input values that are supplied to the workflow when triggered. +func WithInput(input map[string]any) Option { + return func(cfg *schedule.Config) { + cfg.Input = input + } +} + +// WithRetry sets the Retry field +// +// Retry configures retry behavior for failed scheduled executions. +func WithRetry(retry *schedule.RetryPolicy) Option { + return func(cfg *schedule.Config) { + cfg.Retry = retry + } +} + +// WithEnabled sets the Enabled field +// +// Enabled toggles whether the schedule is active. +func WithEnabled(enabled *bool) Option { + return func(cfg *schedule.Config) { + cfg.Enabled = enabled + } +} + +// WithDescription sets the Description field +// +// Description explains the schedule purpose for operators. +func WithDescription(description string) Option { + return func(cfg *schedule.Config) { + cfg.Description = description + } +} diff --git a/sdk/schema/README.md b/sdk/schema/README.md new file mode 100644 index 00000000..7e8b776a --- /dev/null +++ b/sdk/schema/README.md @@ -0,0 +1,311 @@ +# Schema Package - Hybrid Approach + +This package provides a **hybrid approach** to schema configuration in the Compozy SDK: + +1. **PropertyBuilder Pattern** (from `sdk/schema/`) - For dynamic schema construction +2. 
**Functional Options** (this package) - For static schema configuration with metadata
+
+## When to Use Each Pattern
+
+### Use PropertyBuilder (`sdk/schema/`) for Dynamic Schema Construction
+
+When you need to build schemas programmatically at runtime based on dynamic conditions:
+
+```go
+import sdkschema "github.com/compozy/compozy/sdk/schema"
+
+// Build schema dynamically
+schema := sdkschema.NewObject().
+	AddProperty("name", sdkschema.NewString().WithMinLength(1)).
+	AddProperty("age", sdkschema.NewInteger().WithMinimum(0)).
+	RequireProperty("name").
+	Build(ctx)
+```
+
+**Use cases:**
+
+- Runtime-dependent schemas
+- Conditional property addition
+- Dynamic validation rules
+- Schema generation from external sources
+
+### Use Functional Options (`sdk/v2/schema/`) for Static Schema Configuration
+
+When you have a complete JSON schema definition and just need to wrap it with metadata:
+
+```go
+import "github.com/compozy/compozy/sdk/v2/schema"
+
+// Static schema with metadata
+schemaConfig, err := schema.New(ctx, "user-schema",
+	schema.WithJSONSchema(map[string]any{
+		"type":        "object",
+		"title":       "User Schema",
+		"description": "Validates user data",
+		"properties": map[string]any{
+			"name": map[string]any{"type": "string"},
+			"age":  map[string]any{"type": "integer"},
+		},
+		"required": []string{"name"},
+	}),
+)
+```
+
+**Use cases:**
+
+- Fixed schema definitions
+- Configuration metadata
+- Schema versioning
+- Simple wrapper around complete JSON schemas
+
+## Installation
+
+```go
+import "github.com/compozy/compozy/sdk/v2/schema"
+```
+
+## API Reference
+
+### Constructor
+
+```go
+func New(ctx context.Context, id string, opts ...Option) (*engschema.Schema, error)
+```
+
+Creates a new schema configuration with the provided ID and options. Returns a deep-copied schema ready for use.
+ +**Parameters:** + +- `ctx`: Context for logging and cancellation (required, non-nil) +- `id`: Schema identifier (required, non-empty) +- `opts`: Functional options for configuration + +**Returns:** + +- `*engschema.Schema`: Deep-copied schema configuration +- `error`: Validation errors if any + +### Options + +#### WithJSONSchema + +```go +func WithJSONSchema(jsonSchema map[string]any) Option +``` + +Sets the complete JSON schema definition. Accepts a `map[string]any` representing the full JSON schema object. + +**Note:** The "id" field is automatically preserved from the constructor and will not be overwritten. + +**Supported fields:** + +- `type`: Schema type (object, string, number, integer, boolean, array, null) +- `title`: Human-readable title +- `description`: Schema description +- `properties`: Object properties (can be from Builder or plain map) +- `required`: Array of required property names +- `version`: Schema version (custom field) +- Any other valid JSON Schema fields + +## Usage Examples + +### Basic Example + +```go +schema, err := schema.New(ctx, "simple-schema", + schema.WithJSONSchema(map[string]any{ + "type": "string", + "minLength": 1, + "maxLength": 100, + }), +) +if err != nil { + log.Fatal(err) +} +``` + +### Full Configuration Example + +```go +schema, err := schema.New(ctx, "user-schema", + schema.WithJSONSchema(map[string]any{ + "type": "object", + "title": "User Schema", + "description": "Validates user data", + "version": "1.0.0", + "properties": map[string]any{ + "name": map[string]any{ + "type": "string", + "minLength": 1, + "maxLength": 100, + }, + "email": map[string]any{ + "type": "string", + "pattern": "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$", + }, + "age": map[string]any{ + "type": "integer", + "minimum": 0, + "maximum": 150, + }, + }, + "required": []string{"name", "email"}, + }), +) +``` + +### Using PropertyBuilder with Functional Options + +You can combine both approaches - use PropertyBuilder to construct the 
schema, then wrap it: + +```go +import ( + sdkschema "github.com/compozy/compozy/sdk/schema" + "github.com/compozy/compozy/sdk/v2/schema" +) + +// Build dynamic properties with PropertyBuilder +properties, err := sdkschema.NewObject(). + AddProperty("name", sdkschema.NewString()). + AddProperty("age", sdkschema.NewInteger()). + Build(ctx) +if err != nil { + return err +} + +// Wrap with metadata using functional options +schemaConfig, err := schema.New(ctx, "dynamic-schema", + schema.WithJSONSchema(map[string]any{ + "type": "object", + "title": "Dynamic Schema", + "properties": *properties, + }), +) +``` + +## Migration Guide + +### Before (Old SDK - Builder Pattern Only) + +```go +import "github.com/compozy/compozy/sdk/schema" + +schema := schema.NewObject(). + AddProperty("name", schema.NewString()). + AddProperty("age", schema.NewInteger()). + Build(ctx) +``` + +### After (New SDK - Hybrid Approach) + +**Option 1: Keep using PropertyBuilder for dynamic schemas** + +```go +import sdkschema "github.com/compozy/compozy/sdk/schema" + +// Still works! Use for dynamic construction +schema := sdkschema.NewObject(). + AddProperty("name", sdkschema.NewString()). + AddProperty("age", sdkschema.NewInteger()). + Build(ctx) +``` + +**Option 2: Use functional options for static schemas** + +```go +import "github.com/compozy/compozy/sdk/v2/schema" + +// New approach for static schemas +schemaConfig, err := schema.New(ctx, "user-schema", + schema.WithJSONSchema(map[string]any{ + "type": "object", + "properties": map[string]any{ + "name": map[string]any{"type": "string"}, + "age": map[string]any{"type": "integer"}, + }, + }), +) +``` + +### Key Changes + +1. **PropertyBuilder API remains unchanged** - No breaking changes for dynamic schema construction +2. **New functional options API** - Simpler approach for static schema configuration +3. **ID is required** - Schema configurations now require an explicit ID +4. **Context-first** - Context is the first parameter (Go idiom) +5. 
**Metadata support** - Easy to add version, title, description, etc. + +## Error Handling + +All validation errors are collected and returned together: + +```go +schema, err := schema.New(ctx, "", // Invalid: empty ID + schema.WithJSONSchema(map[string]any{ + "type": "invalid-type", // Invalid: unsupported type + }), +) +if err != nil { + // Error will contain: + // - "id is invalid: id cannot be empty" + // - "invalid schema type: invalid-type" + fmt.Println(err) +} +``` + +## Validation + +The constructor performs validation on: + +- **ID**: Must be non-empty and valid format +- **Schema type**: Must be one of: object, string, number, integer, boolean, array, null +- **Properties**: Must be a `map[string]any` if present +- **Required**: Must be a string array if present + +## Deep Copy + +All schemas returned by `New()` are deep-copied to prevent accidental mutation: + +```go +schema1, _ := schema.New(ctx, "test", schema.WithJSONSchema(props)) +// Modifying schema1 won't affect future schemas created with same props +``` + +## Testing + +Run tests: + +```bash +gotestsum --format pkgname -- -race -parallel=4 ./sdk/schema +``` + +Run linting: + +```bash +golangci-lint run --fix --allow-parallel-runners ./sdk/schema/... +``` + +## Architecture Decision + +**Why the hybrid approach?** + +1. **PropertyBuilder is still valuable** - Dynamic schema construction at runtime cannot be easily replaced with functional options +2. **Functional options for static config** - When you have a complete schema definition, functional options are simpler and more idiomatic +3. 
**Best of both worlds** - Use the right tool for the job: + - PropertyBuilder: Runtime flexibility + - Functional options: Static configuration simplicity + +**Design principles:** + +- **No breaking changes**: PropertyBuilder API remains unchanged +- **Minimal API**: Single `WithJSONSchema()` option instead of dozens of field-specific options +- **Idiomatic Go**: Follows functional options pattern used throughout Go ecosystem +- **Type safety**: Full type checking at compile time + +## Examples + +See `sdk/cmd/` directory for complete examples using both patterns. + +## Support + +For issues or questions, please open an issue on GitHub. diff --git a/sdk/schema/constructor.go b/sdk/schema/constructor.go new file mode 100644 index 00000000..0199b7e6 --- /dev/null +++ b/sdk/schema/constructor.go @@ -0,0 +1,113 @@ +package schema + +import ( + "context" + "fmt" + + "github.com/compozy/compozy/engine/core" + engschema "github.com/compozy/compozy/engine/schema" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +// New creates a new schema configuration with metadata wrapper. +// +// This is a simple wrapper for schema metadata and configuration. +// For dynamic schema construction (runtime-dependent schemas), use the +// PropertyBuilder pattern from sdk/schema package directly. +// +// Parameters: +// - ctx: Context for logging and cancellation +// - id: Schema identifier (required, non-empty) +// - opts: Functional options for schema configuration +// +// Returns a deep-copied schema ready for use. 
+// +// Example: +// +// // Static schema with metadata +// schema, err := schema.New(ctx, "user-schema", +// schema.WithJSONSchema(map[string]any{ +// "type": "object", +// "title": "User Schema", +// "description": "Validates user data", +// "properties": map[string]any{ +// "name": map[string]any{"type": "string"}, +// "age": map[string]any{"type": "integer"}, +// }, +// "required": []string{"name"}, +// }), +// ) +func New(ctx context.Context, id string, opts ...Option) (*engschema.Schema, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating schema configuration", "id", id) + collected := make([]error, 0) + if err := validate.ID(ctx, id); err != nil { + collected = append(collected, fmt.Errorf("id is invalid: %w", err)) + } + schema := engschema.Schema{ + "id": id, + } + for _, opt := range opts { + opt(&schema) + } + if err := validateSchema(&schema); err != nil { + collected = append(collected, err) + } + if len(collected) > 0 { + return nil, &sdkerrors.BuildError{Errors: collected} + } + cloned, err := core.DeepCopy(schema) + if err != nil { + return nil, fmt.Errorf("failed to clone schema: %w", err) + } + return &cloned, nil +} + +func validateSchema(s *engschema.Schema) error { + if s == nil { + return fmt.Errorf("schema cannot be nil") + } + schemaMap := map[string]any(*s) + if schemaType, ok := schemaMap["type"]; ok { + typeStr, isString := schemaType.(string) + if !isString { + return fmt.Errorf("schema type must be a string") + } + validTypes := map[string]bool{ + "object": true, + "string": true, + "number": true, + "integer": true, + "boolean": true, + "array": true, + "null": true, + } + if !validTypes[typeStr] { + return fmt.Errorf("invalid schema type: %s", typeStr) + } + } + if properties, ok := schemaMap["properties"]; ok { + if _, isMap := properties.(map[string]any); !isMap { + return fmt.Errorf("properties must be a map[string]any") + } + } + if required, ok := 
schemaMap["required"]; ok { + switch v := required.(type) { + case []string: + case []any: + for i, item := range v { + if _, isString := item.(string); !isString { + return fmt.Errorf("required[%d] must be a string", i) + } + } + default: + return fmt.Errorf("required must be a string array") + } + } + return nil +} diff --git a/sdk/schema/constructor_test.go b/sdk/schema/constructor_test.go new file mode 100644 index 00000000..5e0f0dd2 --- /dev/null +++ b/sdk/schema/constructor_test.go @@ -0,0 +1,299 @@ +package schema + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew_MinimalConfig(t *testing.T) { + t.Run("Should create minimal schema with just ID", func(t *testing.T) { + schema, err := New(t.Context(), "test-schema") + require.NoError(t, err) + require.NotNil(t, schema) + schemaMap := map[string]any(*schema) + assert.Equal(t, "test-schema", schemaMap["id"]) + }) +} + +func TestNew_WithJSONSchema(t *testing.T) { + t.Run("Should create schema with JSON schema definition", func(t *testing.T) { + schema, err := New(t.Context(), "user-schema", + WithJSONSchema(map[string]any{ + "type": "object", + "title": "User Schema", + "description": "Validates user data", + "properties": map[string]any{ + "name": map[string]any{"type": "string"}, + "age": map[string]any{"type": "integer"}, + }, + "required": []string{"name"}, + }), + ) + require.NoError(t, err) + require.NotNil(t, schema) + schemaMap := map[string]any(*schema) + assert.Equal(t, "user-schema", schemaMap["id"]) + assert.Equal(t, "object", schemaMap["type"]) + assert.Equal(t, "User Schema", schemaMap["title"]) + assert.Equal(t, "Validates user data", schemaMap["description"]) + properties, ok := schemaMap["properties"].(map[string]any) + require.True(t, ok) + assert.Contains(t, properties, "name") + assert.Contains(t, properties, "age") + required, ok := schemaMap["required"].([]string) + require.True(t, ok) + assert.Equal(t, 
[]string{"name"}, required) + }) + t.Run("Should handle string schema type", func(t *testing.T) { + schema, err := New(t.Context(), "string-schema", + WithJSONSchema(map[string]any{ + "type": "string", + "minLength": 1, + "maxLength": 100, + }), + ) + require.NoError(t, err) + require.NotNil(t, schema) + schemaMap := map[string]any(*schema) + assert.Equal(t, "string", schemaMap["type"]) + assert.Equal(t, 1, schemaMap["minLength"]) + assert.Equal(t, 100, schemaMap["maxLength"]) + }) + t.Run("Should handle array schema type", func(t *testing.T) { + schema, err := New(t.Context(), "array-schema", + WithJSONSchema(map[string]any{ + "type": "array", + "items": map[string]any{ + "type": "string", + }, + }), + ) + require.NoError(t, err) + require.NotNil(t, schema) + schemaMap := map[string]any(*schema) + assert.Equal(t, "array", schemaMap["type"]) + items, ok := schemaMap["items"].(map[string]any) + require.True(t, ok) + assert.Equal(t, "string", items["type"]) + }) + t.Run("Should handle nil JSON schema gracefully", func(t *testing.T) { + schema, err := New(t.Context(), "nil-schema", + WithJSONSchema(nil), + ) + require.NoError(t, err) + require.NotNil(t, schema) + schemaMap := map[string]any(*schema) + assert.Equal(t, "nil-schema", schemaMap["id"]) + }) +} + +func TestNew_ValidationErrors(t *testing.T) { + tests := []struct { + name string + id string + opts []Option + wantErr string + }{ + { + name: "empty id", + id: "", + wantErr: "id is invalid", + }, + { + name: "whitespace id", + id: " ", + wantErr: "id is invalid", + }, + { + name: "invalid schema type", + id: "test-schema", + opts: []Option{ + WithJSONSchema(map[string]any{ + "type": "invalid-type", + }), + }, + wantErr: "invalid schema type", + }, + { + name: "non-string schema type", + id: "test-schema", + opts: []Option{ + WithJSONSchema(map[string]any{ + "type": 123, + }), + }, + wantErr: "schema type must be a string", + }, + { + name: "invalid properties type", + id: "test-schema", + opts: []Option{ + 
WithJSONSchema(map[string]any{ + "type": "object", + "properties": "invalid", + }), + }, + wantErr: "properties must be a map", + }, + { + name: "invalid required type", + id: "test-schema", + opts: []Option{ + WithJSONSchema(map[string]any{ + "type": "object", + "required": "invalid", + }), + }, + wantErr: "required must be a string array", + }, + { + name: "invalid required array element", + id: "test-schema", + opts: []Option{ + WithJSONSchema(map[string]any{ + "type": "object", + "required": []any{123, "valid"}, + }), + }, + wantErr: "required[0] must be a string", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := New(t.Context(), tt.id, tt.opts...) + require.Error(t, err) + assert.Contains(t, err.Error(), tt.wantErr) + }) + } +} + +func TestNew_NilContext(t *testing.T) { + t.Run("Should return error for nil context", func(t *testing.T) { + var nilCtx context.Context + _, err := New(nilCtx, "test-schema") + require.Error(t, err) + assert.Contains(t, err.Error(), "context is required") + }) +} + +func TestNew_DeepCopy(t *testing.T) { + t.Run("Should return deep copied schema", func(t *testing.T) { + properties := map[string]any{ + "name": map[string]any{"type": "string"}, + } + schema1, err := New(t.Context(), "test-schema", + WithJSONSchema(map[string]any{ + "type": "object", + "properties": properties, + }), + ) + require.NoError(t, err) + schema1Map := map[string]any(*schema1) + props1, ok := schema1Map["properties"].(map[string]any) + require.True(t, ok) + props1["modified"] = map[string]any{"type": "boolean"} + schema2, err := New(t.Context(), "test-schema", + WithJSONSchema(map[string]any{ + "type": "object", + "properties": properties, + }), + ) + require.NoError(t, err) + schema2Map := map[string]any(*schema2) + props2, ok := schema2Map["properties"].(map[string]any) + require.True(t, ok) + assert.NotContains(t, props2, "modified") + assert.Contains(t, props1, "modified") + }) +} + +func TestNew_AllSchemaTypes(t 
*testing.T) { + validTypes := []string{"object", "string", "number", "integer", "boolean", "array", "null"} + for _, schemaType := range validTypes { + t.Run("Should accept "+schemaType+" type", func(t *testing.T) { + schema, err := New(t.Context(), "test-schema", + WithJSONSchema(map[string]any{ + "type": schemaType, + }), + ) + require.NoError(t, err) + require.NotNil(t, schema) + schemaMap := map[string]any(*schema) + assert.Equal(t, schemaType, schemaMap["type"]) + }) + } +} + +func TestNew_ComplexSchema(t *testing.T) { + t.Run("Should handle complex nested schema", func(t *testing.T) { + schema, err := New(t.Context(), "complex-schema", + WithJSONSchema(map[string]any{ + "type": "object", + "title": "Complex Schema", + "description": "A complex nested schema", + "properties": map[string]any{ + "user": map[string]any{ + "type": "object", + "properties": map[string]any{ + "name": map[string]any{ + "type": "string", + "minLength": 1, + "maxLength": 100, + }, + "email": map[string]any{ + "type": "string", + "pattern": "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$", + }, + "age": map[string]any{ + "type": "integer", + "minimum": 0, + "maximum": 150, + }, + }, + "required": []string{"name", "email"}, + }, + "tags": map[string]any{ + "type": "array", + "items": map[string]any{ + "type": "string", + }, + "minItems": 1, + }, + }, + "required": []string{"user"}, + }), + ) + require.NoError(t, err) + require.NotNil(t, schema) + schemaMap := map[string]any(*schema) + assert.Equal(t, "complex-schema", schemaMap["id"]) + assert.Equal(t, "object", schemaMap["type"]) + assert.Equal(t, "Complex Schema", schemaMap["title"]) + properties, ok := schemaMap["properties"].(map[string]any) + require.True(t, ok) + assert.Contains(t, properties, "user") + assert.Contains(t, properties, "tags") + }) +} + +func TestNew_WithVersion(t *testing.T) { + t.Run("Should preserve custom fields like version", func(t *testing.T) { + schema, err := New(t.Context(), "versioned-schema", + 
WithJSONSchema(map[string]any{ + "type": "object", + "version": "1.0.0", + "properties": map[string]any{ + "data": map[string]any{"type": "string"}, + }, + }), + ) + require.NoError(t, err) + require.NotNil(t, schema) + schemaMap := map[string]any(*schema) + assert.Equal(t, "versioned-schema", schemaMap["id"]) + assert.Equal(t, "1.0.0", schemaMap["version"]) + }) +} diff --git a/sdk/schema/options.go b/sdk/schema/options.go new file mode 100644 index 00000000..45953bd1 --- /dev/null +++ b/sdk/schema/options.go @@ -0,0 +1,47 @@ +package schema + +import ( + engschema "github.com/compozy/compozy/engine/schema" +) + +// Option is a functional option for configuring Schema wrapper. +type Option func(*engschema.Schema) + +// WithJSONSchema sets the complete JSON schema definition. +// +// This accepts a map[string]any representing the full JSON schema object, +// which can include properties like: +// - "type": schema type (object, string, etc.) +// - "title": human-readable title +// - "description": schema description +// - "properties": object properties (can be built with Builder or plain map) +// - "version": schema version +// - Any other valid JSON Schema fields +// +// Note: The "id" field is preserved from the constructor and will not be overwritten. 
+// +// Example: +// +// schema.New(ctx, "user-schema", +// schema.WithJSONSchema(map[string]any{ +// "type": "object", +// "title": "User Schema", +// "description": "Validates user data", +// "properties": propertySchema, // Built with Builder or plain map +// "version": "1.0.0", +// }), +// ) +func WithJSONSchema(jsonSchema map[string]any) Option { + return func(s *engschema.Schema) { + if jsonSchema == nil { + return + } + id := (*s)["id"] + for k, v := range jsonSchema { + (*s)[k] = v + } + if id != nil { + (*s)["id"] = id + } + } +} diff --git a/sdk/task/README.md b/sdk/task/README.md new file mode 100644 index 00000000..5fc6ddc1 --- /dev/null +++ b/sdk/task/README.md @@ -0,0 +1,341 @@ +# Task Package - Functional Options API + +The `task` package provides a clean, type-safe API for creating task configurations using the functional options pattern. It replaces the previous builder pattern with ~70% code reduction while maintaining full functionality. + +## Overview + +This package provides 7 specialized constructors for different task types, all returning `*task.Config`: + +- **New** - Basic tasks with agent or tool execution +- **NewRouter** - Conditional routing based on expressions +- **NewParallel** - Parallel task execution with strategies +- **NewCollection** - Iterate over collections +- **NewWait** - Wait for signals or events +- **NewSignal** - Send signals to other tasks +- **NewMemory** - Memory operations (read/write/append/delete) + +## Installation + +```go +import "github.com/compozy/compozy/sdk/v2/task" +``` + +## Basic Task + +Create tasks that execute agents or tools: + +```go +import ( + "context" + "github.com/compozy/compozy/sdk/v2/task" + "github.com/compozy/compozy/engine/agent" +) + +// Minimal agent-based task +cfg, err := task.New(ctx, "process-data", + task.WithAgent(&agent.Config{ID: "data-processor"}), +) + +// Full configuration with all options +cfg, err := task.New(ctx, "analyze", + task.WithAgent(&agent.Config{ID: 
"analyzer"}), + task.WithAction("analyze"), + task.WithPrompt("Analyze the data"), + task.WithTimeout("30s"), + task.WithRetries(3), + task.WithSleep("1s"), + task.WithOnSuccess(&core.SuccessTransition{ + Next: strPtr("next-task"), + }), + task.WithOnError(&core.ErrorTransition{ + Next: strPtr("error-handler"), + }), +) +``` + +## Router Task + +Route execution based on conditions: + +```go +routes := map[string]any{ + "true": "approved-path", + "false": "rejected-path", +} + +cfg, err := task.NewRouter(ctx, "approval-router", + task.WithCondition("input.approved == true"), + task.WithRoutes(routes), +) +``` + +## Parallel Task + +Execute multiple tasks concurrently: + +```go +import enginetask "github.com/compozy/compozy/engine/task" + +tasks := []enginetask.Config{ + {BaseConfig: enginetask.BaseConfig{ID: "task-1", Type: enginetask.TaskTypeBasic}}, + {BaseConfig: enginetask.BaseConfig{ID: "task-2", Type: enginetask.TaskTypeBasic}}, + {BaseConfig: enginetask.BaseConfig{ID: "task-3", Type: enginetask.TaskTypeBasic}}, +} + +// Default strategy: WaitAll +cfg, err := task.NewParallel(ctx, "parallel-processing", tasks) + +// Custom strategy and workers +cfg, err := task.NewParallel(ctx, "parallel-processing", tasks, + task.WithStrategy(enginetask.StrategyFailFast), + task.WithMaxWorkers(5), +) +``` + +### Parallel Strategies + +- **StrategyWaitAll** (default) - Wait for all tasks to complete +- **StrategyFailFast** - Stop on first failure +- **StrategyBestEffort** - Continue despite failures +- **StrategyRace** - Stop after first success + +## Collection Task + +Process items in a collection: + +```go +taskTemplate := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ + ID: "process-item", + Type: enginetask.TaskTypeBasic, + }, +} + +// Sequential processing +cfg, err := task.NewCollection(ctx, "process-users", "workflow.users", + task.WithTask(taskTemplate), + task.WithMode(enginetask.CollectionModeSequential), +) + +// Parallel processing with batching +cfg, 
err := task.NewCollection(ctx, "process-items", "data.items", + task.WithTask(taskTemplate), + task.WithMode(enginetask.CollectionModeParallel), + task.WithBatch(10), +) +``` + +## Wait Task + +Wait for signals or events: + +```go +// Wait indefinitely +cfg, err := task.NewWait(ctx, "wait-approval", "user-approval") + +// Wait with timeout +cfg, err := task.NewWait(ctx, "wait-payment", "payment-complete", + task.WithTimeout("5m"), +) +``` + +## Signal Task + +Send signals to waiting tasks: + +```go +// Simple signal +cfg, err := task.NewSignal(ctx, "notify-completion", "process-complete") + +// Signal with payload +payload := map[string]any{ + "status": "success", + "result": "data-processed", +} +cfg, err := task.NewSignal(ctx, "send-result", "task-done", + task.WithPayload(payload), +) +``` + +## Memory Task + +Perform memory operations: + +```go +import enginetask "github.com/compozy/compozy/engine/task" + +// Read from memory +cfg, err := task.NewMemory(ctx, "read-session", enginetask.MemoryOpRead, + task.WithMemoryRef("session-data"), + task.WithKeyTemplate("user-{{.user_id}}"), +) + +// Write to memory +cfg, err := task.NewMemory(ctx, "save-result", enginetask.MemoryOpWrite, + task.WithMemoryRef("results"), + task.WithKeyTemplate("result-{{.task_id}}"), +) + +// Append to list +cfg, err := task.NewMemory(ctx, "add-item", enginetask.MemoryOpAppend, + task.WithMemoryRef("items"), +) + +// Delete key +cfg, err := task.NewMemory(ctx, "remove-data", enginetask.MemoryOpDelete, + task.WithMemoryRef("cache"), + task.WithKeyTemplate("temp-{{.id}}"), +) +``` + +### Memory Operations + +- **MemoryOpRead** - Read value from memory +- **MemoryOpWrite** - Write value to memory +- **MemoryOpAppend** - Append to list +- **MemoryOpDelete** - Delete key +- **MemoryOpClear** - Clear all memory +- **MemoryOpFlush** - Flush to persistent storage +- **MemoryOpHealth** - Check memory health +- **MemoryOpStats** - Get memory statistics + +## Common Options + +All task types support 
these common options: + +```go +task.WithTimeout("30s") // Task timeout +task.WithRetries(3) // Retry attempts +task.WithSleep("1s") // Delay before execution +task.WithName("Display Name") // Human-readable name +task.WithDescription("Details...") // Task description +task.WithLabels(map[string]string{ // Metadata labels + "env": "prod", + "team": "data", +}) +task.WithOnSuccess(&core.SuccessTransition{ + Next: strPtr("next-task"), +}) +task.WithOnError(&core.ErrorTransition{ + Next: strPtr("error-handler"), +}) +``` + +## Validation + +All constructors perform comprehensive validation: + +- **ID validation** - Non-empty, trimmed IDs required +- **Type-specific validation** - Each task type validates its required fields +- **Error accumulation** - All validation errors collected and returned together +- **Whitespace trimming** - IDs and string fields automatically trimmed + +Example validation errors: + +```go +// Empty ID +_, err := task.New(ctx, "") +// Error: "task id is invalid: cannot be empty" + +// Invalid timeout +_, err := task.New(ctx, "task-1", task.WithTimeout("invalid")) +// Error: "invalid timeout duration: invalid" + +// Parallel with too few tasks +_, err := task.NewParallel(ctx, "parallel", []enginetask.Config{oneTask}) +// Error: "parallel task requires at least 2 tasks" + +// Multiple errors accumulated +_, err := task.NewParallel(ctx, "", []enginetask.Config{}) +// Error: "task id is invalid: cannot be empty; parallel task requires at least 2 tasks" +``` + +## Deep Copy Behavior + +All constructors return deep copies to prevent external mutation: + +```go +cfg1, _ := task.New(ctx, "task-1", task.WithAgent(&agent.Config{ID: "agent-1"})) +cfg2, _ := task.New(ctx, "task-2", task.WithAgent(&agent.Config{ID: "agent-2"})) + +// Modifying cfg1 doesn't affect cfg2 +cfg1.ID = "modified" +// cfg2.ID is still "task-2" +``` + +## Helper Functions + +```go +// Create pointer to string (useful for transitions) +func strPtr(s string) *string { + return &s +} 
+ +// Usage +task.WithOnSuccess(&core.SuccessTransition{ + Next: strPtr("next-task"), +}) +``` + +## Code Generation + +This package uses code generation for functional options: + +```bash +# Regenerate options (if engine/task/config.go changes) +cd sdk/task +go generate +``` + +The generator creates ~50 functional options from `engine/task/config.go`. + +## Migration from Builder Pattern + +**Before (Builder Pattern):** + +```go +builder := task.NewBuilder(). + SetID("task-1"). + SetAgent(agentCfg). + SetTimeout("30s"). + SetRetries(3) + +cfg, err := builder.Build(ctx) +``` + +**After (Functional Options):** + +```go +cfg, err := task.New(ctx, "task-1", + task.WithAgent(agentCfg), + task.WithTimeout("30s"), + task.WithRetries(3), +) +``` + +## Benefits + +✅ **70% code reduction** - From ~300 LOC to ~90 LOC per constructor +✅ **Type safety** - Compile-time validation of options +✅ **Clear intent** - Constructor names indicate task type +✅ **Immutability** - Deep copy prevents external mutation +✅ **Error accumulation** - All validation errors reported at once +✅ **Auto-generation** - Options generated from engine structs + +## Testing + +All constructors have comprehensive test coverage: + +- Minimal valid configurations +- Full configurations with all options +- Error cases and validation +- Deep copy behavior +- Whitespace trimming +- Error accumulation +- Transition configuration + +Run tests: + +```bash +gotestsum -- -race -parallel=4 ./sdk/task +``` diff --git a/sdk/task/constructors.go b/sdk/task/constructors.go new file mode 100644 index 00000000..24a3747f --- /dev/null +++ b/sdk/task/constructors.go @@ -0,0 +1,450 @@ +package task + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/compozy/compozy/engine/core" + enginetask "github.com/compozy/compozy/engine/task" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +// New 
creates a basic task configuration using functional options +func New(ctx context.Context, id string, opts ...Option) (*enginetask.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating basic task configuration", "task", id) + cfg := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ + ID: strings.TrimSpace(id), + Type: enginetask.TaskTypeBasic, + }, + } + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0) + if err := validateID(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateAgentOrTool(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateTimeout(cfg); err != nil { + collected = append(collected, err) + } + if err := validateSleep(cfg); err != nil { + collected = append(collected, err) + } + filtered := filterErrors(collected) + if len(filtered) > 0 { + return nil, &sdkerrors.BuildError{Errors: filtered} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone task config: %w", err) + } + return cloned, nil +} + +// NewRouter creates a router task configuration using functional options +func NewRouter(ctx context.Context, id string, opts ...Option) (*enginetask.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating router task configuration", "task", id) + cfg := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ + ID: strings.TrimSpace(id), + Type: enginetask.TaskTypeRouter, + }, + } + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0) + if err := validateID(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateCondition(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateRoutes(ctx, cfg); err != nil { + collected = append(collected, err) + } + filtered := 
filterErrors(collected) + if len(filtered) > 0 { + return nil, &sdkerrors.BuildError{Errors: filtered} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone router task config: %w", err) + } + return cloned, nil +} + +// NewParallel creates a parallel task configuration using functional options +func NewParallel( + ctx context.Context, + id string, + tasks []enginetask.Config, + opts ...Option, +) (*enginetask.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating parallel task configuration", "task", id) + cfg := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ + ID: strings.TrimSpace(id), + Type: enginetask.TaskTypeParallel, + }, + Tasks: tasks, + } + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0) + if err := validateID(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateParallelTasks(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateParallelStrategy(cfg); err != nil { + collected = append(collected, err) + } + filtered := filterErrors(collected) + if len(filtered) > 0 { + return nil, &sdkerrors.BuildError{Errors: filtered} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone parallel task config: %w", err) + } + return cloned, nil +} + +// NewCollection creates a collection task configuration using functional options +func NewCollection(ctx context.Context, id string, items string, opts ...Option) (*enginetask.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating collection task configuration", "task", id) + cfg := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ + ID: strings.TrimSpace(id), + Type: enginetask.TaskTypeCollection, + }, + CollectionConfig: enginetask.CollectionConfig{ + 
Items: strings.TrimSpace(items), + }, + } + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0) + if err := validateID(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateCollectionItems(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateCollectionTask(ctx, cfg); err != nil { + collected = append(collected, err) + } + filtered := filterErrors(collected) + if len(filtered) > 0 { + return nil, &sdkerrors.BuildError{Errors: filtered} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone collection task config: %w", err) + } + return cloned, nil +} + +// NewWait creates a wait task configuration using functional options +func NewWait(ctx context.Context, id string, waitFor string, opts ...Option) (*enginetask.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating wait task configuration", "task", id) + cfg := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ + ID: strings.TrimSpace(id), + Type: enginetask.TaskTypeWait, + }, + WaitTask: enginetask.WaitTask{ + WaitFor: strings.TrimSpace(waitFor), + }, + } + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0) + if err := validateID(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateWaitFor(ctx, cfg); err != nil { + collected = append(collected, err) + } + filtered := filterErrors(collected) + if len(filtered) > 0 { + return nil, &sdkerrors.BuildError{Errors: filtered} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone wait task config: %w", err) + } + return cloned, nil +} + +// NewSignal creates a signal task configuration using functional options +func NewSignal(ctx context.Context, id string, signalID string, opts ...Option) (*enginetask.Config, error) { + if ctx == nil { + return nil, 
fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating signal task configuration", "task", id) + cfg := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ + ID: strings.TrimSpace(id), + Type: enginetask.TaskTypeSignal, + }, + SignalTask: enginetask.SignalTask{ + Signal: &enginetask.SignalConfig{ + ID: strings.TrimSpace(signalID), + }, + }, + } + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0) + if err := validateID(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateSignal(ctx, cfg); err != nil { + collected = append(collected, err) + } + filtered := filterErrors(collected) + if len(filtered) > 0 { + return nil, &sdkerrors.BuildError{Errors: filtered} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone signal task config: %w", err) + } + return cloned, nil +} + +// NewMemory creates a memory task configuration using functional options +func NewMemory( + ctx context.Context, + id string, + operation enginetask.MemoryOpType, + opts ...Option, +) (*enginetask.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating memory task configuration", "task", id) + cfg := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ + ID: strings.TrimSpace(id), + Type: enginetask.TaskTypeMemory, + }, + MemoryTask: enginetask.MemoryTask{ + Operation: operation, + }, + } + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0) + if err := validateID(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateMemoryOperation(ctx, cfg); err != nil { + collected = append(collected, err) + } + filtered := filterErrors(collected) + if len(filtered) > 0 { + return nil, &sdkerrors.BuildError{Errors: filtered} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone memory 
task config: %w", err) + } + return cloned, nil +} + +// Validation helper functions + +func validateID(ctx context.Context, cfg *enginetask.Config) error { + cfg.ID = strings.TrimSpace(cfg.ID) + if err := validate.ID(ctx, cfg.ID); err != nil { + return fmt.Errorf("task id is invalid: %w", err) + } + return nil +} + +func validateAgentOrTool(_ context.Context, cfg *enginetask.Config) error { + hasAgent := cfg.Agent != nil && (cfg.Agent.ID != "" || cfg.Agent.Instructions != "") + hasTool := cfg.Tool != nil && cfg.Tool.ID != "" + if !hasAgent && !hasTool { + return fmt.Errorf("either agent or tool must be configured") + } + if hasAgent && hasTool { + return fmt.Errorf("cannot configure both agent and tool") + } + if hasAgent { + cfg.Agent.ID = strings.TrimSpace(cfg.Agent.ID) + cfg.Agent.Instructions = strings.TrimSpace(cfg.Agent.Instructions) + } + if hasTool { + cfg.Tool.ID = strings.TrimSpace(cfg.Tool.ID) + } + return nil +} + +func validateTimeout(cfg *enginetask.Config) error { + if cfg.Timeout == "" { + return nil + } + cfg.Timeout = strings.TrimSpace(cfg.Timeout) + if _, err := time.ParseDuration(cfg.Timeout); err != nil { + return fmt.Errorf("invalid timeout duration: %w", err) + } + return nil +} + +func validateSleep(cfg *enginetask.Config) error { + if cfg.Sleep == "" { + return nil + } + cfg.Sleep = strings.TrimSpace(cfg.Sleep) + if _, err := time.ParseDuration(cfg.Sleep); err != nil { + return fmt.Errorf("invalid sleep duration: %w", err) + } + return nil +} + +func validateCondition(ctx context.Context, cfg *enginetask.Config) error { + cfg.Condition = strings.TrimSpace(cfg.Condition) + if err := validate.NonEmpty(ctx, "condition", cfg.Condition); err != nil { + return err + } + return nil +} + +func validateRoutes(_ context.Context, cfg *enginetask.Config) error { + if len(cfg.Routes) == 0 { + return fmt.Errorf("at least one route must be defined") + } + return nil +} + +func validateParallelTasks(_ context.Context, cfg *enginetask.Config) error { + if 
len(cfg.Tasks) < 2 { + return fmt.Errorf("parallel task must have at least 2 tasks") + } + for i := range cfg.Tasks { + if strings.TrimSpace(cfg.Tasks[i].ID) == "" { + return fmt.Errorf("task at index %d is missing an id", i) + } + } + return nil +} + +func validateParallelStrategy(cfg *enginetask.Config) error { + if cfg.Strategy == "" { + cfg.Strategy = enginetask.StrategyWaitAll + } + validStrategies := map[enginetask.ParallelStrategy]bool{ + enginetask.StrategyWaitAll: true, + enginetask.StrategyFailFast: true, + enginetask.StrategyBestEffort: true, + enginetask.StrategyRace: true, + } + if !validStrategies[cfg.Strategy] { + return fmt.Errorf("invalid parallel strategy: %s", cfg.Strategy) + } + return nil +} + +func validateCollectionItems(ctx context.Context, cfg *enginetask.Config) error { + cfg.Items = strings.TrimSpace(cfg.Items) + if err := validate.NonEmpty(ctx, "items", cfg.Items); err != nil { + return err + } + return nil +} + +func validateCollectionTask(_ context.Context, cfg *enginetask.Config) error { + if cfg.Task == nil { + return fmt.Errorf("collection task must have a task template") + } + if strings.TrimSpace(cfg.Task.ID) == "" { + return fmt.Errorf("collection task template must have an id") + } + return nil +} + +func validateWaitFor(ctx context.Context, cfg *enginetask.Config) error { + cfg.WaitFor = strings.TrimSpace(cfg.WaitFor) + if err := validate.NonEmpty(ctx, "wait_for", cfg.WaitFor); err != nil { + return err + } + return nil +} + +func validateSignal(ctx context.Context, cfg *enginetask.Config) error { + if cfg.Signal == nil { + return fmt.Errorf("signal configuration is required") + } + cfg.Signal.ID = strings.TrimSpace(cfg.Signal.ID) + if err := validate.NonEmpty(ctx, "signal id", cfg.Signal.ID); err != nil { + return err + } + return nil +} + +func validateMemoryOperation(_ context.Context, cfg *enginetask.Config) error { + validOps := map[enginetask.MemoryOpType]bool{ + enginetask.MemoryOpRead: true, + enginetask.MemoryOpWrite: 
true, + enginetask.MemoryOpAppend: true, + enginetask.MemoryOpDelete: true, + enginetask.MemoryOpClear: true, + enginetask.MemoryOpFlush: true, + enginetask.MemoryOpHealth: true, + enginetask.MemoryOpStats: true, + } + if !validOps[cfg.Operation] { + return fmt.Errorf("invalid memory operation: %s", cfg.Operation) + } + return nil +} + +func filterErrors(errors []error) []error { + filtered := make([]error, 0, len(errors)) + for _, err := range errors { + if err != nil { + filtered = append(filtered, err) + } + } + return filtered +} diff --git a/sdk/task/constructors_test.go b/sdk/task/constructors_test.go new file mode 100644 index 00000000..6b5d5d42 --- /dev/null +++ b/sdk/task/constructors_test.go @@ -0,0 +1,425 @@ +package task + +import ( + "context" + "testing" + + "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + enginetask "github.com/compozy/compozy/engine/task" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + t.Run("Should create minimal basic task configuration", func(t *testing.T) { + cfg, err := New(t.Context(), "test-task", + WithAgent(&agent.Config{ID: "test-agent"}), + ) + require.NoError(t, err) + assert.Equal(t, "test-task", cfg.ID) + assert.Equal(t, enginetask.TaskTypeBasic, cfg.Type) + assert.NotNil(t, cfg.Agent) + }) + + t.Run("Should create full basic task configuration with all options", func(t *testing.T) { + cfg, err := New(t.Context(), "test-task", + WithAgent(&agent.Config{ID: "test-agent"}), + WithTimeout("30s"), + WithRetries(3), + WithSleep("1s"), + ) + require.NoError(t, err) + assert.Equal(t, "test-task", cfg.ID) + assert.Equal(t, "30s", cfg.Timeout) + assert.Equal(t, 3, cfg.Retries) + assert.Equal(t, "1s", cfg.Sleep) + }) + + t.Run("Should return error for nil context", func(t *testing.T) { + var nilCtx context.Context + _, err := New(nilCtx, "test-task") + require.Error(t, err) + assert.Contains(t, err.Error(), "context is 
required") + }) + + t.Run("Should return error for empty ID", func(t *testing.T) { + _, err := New(t.Context(), "") + require.Error(t, err) + assert.Contains(t, err.Error(), "task id is invalid") + }) + + t.Run("Should return error for invalid timeout", func(t *testing.T) { + _, err := New(t.Context(), "test-task", WithTimeout("invalid")) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid timeout duration") + }) + + t.Run("Should deep copy configuration", func(t *testing.T) { + cfg1, err := New(t.Context(), "test-task", + WithAgent(&agent.Config{ID: "test-agent"}), + ) + require.NoError(t, err) + + cfg2, err := New(t.Context(), "test-task", + WithAgent(&agent.Config{ID: "test-agent"}), + ) + require.NoError(t, err) + + cfg1.ID = "modified" + assert.Equal(t, "test-task", cfg2.ID) + }) +} + +func TestNewRouter(t *testing.T) { + t.Run("Should create minimal router task configuration", func(t *testing.T) { + routes := map[string]any{ + "true": "task-a", + "false": "task-b", + } + cfg, err := NewRouter(t.Context(), "router-task", + WithCondition("input.approved"), + WithRoutes(routes), + ) + require.NoError(t, err) + assert.Equal(t, "router-task", cfg.ID) + assert.Equal(t, enginetask.TaskTypeRouter, cfg.Type) + assert.Equal(t, "input.approved", cfg.Condition) + assert.Len(t, cfg.Routes, 2) + }) + + t.Run("Should return error for nil context", func(t *testing.T) { + var nilCtx context.Context + _, err := NewRouter(nilCtx, "router-task") + require.Error(t, err) + assert.Contains(t, err.Error(), "context is required") + }) + + t.Run("Should return error for empty condition", func(t *testing.T) { + routes := map[string]any{"true": "task-a"} + _, err := NewRouter(t.Context(), "router-task", + WithCondition(""), + WithRoutes(routes), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "condition") + }) + + t.Run("Should return error for empty routes", func(t *testing.T) { + _, err := NewRouter(t.Context(), "router-task", + 
WithCondition("input.approved"), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "at least one route must be defined") + }) +} + +func TestNewParallel(t *testing.T) { + t.Run("Should create minimal parallel task configuration", func(t *testing.T) { + tasks := []enginetask.Config{ + {BaseConfig: enginetask.BaseConfig{ID: "task-1", Type: enginetask.TaskTypeBasic}}, + {BaseConfig: enginetask.BaseConfig{ID: "task-2", Type: enginetask.TaskTypeBasic}}, + } + cfg, err := NewParallel(t.Context(), "parallel-task", tasks) + require.NoError(t, err) + assert.Equal(t, "parallel-task", cfg.ID) + assert.Equal(t, enginetask.TaskTypeParallel, cfg.Type) + assert.Len(t, cfg.Tasks, 2) + assert.Equal(t, enginetask.StrategyWaitAll, cfg.Strategy) + }) + + t.Run("Should create parallel task with custom strategy", func(t *testing.T) { + tasks := []enginetask.Config{ + {BaseConfig: enginetask.BaseConfig{ID: "task-1", Type: enginetask.TaskTypeBasic}}, + {BaseConfig: enginetask.BaseConfig{ID: "task-2", Type: enginetask.TaskTypeBasic}}, + } + cfg, err := NewParallel(t.Context(), "parallel-task", tasks, + WithStrategy(enginetask.StrategyFailFast), + WithMaxWorkers(5), + ) + require.NoError(t, err) + assert.Equal(t, enginetask.StrategyFailFast, cfg.Strategy) + assert.Equal(t, 5, cfg.MaxWorkers) + }) + + t.Run("Should return error for less than 2 tasks", func(t *testing.T) { + tasks := []enginetask.Config{ + {BaseConfig: enginetask.BaseConfig{ID: "task-1", Type: enginetask.TaskTypeBasic}}, + } + _, err := NewParallel(t.Context(), "parallel-task", tasks) + require.Error(t, err) + assert.Contains(t, err.Error(), "at least 2 tasks") + }) + + t.Run("Should return error for task with empty ID", func(t *testing.T) { + tasks := []enginetask.Config{ + {BaseConfig: enginetask.BaseConfig{ID: "task-1", Type: enginetask.TaskTypeBasic}}, + {BaseConfig: enginetask.BaseConfig{ID: "", Type: enginetask.TaskTypeBasic}}, + } + _, err := NewParallel(t.Context(), "parallel-task", tasks) + 
require.Error(t, err) + assert.Contains(t, err.Error(), "missing an id") + }) + + t.Run("Should return error for invalid strategy", func(t *testing.T) { + tasks := []enginetask.Config{ + {BaseConfig: enginetask.BaseConfig{ID: "task-1", Type: enginetask.TaskTypeBasic}}, + {BaseConfig: enginetask.BaseConfig{ID: "task-2", Type: enginetask.TaskTypeBasic}}, + } + _, err := NewParallel(t.Context(), "parallel-task", tasks, + WithStrategy("invalid-strategy"), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid parallel strategy") + }) +} + +func TestNewCollection(t *testing.T) { + t.Run("Should create minimal collection task configuration", func(t *testing.T) { + taskTemplate := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ + ID: "item-task", + Type: enginetask.TaskTypeBasic, + }, + } + cfg, err := NewCollection(t.Context(), "collection-task", "items", + WithTask(taskTemplate), + ) + require.NoError(t, err) + assert.Equal(t, "collection-task", cfg.ID) + assert.Equal(t, enginetask.TaskTypeCollection, cfg.Type) + assert.Equal(t, "items", cfg.Items) + assert.NotNil(t, cfg.Task) + }) + + t.Run("Should create collection task with options", func(t *testing.T) { + taskTemplate := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ + ID: "item-task", + Type: enginetask.TaskTypeBasic, + }, + } + cfg, err := NewCollection(t.Context(), "collection-task", "workflow.data", + WithTask(taskTemplate), + WithMode(enginetask.CollectionModeSequential), + WithBatch(10), + ) + require.NoError(t, err) + assert.Equal(t, "workflow.data", cfg.Items) + assert.Equal(t, enginetask.CollectionModeSequential, cfg.Mode) + assert.Equal(t, 10, cfg.Batch) + }) + + t.Run("Should return error for empty items", func(t *testing.T) { + taskTemplate := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ID: "item-task", Type: enginetask.TaskTypeBasic}, + } + _, err := NewCollection(t.Context(), "collection-task", "", + WithTask(taskTemplate), + ) + require.Error(t, err) + 
assert.Contains(t, err.Error(), "items") + }) + + t.Run("Should return error for nil task template", func(t *testing.T) { + _, err := NewCollection(t.Context(), "collection-task", "items") + require.Error(t, err) + assert.Contains(t, err.Error(), "task template") + }) + + t.Run("Should return error for task template with empty ID", func(t *testing.T) { + taskTemplate := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ID: "", Type: enginetask.TaskTypeBasic}, + } + _, err := NewCollection(t.Context(), "collection-task", "items", + WithTask(taskTemplate), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "task template must have an id") + }) +} + +func TestNewWait(t *testing.T) { + t.Run("Should create minimal wait task configuration", func(t *testing.T) { + cfg, err := NewWait(t.Context(), "wait-task", "user-approval") + require.NoError(t, err) + assert.Equal(t, "wait-task", cfg.ID) + assert.Equal(t, enginetask.TaskTypeWait, cfg.Type) + assert.Equal(t, "user-approval", cfg.WaitFor) + }) + + t.Run("Should create wait task with timeout", func(t *testing.T) { + cfg, err := NewWait(t.Context(), "wait-task", "payment-complete", + WithTimeout("30s"), + ) + require.NoError(t, err) + assert.Equal(t, "payment-complete", cfg.WaitFor) + assert.Equal(t, "30s", cfg.Timeout) + }) + + t.Run("Should return error for empty wait_for", func(t *testing.T) { + _, err := NewWait(t.Context(), "wait-task", "") + require.Error(t, err) + assert.Contains(t, err.Error(), "wait_for") + }) +} + +func TestNewSignal(t *testing.T) { + t.Run("Should create minimal signal task configuration", func(t *testing.T) { + cfg, err := NewSignal(t.Context(), "signal-task", "process-complete") + require.NoError(t, err) + assert.Equal(t, "signal-task", cfg.ID) + assert.Equal(t, enginetask.TaskTypeSignal, cfg.Type) + assert.NotNil(t, cfg.Signal) + assert.Equal(t, "process-complete", cfg.Signal.ID) + }) + + t.Run("Should create signal task with payload", func(t *testing.T) { + payload := 
map[string]any{ + "status": "success", + "data": "result", + } + cfg, err := NewSignal(t.Context(), "signal-task", "task-done", + WithPayload(payload), + ) + require.NoError(t, err) + assert.Equal(t, "task-done", cfg.Signal.ID) + assert.Equal(t, payload, cfg.Payload) + }) + + t.Run("Should return error for empty signal ID", func(t *testing.T) { + _, err := NewSignal(t.Context(), "signal-task", "") + require.Error(t, err) + assert.Contains(t, err.Error(), "signal id") + }) +} + +func TestNewMemory(t *testing.T) { + t.Run("Should create minimal memory task configuration", func(t *testing.T) { + cfg, err := NewMemory(t.Context(), "memory-task", enginetask.MemoryOpRead) + require.NoError(t, err) + assert.Equal(t, "memory-task", cfg.ID) + assert.Equal(t, enginetask.TaskTypeMemory, cfg.Type) + assert.Equal(t, enginetask.MemoryOpRead, cfg.Operation) + }) + + t.Run("Should create memory task with all operations", func(t *testing.T) { + operations := []enginetask.MemoryOpType{ + enginetask.MemoryOpRead, + enginetask.MemoryOpWrite, + enginetask.MemoryOpAppend, + enginetask.MemoryOpDelete, + enginetask.MemoryOpClear, + enginetask.MemoryOpFlush, + enginetask.MemoryOpHealth, + enginetask.MemoryOpStats, + } + + for _, op := range operations { + cfg, err := NewMemory(t.Context(), "memory-task", op) + require.NoError(t, err) + assert.Equal(t, op, cfg.Operation) + } + }) + + t.Run("Should create memory task with memory reference", func(t *testing.T) { + cfg, err := NewMemory(t.Context(), "memory-task", enginetask.MemoryOpWrite, + WithMemoryRef("session-data"), + WithKeyTemplate("user-{{.user_id}}"), + ) + require.NoError(t, err) + assert.Equal(t, "session-data", cfg.MemoryRef) + assert.Equal(t, "user-{{.user_id}}", cfg.KeyTemplate) + }) + + t.Run("Should return error for invalid operation", func(t *testing.T) { + _, err := NewMemory(t.Context(), "memory-task", "invalid-op") + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid memory operation") + }) +} + +func 
TestDeepCopyBehavior(t *testing.T) { + t.Run("Should deep copy config to prevent external mutation", func(t *testing.T) { + originalTasks := []enginetask.Config{ + {BaseConfig: enginetask.BaseConfig{ID: "task-1", Type: enginetask.TaskTypeBasic}}, + {BaseConfig: enginetask.BaseConfig{ID: "task-2", Type: enginetask.TaskTypeBasic}}, + } + + cfg1, err := NewParallel(t.Context(), "parallel-1", originalTasks) + require.NoError(t, err) + + cfg1.Tasks[0].ID = "modified" + + cfg2, err := NewParallel(t.Context(), "parallel-2", originalTasks) + require.NoError(t, err) + + assert.NotEqual(t, cfg1.Tasks[0].ID, cfg2.Tasks[0].ID) + }) +} + +func TestWhitespaceTrimming(t *testing.T) { + t.Run("Should trim whitespace from IDs", func(t *testing.T) { + cfg, err := New(t.Context(), " test-task ", + WithAgent(&agent.Config{ID: "test-agent"}), + ) + require.NoError(t, err) + assert.Equal(t, "test-task", cfg.ID) + }) + + t.Run("Should trim whitespace from condition", func(t *testing.T) { + routes := map[string]any{"true": "task-a"} + cfg, err := NewRouter(t.Context(), "router-task", + WithCondition(" input.approved "), + WithRoutes(routes), + ) + require.NoError(t, err) + assert.Equal(t, "input.approved", cfg.Condition) + }) + + t.Run("Should trim whitespace from items", func(t *testing.T) { + taskTemplate := &enginetask.Config{ + BaseConfig: enginetask.BaseConfig{ID: "item-task", Type: enginetask.TaskTypeBasic}, + } + cfg, err := NewCollection(t.Context(), "collection-task", " workflow.data ", + WithTask(taskTemplate), + ) + require.NoError(t, err) + assert.Equal(t, "workflow.data", cfg.Items) + }) +} + +func TestErrorAccumulation(t *testing.T) { + t.Run("Should accumulate multiple validation errors", func(t *testing.T) { + tasks := []enginetask.Config{ + {BaseConfig: enginetask.BaseConfig{ID: "", Type: enginetask.TaskTypeBasic}}, + } + _, err := NewParallel(t.Context(), "", tasks) + require.Error(t, err) + assert.Contains(t, err.Error(), "task id is invalid") + assert.Contains(t, 
err.Error(), "at least 2 tasks") + }) +} + +func TestTransitionConfiguration(t *testing.T) { + t.Run("Should configure success and error transitions", func(t *testing.T) { + nextTask := "next-task" + errorHandler := "error-handler" + cfg, err := New(t.Context(), "test-task", + WithAgent(&agent.Config{ID: "test-agent"}), + WithOnSuccess(&core.SuccessTransition{ + Next: &nextTask, + }), + WithOnError(&core.ErrorTransition{ + Next: &errorHandler, + }), + WithRetries(3), + ) + require.NoError(t, err) + assert.NotNil(t, cfg.OnSuccess) + assert.Equal(t, "next-task", *cfg.OnSuccess.Next) + assert.NotNil(t, cfg.OnError) + assert.Equal(t, "error-handler", *cfg.OnError.Next) + assert.Equal(t, 3, cfg.Retries) + }) +} diff --git a/sdk/task/generate.go b/sdk/task/generate.go new file mode 100644 index 00000000..c8810c2e --- /dev/null +++ b/sdk/task/generate.go @@ -0,0 +1,3 @@ +package task + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/task/config.go -struct Config -output options_generated.go diff --git a/sdk/task/options_generated.go b/sdk/task/options_generated.go new file mode 100644 index 00000000..d0c87bf8 --- /dev/null +++ b/sdk/task/options_generated.go @@ -0,0 +1,558 @@ +// Code generated by optionsgen. DO NOT EDIT. + +package task + +import ( + agent "github.com/compozy/compozy/engine/agent" + attachment "github.com/compozy/compozy/engine/attachment" + core "github.com/compozy/compozy/engine/core" + schema "github.com/compozy/compozy/engine/schema" + enginetask "github.com/compozy/compozy/engine/task" + tool "github.com/compozy/compozy/engine/tool" +) + +type Option func(*enginetask.Config) + +// WithModelConfig sets the ModelConfig field +// +// LLM provider configuration defining which AI model to use and its parameters. +// Supports multiple providers including OpenAI, Anthropic, Google, Groq, and local models. 
+// **Required fields:** provider, model +// **Optional fields:** api_key, api_url, params (temperature, max_tokens, etc.) +func WithModelConfig(modelConfig core.ProviderConfig) Option { + return func(cfg *enginetask.Config) { + cfg.ModelConfig = modelConfig + } +} + +// WithAction sets the Action field +// +// Action identifier that describes what this task does +// Used for logging and debugging purposes +// - **Example**: "process-user-data", "send-notification" +func WithAction(action string) Option { + return func(cfg *enginetask.Config) { + cfg.Action = action + } +} + +// WithPrompt sets the Prompt field +// +// Prompt provides direct instruction to agents when no specific action is needed +// Used for ad-hoc agent interactions without predefined action definitions +// - **Example**: "Analyze this code for security issues", "Summarize the following text" +func WithPrompt(prompt string) Option { + return func(cfg *enginetask.Config) { + cfg.Prompt = prompt + } +} + +// WithRoutes sets the Routes field +// +// Routes maps condition values to task IDs or inline task configurations +// The condition field in BaseConfig is evaluated, and its result is used +// as the key to select the appropriate route +// Values can be: +// - Task ID (string): References an existing task +// - Inline task config (object): Defines task configuration directly +// - **Example**: +// routes: +// approved: "process-payment" # Task ID reference +// rejected: # Inline task config +// type: basic +// agent: { id: rejection-handler } +// pending: "wait-for-approval" +func WithRoutes(routes map[string]any) Option { + return func(cfg *enginetask.Config) { + cfg.Routes = routes + } +} + +// WithStrategy sets the Strategy field +// +// Strategy determines how the parallel execution handles task completion +// Defaults to "wait_all" if not specified +// Options: wait_all, fail_fast, best_effort, race +func WithStrategy(strategy enginetask.ParallelStrategy) Option { + return func(cfg 
*enginetask.Config) { + cfg.Strategy = strategy + } +} + +// WithMaxWorkers sets the MaxWorkers field +// +// MaxWorkers limits the number of concurrent task executions +// 0 means no limit (all tasks run concurrently) +// - **Example**: 5 means at most 5 tasks run at the same time +func WithMaxWorkers(maxWorkers int) Option { + return func(cfg *enginetask.Config) { + cfg.MaxWorkers = maxWorkers + } +} + +// WithItems sets the Items field +// +// Items is a template expression that evaluates to an array +// The expression should resolve to a list of items to iterate over +// - **Example**: "{{ .workflow.input.users }}" or "{{ range(1, 10) }}" +func WithItems(items string) Option { + return func(cfg *enginetask.Config) { + cfg.Items = items + } +} + +// WithFilter sets the Filter field +// +// Filter is an optional CEL expression to filter items before processing +// Each item is available as 'item' in the expression +// - **Example**: "item.status != 'inactive'" or "item.age > 18" +func WithFilter(filter string) Option { + return func(cfg *enginetask.Config) { + cfg.Filter = filter + } +} + +// WithItemVar sets the ItemVar field +// +// ItemVar is the variable name for the current item (default: "item") +// Available in task templates as {{ .item }} or custom name +// - **Example**: Set to "user" to access as {{ .user }} in templates +func WithItemVar(itemVar string) Option { + return func(cfg *enginetask.Config) { + cfg.ItemVar = itemVar + } +} + +// WithIndexVar sets the IndexVar field +// +// IndexVar is the variable name for the current index (default: "index") +// Available in task templates as {{ .index }} or custom name +// Zero-based index of the current item +func WithIndexVar(indexVar string) Option { + return func(cfg *enginetask.Config) { + cfg.IndexVar = indexVar + } +} + +// WithMode sets the Mode field +// +// Mode determines if items are processed in parallel or sequentially +// Defaults to "parallel" +// Options: parallel, sequential +func 
WithMode(mode enginetask.CollectionMode) Option { + return func(cfg *enginetask.Config) { + cfg.Mode = mode + } +} + +// WithBatch sets the Batch field +// +// Batch size for processing items in groups (0 = no batching) +// Useful for rate limiting or managing resource usage +// - **Example**: 10 means process 10 items at a time +func WithBatch(batch int) Option { + return func(cfg *enginetask.Config) { + cfg.Batch = batch + } +} + +// WithSignal sets the Signal field +// +// Signal configuration containing the signal ID and payload +func WithSignal(signal *enginetask.SignalConfig) Option { + return func(cfg *enginetask.Config) { + cfg.Signal = signal + } +} + +// WithWaitFor sets the WaitFor field +// +// WaitFor specifies the signal ID to wait for +// The task will pause until a signal with this ID is received +// Must match the ID used in a SignalTask +// - **Example**: "user-approved", "payment-completed" +func WithWaitFor(waitFor string) Option { + return func(cfg *enginetask.Config) { + cfg.WaitFor = waitFor + } +} + +// WithProcessor sets the Processor field +// +// Processor is an optional task configuration to process received signals +// Allows custom handling of signal data before continuing +// The processor receives the signal payload as input +// $ref: inline:# +func WithProcessor(processor *enginetask.Config) Option { + return func(cfg *enginetask.Config) { + cfg.Processor = processor + } +} + +// WithOnTimeout sets the OnTimeout field +// +// OnTimeout specifies the next task to execute if the wait times out +// Uses the timeout value from BaseConfig +// If not specified, the task fails on timeout +func WithOnTimeout(onTimeout string) Option { + return func(cfg *enginetask.Config) { + cfg.OnTimeout = onTimeout + } +} + +// WithOperation sets the Operation field +// +// Operation type to perform on memory +// Required field that determines the action to take +func WithOperation(operation enginetask.MemoryOpType) Option { + return func(cfg 
*enginetask.Config) { + cfg.Operation = operation + } +} + +// WithMemoryRef sets the MemoryRef field +// +// MemoryRef identifies which memory store to use +// References a memory configuration defined at the project level +// - **Example**: "user-sessions", "workflow-state", "cache" +func WithMemoryRef(memoryRef string) Option { + return func(cfg *enginetask.Config) { + cfg.MemoryRef = memoryRef + } +} + +// WithKeyTemplate sets the KeyTemplate field +// +// KeyTemplate is a template expression for the memory key +// Supports template variables for dynamic key generation +// - **Example**: "user:{{ .workflow.input.user_id }}:profile" +func WithKeyTemplate(keyTemplate string) Option { + return func(cfg *enginetask.Config) { + cfg.KeyTemplate = keyTemplate + } +} + +// WithPayload sets the Payload field +// +// Payload data for write/append operations +// Can be any JSON-serializable data structure +// Required for write and append operations +func WithPayload(payload any) Option { + return func(cfg *enginetask.Config) { + cfg.Payload = payload + } +} + +// WithBatchSize sets the BatchSize field +// +// BatchSize for operations that process multiple keys +// Controls how many keys are processed in each batch +// Default: 100, Maximum: 10,000 +func WithBatchSize(batchSize int) Option { + return func(cfg *enginetask.Config) { + cfg.BatchSize = batchSize + } +} + +// WithMaxKeys sets the MaxKeys field +// +// MaxKeys limits the number of keys processed +// Safety limit to prevent runaway operations +// Default: 1,000, Maximum: 50,000 +func WithMaxKeys(maxKeys int) Option { + return func(cfg *enginetask.Config) { + cfg.MaxKeys = maxKeys + } +} + +// WithFlushConfig sets the FlushConfig field +// +// Configuration for flush operations +// Only used when operation is "flush" +func WithFlushConfig(flushConfig *enginetask.FlushConfig) Option { + return func(cfg *enginetask.Config) { + cfg.FlushConfig = flushConfig + } +} + +// WithHealthConfig sets the HealthConfig field 
+// +// Configuration for health check operations +// Only used when operation is "health" +func WithHealthConfig(healthConfig *enginetask.HealthConfig) Option { + return func(cfg *enginetask.Config) { + cfg.HealthConfig = healthConfig + } +} + +// WithStatsConfig sets the StatsConfig field +// +// Configuration for statistics operations +// Only used when operation is "stats" +func WithStatsConfig(statsConfig *enginetask.StatsConfig) Option { + return func(cfg *enginetask.Config) { + cfg.StatsConfig = statsConfig + } +} + +// WithClearConfig sets the ClearConfig field +// +// Configuration for clear operations +// Only used when operation is "clear" +func WithClearConfig(clearConfig *enginetask.ClearConfig) Option { + return func(cfg *enginetask.Config) { + cfg.ClearConfig = clearConfig + } +} + +// WithResource sets the Resource field +// +// Resource reference for the task +// Format: "compozy:task:" (e.g., "compozy:task:process-data") +func WithResource(resource string) Option { + return func(cfg *enginetask.Config) { + cfg.Resource = resource + } +} + +// WithID sets the ID field +// +// Unique identifier for the task instance within a workflow +// Must be unique within the workflow scope +func WithID(id string) Option { + return func(cfg *enginetask.Config) { + cfg.ID = id + } +} + +// WithType sets the Type field +// +// Type of task that determines execution behavior +// If not specified, defaults to "basic" +func WithType(typeValue enginetask.Type) Option { + return func(cfg *enginetask.Config) { + cfg.Type = typeValue + } +} + +// WithConfig sets the Config field +// +// Global configuration options inherited from parent contexts +// Includes provider settings, API keys, and other global parameters +func WithConfig(config core.GlobalOpts) Option { + return func(cfg *enginetask.Config) { + cfg.Config = config + } +} + +// WithAgent sets the Agent field +// +// Agent configuration for AI-powered task execution +// Only used when the task needs to interact 
with an LLM agent +// Mutually exclusive with Tool field +// $ref: schema://agents +func WithAgent(agent *agent.Config) Option { + return func(cfg *enginetask.Config) { + cfg.Agent = agent + } +} + +// WithTool sets the Tool field +// +// Tool configuration for executing specific tool operations +// Used when the task needs to execute a predefined tool +// Mutually exclusive with Agent field +// $ref: schema://tools +func WithTool(tool *tool.Config) Option { + return func(cfg *enginetask.Config) { + cfg.Tool = tool + } +} + +// WithInputSchema sets the InputSchema field +// +// Schema definition for validating task input parameters +// Follows JSON Schema specification for type validation +// Format: +// type: object +// properties: +// user_id: { type: string, description: "User identifier" } +// required: ["user_id"] +func WithInputSchema(inputSchema *schema.Schema) Option { + return func(cfg *enginetask.Config) { + cfg.InputSchema = inputSchema + } +} + +// WithOutputSchema sets the OutputSchema field +// +// Schema definition for validating task output data +// Ensures task results conform to expected structure +// Uses same format as InputSchema +func WithOutputSchema(outputSchema *schema.Schema) Option { + return func(cfg *enginetask.Config) { + cfg.OutputSchema = outputSchema + } +} + +// WithWith sets the With field +// +// Input parameters passed to the task at execution time +// Can include references to workflow inputs, previous task outputs, etc. 
+// - **Example**: { "user_id": "{{ .workflow.input.user_id }}" } +func WithWith(with *core.Input) Option { + return func(cfg *enginetask.Config) { + cfg.With = with + } +} + +// WithOutputs sets the Outputs field +// +// Output mappings that define what data this task exposes to subsequent tasks +// Uses template expressions to transform task results +// - **Example**: { "processed_data": "{{ .task.output.result }}" } +func WithOutputs(outputs *core.Input) Option { + return func(cfg *enginetask.Config) { + cfg.Outputs = outputs + } +} + +// WithEnv sets the Env field +// +// Environment variables available during task execution +// Can override or extend workflow-level environment variables +// - **Example**: { "API_KEY": "{{ .env.SECRET_KEY }}" } +func WithEnv(env *core.EnvMap) Option { + return func(cfg *enginetask.Config) { + cfg.Env = env + } +} + +// WithKnowledge sets the Knowledge field +// +// Knowledge declares task-scoped knowledge bindings (MVP single binding). +func WithKnowledge(knowledge []core.KnowledgeBinding) Option { + return func(cfg *enginetask.Config) { + cfg.Knowledge = knowledge + } +} + +// WithOnSuccess sets the OnSuccess field +// +// Task execution control +// Defines what happens after successful task completion +// Can specify next task ID or conditional routing +func WithOnSuccess(onSuccess *core.SuccessTransition) Option { + return func(cfg *enginetask.Config) { + cfg.OnSuccess = onSuccess + } +} + +// WithOnError sets the OnError field +// +// Error handling configuration +// Defines fallback behavior when task execution fails +// Can specify error task ID or retry configuration +func WithOnError(onError *core.ErrorTransition) Option { + return func(cfg *enginetask.Config) { + cfg.OnError = onError + } +} + +// WithSleep sets the Sleep field +// +// Sleep duration after task completion +// Format: "5s", "1m", "500ms", "1h30m" +// Useful for rate limiting or giving external systems time to process +func WithSleep(sleep string) Option 
{ + return func(cfg *enginetask.Config) { + cfg.Sleep = sleep + } +} + +// WithFinal sets the Final field +// +// Marks this task as a terminal node in the workflow +// No subsequent tasks will execute after a final task +func WithFinal(final bool) Option { + return func(cfg *enginetask.Config) { + cfg.Final = final + } +} + +// WithFilePath sets the FilePath field +// +// Absolute file path where this task configuration was loaded from +// Set automatically during configuration loading +func WithFilePath(filePath string) Option { + return func(cfg *enginetask.Config) { + cfg.FilePath = filePath + } +} + +// WithTimeout sets the Timeout field +// +// Maximum execution time for parallel or composite tasks +// Format: "30s", "5m", "1h" +// Task will be canceled if it exceeds this duration +func WithTimeout(timeout string) Option { + return func(cfg *enginetask.Config) { + cfg.Timeout = timeout + } +} + +// WithRetries sets the Retries field +// +// Number of retry attempts for failed task executions +// Default: 0 (no retries) +func WithRetries(retries int) Option { + return func(cfg *enginetask.Config) { + cfg.Retries = retries + } +} + +// WithCondition sets the Condition field +// +// CEL expression for conditional task execution or routing decisions +// Task only executes if condition evaluates to true +// - **Example**: "input.status == 'approved' && input.amount > 1000" +func WithCondition(condition string) Option { + return func(cfg *enginetask.Config) { + cfg.Condition = condition + } +} + +// WithAttachments sets the Attachments field +// +// Attachments declared at the task scope are available to all nested agents/actions. 
+func WithAttachments(attachments attachment.Attachments) Option { + return func(cfg *enginetask.Config) { + cfg.Attachments = attachments + } +} + +// WithTasks sets the Tasks field +// +// Tasks array for parallel, composite, and collection tasks +// Contains the list of sub-tasks to execute +// For parallel: tasks run concurrently +// For composite: tasks run sequentially +// For collection: not used (use Task field instead) +// $ref: inline:# +func WithTasks(tasks []enginetask.Config) Option { + return func(cfg *enginetask.Config) { + cfg.Tasks = tasks + } +} + +// WithTask sets the Task field +// +// Task template for collection tasks +// This configuration is replicated for each item in the collection +// The item and index are available as template variables +// $ref: inline:# +func WithTask(task *enginetask.Config) Option { + return func(cfg *enginetask.Config) { + cfg.Task = task + } +} diff --git a/sdk/tool/README.md b/sdk/tool/README.md new file mode 100644 index 00000000..42b60a25 --- /dev/null +++ b/sdk/tool/README.md @@ -0,0 +1,262 @@ +# Package tool + +SDK package for building tool configurations using functional options pattern. + +## Overview + +The `tool` package provides a fluent API for creating tool configurations that extend AI agent capabilities with external systems, APIs, and custom business logic. 
+ +## Installation + +```go +import "github.com/compozy/compozy/sdk/tool" +``` + +## Usage + +### Basic Example + +```go +cfg, err := tool.New( + ctx, + "file-reader", + tool.WithName("File Reader"), + tool.WithDescription("Read and parse various file formats"), + tool.WithRuntime("bun"), + tool.WithCode(` + export default async function(input) { + const fs = require('fs'); + return fs.readFileSync(input.path, 'utf8'); + } + `), +) +if err != nil { + log.Fatal(err) +} +``` + +### Full Configuration + +```go +inputSchema := &schema.Schema{ + "type": "object", + "properties": map[string]any{ + "path": map[string]any{ + "type": "string", + "description": "File path to read", + }, + "format": map[string]any{ + "type": "string", + "enum": []string{"json", "yaml", "csv"}, + }, + }, + "required": []string{"path"}, +} + +outputSchema := &schema.Schema{ + "type": "object", + "properties": map[string]any{ + "content": map[string]any{ + "type": "string", + }, + }, +} + +cfg, err := tool.New( + ctx, + "api-client", + tool.WithName("API Client"), + tool.WithDescription("HTTP API client with retry logic"), + tool.WithRuntime("bun"), + tool.WithCode(toolCode), + tool.WithTimeout("30s"), + tool.WithInputSchema(inputSchema), + tool.WithOutputSchema(outputSchema), + tool.WithWith(&core.Input{ + "base_url": "https://api.example.com", + }), + tool.WithConfig(&core.Input{ + "retry_count": 3, + "timeout": 10, + }), + tool.WithEnv(&core.EnvMap{ + "API_KEY": "{{ .env.SECRET_API_KEY }}", + }), +) +``` + +## API Reference + +### Constructor + +```go +func New(ctx context.Context, id string, opts ...Option) (*engine.Config, error) +``` + +Creates a new tool configuration with the given ID and options. 
+ +**Parameters:** + +- `ctx`: Context for logging and cancellation +- `id`: Unique tool identifier (kebab-case recommended) +- `opts`: Variadic functional options + +**Returns:** + +- `*engine.Config`: Deep-copied tool configuration +- `error`: Validation errors if any + +### Options + +#### WithName(name string) Option + +Sets the human-readable name for the tool. + +#### WithDescription(description string) Option + +Sets the detailed description of tool capabilities. + +#### WithRuntime(runtime string) Option + +Sets the execution runtime (currently supports "bun"). + +#### WithCode(code string) Option + +Sets the inline source code executed by the runtime. + +#### WithTimeout(timeout string) Option + +Sets the maximum execution time (e.g., "30s", "5m", "1h"). + +#### WithInputSchema(schema \*schema.Schema) Option + +Sets the JSON schema for input validation. + +#### WithOutputSchema(schema \*schema.Schema) Option + +Sets the JSON schema for output validation. + +#### WithWith(input \*core.Input) Option + +Sets default input parameters merged with runtime parameters. + +#### WithConfig(config \*core.Input) Option + +Sets configuration parameters for tool initialization. + +#### WithEnv(env \*core.EnvMap) Option + +Sets environment variables for tool execution. + +## Migration Guide + +### Before (Old SDK) + +```go +cfg, err := tool.New("file-reader"). + WithName("File Reader"). + WithDescription("Read files"). + WithRuntime("bun"). + WithCode(code). + Build(ctx) +``` + +### After (New SDK) + +```go +cfg, err := tool.New( + ctx, + "file-reader", + tool.WithName("File Reader"), + tool.WithDescription("Read files"), + tool.WithRuntime("bun"), + tool.WithCode(code), +) +``` + +### Key Changes + +1. ✅ `ctx` moved to first parameter +2. ✅ No `.Build(ctx)` call needed +3. ✅ Options passed as variadic arguments +4. ✅ Validation happens immediately in constructor +5. 
✅ All string fields automatically trimmed + +## Examples + +### Tool with Schema Validation + +```go +schema := &schema.Schema{ + "type": "object", + "properties": map[string]any{ + "query": map[string]any{ + "type": "string", + "minLength": 1, + }, + }, + "required": []string{"query"}, +} + +cfg, err := tool.New( + ctx, + "search-tool", + tool.WithName("Search"), + tool.WithDescription("Search documents"), + tool.WithRuntime("bun"), + tool.WithCode(searchCode), + tool.WithInputSchema(schema), +) +``` + +### Tool with Timeout + +```go +cfg, err := tool.New( + ctx, + "heavy-processor", + tool.WithName("Data Processor"), + tool.WithDescription("Process large datasets"), + tool.WithRuntime("bun"), + tool.WithCode(processorCode), + tool.WithTimeout("5m"), +) +``` + +## Validation Rules + +- **ID**: Required, non-empty, must follow identifier format +- **Name**: Required, non-empty after trimming +- **Description**: Required, non-empty after trimming +- **Runtime**: Required, must be "bun" (case-insensitive) +- **Code**: Required, non-empty after trimming +- **Timeout**: Optional, must be valid Go duration format if provided +- **Timeout**: Must be positive if specified + +## Error Handling + +The constructor collects all validation errors and returns them together: + +```go +cfg, err := tool.New(ctx, "", tool.WithName("")) +if err != nil { + // err contains all validation failures: + // - "tool id is invalid" + // - "tool name cannot be empty" + fmt.Println(err.Error()) +} +``` + +## Testing + +Run tests with: + +```bash +gotestsum --format pkgname -- -race -parallel=4 ./sdk/tool +``` + +Run linting: + +```bash +golangci-lint run --fix --allow-parallel-runners ./sdk/tool/... 
+``` diff --git a/sdk/tool/constructor.go b/sdk/tool/constructor.go new file mode 100644 index 00000000..48edbbb2 --- /dev/null +++ b/sdk/tool/constructor.go @@ -0,0 +1,219 @@ +package tool + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/compozy/compozy/engine/core" + enginetool "github.com/compozy/compozy/engine/tool" + nativeuser "github.com/compozy/compozy/engine/tool/nativeuser" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +// supportedRuntimes defines the list of valid runtime environments +var supportedRuntimes = map[string]struct{}{ + "bun": {}, +} + +var nativeHandlers sync.Map // map[*enginetool.Config]nativeuser.Handler + +// New creates a tool configuration using functional options +func New(ctx context.Context, id string, opts ...Option) (*enginetool.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating tool configuration", "tool", id) + cfg := &enginetool.Config{ + Resource: string(core.ConfigTool), + ID: strings.TrimSpace(id), + } + for _, opt := range opts { + opt(cfg) + } + cfg.Runtime = strings.TrimSpace(cfg.Runtime) + cfg.Code = strings.TrimSpace(cfg.Code) + nativeHandler, handlerProvided := extractNativeHandler(cfg) + implementation, implErr := cfg.EffectiveImplementation() + collected := collectValidationErrors( + ctx, + cfg, + implementation, + implErr, + nativeHandler, + handlerProvided, + ) + filtered := make([]error, 0, len(collected)) + for _, err := range collected { + if err != nil { + filtered = append(filtered, err) + } + } + if len(filtered) > 0 { + return nil, &sdkerrors.BuildError{Errors: filtered} + } + if implementation != "" { + cfg.SetImplementation(implementation) + } + if implementation == enginetool.ImplementationNative { + if err := nativeuser.Register(cfg.ID, nativeHandler); err != nil { + 
return nil, fmt.Errorf("failed to register native handler: %w", err) + } + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone tool config: %w", err) + } + return cloned, nil +} + +func extractNativeHandler(cfg *enginetool.Config) (nativeuser.Handler, bool) { + value, provided := nativeHandlers.LoadAndDelete(cfg) + if !provided { + return nil, false + } + handler, ok := value.(nativeuser.Handler) + if !ok { + return nil, true + } + return handler, true +} + +func collectValidationErrors( + ctx context.Context, + cfg *enginetool.Config, + implementation string, + implErr error, + nativeHandler nativeuser.Handler, + handlerProvided bool, +) []error { + errors := []error{ + validateID(ctx, cfg), + validateName(ctx, cfg), + validateDescription(ctx, cfg), + } + if implErr != nil { + errors = append(errors, implErr) + } + errors = append( + errors, + validateRuntime(ctx, cfg, implementation), + validateCode(ctx, cfg, implementation), + validateNativeHandler(cfg, implementation, nativeHandler, handlerProvided), + validateTimeout(ctx, cfg), + ) + return errors +} + +func validateID(ctx context.Context, cfg *enginetool.Config) error { + cfg.ID = strings.TrimSpace(cfg.ID) + if err := validate.ID(ctx, cfg.ID); err != nil { + return fmt.Errorf("tool id is invalid: %w", err) + } + return nil +} + +func validateName(ctx context.Context, cfg *enginetool.Config) error { + cfg.Name = strings.TrimSpace(cfg.Name) + if err := validate.NonEmpty(ctx, "tool name", cfg.Name); err != nil { + return err + } + return nil +} + +func validateDescription(ctx context.Context, cfg *enginetool.Config) error { + cfg.Description = strings.TrimSpace(cfg.Description) + if err := validate.NonEmpty(ctx, "tool description", cfg.Description); err != nil { + return err + } + return nil +} + +func validateRuntime(ctx context.Context, cfg *enginetool.Config, implementation string) error { + if implementation == "" { + return nil + } + if implementation == 
enginetool.ImplementationNative { + runtime := strings.ToLower(strings.TrimSpace(cfg.Runtime)) + if runtime == "" { + runtime = enginetool.RuntimeGo + } + if runtime != enginetool.RuntimeGo { + return fmt.Errorf("native tools must use runtime %s: got %s", enginetool.RuntimeGo, cfg.Runtime) + } + cfg.Runtime = runtime + return nil + } + if err := validate.NonEmpty(ctx, "tool runtime", cfg.Runtime); err != nil { + return err + } + runtime := strings.ToLower(cfg.Runtime) + if _, ok := supportedRuntimes[runtime]; !ok { + return fmt.Errorf("tool runtime must be bun: got %s", cfg.Runtime) + } + cfg.Runtime = runtime + return nil +} + +func validateCode(ctx context.Context, cfg *enginetool.Config, implementation string) error { + if implementation == "" { + return nil + } + if implementation == enginetool.ImplementationNative { + return nil + } + if err := validate.NonEmpty(ctx, "tool code", cfg.Code); err != nil { + return err + } + return nil +} + +func validateNativeHandler( + cfg *enginetool.Config, + implementation string, + handler nativeuser.Handler, + handlerProvided bool, +) error { + if implementation == "" { + return nil + } + if implementation == enginetool.ImplementationNative { + if handler == nil { + return fmt.Errorf("native handler is required for tool %s", cfg.ID) + } + return nil + } + if handlerProvided { + return fmt.Errorf("native handler provided but implementation is %s", implementation) + } + return nil +} + +func validateTimeout(_ context.Context, cfg *enginetool.Config) error { + if cfg.Timeout == "" { + return nil + } + timeout, err := time.ParseDuration(cfg.Timeout) + if err != nil { + return fmt.Errorf("invalid timeout format '%s': %w", cfg.Timeout, err) + } + if timeout <= 0 { + return fmt.Errorf("timeout must be positive, got: %v", timeout) + } + return nil +} + +// WithNativeHandler registers a Go-native handler for the tool configuration. +// The handler is bound to the tool ID during construction and executed in-process at runtime. 
+func WithNativeHandler(handler nativeuser.Handler) Option { + return func(cfg *enginetool.Config) { + nativeHandlers.Store(cfg, handler) + cfg.SetImplementation(enginetool.ImplementationNative) + cfg.Runtime = enginetool.RuntimeGo + } +} diff --git a/sdk/tool/constructor_test.go b/sdk/tool/constructor_test.go new file mode 100644 index 00000000..5be903e9 --- /dev/null +++ b/sdk/tool/constructor_test.go @@ -0,0 +1,486 @@ +package tool + +import ( + "context" + "testing" + + "github.com/compozy/compozy/engine/core" + engineschema "github.com/compozy/compozy/engine/schema" + nativeuser "github.com/compozy/compozy/engine/tool/nativeuser" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew_MinimalConfig(t *testing.T) { + t.Run("Should create tool with minimal configuration", func(t *testing.T) { + cfg, err := New( + t.Context(), + "test-tool", + WithName("Test Tool"), + WithDescription("A test tool"), + WithRuntime("bun"), + WithCode("export default () => {}"), + ) + require.NoError(t, err) + assert.Equal(t, "test-tool", cfg.ID) + assert.Equal(t, "Test Tool", cfg.Name) + assert.Equal(t, "A test tool", cfg.Description) + assert.Equal(t, "bun", cfg.Runtime) + assert.Equal(t, "export default () => {}", cfg.Code) + assert.Equal(t, string(core.ConfigTool), cfg.Resource) + }) +} + +func TestNew_NativeHandler(t *testing.T) { + nativeuser.Reset() + t.Run("Should register native handler and normalize config", func(t *testing.T) { + handler := func(_ context.Context, _ map[string]any, _ map[string]any) (map[string]any, error) { + return map[string]any{"ok": true}, nil + } + cfg, err := New( + t.Context(), + "native-tool", + WithName("Native Tool"), + WithDescription("Executes in-process"), + WithNativeHandler(handler), + ) + require.NoError(t, err) + assert.Equal(t, "native", cfg.Implementation) + assert.Equal(t, "go", cfg.Runtime) + definition, ok := nativeuser.Lookup("native-tool") + require.True(t, ok) + out, callErr := 
definition.Handler(t.Context(), map[string]any{}, map[string]any{}) + require.NoError(t, callErr) + assert.Equal(t, map[string]any{"ok": true}, out) + }) + + t.Run("Should require native handler when runtime is go", func(t *testing.T) { + nativeuser.Reset() + _, err := New( + t.Context(), + "native-tool-missing", + WithName("Missing Handler"), + WithDescription("Fails"), + WithRuntime("go"), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "native handler") + }) + + t.Run("Should fail when native handler is nil", func(t *testing.T) { + nativeuser.Reset() + _, err := New( + t.Context(), + "native-tool-nil", + WithName("Nil Handler"), + WithDescription("Fails"), + WithNativeHandler(nil), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "native handler") + }) +} + +func TestNew_FullConfig(t *testing.T) { + t.Run("Should create tool with all options", func(t *testing.T) { + inputSchema := &engineschema.Schema{ + "type": "object", + "properties": map[string]any{ + "path": map[string]any{"type": "string"}, + }, + } + outputSchema := &engineschema.Schema{ + "type": "object", + "properties": map[string]any{ + "content": map[string]any{"type": "string"}, + }, + } + withInput := &core.Input{"default_value": "test"} + configInput := &core.Input{"api_url": "https://api.example.com"} + envMap := &core.EnvMap{"API_KEY": "secret"} + cfg, err := New( + t.Context(), + "full-tool", + WithName("Full Tool"), + WithDescription("A fully configured tool"), + WithRuntime("bun"), + WithCode("export default (input) => input"), + WithTimeout("30s"), + WithInputSchema(inputSchema), + WithOutputSchema(outputSchema), + WithWith(withInput), + WithConfig(configInput), + WithEnv(envMap), + ) + require.NoError(t, err) + assert.Equal(t, "full-tool", cfg.ID) + assert.Equal(t, "Full Tool", cfg.Name) + assert.Equal(t, "A fully configured tool", cfg.Description) + assert.Equal(t, "bun", cfg.Runtime) + assert.Equal(t, "export default (input) => input", cfg.Code) + 
assert.Equal(t, "30s", cfg.Timeout) + assert.NotNil(t, cfg.InputSchema) + assert.NotNil(t, cfg.OutputSchema) + assert.NotNil(t, cfg.With) + assert.NotNil(t, cfg.Config) + assert.NotNil(t, cfg.Env) + }) +} + +func TestNew_ValidationErrors(t *testing.T) { + tests := []struct { + name string + id string + opts []Option + wantErr string + }{ + { + name: "Should fail with empty ID", + id: "", + opts: []Option{}, + wantErr: "id is invalid", + }, + { + name: "Should fail with invalid ID", + id: "invalid id with spaces", + opts: []Option{}, + wantErr: "id is invalid", + }, + { + name: "Should fail with empty name", + id: "test-tool", + opts: []Option{ + WithName(""), + }, + wantErr: "tool name", + }, + { + name: "Should fail with whitespace-only name", + id: "test-tool", + opts: []Option{ + WithName(" "), + }, + wantErr: "tool name", + }, + { + name: "Should fail with empty description", + id: "test-tool", + opts: []Option{ + WithName("Tool"), + WithDescription(""), + }, + wantErr: "tool description", + }, + { + name: "Should fail with whitespace-only description", + id: "test-tool", + opts: []Option{ + WithName("Tool"), + WithDescription(" "), + }, + wantErr: "tool description", + }, + { + name: "Should fail with empty runtime", + id: "test-tool", + opts: []Option{ + WithName("Tool"), + WithDescription("A tool"), + WithRuntime(""), + }, + wantErr: "tool runtime", + }, + { + name: "Should fail with invalid runtime", + id: "test-tool", + opts: []Option{ + WithName("Tool"), + WithDescription("A tool"), + WithRuntime("python"), + }, + wantErr: "runtime must be bun", + }, + { + name: "Should fail with empty code", + id: "test-tool", + opts: []Option{ + WithName("Tool"), + WithDescription("A tool"), + WithRuntime("bun"), + WithCode(""), + }, + wantErr: "tool code", + }, + { + name: "Should fail with whitespace-only code", + id: "test-tool", + opts: []Option{ + WithName("Tool"), + WithDescription("A tool"), + WithRuntime("bun"), + WithCode(" "), + }, + wantErr: "tool code", + }, 
+ { + name: "Should fail with invalid timeout format", + id: "test-tool", + opts: []Option{ + WithName("Tool"), + WithDescription("A tool"), + WithRuntime("bun"), + WithCode("export default () => {}"), + WithTimeout("invalid"), + }, + wantErr: "invalid timeout format", + }, + { + name: "Should fail with negative timeout", + id: "test-tool", + opts: []Option{ + WithName("Tool"), + WithDescription("A tool"), + WithRuntime("bun"), + WithCode("export default () => {}"), + WithTimeout("-5s"), + }, + wantErr: "timeout must be positive", + }, + { + name: "Should fail with zero timeout", + id: "test-tool", + opts: []Option{ + WithName("Tool"), + WithDescription("A tool"), + WithRuntime("bun"), + WithCode("export default () => {}"), + WithTimeout("0s"), + }, + wantErr: "timeout must be positive", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := New(t.Context(), tt.id, tt.opts...) + require.Error(t, err) + assert.Contains(t, err.Error(), tt.wantErr) + }) + } +} + +func TestNew_NilContext(t *testing.T) { + t.Run("Should fail with nil context", func(t *testing.T) { + var nilCtx context.Context + _, err := New(nilCtx, "test-tool") + require.Error(t, err) + assert.Contains(t, err.Error(), "context is required") + }) +} + +func TestNew_DeepCopy(t *testing.T) { + t.Run("Should return deep copied configuration", func(t *testing.T) { + envMap := &core.EnvMap{"KEY": "value"} + cfg1, err := New( + t.Context(), + "test-tool", + WithName("Tool"), + WithDescription("A tool"), + WithRuntime("bun"), + WithCode("export default () => {}"), + WithEnv(envMap), + ) + require.NoError(t, err) + (*cfg1.Env)["KEY"] = "modified" + cfg2, err := New( + t.Context(), + "test-tool", + WithName("Tool"), + WithDescription("A tool"), + WithRuntime("bun"), + WithCode("export default () => {}"), + WithEnv(envMap), + ) + require.NoError(t, err) + assert.NotEqual(t, (*cfg1.Env)["KEY"], (*cfg2.Env)["KEY"]) + assert.Equal(t, "value", (*cfg2.Env)["KEY"]) + }) +} + +func 
TestNew_WhitespaceTrimming(t *testing.T) { + t.Run("Should trim whitespace from all string fields", func(t *testing.T) { + cfg, err := New( + t.Context(), + " test-tool ", + WithName(" Test Tool "), + WithDescription(" A test tool "), + WithRuntime(" bun "), + WithCode(" export default () => {} "), + ) + require.NoError(t, err) + assert.Equal(t, "test-tool", cfg.ID) + assert.Equal(t, "Test Tool", cfg.Name) + assert.Equal(t, "A test tool", cfg.Description) + assert.Equal(t, "bun", cfg.Runtime) + assert.Equal(t, "export default () => {}", cfg.Code) + }) +} + +func TestNew_RuntimeCaseInsensitive(t *testing.T) { + tests := []struct { + name string + runtime string + want string + }{ + { + name: "Should normalize uppercase runtime", + runtime: "BUN", + want: "bun", + }, + { + name: "Should normalize mixed case runtime", + runtime: "Bun", + want: "bun", + }, + { + name: "Should keep lowercase runtime", + runtime: "bun", + want: "bun", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg, err := New( + t.Context(), + "test-tool", + WithName("Tool"), + WithDescription("A tool"), + WithRuntime(tt.runtime), + WithCode("export default () => {}"), + ) + require.NoError(t, err) + assert.Equal(t, tt.want, cfg.Runtime) + }) + } +} + +func TestNew_TimeoutParsing(t *testing.T) { + tests := []struct { + name string + timeout string + wantErr bool + }{ + { + name: "Should accept seconds", + timeout: "30s", + wantErr: false, + }, + { + name: "Should accept minutes", + timeout: "5m", + wantErr: false, + }, + { + name: "Should accept hours", + timeout: "1h", + wantErr: false, + }, + { + name: "Should accept milliseconds", + timeout: "500ms", + wantErr: false, + }, + { + name: "Should accept combined durations", + timeout: "1h30m", + wantErr: false, + }, + { + name: "Should accept no timeout", + timeout: "", + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg, err := New( + t.Context(), + "test-tool", + 
WithName("Tool"), + WithDescription("A tool"), + WithRuntime("bun"), + WithCode("export default () => {}"), + WithTimeout(tt.timeout), + ) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tt.timeout, cfg.Timeout) + } + }) + } +} + +func TestNew_SchemaValidation(t *testing.T) { + t.Run("Should accept valid input schema", func(t *testing.T) { + schema := &engineschema.Schema{ + "type": "object", + "properties": map[string]any{ + "name": map[string]any{"type": "string"}, + }, + "required": []string{"name"}, + } + cfg, err := New( + t.Context(), + "test-tool", + WithName("Tool"), + WithDescription("A tool"), + WithRuntime("bun"), + WithCode("export default () => {}"), + WithInputSchema(schema), + ) + require.NoError(t, err) + assert.NotNil(t, cfg.InputSchema) + assert.Equal(t, "object", (*cfg.InputSchema)["type"]) + }) + t.Run("Should accept valid output schema", func(t *testing.T) { + schema := &engineschema.Schema{ + "type": "object", + "properties": map[string]any{ + "result": map[string]any{"type": "string"}, + }, + } + cfg, err := New( + t.Context(), + "test-tool", + WithName("Tool"), + WithDescription("A tool"), + WithRuntime("bun"), + WithCode("export default () => {}"), + WithOutputSchema(schema), + ) + require.NoError(t, err) + assert.NotNil(t, cfg.OutputSchema) + assert.Equal(t, "object", (*cfg.OutputSchema)["type"]) + }) +} + +func TestNew_MultipleErrors(t *testing.T) { + t.Run("Should collect all validation errors", func(t *testing.T) { + _, err := New( + t.Context(), + "", + WithName(""), + WithDescription(""), + WithRuntime(""), + WithCode(""), + ) + require.Error(t, err) + errStr := err.Error() + assert.Contains(t, errStr, "id is invalid") + assert.Contains(t, errStr, "tool name") + assert.Contains(t, errStr, "tool description") + assert.Contains(t, errStr, "tool runtime") + assert.Contains(t, errStr, "tool code") + }) +} diff --git a/sdk/tool/generate.go b/sdk/tool/generate.go new file mode 100644 index 
00000000..cd43117d --- /dev/null +++ b/sdk/tool/generate.go @@ -0,0 +1,3 @@ +package tool + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/tool/config.go -struct Config -output options_generated.go diff --git a/sdk/tool/options_generated.go b/sdk/tool/options_generated.go new file mode 100644 index 00000000..eb813978 --- /dev/null +++ b/sdk/tool/options_generated.go @@ -0,0 +1,190 @@ +// Code generated by optionsgen. DO NOT EDIT. + +package tool + +import ( + core "github.com/compozy/compozy/engine/core" + schema "github.com/compozy/compozy/engine/schema" + tool "github.com/compozy/compozy/engine/tool" +) + +type Option func(*tool.Config) + +// WithResource sets the Resource field +// +// Resource identifier for the autoloader system (must be `"tool"`). +// This field enables automatic discovery and registration of tool configurations. +func WithResource(resource string) Option { + return func(cfg *tool.Config) { + cfg.Resource = resource + } +} + +// WithID sets the ID field +// +// Unique identifier for the tool within the project scope. +// Used for referencing the tool in agent configurations, workflows, and function calls. +// Must be unique across all tools in the project. +// - **Examples:** `"file-reader"`, `"api-client"`, `"data-processor"` +// - **Naming:** Use kebab-case for consistency with other Compozy identifiers +func WithID(id string) Option { + return func(cfg *tool.Config) { + cfg.ID = id + } +} + +// WithName sets the Name field +// +// Name provides a concise, human-readable label for the tool shown in UIs and logs. +// Unlike the identifier, the name may include spaces and capitalization to improve readability. +// When omitted, UIs should fall back to using the identifier. +func WithName(name string) Option { + return func(cfg *tool.Config) { + cfg.Name = name + } +} + +// WithDescription sets the Description field +// +// Human-readable description of the tool's functionality and purpose. 
+// This description is used by AI agents to understand when and how to use the tool. +// Should clearly explain capabilities, limitations, and expected use cases. +// - **Best practices:** Be specific about what the tool does and its constraints +// - **Example:** `"Read and parse various file formats including JSON, YAML, and CSV with size limits"` +func WithDescription(description string) Option { + return func(cfg *tool.Config) { + cfg.Description = description + } +} + +// WithRuntime sets the Runtime field +// +// Runtime selects the execution environment for custom tool implementations. +// Supported runtimes include `"bun"`, `"node"`, and `"deno"` for JavaScript/TypeScript execution. +// When empty, the project runtime defaults are applied. +func WithRuntime(runtime string) Option { + return func(cfg *tool.Config) { + cfg.Runtime = runtime + } +} + +// WithImplementation sets the Implementation field +// +// Implementation defines how the tool executes within the Compozy engine. +// Supported values are: +// - `"runtime"`: executes via an external runtime such as Bun (default) +// - `"native"`: executes via an in-process Go handler registered at runtime +// When unset, the implementation defaults to `"runtime"` unless the runtime is explicitly `"go"`. +func WithImplementation(implementation string) Option { + return func(cfg *tool.Config) { + cfg.Implementation = implementation + } +} + +// WithCode sets the Code field +// +// Code contains inline source executed by the selected runtime when the tool runs. +// Builders may supply either inline JavaScript/TypeScript code or references resolved at runtime. +func WithCode(code string) Option { + return func(cfg *tool.Config) { + cfg.Code = code + } +} + +// WithTimeout sets the Timeout field +// +// Maximum execution time for the tool in Go duration format. +// If not specified, uses the global tool timeout from project configuration. +// This timeout applies to the entire tool execution lifecycle. 
+// - **Examples:** `"30s"`, `"5m"`, `"1h"`, `"500ms"` +// - **Constraints:** Must be positive; zero or negative values cause validation errors +// - **Default fallback:** Uses project-level tool timeout when empty +func WithTimeout(timeout string) Option { + return func(cfg *tool.Config) { + cfg.Timeout = timeout + } +} + +// WithInputSchema sets the InputSchema field +// +// JSON schema defining the expected input parameters for the tool. +// Used for validation before execution and to generate LLM function call definitions. +// Must follow JSON Schema Draft 7 specification for compatibility. +// - **When nil:** Tool accepts any input format (no validation performed) +// - **Use cases:** Parameter validation, type safety, auto-generated documentation +// - **Integration:** Automatically converts to LLM function parameters +func WithInputSchema(inputSchema *schema.Schema) Option { + return func(cfg *tool.Config) { + cfg.InputSchema = inputSchema + } +} + +// WithOutputSchema sets the OutputSchema field +// +// JSON schema defining the expected output format from the tool. +// Used for validation after execution and documentation purposes. +// Must follow JSON Schema Draft 7 specification for compatibility. +// - **When nil:** No output validation is performed +// - **Use cases:** Response validation, type safety, workflow data flow verification +// - **Best practice:** Define output schema for tools used in critical workflows +func WithOutputSchema(outputSchema *schema.Schema) Option { + return func(cfg *tool.Config) { + cfg.OutputSchema = outputSchema + } +} + +// WithWith sets the With field +// +// Default input parameters merged with runtime parameters provided by agents. +// Provides a way to set tool defaults while allowing runtime customization. 
+// - **Merge strategy:** Runtime parameters override defaults (shallow merge) +// - **Use cases:** Default API URLs, fallback configurations, preset options +// - **Security note:** Avoid storing secrets here; use environment variables instead +func WithWith(with *core.Input) Option { + return func(cfg *tool.Config) { + cfg.With = with + } +} + +// WithConfig sets the Config field +// +// Configuration parameters passed to the tool separately from input data. +// Provides static configuration that tools can use for initialization and behavior control. +// Unlike input parameters, config is not meant to change between tool invocations. +// - **Use cases:** API base URLs, retry policies, timeout settings, feature flags +// - **Separation:** Keeps configuration separate from runtime input data +// - **Override:** Can be overridden at workflow or agent level +// - **Example:** +// ```yaml +// config: +// base_url: "https://api.example.com" +// timeout: 30 +// retry_count: 3 +// headers: +// User-Agent: "Compozy/1.0" +// ``` +func WithConfig(config *core.Input) Option { + return func(cfg *tool.Config) { + cfg.Config = config + } +} + +// WithEnv sets the Env field +// +// Environment variables available during tool execution. +// Variables are isolated to the tool's execution context for security. +// Used for configuration, API keys, and runtime settings. 
+// - **Security:** Variables are only accessible within the tool's execution +// - **Template support:** Values can use template expressions for dynamic configuration +// - **Example:** +// ```yaml +// env: +// API_KEY: "{{ .env.SECRET_API_KEY }}" +// BASE_URL: "https://api.example.com" +// DEBUG: "{{ .project.debug | default(false) }}" +// ``` +func WithEnv(env *core.EnvMap) Option { + return func(cfg *tool.Config) { + cfg.Env = env + } +} diff --git a/sdk/workflow/README.md b/sdk/workflow/README.md new file mode 100644 index 00000000..ff22f8e3 --- /dev/null +++ b/sdk/workflow/README.md @@ -0,0 +1,202 @@ +# Package workflow + +SDK for creating workflow configurations using auto-generated functional options. + +## Installation + +```go +import "github.com/compozy/compozy/sdk/v2/workflow" +``` + +## Usage + +### Basic Example + +```go +package main + +import ( + "context" + "github.com/compozy/compozy/engine/task" + "github.com/compozy/compozy/sdk/v2/workflow" +) + +func main() { + cfg, err := workflow.New(context.Background(), "simple-workflow", + workflow.WithDescription("A simple workflow"), + workflow.WithTasks([]task.Config{ + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + }), + ) + if err != nil { + panic(err) + } + // Use cfg... 
+} +``` + +### Full Configuration + +```go +package main + +import ( + "context" + "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/engine/schema" + "github.com/compozy/compozy/engine/task" + "github.com/compozy/compozy/engine/tool" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/sdk/v2/workflow" +) + +func main() { + author := &core.Author{ + Name: "Your Name", + Email: "your@email.com", + } + + inputSchema := &schema.Schema{ + "type": "object", + "properties": map[string]any{ + "message": map[string]any{ + "type": "string", + }, + }, + } + + outputs := &core.Output{ + "result": "{{ .tasks.process.output }}", + } + + schedule := &engineworkflow.Schedule{ + Cron: "0 * * * *", // Every hour + } + + cfg, err := workflow.New(context.Background(), "advanced-workflow", + workflow.WithVersion("1.0.0"), + workflow.WithDescription("An advanced workflow with all features"), + workflow.WithOpts(engineworkflow.Opts{InputSchema: inputSchema}), + workflow.WithAuthor(author), + workflow.WithTools([]tool.Config{ + { + ID: "data-processor", + Name: "Data Processor", + Description: "Processes data", + Runtime: "bun", + Code: "export default () => { return 'processed'; }", + }, + }), + workflow.WithAgents([]agent.Config{ + { + ID: "assistant", + Instructions: "You are a helpful assistant", + }, + }), + workflow.WithTasks([]task.Config{ + {BaseConfig: task.BaseConfig{ID: "process", Type: task.TaskTypeBasic}}, + {BaseConfig: task.BaseConfig{ID: "finalize", Type: task.TaskTypeBasic}}, + }), + workflow.WithOutputs(outputs), + workflow.WithSchedule(schedule), + ) + if err != nil { + panic(err) + } + // Use cfg... +} +``` + +## API Reference + +### Constructor + +```go +func New(ctx context.Context, id string, opts ...Option) (*engineworkflow.Config, error) +``` + +Creates a new workflow configuration with the given ID and optional configuration. 
+ +**Parameters:** + +- `ctx`: Context for logging and cancellation +- `id`: Unique identifier for the workflow (required, non-empty) +- `opts`: Variadic functional options + +**Returns:** + +- `*engineworkflow.Config`: Deep copied workflow configuration +- `error`: Validation errors (may be `*sdkerrors.BuildError` with multiple errors) + +### Options + +All options are generated from `engine/workflow/config.go`: + +- `WithResource(resource string)` - Resource identifier +- `WithVersion(version string)` - Workflow version +- `WithDescription(description string)` - Human-readable description +- `WithSchemas(schemas []schema.Schema)` - JSON schemas for validation +- `WithOpts(opts Opts)` - Configuration options (input schema, env vars) +- `WithAuthor(author *core.Author)` - Author information +- `WithTools(tools []tool.Config)` - External tools +- `WithAgents(agents []agent.Config)` - AI agents +- `WithKnowledgeBases(knowledgeBases []knowledge.BaseConfig)` - Knowledge bases +- `WithKnowledge(knowledge []core.KnowledgeBinding)` - Knowledge bindings +- `WithMCPs(mcps []mcp.Config)` - MCP servers +- `WithTriggers(triggers []Trigger)` - Event triggers +- `WithTasks(tasks []task.Config)` - Sequential tasks (required) +- `WithOutputs(outputs *core.Output)` - Output mappings +- `WithSchedule(schedule *Schedule)` - Schedule configuration +- `WithCWD(cwd *core.PathCWD)` - Current working directory + +## Migration Guide + +### Before (Old SDK) + +```go +cfg, err := workflow.New("my-workflow"). + WithDescription("My workflow"). + AddTask(&task.Config{ + BaseConfig: task.BaseConfig{ID: "task1"}, + }). + Build(ctx) +``` + +### After (New SDK) + +```go +cfg, err := workflow.New(ctx, "my-workflow", + workflow.WithDescription("My workflow"), + workflow.WithTasks([]task.Config{ + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + }), +) +``` + +### Key Changes + +1. **Context First**: `ctx` is now the first parameter +2. 
**No Build()**: Configuration is created immediately +3. **Collection Methods**: Use plural names with slices (e.g., `WithTasks` instead of `AddTask`) +4. **Options as Arguments**: Pass options as variadic arguments +5. **Task Structure**: Tasks require proper `BaseConfig` with `ID` and `Type` fields + +## Validation + +The constructor performs comprehensive validation: + +- **ID Validation**: Must be non-empty and valid format +- **Task Validation**: At least one task required +- **Duplicate Detection**: Task IDs must be unique +- **Error Collection**: All validation errors collected and returned together + +## Examples + +See `sdk/cmd/` directory for complete workflow examples. + +## Testing + +```bash +gotestsum --format pkgname -- -race -parallel=4 ./sdk/v2/workflow +``` diff --git a/sdk/workflow/constructor.go b/sdk/workflow/constructor.go new file mode 100644 index 00000000..7d0afe65 --- /dev/null +++ b/sdk/workflow/constructor.go @@ -0,0 +1,106 @@ +package workflow + +import ( + "context" + "fmt" + "strings" + + "github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/engine/task" + engineworkflow "github.com/compozy/compozy/engine/workflow" + "github.com/compozy/compozy/pkg/logger" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/internal/validate" +) + +// New creates a workflow configuration using functional options +func New(ctx context.Context, id string, opts ...Option) (*engineworkflow.Config, error) { + if ctx == nil { + return nil, fmt.Errorf("context is required") + } + log := logger.FromContext(ctx) + log.Debug("creating workflow configuration", "workflow", id) + cfg := &engineworkflow.Config{ + ID: strings.TrimSpace(id), + Agents: make([]agent.Config, 0), + Tasks: make([]task.Config, 0), + } + for _, opt := range opts { + opt(cfg) + } + collected := make([]error, 0) + if err := validateID(ctx, cfg); err != nil { + collected = 
append(collected, err) + } + if err := validateTasks(ctx, cfg); err != nil { + collected = append(collected, err) + } + if err := validateTaskDuplicates(cfg); err != nil { + collected = append(collected, err) + } + filtered := make([]error, 0, len(collected)) + for _, err := range collected { + if err != nil { + filtered = append(filtered, err) + } + } + if len(filtered) > 0 { + return nil, &sdkerrors.BuildError{Errors: filtered} + } + cloned, err := core.DeepCopy(cfg) + if err != nil { + return nil, fmt.Errorf("failed to clone workflow config: %w", err) + } + return cloned, nil +} + +func validateID(ctx context.Context, cfg *engineworkflow.Config) error { + cfg.ID = strings.TrimSpace(cfg.ID) + if err := validate.ID(ctx, cfg.ID); err != nil { + return fmt.Errorf("workflow id is invalid: %w", err) + } + return nil +} + +func validateTasks(_ context.Context, cfg *engineworkflow.Config) error { + if len(cfg.Tasks) == 0 { + return fmt.Errorf("at least one task must be registered") + } + return nil +} + +func validateTaskDuplicates(cfg *engineworkflow.Config) error { + if len(cfg.Tasks) == 0 { + return nil + } + seen := make(map[string]bool, len(cfg.Tasks)) + dupes := make([]string, 0) + for i := range cfg.Tasks { + taskCfg := &cfg.Tasks[i] + id := strings.TrimSpace(taskCfg.ID) + if id == "" { + continue + } + if seen[id] { + if !containsString(dupes, id) { + dupes = append(dupes, id) + } + continue + } + seen[id] = true + } + if len(dupes) > 0 { + return fmt.Errorf("duplicate task ids found: %s", strings.Join(dupes, ", ")) + } + return nil +} + +func containsString(values []string, target string) bool { + for _, value := range values { + if value == target { + return true + } + } + return false +} diff --git a/sdk/workflow/constructor_test.go b/sdk/workflow/constructor_test.go new file mode 100644 index 00000000..58cebad5 --- /dev/null +++ b/sdk/workflow/constructor_test.go @@ -0,0 +1,203 @@ +package workflow_test + +import ( + "context" + "testing" + + 
"github.com/compozy/compozy/engine/agent" + "github.com/compozy/compozy/engine/core" + "github.com/compozy/compozy/engine/schema" + "github.com/compozy/compozy/engine/task" + "github.com/compozy/compozy/engine/tool" + engineworkflow "github.com/compozy/compozy/engine/workflow" + sdkerrors "github.com/compozy/compozy/sdk/v2/internal/errors" + "github.com/compozy/compozy/sdk/v2/workflow" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew_MinimalConfig(t *testing.T) { + t.Run("Should create workflow with single task", func(t *testing.T) { + cfg, err := workflow.New(t.Context(), "test-workflow", + workflow.WithTasks([]task.Config{ + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + }), + ) + require.NoError(t, err) + assert.Equal(t, "test-workflow", cfg.ID) + assert.Len(t, cfg.Tasks, 1) + assert.Equal(t, "task1", cfg.Tasks[0].ID) + }) +} + +func TestNew_FullConfig(t *testing.T) { + t.Run("Should create workflow with all options", func(t *testing.T) { + author := &core.Author{Name: "Test Author", Email: "test@example.com"} + inputSchema := &schema.Schema{"type": "object"} + outputs := &core.Output{"result": "{{ .tasks.task1.output }}"} + schedule := &engineworkflow.Schedule{Cron: "0 0 * * *"} + cfg, err := workflow.New(t.Context(), "full-workflow", + workflow.WithVersion("1.0.0"), + workflow.WithDescription("A full workflow configuration"), + workflow.WithOpts(engineworkflow.Opts{InputSchema: inputSchema}), + workflow.WithAuthor(author), + workflow.WithTools([]tool.Config{ + { + ID: "tool1", + Name: "Test Tool", + Description: "A test tool", + Runtime: "bun", + Code: "console.log('test')", + }, + }), + workflow.WithAgents([]agent.Config{ + {ID: "agent1", Instructions: "Test instructions"}, + }), + workflow.WithTasks([]task.Config{ + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + {BaseConfig: task.BaseConfig{ID: "task2", Type: task.TaskTypeBasic}}, + }), + 
workflow.WithOutputs(outputs), + workflow.WithSchedule(schedule), + ) + require.NoError(t, err) + assert.Equal(t, "full-workflow", cfg.ID) + assert.Equal(t, "1.0.0", cfg.Version) + assert.Equal(t, "A full workflow configuration", cfg.Description) + assert.Equal(t, inputSchema, cfg.Opts.InputSchema) + assert.Equal(t, author, cfg.Author) + assert.Len(t, cfg.Tools, 1) + assert.Equal(t, "tool1", cfg.Tools[0].ID) + assert.Len(t, cfg.Agents, 1) + assert.Equal(t, "agent1", cfg.Agents[0].ID) + assert.Len(t, cfg.Tasks, 2) + assert.Equal(t, outputs, cfg.Outputs) + assert.Equal(t, schedule, cfg.Schedule) + }) +} + +func TestNew_ValidationErrors(t *testing.T) { + tests := []struct { + name string + id string + opts []workflow.Option + wantErr string + }{ + { + name: "empty id", + id: "", + opts: []workflow.Option{workflow.WithTasks([]task.Config{ + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + })}, + wantErr: "workflow id is invalid", + }, + { + name: "no tasks", + id: "test-workflow", + opts: []workflow.Option{}, + wantErr: "at least one task must be registered", + }, + { + name: "duplicate task ids", + id: "test-workflow", + opts: []workflow.Option{ + workflow.WithTasks([]task.Config{ + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + }), + }, + wantErr: "duplicate task ids found: task1", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := workflow.New(t.Context(), tt.id, tt.opts...) 
+ require.Error(t, err) + assert.Contains(t, err.Error(), tt.wantErr) + }) + } +} + +func TestNew_NilContext(t *testing.T) { + t.Run("Should return error for nil context", func(t *testing.T) { + var nilCtx context.Context + _, err := workflow.New(nilCtx, "test-workflow") + require.Error(t, err) + assert.Contains(t, err.Error(), "context is required") + }) +} + +func TestNew_DeepCopy(t *testing.T) { + t.Run("Should return independent copy of configuration", func(t *testing.T) { + tasks := []task.Config{ + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + } + cfg1, err := workflow.New(t.Context(), "test-workflow", workflow.WithTasks(tasks)) + require.NoError(t, err) + cfg1.Tasks[0].ID = "modified" + cfg2, err := workflow.New(t.Context(), "test-workflow", workflow.WithTasks(tasks)) + require.NoError(t, err) + assert.NotEqual(t, cfg1.Tasks[0].ID, cfg2.Tasks[0].ID) + assert.Equal(t, "task1", cfg2.Tasks[0].ID) + }) +} + +func TestNew_MultipleErrors(t *testing.T) { + t.Run("Should collect all validation errors", func(t *testing.T) { + _, err := workflow.New(t.Context(), "", workflow.WithTasks([]task.Config{ + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + })) + require.Error(t, err) + var buildErr *sdkerrors.BuildError + require.ErrorAs(t, err, &buildErr) + assert.Len(t, buildErr.Errors, 2) + }) +} + +func TestNew_TaskCollectionHandling(t *testing.T) { + t.Run("Should handle multiple tasks correctly", func(t *testing.T) { + tasks := []task.Config{ + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + {BaseConfig: task.BaseConfig{ID: "task2", Type: task.TaskTypeParallel}}, + {BaseConfig: task.BaseConfig{ID: "task3", Type: task.TaskTypeWait}}, + } + cfg, err := workflow.New(t.Context(), "multi-task", workflow.WithTasks(tasks)) + require.NoError(t, err) + assert.Len(t, cfg.Tasks, 3) + assert.Equal(t, "task1", cfg.Tasks[0].ID) + 
assert.Equal(t, "task2", cfg.Tasks[1].ID) + assert.Equal(t, "task3", cfg.Tasks[2].ID) + }) +} + +func TestNew_AgentCollection(t *testing.T) { + t.Run("Should handle multiple agents correctly", func(t *testing.T) { + agents := []agent.Config{ + {ID: "agent1", Instructions: "First agent"}, + {ID: "agent2", Instructions: "Second agent"}, + } + cfg, err := workflow.New(t.Context(), "multi-agent", + workflow.WithAgents(agents), + workflow.WithTasks([]task.Config{ + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + }), + ) + require.NoError(t, err) + assert.Len(t, cfg.Agents, 2) + assert.Equal(t, "agent1", cfg.Agents[0].ID) + assert.Equal(t, "agent2", cfg.Agents[1].ID) + }) +} + +func TestNew_WhitespaceTrimming(t *testing.T) { + t.Run("Should trim whitespace from id", func(t *testing.T) { + cfg, err := workflow.New(t.Context(), " test-workflow ", + workflow.WithTasks([]task.Config{ + {BaseConfig: task.BaseConfig{ID: "task1", Type: task.TaskTypeBasic}}, + }), + ) + require.NoError(t, err) + assert.Equal(t, "test-workflow", cfg.ID) + }) +} diff --git a/sdk/workflow/generate.go b/sdk/workflow/generate.go new file mode 100644 index 00000000..80fa2325 --- /dev/null +++ b/sdk/workflow/generate.go @@ -0,0 +1,3 @@ +package workflow + +//go:generate go run ../internal/codegen/cmd/optionsgen/main.go -engine ../../engine/workflow/config.go -struct Config -output options_generated.go diff --git a/sdk/workflow/options_generated.go b/sdk/workflow/options_generated.go new file mode 100644 index 00000000..4ad60bd4 --- /dev/null +++ b/sdk/workflow/options_generated.go @@ -0,0 +1,187 @@ +// Code generated by optionsgen. DO NOT EDIT. 
+ +package workflow + +import ( + agent "github.com/compozy/compozy/engine/agent" + core "github.com/compozy/compozy/engine/core" + knowledge "github.com/compozy/compozy/engine/knowledge" + mcp "github.com/compozy/compozy/engine/mcp" + schema "github.com/compozy/compozy/engine/schema" + task "github.com/compozy/compozy/engine/task" + tool "github.com/compozy/compozy/engine/tool" + workflow "github.com/compozy/compozy/engine/workflow" +) + +type Option func(*workflow.Config) + +// WithResource sets the Resource field +// +// Resource reference for external workflow definitions +// Format: "compozy:workflow:" - allows referencing pre-built workflows +func WithResource(resource string) Option { + return func(cfg *workflow.Config) { + cfg.Resource = resource + } +} + +// WithID sets the ID field +// +// Unique identifier for the workflow (required) +// Must be unique within the project scope. Used for referencing and execution. +// - **Example**: "customer-support", "data-processing", "content-generation" +func WithID(id string) Option { + return func(cfg *workflow.Config) { + cfg.ID = id + } +} + +// WithVersion sets the Version field +// +// Version of the workflow for tracking changes +// Follows semantic versioning (e.g., "1.0.0", "2.1.3") +// Useful for managing workflow evolution and backwards compatibility +func WithVersion(version string) Option { + return func(cfg *workflow.Config) { + cfg.Version = version + } +} + +// WithDescription sets the Description field +// +// Human-readable description of the workflow's purpose +// Should clearly explain what the workflow does and when to use it +func WithDescription(description string) Option { + return func(cfg *workflow.Config) { + cfg.Description = description + } +} + +// WithSchemas sets the Schemas field +// +// JSON schemas for validating data structures used in the workflow +// Define reusable schemas that can be referenced throughout the workflow +// using $ref syntax (e.g., $ref: 
local::schemas.#(id="user_schema")) +func WithSchemas(schemas []schema.Schema) Option { + return func(cfg *workflow.Config) { + cfg.Schemas = schemas + } +} + +// WithOpts sets the Opts field +// +// Configuration options including input schema and environment variables +// Controls workflow behavior, validation, and runtime environment +func WithOpts(opts workflow.Opts) Option { + return func(cfg *workflow.Config) { + cfg.Opts = opts + } +} + +// WithAuthor sets the Author field +// +// Author information for workflow attribution +// Helps track ownership and responsibility for workflow maintenance +func WithAuthor(author *core.Author) Option { + return func(cfg *workflow.Config) { + cfg.Author = author + } +} + +// WithTools sets the Tools field +// +// External tools that can be invoked by agents or tasks +// Define executable scripts or programs that perform specific operations +// Tools provide deterministic, non-AI functionality like API calls or data processing +// $ref: schema://tools +func WithTools(tools []tool.Config) Option { + return func(cfg *workflow.Config) { + cfg.Tools = tools + } +} + +// WithAgents sets the Agents field +// +// AI agents with specific instructions and capabilities +// Configure LLM-powered agents with custom prompts, tools access, and behavior +// Agents can be referenced by tasks using $use: agent(...) syntax +// $ref: schema://agents +func WithAgents(agents []agent.Config) Option { + return func(cfg *workflow.Config) { + cfg.Agents = agents + } +} + +// WithKnowledgeBases sets the KnowledgeBases field +// +// KnowledgeBases declares workflow-scoped knowledge definitions. +func WithKnowledgeBases(knowledgeBases []knowledge.BaseConfig) Option { + return func(cfg *workflow.Config) { + cfg.KnowledgeBases = knowledgeBases + } +} + +// WithKnowledge sets the Knowledge field +// +// Knowledge defines the default knowledge binding for the workflow context. 
+func WithKnowledge(knowledge []core.KnowledgeBinding) Option { + return func(cfg *workflow.Config) { + cfg.Knowledge = knowledge + } +} + +// WithMCPs sets the MCPs field +// +// Model Context Protocol servers for extending AI capabilities +// MCP servers provide specialized tools and knowledge to agents +// Enable integration with external services and domain-specific functionality +// $ref: schema://mcp +func WithMCPs(mCPs []mcp.Config) Option { + return func(cfg *workflow.Config) { + cfg.MCPs = mCPs + } +} + +// WithTriggers sets the Triggers field +// +// Event triggers that can initiate workflow execution +// Define external events (webhooks, signals) that can start the workflow +// Each trigger can have its own input schema for validation +func WithTriggers(triggers []workflow.Trigger) Option { + return func(cfg *workflow.Config) { + cfg.Triggers = triggers + } +} + +// WithTasks sets the Tasks field +// +// Sequential tasks that define the workflow execution plan (required) +// Tasks are the core execution units, processed in order with conditional branching +// Each task uses either an agent or tool to perform its operation +// $ref: schema://tasks +func WithTasks(tasks []task.Config) Option { + return func(cfg *workflow.Config) { + cfg.Tasks = tasks + } +} + +// WithOutputs sets the Outputs field +// +// Output mappings to structure the final workflow results +// Use template expressions to extract and transform task outputs +// - **Example**: ticket_id: "{{ .tasks.create-ticket.output.id }}" +func WithOutputs(outputs *core.Output) Option { + return func(cfg *workflow.Config) { + cfg.Outputs = outputs + } +} + +// WithSchedule sets the Schedule field +// +// Schedule configuration for automated workflow execution +// Enable cron-based scheduling with timezone support and overlap policies +func WithSchedule(schedule *workflow.Schedule) Option { + return func(cfg *workflow.Config) { + cfg.Schedule = schedule + } +} diff --git 
a/tasks/prd-modes/TEMPLATE_SYSTEM_ANALYSIS.md b/tasks/prd-modes/TEMPLATE_SYSTEM_ANALYSIS.md deleted file mode 100644 index abb74259..00000000 --- a/tasks/prd-modes/TEMPLATE_SYSTEM_ANALYSIS.md +++ /dev/null @@ -1,401 +0,0 @@ -# Compozy Template System Analysis - -## Current State Summary - -### Template System Architecture - -**Location**: `pkg/template/` -- **Types Definition**: `pkg/template/types.go` - Interface-based template system -- **Registry Pattern**: `pkg/template/registry.go` - Global singleton registry -- **Service Layer**: `pkg/template/service.go` - Service interface with singleton instance -- **Generator**: `pkg/template/generator.go` - Template rendering and file creation -- **Basic Template**: `pkg/template/templates/basic/basic.go` - Only existing template implementation - -**Current Templates Available**: 1 (basic only) - -### Basic Template Structure - -**Location**: `pkg/template/templates/basic/` - -**Files Generated**: -- `compozy.yaml` - Project config file -- `entrypoint.ts` - Runtime entry point (Bun) -- `workflows/main.yaml` - Example workflow -- `greeting_tool.ts` - Example tool -- `api.http` - API testing file -- `env.example` - Environment template -- `.gitignore` - Git ignore file -- `README.md` - Project README -- `docker-compose.yaml` - (Optional, when `--docker` flag used) - -**Configuration Structure** (`projectConfig` struct in basic.go): -```go -type projectConfig struct { - Name string // Project name - Version string // Version - Description string // Description - Author *authorConfig // Author info - Workflows []workflowRef // Workflow references - Models []modelConfig // LLM model config - Runtime *runtimeConfig // Runtime config (type, entrypoint, perms) - Autoload *autoloadConfig // Autoload settings - Templates map[string]string // Custom templates -} -``` - -### Current Mode Configuration in Templates - -**In basic template** (`baseProjectConfig` function, lines 156-194): -```go -Models: []modelConfig{ - { - Provider: 
"openai", - Model: "gpt-4.1-2025-04-14", - APIKey: "{{ .env.OPENAI_API_KEY }}", - }, -}, -Runtime: &runtimeConfig{ - Type: "bun", - Entrypoint: "./entrypoint.ts", - Permissions: []string{ - "--allow-read", - "--allow-net", - "--allow-write", - }, -}, -Autoload: &autoloadConfig{ - Enabled: true, - Strict: true, - Include: []string{ - "agents/*.yaml", - "tools/*.yaml", - }, - Exclude: []string{ - "**/*~", - "**/*.bak", - "**/*.tmp", - }, -}, -``` - -**Key Finding**: No mode-specific configuration in template. Template generates generic project without mode selection. - -### CLI/TUI Integration - -**Location**: `cli/cmd/init/` - -**Init Command**: `cli/cmd/init/init.go` -- Default template: `"basic"` (line 83) -- `--template` flag allows override (line 83) -- Docker support: `--docker` flag for docker-compose.yaml generation -- Two execution modes: JSON (non-interactive) and TUI (interactive) - -**Project Form**: `cli/cmd/init/components/project_form.go` -- **Template Selection** (lines 68-72): - ```go - huh.NewSelect[string](). - Title("Template"). - Description("Project template to use"). - Options(huh.NewOption("Basic", "basic")). // ← ONLY "basic" option - Value(&data.Template), - ``` - -**Key Finding**: Template selection dropdown only shows "Basic" option. No mode selection in init flow. - -**Init Model**: `cli/cmd/init/components/init_model.go` -- Handles form rendering and validation -- Displays header, collects form data, manages viewport -- No mode-specific questions or logic - -**Key Finding**: Interactive form doesn't ask about mode at all. - -### Docker Compose Configuration - -**Location**: `pkg/template/templates/basic/docker-compose.yaml.tmpl` - -**Services Configured** (when `--docker` flag used): -1. Redis (6379) - for caching -2. PostgreSQL (5432) - application database -3. PostgreSQL (5433) - Temporal database -4. Temporal (7233) - workflow engine -5. 
Temporal UI (8080) - web interface - -**Key Finding**: Docker compose is **distributed mode only** - sets up external PostgreSQL and Temporal. No option for embedded services. - ---- - -## Three-Mode System Requirements - -Based on `tasks/prd-modes/_techspec.md`, the system needs: - -### Mode Definitions - -1. **memory** (NEW DEFAULT) - - In-memory SQLite (no persistence) - - Embedded Temporal - - Embedded Redis (miniredis) - - Use case: Development, testing, demos - - No config file needed - works out of box - -2. **persistent** (NEW) - - File-based SQLite (with persistence) - - Embedded Temporal - - Embedded Redis with BadgerDB persistence - - Use case: Local development with state preservation - - Requires `compozy.yaml` with mode setting - -3. **distributed** (EXISTING, renamed) - - External PostgreSQL - - External Temporal - - External Redis - - Use case: Production - - Requires full infrastructure setup - -### Configuration Points Needed - -From tech spec, templates need to configure: -- Database driver selection (SQLite vs PostgreSQL) -- Temporal mode (embedded vs external) -- Cache/Redis mode (embedded vs external) -- Default generated workflows/examples -- Docker compose for distributed mode only - ---- - -## Impact Analysis: What Needs Changing - -### 1. 
Template System Changes - -**Current**: 1 template (basic) with no mode awareness -**Needed**: 1 template or 3 templates with mode selection - -**Options**: -- **Option A**: Single "basic" template that generates mode-specific config based on new GenerateOptions field -- **Option B**: Three templates: "basic-memory", "basic-persistent", "basic-distributed" -- **Recommended**: Option A - maintains simplicity, backward compatible at API level - -**Files to Modify**: -- `pkg/template/types.go` - Add `Mode` field to `GenerateOptions` -- `pkg/template/templates/basic/basic.go` - Add mode-aware config generation -- `pkg/template/templates/basic/compozy.yaml.tmpl` - Make database/cache config mode-dependent -- `pkg/template/templates/basic/docker-compose.yaml.tmpl` - Only include for distributed mode - -### 2. CLI/TUI Changes - -**Current**: No mode selection in init flow -**Needed**: Mode selection in interactive form - -**Files to Modify**: -- `cli/cmd/init/init.go` - Add mode option, pass to template -- `cli/cmd/init/components/project_form.go` - Add mode selection dropdown -- `cli/cmd/init/components/init_model.go` - Handle mode in form data - -**New Field in ProjectFormData**: -```go -type ProjectFormData struct { - // ... existing fields ... - Mode string // "memory", "persistent", or "distributed" -} -``` - -### 3. Template Content Changes - -**compozy.yaml.tmpl**: -- Conditional cache configuration based on mode -- Database driver selection based on mode -- Remove unnecessary Temporal config for embedded modes - -**docker-compose.yaml.tmpl**: -- Only generate for distributed mode (or move to separate template) -- Update to use external services - -**env.example.tmpl**: -- Different variables based on mode -- Only Temporal vars for distributed mode - -### 4. 
Generated Project Differences - -**Memory Mode**: -```yaml -# No explicit cache.adapter - uses default (memory) -# No explicit temporal.mode - uses default (embedded with :memory:) -database: - driver: sqlite - file: :memory: -``` - -**Persistent Mode**: -```yaml -database: - driver: sqlite - file: ./compozy.db -``` - -**Distributed Mode**: -```yaml -database: - driver: postgres - host: localhost - port: 5432 -cache: - adapter: redis # external -temporal: - mode: remote - address: localhost:7233 -``` - ---- - -## Current Template Flow - -``` -CLI user runs: compozy init my-project - ↓ -init.go: prepareInitOptions() - ↓ -init.go: runInitTUI() or runInitJSON() - ↓ -components/init_model.go: NewInitModel(projectData) - ↓ -components/project_form.go: NewProjectForm(data) - ↓ -User selects: Name, Description, Version, Author, Template(="basic"), Docker(yes/no) - ↓ -ProjectFormData captured - ↓ -init.go: generateProjectStructure(opts) - ↓ -template.GetService().Generate("basic", generateOptions) - ↓ -registry.go: Get("basic") → basic.Template{} - ↓ -basic.go: GetProjectConfig() → generates compozy.yaml content - ↓ -generator.go: createFile() → renders templates to disk -``` - ---- - -## What Exists vs What's Missing - -### Exists ✅ -- Robust template registry system -- Template rendering with sprig functions -- TUI-based form for project initialization -- Docker compose option -- Basic template with multiple files - -### Missing ❌ -- Mode selection in init form -- Mode-aware template generation -- Database driver defaults based on mode -- Cache/temporal configuration by mode -- Mode-specific docker compose (only distributed) -- Mode documentation in generated README - ---- - -## Files That Will Change - -### Phase 1: Configuration (2 files - template system) -1. `pkg/template/types.go` - Add Mode to GenerateOptions -2. `pkg/template/templates/basic/basic.go` - Mode-aware config generation - -### Phase 2: CLI/TUI (3 files) -3. 
`cli/cmd/init/init.go` - Pass mode to template -4. `cli/cmd/init/components/project_form.go` - Add mode selection field -5. `cli/cmd/init/components/init_model.go` - Handle mode field - -### Phase 3: Templates (4 files) -6. `pkg/template/templates/basic/compozy.yaml.tmpl` - Database/cache config -7. `pkg/template/templates/basic/env.example.tmpl` - Mode-specific vars -8. `pkg/template/templates/basic/docker-compose.yaml.tmpl` - Conditionally included -9. `pkg/template/templates/basic/README.md.tmpl` - Mode documentation - -### Phase 4: Testing (documentation only for now) -- Integration tests will be covered in separate phase (task 11.0 in PRD) - ---- - -## Key Decisions to Make - -### 1. Default Mode -**Decision**: memory mode -**Rationale**: Zero-dependency quickstart, fits dev/testing use case -**Impact**: Changes default generated config significantly - -### 2. Mode Selection UI -**Decision**: Add dropdown after template selection -**Order**: Name → Description → Version → Author → URL → Template → **Mode** ← NEW → Docker → Bun -**Visibility**: Always shown (not conditional) - -### 3. Docker Compose Behavior -**Current**: Generated when --docker flag used -**Change**: Only generate for distributed mode -**Impact**: Memory/persistent modes won't have docker-compose.yaml - -### 4. Configuration in Generated YAML -**Decision**: Explicit mode settings in generated compozy.yaml -```yaml -mode: memory # or persistent/distributed -``` -**Rationale**: Clear intent, easier debugging, runtime can validate - ---- - -## Risk Areas - -### High Risk -1. **Template rendering changes**: If mode logic breaks, all new projects broken -2. **Docker compose conditional**: Need to verify docker-compose generation only for distributed -3. **Form field ordering**: Changes UX, need to validate in TUI - -### Medium Risk -1. **Backward compatibility**: Old code expecting always-docker setup -2. **Test coverage**: New mode paths need test coverage -3. 
**Documentation**: User confusion if migration docs not clear - -### Low Risk -1. **CLI flag changes**: No breaking flag changes, purely addition -2. **Registry changes**: Interface remains same, only options expanded - ---- - -## Summary Table - -| Component | Current | Needed | Status | -|-----------|---------|--------|--------| -| Templates | 1 (basic) | 1 (basic + mode-aware) | Needs mode logic | -| CLI Mode Selection | None | Dropdown in form | Needs UI | -| Config Generation | Generic | Mode-specific | Needs logic | -| Docker Compose | Always (if --docker) | Distributed only | Needs conditional | -| Database Config | Generic | Mode-dependent | Needs logic | -| Cache Config | Not in template | Mode-dependent | Needs addition | -| Documentation | Generic | Mode examples | Needs updates | - ---- - -## Recommended Next Steps - -1. **Update GenerateOptions** (5 min) - - Add Mode field to pkg/template/types.go - -2. **Update Basic Template** (1 hour) - - Add mode-aware config in basic.go - - Update compozy.yaml.tmpl with conditional logic - - Update env.example.tmpl with mode-specific vars - - Make docker-compose conditional - -3. **Update CLI Form** (1.5 hours) - - Add mode field to ProjectFormData - - Add dropdown in project_form.go - - Wire through init.go to GenerateOptions - -4. **Testing** (2+ hours) - - Verify each mode generates correct config - - Test form with each mode selection - - Validate docker-compose only in distributed - -5. 
**Documentation** (1+ hours) - - Update generated README.md template - - Add mode notes to each generated project diff --git a/tasks/prd-modes/_docs.md b/tasks/prd-modes/_docs.md deleted file mode 100644 index 9d043b01..00000000 --- a/tasks/prd-modes/_docs.md +++ /dev/null @@ -1,535 +0,0 @@ -# Documentation Plan: Three-Mode Configuration System - -**PRD Reference**: `tasks/prd-modes/_prd.md` -**Tech Spec Reference**: `tasks/prd-modes/_techspec.md` -**Status**: Planning - ---- - -## Documentation Strategy - -### Core Messaging Shift - -**OLD MESSAGE**: "Compozy supports standalone and distributed modes" -**NEW MESSAGE**: "Compozy runs instantly with zero dependencies (memory mode), with optional persistence (persistent mode) or production deployment (distributed mode)" - -**Emphasis**: Memory mode as the default, easiest path. Distributed mode as production option. - ---- - -## Documentation Files to Update - -### 1. Configuration Documentation - -#### File: `docs/content/docs/configuration/mode-configuration.mdx` -**Priority**: CRITICAL (highest visibility) -**Current State**: Documents standalone/distributed two-mode system -**Required Changes**: - -**Updates:** -- Update frontmatter description: "Control deployment modes (memory/persistent/distributed)" -- Rewrite overview section emphasizing memory mode as default -- Update mode list to show three modes with clear use case guidance -- Add decision matrix table (when to use each mode) -- Update all code examples to use memory/persistent/distributed -- Remove all "standalone" references (except migration notes) -- Update environment variable table -- Update validation section for new modes -- Add troubleshooting section (mode selection guidance) - -**New Sections:** -```markdown -## Quick Mode Selection - -| Scenario | Recommended Mode | Why | -|----------|------------------|-----| -| Trying Compozy for first time | memory | Zero setup, instant start | -| Local development | persistent | State preserved between 
restarts | -| Running tests | memory | Fastest execution | -| CI/CD pipelines | memory | No Docker overhead | -| Production deployment | distributed | Scalability and HA | -| Debugging with state | persistent | Inspect database files | - -## Mode Comparison - -| Feature | Memory | Persistent | Distributed | -|---------|--------|-----------|-------------| -| Startup Time | <1s | <2s | <3s | -| External Dependencies | None | None | PostgreSQL, Redis, Temporal | -| State Persistence | No | Yes | Yes | -| Data Location | RAM | ./.compozy/ | External services | -| Suitable For | Tests, prototyping | Local dev | Production | -| Horizontal Scaling | No | No | Yes | -``` - -**Migration Note**: -```markdown - -The `standalone` mode has been replaced with `memory` (no persistence) and `persistent` (with persistence). - -**Migration**: Replace `mode: standalone` with: -- `mode: memory` if you don't need persistence (tests, quick prototyping) -- `mode: persistent` if you need state between restarts (local development) - -``` - ---- - -### 2. Deployment Documentation - -#### File: `docs/content/docs/deployment/standalone-mode.mdx` -**Action**: RENAME to `memory-mode.mdx` -**Priority**: HIGH - -**Updates:** -- Rename file: `standalone-mode.mdx` → `memory-mode.mdx` -- Update frontmatter title: "Memory Mode Deployment" -- Update frontmatter description: "Zero-dependency deployment with in-memory storage" -- Rewrite content to emphasize ephemeral nature -- Add warning about data loss on restart -- Update all configuration examples -- Add "When to Use" section -- Add "Limitations" section (no persistence, concurrency limits) - -**New Content Structure:** -```markdown -# Memory Mode Deployment - -Memory mode provides the **fastest way to run Compozy** with zero external dependencies. All data is stored in RAM and lost on restart. 
- -## When to Use Memory Mode - -- ✅ Trying Compozy for the first time -- ✅ Running tests (50-80% faster than distributed mode) -- ✅ CI/CD pipelines -- ✅ Quick prototyping and demos -- ❌ Production deployments (use distributed mode) -- ❌ Development requiring state persistence (use persistent mode) - -## Configuration - -[...] - -## Limitations - -- All data lost on server restart -- Not suitable for production -- Limited by available RAM -- Single-process only (no horizontal scaling) -``` - ---- - -#### File: `docs/content/docs/deployment/persistent-mode.mdx` -**Action**: CREATE NEW -**Priority**: HIGH - -**Content:** -```markdown -# Persistent Mode Deployment - -Persistent mode provides file-based storage while maintaining zero external dependencies. Perfect for local development with state preservation. - -## When to Use Persistent Mode - -- ✅ Local development requiring state between restarts -- ✅ Debugging workflows with database inspection -- ✅ Multi-day development sessions -- ✅ Learning Compozy with persistent examples -- ❌ Production deployments (use distributed mode) -- ❌ Horizontal scaling (use distributed mode) - -## Configuration - -```yaml -mode: persistent - -# Optional: customize data directory -database: - url: ./my-data/compozy.db - -temporal: - standalone: - database_file: ./my-data/temporal.db - -redis: - standalone: - persistence: - enabled: true - dir: ./my-data/redis -``` - -## Data Directory Structure - -``` -.compozy/ -├── compozy.db # Main application database -├── temporal.db # Temporal server database -└── redis/ # Redis persistence - └── dump.rdb -``` - -## Backup and Restore - -[...] 
- -## Limitations - -- Single-process only -- Limited concurrent writes (SQLite limitation) -- Not suitable for production -- No automatic replication -``` - ---- - -#### File: `docs/content/docs/deployment/distributed-mode.mdx` -**Priority**: MEDIUM (less changes needed) - -**Updates:** -- Add mode comparison callout at top -- Emphasize this is the **production** mode -- Update examples to explicitly show `mode: distributed` -- Add migration path from persistent to distributed -- Keep all existing distributed mode content - -**New Opening Section:** -```markdown -# Distributed Mode Deployment - -Distributed mode is Compozy's **production-ready deployment** option, using external PostgreSQL, Redis, and Temporal services for scalability and high availability. - - -**Need something simpler?** -- **Memory mode**: Zero dependencies, instant start (perfect for trying Compozy) -- **Persistent mode**: File-based storage, still zero external services (local development) -- **Distributed mode**: External services required (production deployments) - - -[... existing content ...] -``` - ---- - -### 3. Quick Start Documentation - -#### File: `docs/content/docs/quick-start/index.mdx` -**Priority**: CRITICAL (first user touchpoint) - -**Updates:** -- Update "Prerequisites" section: Remove Docker, PostgreSQL requirements -- Update installation instructions to emphasize zero dependencies -- Show memory mode in first example -- Add "Next Steps" section guiding to persistent/distributed modes -- Update all code examples to use `mode: memory` explicitly - -**New Prerequisites Section:** -```markdown -## Prerequisites - -- **Go 1.25+** OR download pre-built binary -- **That's it!** No Docker, PostgreSQL, or Redis required - -Compozy's default memory mode requires zero external dependencies. 
-``` - -**New First Example:** -```markdown -## Your First Compozy Project - -```bash -# Create new project -compozy init my-first-agent - -# Start server (memory mode - instant startup) -cd my-first-agent -compozy start -# Server ready in <1 second! - -# Execute your first workflow -compozy run hello-world -``` - - -**That's it!** Your agent is running with zero external dependencies. - - -## Next Steps - -- **Keep prototyping?** Stay in memory mode -- **Need persistence?** Switch to [persistent mode](/docs/deployment/persistent-mode) -- **Going to production?** Configure [distributed mode](/docs/deployment/distributed-mode) -``` - ---- - -### 4. CLI Help Documentation - -#### File: `cli/help/global-flags.md` -**Priority**: MEDIUM - -**Updates:** -- Update `--mode` flag description -- Show all three modes with use case guidance -- Update default value to "memory" -- Update examples - -**New Content:** -```markdown -## --mode - -**Type:** string -**Default:** `memory` -**Environment:** `COMPOZY_MODE` - -Controls the deployment mode: - -- **memory**: In-memory SQLite, fastest startup, no persistence - - Use for: Tests, quick prototyping, CI/CD - -- **persistent**: File-based SQLite, embedded services, state preserved - - Use for: Local development, debugging - -- **distributed**: External PostgreSQL/Redis/Temporal, production-ready - - Use for: Production deployments, horizontal scaling - -**Examples:** - -```bash -# Use memory mode (default) -compozy start - -# Use persistent mode for local development -compozy start --mode persistent - -# Use distributed mode for production -compozy start --mode distributed -``` -``` - ---- - -### 5. 
Migration Guide - -#### File: `docs/content/docs/guides/mode-migration-guide.mdx` -**Action**: RENAME from `migrate-standalone-to-distributed.mdx` -**Priority**: HIGH (critical for existing users) - -**New Content:** -```markdown -# Mode Migration Guide - -This guide helps you migrate from the old two-mode system (standalone/distributed) to the new three-mode system (memory/persistent/distributed). - -## Breaking Changes (Alpha) - -- ✅ `mode: distributed` → No changes needed -- ⚠️ `mode: standalone` → Removed, use `memory` or `persistent` -- 🆕 `mode: memory` → New default (in-memory, no persistence) -- 🆕 `mode: persistent` → New mode (file-based, with persistence) - -## Quick Migration - -### Standalone → Memory (No Persistence Needed) - -**Use case**: Tests, CI/CD, quick prototyping - -```yaml -# OLD -mode: standalone -temporal: - standalone: - database_file: :memory: - -# NEW -mode: memory -# Temporal automatically uses in-memory storage -``` - -### Standalone → Persistent (Persistence Needed) - -**Use case**: Local development - -```yaml -# OLD -mode: standalone -temporal: - standalone: - database_file: ./temporal.db - -# NEW -mode: persistent -# Files automatically saved to ./.compozy/ -``` - -## Detailed Migration Steps - -### Step 1: Identify Your Use Case - -[...] - -### Step 2: Update Configuration - -[...] - -### Step 3: Update Environment Variables - -```bash -# OLD -export COMPOZY_MODE=standalone - -# NEW (choose based on use case) -export COMPOZY_MODE=memory # For tests/prototyping -export COMPOZY_MODE=persistent # For local dev -export COMPOZY_MODE=distributed # For production -``` - -### Step 4: Update Docker Compose (If Applicable) - -Memory and persistent modes don't need Docker Compose. - -[...] - -## Troubleshooting - -### Error: "Mode must be 'memory', 'persistent', or 'distributed'" - -Your configuration uses the old `standalone` mode. 
Update to: -- `memory` if you don't need persistence -- `persistent` if you need state between restarts - -### Data Migration - -[...] -``` - ---- - -### 6. Examples README - -#### File: `examples/README.md` -**Priority**: MEDIUM - -**Updates:** -- List all three mode examples -- Show when to use each example -- Update directory structure - -**New Content:** -```markdown -# Compozy Examples - -## Mode Examples - -### Memory Mode Example -**Location**: `examples/memory-mode/` -**Use Case**: Quick start, tests, CI/CD - -```bash -cd examples/memory-mode -compozy start -# Instant startup, no persistence -``` - -### Persistent Mode Example -**Location**: `examples/persistent-mode/` -**Use Case**: Local development with state - -```bash -cd examples/persistent-mode -compozy start -# State saved to ./.compozy/ -``` - -### Distributed Mode Example -**Location**: `examples/distributed-mode/` -**Use Case**: Production deployment - -```bash -cd examples/distributed-mode -docker-compose up -d # Start external services -compozy start -``` - -[...] -``` - ---- - -### 7. 
API Documentation (Auto-Generated) - -#### File: `docs/api/openapi.yaml` (Generated) -**Action**: Regenerate from schemas -**Priority**: LOW (auto-generated) - -**Process:** -- Run `make swagger` after schema updates (Task 21.0) -- Verify mode field documentation -- Verify examples use correct modes - ---- - -## Documentation Deliverables - -### Must-Have (MVP) -- [x] `mode-configuration.mdx` updated with three modes -- [x] `memory-mode.mdx` created (renamed from standalone-mode.mdx) -- [x] `persistent-mode.mdx` created (new) -- [x] `distributed-mode.mdx` updated (add comparison) -- [x] `quick-start/index.mdx` updated (memory mode emphasis) -- [x] `mode-migration-guide.mdx` created (breaking change guide) -- [x] `cli/help/global-flags.md` updated - -### Nice-to-Have (Post-MVP) -- [ ] Video tutorial showing mode selection -- [ ] FAQ section on mode selection -- [ ] Performance comparison benchmarks (documented) -- [ ] Blog post announcement - ---- - -## Documentation Testing Checklist - -### Content Validation -- [ ] All code examples run successfully -- [ ] All links resolve (no 404s) -- [ ] All mode references use memory/persistent/distributed -- [ ] No inappropriate "standalone" references (except migration guide) -- [ ] Migration guide tested with real projects - -### SEO and Discoverability -- [ ] Frontmatter metadata accurate -- [ ] Mode comparison table visible in search -- [ ] Quick mode selection guide prominent -- [ ] Migration guide linked from all mode docs - -### User Experience -- [ ] Decision matrix helps users choose mode -- [ ] Error messages referenced in troubleshooting -- [ ] Next steps clear from each mode doc -- [ ] Examples match documentation - ---- - -## Success Criteria - -- [x] All documentation builds without errors -- [x] All code examples work in respective modes -- [x] No "standalone" references (except migration guide) -- [x] Migration guide provides clear path forward -- [x] Quick start emphasizes zero-dependency experience -- [x] 
Mode comparison table helps users decide -- [x] Documentation search returns mode guides prominently - ---- - -## Implementation Notes - -- Task 14.0 handles deployment documentation -- Task 15.0 handles configuration documentation -- Task 16.0 handles migration guide -- Task 17.0 handles quick start -- Task 18.0 handles CLI help -- Task 26.0 validates all documentation changes - -**Estimated Effort**: 5-6 days (with parallelization to 1 day) diff --git a/tasks/prd-modes/_examples.md b/tasks/prd-modes/_examples.md deleted file mode 100644 index 69428f14..00000000 --- a/tasks/prd-modes/_examples.md +++ /dev/null @@ -1,844 +0,0 @@ -# Examples Plan: Three-Mode Configuration System - -**PRD Reference**: `tasks/prd-modes/_prd.md` -**Tech Spec Reference**: `tasks/prd-modes/_techspec.md` -**Status**: Planning - ---- - -## Examples Strategy - -### Goals -1. Demonstrate all three modes with realistic use cases -2. Show mode-specific configuration patterns -3. Provide copy-paste ready examples for each deployment scenario -4. 
Validate that all modes work as documented - -### Target Audiences -- **Trial Users**: Memory mode examples (fastest path to value) -- **Local Developers**: Persistent mode examples (development workflow) -- **DevOps Engineers**: Distributed mode examples (production deployment) - ---- - -## Example Directory Structure - -``` -examples/ -├── README.md # Overview and quick links (UPDATE) -├── memory-mode/ # NEW (rename from standalone-mode/) -│ ├── README.md -│ ├── compozy.yaml -│ ├── .env.example -│ ├── src/ -│ │ └── workflows/ -│ │ └── hello-world.ts -│ └── tests/ -│ └── hello-world.test.ts -├── persistent-mode/ # NEW -│ ├── README.md -│ ├── compozy.yaml -│ ├── .env.example -│ ├── .gitignore # Exclude .compozy/ -│ ├── src/ -│ │ └── workflows/ -│ │ └── stateful-agent.ts -│ └── tests/ -│ └── stateful-agent.test.ts -├── distributed-mode/ # UPDATE (formerly examples/basic or distributed) -│ ├── README.md -│ ├── compozy.yaml -│ ├── docker-compose.yaml -│ ├── .env.example -│ ├── src/ -│ │ └── workflows/ -│ │ └── production-agent.ts -│ └── k8s/ # Optional Kubernetes manifests -│ ├── deployment.yaml -│ └── service.yaml -└── advanced/ # Keep existing advanced examples - └── ... -``` - ---- - -## Example Projects - -### 1. Memory Mode Example - -**Directory**: `examples/memory-mode/` -**Purpose**: Demonstrate zero-dependency quickstart -**Priority**: CRITICAL (first impression) - -#### Files to Create/Update - -**`examples/memory-mode/README.md`** -```markdown -# Memory Mode Example - -The fastest way to run Compozy - zero external dependencies, instant startup. - -## Quick Start - -```bash -cd examples/memory-mode -compozy start -``` - -Server ready in <1 second! 
- -## What This Demonstrates - -- ✅ Zero-dependency deployment -- ✅ In-memory SQLite database -- ✅ Embedded Temporal server -- ✅ Instant startup (<1s) -- ⚠️ All data lost on restart (ephemeral) - -## Use Cases - -- Trying Compozy for the first time -- Running tests (50-80% faster) -- Quick prototyping and demos -- CI/CD pipelines - -## Configuration Highlights - -```yaml -mode: memory # Default, can be omitted - -# All services embedded, no external dependencies -# Database: :memory: -# Cache: in-memory (miniredis) -# Temporal: embedded -``` - -## Running the Example - -```bash -# Start server -compozy start - -# Execute workflow -compozy run hello-world --input '{"name": "World"}' - -# Expected output: -# Hello, World! - -# Stop server (all data lost) -compozy stop -``` - -## Next Steps - -- **Need persistence?** See [persistent-mode example](../persistent-mode/) -- **Going to production?** See [distributed-mode example](../distributed-mode/) -``` - -**`examples/memory-mode/compozy.yaml`** -```yaml -# Memory Mode Example - Zero Dependencies -name: memory-mode-example -version: 0.1.0 -description: Fastest way to run Compozy - no external services required - -# Memory mode (default) -mode: memory - -# Explicit configuration (all defaults) -database: - driver: sqlite - url: ":memory:" - -temporal: - mode: memory - namespace: memory-example - -redis: - mode: memory - # Embedded miniredis, no persistence - -# Agent configuration -agents: - hello-agent: - entrypoint: ./src/workflows/hello-world.ts - tools: - - name: echo - type: builtin - -# Server configuration -server: - host: localhost - port: 8080 - log_level: info -``` - -**`examples/memory-mode/.env.example`** -```bash -# Memory Mode Environment Variables -COMPOZY_MODE=memory -COMPOZY_LOG_LEVEL=info -COMPOZY_SERVER_PORT=8080 - -# No external service configuration needed! 
-``` - -**`examples/memory-mode/src/workflows/hello-world.ts`** -```typescript -import { defineWorkflow } from '@compozy/sdk' - -export default defineWorkflow({ - name: 'hello-world', - description: 'Simple greeting workflow', - - async execute({ input, tools }) { - const name = input.name || 'World' - const greeting = `Hello, ${name}!` - - await tools.echo({ message: greeting }) - - return { greeting } - } -}) -``` - ---- - -### 2. Persistent Mode Example - -**Directory**: `examples/persistent-mode/` -**Purpose**: Demonstrate local development with state persistence -**Priority**: HIGH (common development scenario) - -#### Files to Create - -**`examples/persistent-mode/README.md`** -```markdown -# Persistent Mode Example - -Local development with state preservation - no external dependencies, but data persists between restarts. - -## Quick Start - -```bash -cd examples/persistent-mode -compozy start -``` - -Server ready in <2 seconds, state saved to `./.compozy/` - -## What This Demonstrates - -- ✅ File-based SQLite database -- ✅ Embedded Temporal with persistence -- ✅ Embedded Redis with BadgerDB persistence -- ✅ State preserved across restarts -- ✅ Still zero external dependencies - -## Use Cases - -- Local development with state -- Multi-day development sessions -- Debugging workflows with database inspection -- Learning Compozy with persistent examples - -## Configuration Highlights - -```yaml -mode: persistent - -# Files automatically saved to ./.compozy/ -# - compozy.db (main database) -# - temporal.db (Temporal database) -# - redis/ (Redis persistence) -``` - -## Running the Example - -```bash -# Start server (first time) -compozy start -# Data directory created: ./.compozy/ - -# Execute stateful workflow -compozy run stateful-agent --input '{"action": "increment"}' -# Counter: 1 - -# Execute again -compozy run stateful-agent --input '{"action": "increment"}' -# Counter: 2 - -# Restart server -compozy restart - -# State preserved! 
-compozy run stateful-agent --input '{"action": "get"}' -# Counter: 2 (state restored from ./.compozy/) -``` - -## Data Directory Structure - -``` -.compozy/ -├── compozy.db # Main application database -├── temporal.db # Temporal server database -└── redis/ # Redis persistence directory - └── dump.rdb -``` - -## Inspecting State - -```bash -# Inspect main database -sqlite3 ./.compozy/compozy.db "SELECT * FROM workflows;" - -# Inspect Temporal database -sqlite3 ./.compozy/temporal.db "SELECT * FROM executions;" - -# View Redis data -# (Requires Redis CLI with BadgerDB plugin) -``` - -## Next Steps - -- **Don't need persistence?** See [memory-mode example](../memory-mode/) -- **Going to production?** See [distributed-mode example](../distributed-mode/) -``` - -**`examples/persistent-mode/compozy.yaml`** -```yaml -# Persistent Mode Example - Local Development with State -name: persistent-mode-example -version: 0.1.0 -description: Local development with persistence - -# Persistent mode -mode: persistent - -# Optional: customize data directory -# database: -# url: ./my-data/compozy.db - -# Optional: customize Temporal database -# temporal: -# standalone: -# database_file: ./my-data/temporal.db - -# Optional: customize Redis persistence directory -# redis: -# standalone: -# persistence: -# enabled: true -# dir: ./my-data/redis - -# Agent configuration -agents: - stateful-agent: - entrypoint: ./src/workflows/stateful-agent.ts - tools: - - name: counter - type: builtin - -# Server configuration -server: - host: localhost - port: 8080 - log_level: info -``` - -**`examples/persistent-mode/.gitignore`** -```gitignore -# Persistent mode data directory -.compozy/ - -# Node modules -node_modules/ - -# Environment variables -.env -.env.local -``` - -**`examples/persistent-mode/src/workflows/stateful-agent.ts`** -```typescript -import { defineWorkflow } from '@compozy/sdk' - -export default defineWorkflow({ - name: 'stateful-agent', - description: 'Workflow demonstrating state 
persistence', - - async execute({ input, tools, state }) { - // State persists to ./.compozy/compozy.db - const counter = state.get('counter') || 0 - - if (input.action === 'increment') { - const newValue = counter + 1 - await state.set('counter', newValue) - return { counter: newValue } - } - - if (input.action === 'reset') { - await state.set('counter', 0) - return { counter: 0 } - } - - // Default: get current value - return { counter } - } -}) -``` - ---- - -### 3. Distributed Mode Example - -**Directory**: `examples/distributed-mode/` -**Purpose**: Demonstrate production-ready deployment with external services -**Priority**: MEDIUM (update existing distributed example) - -#### Files to Update - -**`examples/distributed-mode/README.md`** -```markdown -# Distributed Mode Example - -Production-ready deployment with external PostgreSQL, Redis, and Temporal services. - -## Prerequisites - -- Docker & Docker Compose (for local testing) -- OR existing PostgreSQL, Redis, Temporal infrastructure - -## Quick Start (Docker Compose) - -```bash -cd examples/distributed-mode - -# Start external services -docker-compose up -d - -# Wait for services to be ready (30-60 seconds) -docker-compose logs -f - -# Start Compozy -compozy start -``` - -Server ready in <3 seconds, connected to external services. 
- -## What This Demonstrates - -- ✅ External PostgreSQL database -- ✅ External Redis cache -- ✅ External Temporal server -- ✅ Production-ready configuration -- ✅ Horizontal scaling capability -- ✅ TLS support -- ✅ High availability - -## Use Cases - -- Production deployments -- Horizontal scaling -- High availability setups -- Kubernetes deployments - -## Configuration Highlights - -```yaml -mode: distributed - -database: - driver: postgres - url: postgresql://user:pass@postgres:5432/compozy - -temporal: - mode: remote - host_port: temporal:7233 - namespace: production - -redis: - mode: distributed - distributed: - addr: redis:6379 - tls: - enabled: true -``` - -## Running the Example - -```bash -# Start infrastructure -docker-compose up -d - -# Verify services -docker-compose ps - -# Start Compozy -export COMPOZY_MODE=distributed -export COMPOZY_DATABASE_URL="postgresql://compozy:password@localhost:5432/compozy" -export REDIS_ADDR="localhost:6379" -export TEMPORAL_HOST_PORT="localhost:7233" - -compozy start - -# Execute production workflow -compozy run production-agent --input '{"task": "process-data"}' - -# Scale horizontally (multiple instances) -compozy start --port 8081 # Instance 2 -compozy start --port 8082 # Instance 3 -``` - -## Docker Compose Services - -- **PostgreSQL** (5432): Main application database -- **PostgreSQL** (5433): Temporal database -- **Temporal** (7233): Workflow orchestration -- **Temporal UI** (8080): Workflow monitoring -- **Redis** (6379): Cache and message broker - -## Kubernetes Deployment - -See `k8s/` directory for Kubernetes manifests: - -```bash -kubectl apply -f k8s/ -``` - -## Next Steps - -- **Simpler setup?** See [memory-mode example](../memory-mode/) or [persistent-mode example](../persistent-mode/) -- **Advanced features?** See [advanced examples](../advanced/) -``` - -**`examples/distributed-mode/compozy.yaml`** -```yaml -# Distributed Mode Example - Production Deployment -name: distributed-mode-example -version: 1.0.0 
-description: Production-ready deployment with external services - -# Distributed mode -mode: distributed - -# PostgreSQL database -database: - driver: postgres - url: ${COMPOZY_DATABASE_URL} - pool: - max_open_conns: 25 - max_idle_conns: 5 - conn_max_lifetime: 5m - -# External Temporal -temporal: - mode: remote - host_port: ${TEMPORAL_HOST_PORT} - namespace: ${TEMPORAL_NAMESPACE:-production} - tls: - enabled: ${TEMPORAL_TLS_ENABLED:-false} - -# External Redis -redis: - mode: distributed - distributed: - addr: ${REDIS_ADDR} - password: ${REDIS_PASSWORD} - db: ${REDIS_DB:-0} - tls: - enabled: ${REDIS_TLS_ENABLED:-false} - pool: - max_active: 100 - max_idle: 10 - -# Agent configuration -agents: - production-agent: - entrypoint: ./src/workflows/production-agent.ts - tools: - - name: http - type: builtin - - name: database - type: builtin - -# Server configuration -server: - host: 0.0.0.0 - port: ${COMPOZY_SERVER_PORT:-8080} - log_level: ${COMPOZY_LOG_LEVEL:-info} - -# Observability -observability: - metrics: - enabled: true - port: 9090 - tracing: - enabled: true - endpoint: ${OTEL_EXPORTER_OTLP_ENDPOINT} -``` - -**`examples/distributed-mode/docker-compose.yaml`** -```yaml -version: '3.8' - -services: - # Main PostgreSQL database - postgres: - image: postgres:16-alpine - environment: - POSTGRES_DB: compozy - POSTGRES_USER: compozy - POSTGRES_PASSWORD: password - ports: - - "5432:5432" - volumes: - - postgres_data:/var/lib/postgresql/data - healthcheck: - test: ["CMD-SHELL", "pg_isready -U compozy"] - interval: 5s - timeout: 5s - retries: 5 - - # Temporal PostgreSQL database - postgres-temporal: - image: postgres:16-alpine - environment: - POSTGRES_DB: temporal - POSTGRES_USER: temporal - POSTGRES_PASSWORD: temporal - ports: - - "5433:5432" - volumes: - - postgres_temporal_data:/var/lib/postgresql/data - - # Temporal server - temporal: - image: temporalio/auto-setup:latest - depends_on: - postgres-temporal: - condition: service_healthy - environment: - DB: postgresql - 
DB_PORT: 5432 - POSTGRES_USER: temporal - POSTGRES_PWD: temporal - POSTGRES_SEEDS: postgres-temporal - DYNAMIC_CONFIG_FILE_PATH: config/dynamicconfig/development-sql.yaml - ports: - - "7233:7233" - volumes: - - ./config/temporal:/etc/temporal/config/dynamicconfig - - # Temporal UI - temporal-ui: - image: temporalio/ui:latest - depends_on: - - temporal - environment: - TEMPORAL_ADDRESS: temporal:7233 - ports: - - "8080:8080" - - # Redis - redis: - image: redis:7-alpine - ports: - - "6379:6379" - command: redis-server --appendonly yes - volumes: - - redis_data:/data - healthcheck: - test: ["CMD", "redis-cli", "ping"] - interval: 5s - timeout: 3s - retries: 5 - -volumes: - postgres_data: - postgres_temporal_data: - redis_data: -``` - ---- - -### 4. Examples README Update - -**File**: `examples/README.md` -**Priority**: HIGH (navigation hub) - -**Content:** -```markdown -# Compozy Examples - -Practical examples demonstrating Compozy's three deployment modes. - -## Quick Links - -| Mode | Use Case | Startup Time | External Deps | Example | -|------|----------|--------------|---------------|---------| -| **Memory** | Tests, prototyping | <1s | None | [View](./memory-mode/) | -| **Persistent** | Local development | <2s | None | [View](./persistent-mode/) | -| **Distributed** | Production | <3s | Yes | [View](./distributed-mode/) | - -## Mode Selection Guide - -### 🚀 Memory Mode -**Best for:** First-time users, tests, CI/CD - -- ✅ Zero external dependencies -- ✅ Instant startup -- ✅ Fastest execution -- ⚠️ No persistence (data lost on restart) - -**Quick Start:** -```bash -cd examples/memory-mode -compozy start -``` - -### 💾 Persistent Mode -**Best for:** Local development, debugging - -- ✅ Zero external dependencies -- ✅ State preserved between restarts -- ✅ Inspect database files -- ⚠️ Single-process only - -**Quick Start:** -```bash -cd examples/persistent-mode -compozy start -``` - -### 🏭 Distributed Mode -**Best for:** Production deployments - -- ✅ Horizontal scaling 
-- ✅ High availability -- ✅ Production-ready -- ⚠️ Requires external PostgreSQL, Redis, Temporal - -**Quick Start:** -```bash -cd examples/distributed-mode -docker-compose up -d -compozy start -``` - -## Running Examples - -Each example is self-contained: - -```bash -# Navigate to example -cd examples/-mode - -# Read the README -cat README.md - -# Start the example -compozy start - -# Execute workflows -compozy run -``` - -## Example Structure - -All examples follow this structure: - -``` --mode/ -├── README.md # Mode-specific guide -├── compozy.yaml # Mode-specific configuration -├── .env.example # Environment variables template -├── src/ -│ └── workflows/ # Example workflows -└── tests/ # Example tests -``` - -## Advanced Examples - -See [advanced/](./advanced/) for more complex scenarios: -- Multi-agent systems -- Custom tools integration -- Observability setup -- Deployment patterns - -## Contributing - -Have an example idea? See [CONTRIBUTING.md](../CONTRIBUTING.md). -``` - ---- - -## Example Validation Checklist - -### Functional Testing -- [ ] Memory mode example runs successfully -- [ ] Persistent mode example runs successfully -- [ ] Distributed mode example runs successfully -- [ ] State persists in persistent mode (verify `./.compozy/` created) -- [ ] State does NOT persist in memory mode -- [ ] Docker compose works for distributed mode -- [ ] All workflows execute without errors - -### Configuration Testing -- [ ] All `compozy.yaml` files are valid -- [ ] Environment variables work correctly -- [ ] Mode defaults are correct -- [ ] No hardcoded values (use env vars) - -### Documentation Testing -- [ ] All README examples run as documented -- [ ] Code snippets are copy-paste ready -- [ ] Links between examples work -- [ ] Troubleshooting sections address common issues - -### User Experience -- [ ] Memory mode is clearly the "quick start" option -- [ ] Persistent mode emphasizes local development -- [ ] Distributed mode emphasizes production -- [ ] 
Decision matrix helps choose mode -- [ ] Next steps guide users to other modes - ---- - -## Deliverables - -### Must-Have (Task 19.0) -- [x] Rename `standalone-mode/` → `memory-mode/` -- [x] Create `persistent-mode/` example -- [x] Update `distributed-mode/` example -- [x] Update `examples/README.md` -- [x] All examples tested and working - -### Nice-to-Have (Post-MVP) -- [ ] Video walkthrough of each mode -- [ ] Kubernetes manifests for distributed mode -- [ ] Advanced examples (multi-agent, custom tools) -- [ ] Performance comparison benchmarks - ---- - -## Success Criteria - -- [x] All three mode examples exist and work -- [x] Each example README is clear and actionable -- [x] Memory mode example runnable in <5 minutes -- [x] Persistent mode demonstrates state preservation -- [x] Distributed mode demonstrates production patterns -- [x] Examples README provides clear mode selection guidance -- [x] All code examples are copy-paste ready -- [x] No "standalone" references (except migration notes) - ---- - -## Implementation Notes - -- Task 19.0 handles example creation/updates -- Task 23.0 validates all examples work -- Examples should match documentation exactly - -**Estimated Effort**: 1 day (part of Phase 4 parallelization) diff --git a/tasks/prd-modes/_prd.md b/tasks/prd-modes/_prd.md deleted file mode 100644 index 7a897123..00000000 --- a/tasks/prd-modes/_prd.md +++ /dev/null @@ -1,598 +0,0 @@ -# Product Requirements Document: Three-Mode Configuration System - -**Status**: Approved for Implementation -**Version**: 1.0 -**Last Updated**: 2025-01-29 -**Breaking Change**: Yes (acceptable in alpha) - ---- - -## Overview - -Replace Compozy's current two-mode system (standalone/distributed) with a clearer three-mode system that better reflects user intent and deployment scenarios: - -- **memory** (NEW DEFAULT): Zero-dependency, in-memory operation for tests and quick prototyping -- **persistent**: File-based persistence for local development with state preservation 
-- **distributed**: Production-ready with external PostgreSQL, Redis, and Temporal - -This change addresses the confusion around "standalone" terminology (which implies isolation but requires local services) and dramatically improves the developer experience with instant startup and 50-80% faster tests. - -**Target Users:** -- Developers prototyping AI workflows (memory mode) -- Local development teams (persistent mode) -- DevOps teams deploying to production (distributed mode) - -**Core Value**: "Just works" out of the box with zero external dependencies, while supporting production deployments when needed. - ---- - -## Goals - -### Primary Objectives -1. **Zero-dependency quickstart**: `compozy start` works immediately without Docker, PostgreSQL, or Redis -2. **50-80% faster test suite**: Eliminate testcontainers startup overhead by using in-memory SQLite -3. **Clearer intent-based naming**: Mode names reflect actual use cases (memory/persistent/distributed) -4. **Simplified onboarding**: New users can try Compozy in <1 minute - -### Success Metrics -- Test suite execution time: 3-5 minutes → 45-90 seconds (60-70% improvement) -- Server startup (memory mode): <1 second (from cold start) -- Server startup (persistent mode): <2 seconds (from cold start) -- Time to first workflow execution: <10 seconds (down from 2-3 minutes with Docker) -- Developer satisfaction: "Easy to get started" feedback in user surveys - -### Business Objectives -- Reduce friction in developer adoption (faster onboarding) -- Improve CI/CD pipeline performance (faster tests) -- Maintain production-grade distributed deployment support -- Demonstrate architectural flexibility and thoughtful design - ---- - -## User Stories - -### As a Developer Trying Compozy for the First Time -- I want to run `compozy init` and `compozy start` without installing PostgreSQL or Docker -- So that I can evaluate the framework in under 5 minutes -- **Acceptance**: Memory mode is the default, requires zero external 
dependencies - -### As a Developer Running Tests -- I want tests to run 50-80% faster than before -- So that I can iterate quickly during development -- **Acceptance**: Test suite uses in-memory SQLite, no Docker containers - -### As a Developer Working on a Feature -- I want my agent workflows to persist between server restarts -- So that I don't lose state during local development -- **Acceptance**: Persistent mode saves SQLite database to `./.compozy/` directory - -### As a DevOps Engineer Deploying to Production -- I want to use external PostgreSQL, Redis, and Temporal services -- So that I can scale horizontally and maintain high availability -- **Acceptance**: Distributed mode supports all external services with proper configuration - -### As a Documentation Reader -- I want clear guidance on which mode to use for my scenario -- So that I can make informed deployment decisions -- **Acceptance**: Documentation explains mode trade-offs with decision matrix - ---- - -## Core Features - -### Feature 1: Memory Mode (Default) - -**What it does:** -- In-memory SQLite database (no persistence) -- Embedded miniredis cache (no persistence) -- Embedded Temporal server (ephemeral) -- Fastest startup and execution - -**Why it's important:** -- Enables instant "try it out" experience -- Eliminates Docker/PostgreSQL as prerequisites -- Dramatically speeds up test suite -- Reduces CI/CD costs (no testcontainers overhead) - -**Functional Requirements:** -1. Memory mode must be the default when `mode` is not specified -2. Server must start in <1 second (cold start) -3. All data lost on server restart (ephemeral by design) -4. No external dependencies required -5. SQLite uses `:memory:` connection string -6. Cache persistence explicitly disabled -7. 
Temporal uses in-memory SQLite - -### Feature 2: Persistent Mode - -**What it does:** -- File-based SQLite database (persists to `./.compozy/compozy.db`) -- Embedded Redis with BadgerDB persistence (persists to `./.compozy/redis/`) -- Embedded Temporal server with file-based SQLite (persists to `./.compozy/temporal.db`) -- State preserved across restarts - -**Why it's important:** -- Supports local development with persistence -- No external services required -- Predictable state for debugging -- Still zero Docker/PostgreSQL dependency - -**Functional Requirements:** -1. All data persists to `./.compozy/` directory by default -2. Server must start in <2 seconds (cold start) -3. Database file created automatically if missing -4. Redis persistence uses BadgerDB backend -5. Temporal database uses file-based SQLite -6. Graceful shutdown ensures data integrity -7. `.gitignore` should exclude `.compozy/` directory - -### Feature 3: Distributed Mode - -**What it does:** -- External PostgreSQL database (user-managed) -- External Redis cache (user-managed) -- External Temporal server (user-managed) -- Production-ready configuration - -**Why it's important:** -- Horizontal scaling capability -- High availability support -- Separate state management -- Production best practices - -**Functional Requirements:** -1. Requires external PostgreSQL, Redis, Temporal -2. Configuration validation enforces required connection details -3. Startup fails gracefully with actionable errors if services unavailable -4. TLS support for all external connections -5. Connection pooling and retry logic -6. Health checks for all external services -7. 
Metrics and monitoring integration - -### Feature 4: Mode Selection in Templates - -**What it does:** -- `compozy init` TUI form includes mode selection dropdown -- Generated `compozy.yaml` configures appropriate mode -- Docker compose only generated for distributed mode -- Mode-specific documentation in generated README - -**Why it's important:** -- Users understand mode choices during project setup -- Generated configuration matches deployment intent -- Avoids confusion about infrastructure requirements - -**Functional Requirements:** -1. TUI form has mode dropdown (memory/persistent/distributed) -2. Mode selected during init becomes default in compozy.yaml -3. Docker compose generation conditional on distributed mode -4. README explains chosen mode and how to change it -5. Environment variable examples match selected mode -6. `.gitignore` appropriate for selected mode - ---- - -## User Experience - -### User Personas - -**Persona 1: Trial Developer (Sarah)** -- **Need**: Evaluate Compozy quickly without infrastructure setup -- **Journey**: Downloads binary → runs `compozy init` → chooses memory mode → runs `compozy start` → executes sample workflow -- **Time to value**: <5 minutes -- **Mode**: memory (default) - -**Persona 2: Local Developer (Marcus)** -- **Need**: Develop AI agents with persistent state for debugging -- **Journey**: Initializes project → chooses persistent mode → develops workflows → state preserved between restarts -- **Time to value**: <10 minutes -- **Mode**: persistent - -**Persona 3: DevOps Engineer (Priya)** -- **Need**: Deploy to production with external managed services -- **Journey**: Initializes project → chooses distributed mode → configures PostgreSQL/Redis/Temporal → deploys to Kubernetes -- **Time to value**: 1-2 hours (infrastructure setup time) -- **Mode**: distributed - -### Key User Flows - -**Flow 1: Quick Start (Memory Mode)** -```bash -# Install Compozy -brew install compozy # or download binary - -# Initialize project 
-compozy init -# TUI form: mode = "memory" (default) - -# Start server -compozy start -# Output: "Server started in 0.8s (memory mode)" - -# Execute workflow -compozy run my-workflow -# Works immediately, no external services needed -``` - -**Flow 2: Local Development (Persistent Mode)** -```bash -# Initialize with persistence -compozy init -# TUI form: mode = "persistent" - -# Start server -compozy start -# Output: "Server started in 1.5s (persistent mode)" -# Output: "Database: ./.compozy/compozy.db" - -# Develop workflows -# ... state persists ... - -# Restart server -compozy restart -# State restored from ./.compozy/ -``` - -**Flow 3: Production Deployment (Distributed Mode)** -```bash -# Initialize for production -compozy init -# TUI form: mode = "distributed" -# TUI form: include Docker = "yes" - -# Configure external services -export COMPOZY_DATABASE_URL="postgresql://..." -export REDIS_ADDR="redis.prod:6379" -export TEMPORAL_HOST_PORT="temporal.prod:7233" - -# Start server -compozy start -# Output: "Server started in 2.3s (distributed mode)" -# Output: "Connected to PostgreSQL" -# Output: "Connected to Redis" -# Output: "Connected to Temporal" -``` - -### UI/UX Considerations - -**TUI Form Design:** -- Mode dropdown appears AFTER template selection, BEFORE Docker toggle -- Default selected: "memory" -- Help text for each mode explains use case -- Visual indicator: 🚀 memory, 💾 persistent, 🏭 distributed - -**Error Messages:** -- Invalid mode: "Mode must be 'memory', 'persistent', or 'distributed'. Did you mean to use 'memory' instead of 'standalone'?" -- Distributed mode missing config: "Distributed mode requires PostgreSQL connection. Set COMPOZY_DATABASE_URL or configure database.url in compozy.yaml" -- pgvector with SQLite: "Vector search (pgvector) requires PostgreSQL. Use 'distributed' mode or remove vector search features." 
- -**Accessibility:** -- CLI commands provide `--mode` flag for non-interactive use -- Environment variables support all configuration paths -- `compozy config show` displays effective mode and configuration -- Migration guide provides step-by-step instructions - ---- - -## High-Level Technical Constraints - -### Required Integrations -- SQLite driver (modernc.org/sqlite) - in-memory and file-based -- PostgreSQL driver (pgx) - distributed mode -- Miniredis (embedded Redis) - memory/persistent modes -- BadgerDB (Redis persistence backend) - persistent mode -- Temporal embedded server - memory/persistent modes - -### Performance Targets -- Test suite: 50-80% faster than current (3-5 min → 45-90 sec) -- Server startup (memory): <1 second -- Server startup (persistent): <2 seconds -- Server startup (distributed): <3 seconds (network dependent) -- No performance regressions in distributed mode - -### Data Sensitivity -- Memory mode: All data ephemeral, not suitable for sensitive data persistence -- Persistent mode: File-based storage, suitable for local development only -- Distributed mode: Production-grade, supports TLS and encryption at rest - -### Non-Negotiable Requirements -- Breaking change acceptable (alpha version) -- Migration guide MUST be provided -- No backwards compatibility for "standalone" mode name -- All existing distributed mode functionality preserved - ---- - -## Non-Goals (Out of Scope) - -### Explicitly Excluded - -1. **Hybrid mode combinations**: No support for "memory database + distributed cache" mixed configurations -2. **Automatic migration from standalone**: Users must manually update configuration (migration guide provided) -3. **In-place mode switching**: Changing modes requires server restart -4. **Cross-mode data migration**: No automated data export/import between modes -5. **Mode-specific features**: All features work across all modes (except pgvector which requires PostgreSQL) - -### Future Considerations - -1. 
**Read replicas**: Distributed mode read replica support (future) -2. **Sharding**: Database sharding strategies (future) -3. **Cloud-managed SQLite**: Integration with cloud SQLite providers (future) -4. **Hot reload**: Mode switching without restart (future) - -### Boundaries and Limitations - -- Memory mode data is ALWAYS ephemeral (by design) -- Persistent mode is NOT production-ready (local development only) -- SQLite has concurrency limitations (write serialization) -- Distributed mode requires external service management -- No downgrade path from distributed data to SQLite - ---- - -## Phased Rollout Plan - -### Phase 1: Core Configuration [CRITICAL] ✅ Planned -**Goal**: Configuration system supports three modes - -**Deliverables:** -- Mode constants updated (memory/persistent/distributed) -- Default mode changed to memory -- Configuration validation enforces new modes -- Mode resolution logic handles three modes -- All configuration tests passing - -**Success Criteria:** -- `make test` passes for config package -- `make lint` clean -- Mode selection validated correctly - -**User-Facing Value**: N/A (infrastructure) - ---- - -### Phase 2: Infrastructure Wiring [CRITICAL] ✅ Planned -**Goal**: Runtime correctly activates infrastructure per mode - -**Deliverables:** -- Cache layer mode-aware (memory/persistent/distributed) -- Temporal wiring mode-aware -- Server logging shows correct mode -- Manual runtime validation complete - -**Success Criteria:** -- Server starts successfully in all three modes -- Correct infrastructure activated per mode -- No regressions in distributed mode - -**User-Facing Value**: Backend prepared for new modes - ---- - -### Phase 3: Test Infrastructure [HIGH] ✅ Planned -**Goal**: Test suite 50-80% faster with memory mode - -**Deliverables:** -- Test helpers default to SQLite memory mode -- Integration tests migrated to SQLite (except pgvector tests) -- Golden test files updated -- Performance benchmarked - -**Success Criteria:** -- 
`make test` 50%+ faster than baseline -- All tests pass -- No flaky tests introduced - -**User-Facing Value**: Faster CI/CD, better developer experience - ---- - -### Phase 4: Documentation [HIGH] ✅ Planned -**Goal**: Users understand mode system and migration path - -**Deliverables:** -- Mode configuration documentation updated -- Deployment guides for all three modes -- Migration guide from standalone to memory/persistent -- Quick start reflects memory mode default -- Examples for each mode - -**Success Criteria:** -- All docs build without errors -- All code examples work -- Migration guide tested with real projects - -**User-Facing Value**: Clear guidance on mode selection and migration - ---- - -### Phase 5: Template System [NEW] 🆕 Proposed -**Goal**: `compozy init` generates mode-appropriate configuration - -**Deliverables:** -- TUI form includes mode selection dropdown -- Generated compozy.yaml reflects selected mode -- Docker compose conditional on distributed mode -- Mode-specific README documentation -- Mode-specific environment variables - -**Success Criteria:** -- All mode selections generate valid configurations -- Generated projects start successfully in chosen mode -- No Docker compose for memory/persistent modes - -**User-Facing Value**: **CRITICAL** - First impression and onboarding experience - ---- - -### Phase 6: Final Validation [CRITICAL] ✅ Planned -**Goal**: Ship-ready system validated end-to-end - -**Deliverables:** -- Comprehensive testing across all modes -- Performance benchmarks documented -- Error messages validated -- Examples tested -- Documentation validated - -**Success Criteria:** -- All tests pass -- Performance targets met -- No regressions -- Documentation accurate - -**User-Facing Value**: Confidence in release quality - ---- - -## Success Metrics - -### User Engagement Metrics -- **Time to first workflow execution**: <10 seconds (memory mode) -- **Developer onboarding completion**: >80% complete quick start guide -- **Mode 
distribution**: 60% memory, 30% persistent, 10% distributed (estimated) -- **Docker compose generation**: Only for distributed mode users - -### Performance Benchmarks -- **Test suite execution**: 45-90 seconds (down from 3-5 minutes, 60-70% improvement) -- **Server startup (memory)**: <1 second -- **Server startup (persistent)**: <2 seconds -- **Server startup (distributed)**: <3 seconds (network dependent) -- **CI/CD pipeline time**: 40-60% reduction (due to faster tests) - -### Business Impact Indicators -- **GitHub stars increase**: 20-30% within 1 month (easier onboarding) -- **Issue reduction**: 50% fewer "setup issues" (zero dependencies) -- **Community growth**: 30% increase in Discord/community activity -- **Blog mentions**: "Compozy is easiest agentic framework to get started" - -### Quality Attributes -- **Test coverage**: >80% (maintained) -- **Linter warnings**: Zero -- **Breaking change impact**: Documented in migration guide -- **Backward compatibility**: N/A (alpha version, breaking change acceptable) - ---- - -## Risks and Mitigations - -### User Adoption Risks - -**Risk**: Existing users confused by "standalone" removal -- **Severity**: Medium -- **Mitigation**: Clear migration guide with find/replace instructions -- **Mitigation**: Helpful error messages suggesting memory mode -- **Mitigation**: Blog post explaining rationale - -**Risk**: Users don't understand mode differences -- **Severity**: High -- **Mitigation**: Decision matrix in documentation -- **Mitigation**: Mode selection help text in TUI form -- **Mitigation**: Examples for each mode - -### Market Competition Risks - -**Risk**: Other frameworks already have zero-dependency setup -- **Severity**: Low -- **Mitigation**: Compozy's unique value is multi-mode flexibility -- **Mitigation**: Emphasize production-ready distributed mode - -### Resource and Timeline Constraints - -**Risk**: Template system (Phase 5) not originally planned -- **Severity**: High - impacts first impression -- 
**Mitigation**: Prioritize Phase 5 alongside Phase 4 (parallel work) -- **Mitigation**: Simple implementation (mode dropdown + conditional generation) -- **Estimate**: +1 day to timeline - -**Risk**: 6-7 day estimate may be tight -- **Severity**: Medium -- **Mitigation**: Parallel execution plan (73% time savings) -- **Mitigation**: Clear task dependencies identified -- **Mitigation**: Breaking change acceptable, can iterate post-launch - ---- - -## Open Questions - -### Resolved ✅ -- ~~Should we support mode switching without restart?~~ → No, out of scope -- ~~Should we migrate existing standalone data automatically?~~ → No, manual migration only -- ~~Should we keep "standalone" as an alias?~~ → No, clean break - -### Remaining ❓ - -1. **Template System Priority**: Should Phase 5 (Template System) be mandatory for MVP? - - **Impact**: HIGH - affects first impression and onboarding - - **Recommendation**: Yes, prioritize Phase 5 alongside Phase 4 - -2. **`.compozy/` Directory Location**: Should persistent mode use `./.compozy/` or `~/.compozy/`? - - **Current**: Project-local (`./.compozy/`) - - **Alternative**: User-global (`~/.compozy/`) - - **Recommendation**: Keep project-local, add `--data-dir` flag for customization (future) - -3. **Mode Validation Strictness**: Should we warn on suboptimal configurations (e.g., memory mode in Dockerfile)? - - **Recommendation**: Yes, add warnings in `compozy config diagnostics` - -4. **Migration Tool**: Should we provide `compozy migrate standalone-to-memory` command? - - **Recommendation**: No, manual migration sufficient for alpha (future consideration) - ---- - -## Appendix - -### Research Findings - -**Current Pain Points** (from GitHub issues and Discord): -- "Docker setup too complex for trying out Compozy" -- "Test suite takes forever with testcontainers" -- "Why do I need PostgreSQL just to run Hello World?" 
-- "Standalone mode still requires Redis, confusing name" - -**Competitive Analysis**: -- **Temporal**: Offers embedded server (Compozy already has this) -- **LangChain**: Zero dependencies for basic use (inspiration) -- **Prefect**: Defaults to SQLite, option for PostgreSQL (similar approach) - -### Design Mockups - -**TUI Form Flow:** -``` -┌─────────────────────────────────────┐ -│ Create New Compozy Project │ -├─────────────────────────────────────┤ -│ Template: [basic ▼] │ -│ │ -│ Mode: [memory ▼] │ ← NEW FIELD -│ 🚀 memory (default) │ -│ 💾 persistent │ -│ 🏭 distributed │ -│ │ -│ Include Docker: [No] │ ← Disabled for memory/persistent -└─────────────────────────────────────┘ -``` - -### Reference Materials - -- Technical Specification: `tasks/prd-modes/_techspec.md` -- Template Analysis: `tasks/prd-modes/TEMPLATE_SYSTEM_ANALYSIS.md` -- Current Mode Config Docs: `docs/content/docs/configuration/mode-configuration.mdx` -- Redis PRD: Reference for persistence patterns - ---- - -## Planning Artifacts (Post-Approval) - -These artifacts support implementation and should be maintained alongside this PRD: - -- ✅ **Tech Spec**: `tasks/prd-modes/_techspec.md` (exists) -- ✅ **Tasks Summary**: `tasks/prd-modes/_tasks.md` (created) -- ✅ **Individual Tasks**: `tasks/prd-modes/_task_*.md` (26 tasks created) -- 🆕 **Docs Plan**: `tasks/prd-modes/_docs.md` (to be created) -- 🆕 **Examples Plan**: `tasks/prd-modes/_examples.md` (to be created) -- 🆕 **Tests Plan**: `tasks/prd-modes/_tests.md` (to be created) - ---- - -**Status**: Ready for Implementation -**Next Steps**: Review Phase 5 (Template System) priority, then begin Phase 1 execution diff --git a/tasks/prd-modes/_task_1.0.md b/tasks/prd-modes/_task_1.0.md deleted file mode 100644 index 5547ac05..00000000 --- a/tasks/prd-modes/_task_1.0.md +++ /dev/null @@ -1,207 +0,0 @@ -# Task 1.0: Update Mode Constants & Defaults - - -Phase 1: Core Configuration -CRITICAL - BLOCKING -Medium -1 day - - ---- - -## Objective - -Update mode 
constants in `pkg/config/resolver.go` to support three modes (memory/persistent/distributed) and change the default mode from `distributed` to `memory`. - -**Impact**: BLOCKING - All other work depends on this change. - ---- - - -**MANDATORY VALIDATION:** -- Run `go test ./pkg/config -run TestResolveMode` - MUST PASS -- Run `go test ./pkg/config -run TestEffectiveDatabaseDriver` - MUST PASS -- Run `make lint` on pkg/config - MUST BE CLEAN - -**BREAKING CHANGE:** -- Default mode changes from "distributed" to "memory" -- "standalone" mode constant removed (breaking for alpha users) -- All mode references must use new constants - - ---- - - - -### Mode System Changes - -**New Mode Constants:** -- `ModeMemory = "memory"` - In-memory SQLite, fastest -- `ModePersistent = "persistent"` - File-based SQLite -- `ModeDistributed = "distributed"` - PostgreSQL + external services -- `ModeRemoteTemporal = "remote"` - UNCHANGED - -**Default Mode:** -- OLD: `ModeDistributed` -- NEW: `ModeMemory` - -**Rationale**: Zero-dependency quickstart, faster test execution - - - ---- - - -**Implementation Reference**: See `_techspec.md` Section "Phase 1.1: Update Mode Constants" (lines 290-369) - -**Key Files to Understand:** -- `pkg/config/resolver.go` - Mode resolution logic -- `pkg/config/config.go` - Configuration structure -- Redis PRD implementation - Persistence configuration patterns - -**Testing Approach:** -- Unit tests for mode resolution -- Tests for database driver selection -- Tests for temporal mode mapping - - ---- - -## Subtasks - -### 1.1 Replace Mode Constants -**File**: `pkg/config/resolver.go` (lines 6-11) - -- [ ] Remove `ModeStandalone` constant -- [ ] Add `ModeMemory` constant with comment -- [ ] Add `ModePersistent` constant with comment -- [ ] Keep `ModeDistributed` constant (update comment) -- [ ] Keep `ModeRemoteTemporal` constant (unchanged) - -**Reference**: `_techspec.md` lines 297-311 - ---- - -### 1.2 Update Default Mode -**File**: `pkg/config/resolver.go` 
(line 26) - -- [ ] Change `return ModeDistributed` to `return ModeMemory` -- [ ] Update function docstring (line 18) to reflect new default - -**Reference**: `_techspec.md` lines 314-329 - ---- - -### 1.3 Update EffectiveTemporalMode Logic -**File**: `pkg/config/resolver.go` (lines 36-42) - -- [ ] Update logic to handle `ModeMemory` and `ModePersistent` -- [ ] Both memory and persistent should return embedded mode -- [ ] Only `ModeDistributed` returns `ModeRemoteTemporal` -- [ ] Add comment explaining mode mapping - -**Reference**: `_techspec.md` lines 332-341 - ---- - -### 1.4 Update EffectiveDatabaseDriver Logic -**File**: `pkg/config/resolver.go` (lines 49-65) - -- [ ] Update nil check to return SQLite (changed from Postgres) -- [ ] Check for `ModeMemory || ModePersistent` → return SQLite -- [ ] Check for `ModeDistributed` → return Postgres -- [ ] Default fallback to SQLite -- [ ] Add comprehensive comments - -**Reference**: `_techspec.md` lines 344-362 - ---- - -## Relevant Files - -### Primary Files (Modified) -- `pkg/config/resolver.go` - Mode constants and resolution - -### Dependent Files (Reference Only) -- `pkg/config/config.go` - Will be updated in Task 2.0 -- `pkg/config/definition/schema.go` - Will be updated in Task 3.0 -- `engine/infra/cache/mod.go` - Will be updated in Task 5.0 -- `engine/infra/server/dependencies.go` - Will be updated in Task 6.0 - ---- - -## Deliverables - -1. **Updated Constants** - - `ModeMemory`, `ModePersistent`, `ModeDistributed` constants defined - - `ModeStandalone` constant removed - - Clear comments explaining each mode - -2. **Updated Default** - - Default mode changed to `ModeMemory` - - Docstrings updated - -3. **Updated Mode Resolution** - - `EffectiveTemporalMode()` handles three modes - - `EffectiveDatabaseDriver()` handles three modes - - Logical defaults for each mode - -4. 
**Tests Pass** - - All existing tests updated and passing - - No lint errors - ---- - -## Tests - -### Unit Tests to Update -**File**: `pkg/config/resolver_test.go` (Task 4.0 will handle this) - -Expected test coverage: -- Mode resolution with new constants -- Database driver selection for each mode -- Temporal mode selection for each mode -- Default mode behavior - -### Validation Commands -```bash -# Must pass before completing task -go test ./pkg/config -run TestResolveMode -v -go test ./pkg/config -run TestEffectiveDatabaseDriver -v -make lint -``` - ---- - -## Success Criteria - -- [x] All mode constants updated (memory/persistent/distributed) -- [x] Default mode changed to `ModeMemory` -- [x] `EffectiveTemporalMode()` logic updated -- [x] `EffectiveDatabaseDriver()` logic updated -- [x] All docstrings and comments updated -- [x] Code compiles without errors -- [x] No lint warnings or errors -- [x] Tests pass (will be comprehensive after Task 4.0) - ---- - -## Dependencies - -**Blocks:** -- Task 2.0 (Configuration Validation) -- Task 3.0 (Configuration Registry) -- Task 4.0 (Configuration Tests) -- ALL Phase 2, 3, 4, 5, 6 tasks - -**Depends On:** -- None (first task in sequence) - ---- - -## Notes - -- This is a BREAKING CHANGE for alpha users -- Old "standalone" mode maps conceptually to new "memory" mode -- Implementation details in `_techspec.md` lines 290-369 -- Keep changes focused - avoid scope creep to other files diff --git a/tasks/prd-modes/_task_10.0.md b/tasks/prd-modes/_task_10.0.md deleted file mode 100644 index fdb3fa19..00000000 --- a/tasks/prd-modes/_task_10.0.md +++ /dev/null @@ -1,124 +0,0 @@ -## status: pending - - -test/helpers -testing -test_infrastructure -medium -database|config|mode_system - - -# Task 10.0: Add Database Mode Helper - -## Overview - -Create a new test helper `SetupDatabaseWithMode` that intelligently selects database backend (SQLite memory/file or PostgreSQL) based on mode configuration, simplifying test setup for 
mode-aware tests. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start (tasks/prd-modes/_techspec.md) -- **YOU SHOULD ALWAYS** have in mind that this is a greenfield approach - no backwards compatibility required -- **MUST** verify Phase 1 (Core Config) is complete before starting - - - -When you need information about mode-based configuration: -- Use perplexity to find patterns for test configuration management -- Reference completed Phase 1 tasks for mode constant definitions - - - -- Create `SetupDatabaseWithMode(t *testing.T, mode string)` helper -- Map mode to appropriate database backend (memory→SQLite :memory:, persistent→SQLite file, distributed→PostgreSQL) -- Use `t.Context()` for proper context inheritance -- Configure database paths appropriately for each mode -- Add comprehensive documentation and examples - - -## Subtasks - -- [ ] 10.1 Create `SetupDatabaseWithMode` function signature -- [ ] 10.2 Implement mode-to-backend mapping logic -- [ ] 10.3 Handle SQLite memory mode configuration -- [ ] 10.4 Handle SQLite persistent mode configuration (temporary file) -- [ ] 10.5 Handle distributed mode configuration (PostgreSQL) -- [ ] 10.6 Add helper documentation with usage examples -- [ ] 10.7 Create example test demonstrating mode switching - -## Implementation Details - -### Objective -Provide a unified test helper that abstracts database setup based on mode, making it easy to test mode-specific behavior without manual configuration. - -### Key Implementation - -**File:** `test/helpers/database.go` - -**New function:** -```go -// SetupDatabaseWithMode configures database based on deployment mode. 
-// - "memory": SQLite :memory: (fastest, ephemeral) -// - "persistent": SQLite temp file (survives test duration) -// - "distributed": PostgreSQL testcontainer (full features) -func SetupDatabaseWithMode(t *testing.T, mode string) (*sqlx.DB, func()) -``` - -**Mode mapping:** -- `"memory"` → SQLite with `:memory:` path -- `"persistent"` → SQLite with `t.TempDir() + "/compozy.db"` path -- `"distributed"` → Call `SetupPostgresContainer(t)` - -**Return:** -- Configured database connection -- Cleanup function to close connection and remove temp files - -### Usage Example - -```go -func TestWithMemoryMode(t *testing.T) { - db, cleanup := helpers.SetupDatabaseWithMode(t, "memory") - defer cleanup() - - // Test runs with in-memory SQLite -} - -func TestWithDistributedMode(t *testing.T) { - db, cleanup := helpers.SetupDatabaseWithMode(t, "distributed") - defer cleanup() - - // Test runs with PostgreSQL testcontainer -} -``` - -### Relevant Files - -- `test/helpers/database.go` - Primary implementation - -### Dependent Files - -- Phase 1 tasks: `pkg/config/resolver.go` (mode constants) -- Task 9.0: Database helpers - -## Deliverables - -- `SetupDatabaseWithMode` function implementation -- Documentation comments explaining mode mapping -- Example test demonstrating mode switching -- Unit tests for the helper function itself - -## Tests - -- [ ] Unit test: memory mode returns SQLite :memory: connection -- [ ] Unit test: persistent mode returns SQLite file connection with temp directory -- [ ] Unit test: distributed mode returns PostgreSQL connection -- [ ] Integration test: mode switching works correctly across test cases -- [ ] Verify cleanup functions properly close connections and remove temp files -- [ ] Confirm `t.Context()` usage for context inheritance - -## Success Criteria - -- Helper correctly maps all three modes to appropriate backends -- Cleanup functions work properly (no leaked connections or temp files) -- Documentation clearly explains when to use this helper 
-- Example test demonstrates practical usage -- All unit and integration tests pass diff --git a/tasks/prd-modes/_task_11.0.md b/tasks/prd-modes/_task_11.0.md deleted file mode 100644 index 74b47e1f..00000000 --- a/tasks/prd-modes/_task_11.0.md +++ /dev/null @@ -1,154 +0,0 @@ -## status: pending - - -test/integration -testing -test_migration -high -database|sqlite|pgvector - - -# Task 11.0: Audit & Migrate Integration Tests - -## Overview - -Systematically audit all integration tests to migrate from PostgreSQL testcontainers to SQLite memory mode where appropriate, achieving 50-80% test suite speedup. Tests requiring pgvector or PostgreSQL-specific features remain on PostgreSQL with explicit configuration. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start (tasks/prd-modes/_techspec.md) -- **YOU SHOULD ALWAYS** have in mind that this is a greenfield approach - no backwards compatibility required -- **MUST** complete Task 9.0 (Test Helpers) before starting -- **MUST** identify pgvector dependencies before migration - - - -When you need information about SQLite compatibility: -- Use perplexity to find SQLite vs PostgreSQL feature compatibility -- Use context7 to check pgvector usage patterns in Go code - - - -- Audit all integration tests using `GetSharedPostgresDB` or testcontainers -- Migrate tests to SQLite unless they require PostgreSQL-specific features -- Keep pgvector tests on PostgreSQL with explicit `SetupPostgresContainer` -- Add mode configuration to tests that should test mode switching -- Ensure all tests use `t.Context()` instead of `context.Background()` -- Measure and document performance improvements - - -## Subtasks - -- [ ] 11.1 Audit all integration tests for database usage patterns -- [ ] 11.2 Identify tests requiring pgvector (must stay on PostgreSQL) -- [ ] 11.3 Migrate store operation tests to SQLite -- [ ] 11.4 Migrate worker integration tests to SQLite 
-- [ ] 11.5 Migrate server execution tests to SQLite -- [ ] 11.6 Migrate tool integration tests to SQLite -- [ ] 11.7 Migrate repo tests to SQLite -- [ ] 11.8 Update pgvector tests to use explicit `SetupPostgresContainer` -- [ ] 11.9 Run full test suite and measure performance -- [ ] 11.10 Document migration patterns and exceptions - -## Implementation Details - -### Objective -Convert integration tests from slow PostgreSQL testcontainers to fast SQLite memory mode, while preserving PostgreSQL for tests that genuinely require it (pgvector, PostgreSQL-specific SQL features). - -### Audit Strategy - -**Step 1: Identify test files** -```bash -# Find all integration tests using database -find test/integration -name "*_test.go" | xargs grep -l "GetSharedPostgresDB\|testcontainers" -``` - -**Step 2: Categorize tests** -- **Migrate to SQLite:** Generic database operations, CRUD tests, workflow tests -- **Keep PostgreSQL:** pgvector tests, knowledge/RAG tests, PostgreSQL-specific SQL - -**Step 3: Migration pattern** -```go -// BEFORE: -pool, cleanup := helpers.GetSharedPostgresDB(t) -defer cleanup() - -// AFTER (for SQLite migration): -db, cleanup := helpers.SetupTestDatabase(t, "sqlite") // Now defaults to SQLite -defer cleanup() - -// AFTER (for PostgreSQL retention): -db, cleanup := helpers.SetupPostgresContainer(t) // Explicit PostgreSQL -defer cleanup() -``` - -### Files to Audit and Migrate - -**High-priority migrations (frequent test runs):** -1. `test/integration/store/operations_test.go` → SQLite -2. `test/integration/worker/*/database.go` → SQLite -3. `test/integration/server/executions_integration_test.go` → SQLite -4. `test/integration/tool/helpers.go` → SQLite -5. 
`test/integration/repo/repo_test_helpers.go` → SQLite - -**Keep on PostgreSQL:** -- Any tests in `test/integration/knowledge/` (pgvector dependency) -- Any tests in `test/integration/rag/` (pgvector dependency) -- Tests explicitly validating PostgreSQL-specific features - -### Performance Measurement - -```bash -# Before migration -time make test > before.log - -# After migration -time make test > after.log - -# Compare results -echo "Before: $(grep 'PASS' before.log | wc -l) tests" -echo "After: $(grep 'PASS' after.log | wc -l) tests" -``` - -### Relevant Files - -- `test/integration/store/operations_test.go` -- `test/integration/worker/*/database.go` -- `test/integration/server/executions_integration_test.go` -- `test/integration/tool/helpers.go` -- `test/integration/repo/repo_test_helpers.go` -- All files in `test/integration/knowledge/` (audit only, no migration) -- All files in `test/integration/rag/` (audit only, no migration) - -### Dependent Files - -- Task 9.0: Updated test helpers -- Task 10.0: Mode-based database helper (optional usage) - -## Deliverables - -- Audit report: list of migrated tests vs PostgreSQL-retained tests -- Migrated integration tests using SQLite by default -- PostgreSQL tests explicitly using `SetupPostgresContainer` -- Performance comparison report (before/after execution time) -- Migration pattern documentation for future reference - -## Tests - -Validation through test execution: - -- [ ] All migrated tests pass with SQLite -- [ ] PostgreSQL-specific tests still pass with explicit container setup -- [ ] No test coverage regression (same number of tests passing) -- [ ] Performance improvement: 50-80% faster test suite execution -- [ ] No Docker containers started for SQLite tests -- [ ] Verify `t.Context()` usage throughout migrated tests - -## Success Criteria - -- 50-80% test suite speedup achieved -- All tests pass with new database backends -- Clear documentation of PostgreSQL vs SQLite test categorization -- pgvector tests 
explicitly marked and isolated -- No regression in test coverage or quality -- Performance metrics documented and validated diff --git a/tasks/prd-modes/_task_12.0.md b/tasks/prd-modes/_task_12.0.md deleted file mode 100644 index 69702c06..00000000 --- a/tasks/prd-modes/_task_12.0.md +++ /dev/null @@ -1,157 +0,0 @@ -## status: pending - - -test/integration -testing -test_infrastructure -medium -mode_system|temporal|config - - -# Task 12.0: Update Integration Test Helpers - -## Overview - -Update integration test helper files to reference new mode names (memory/persistent/distributed) and ensure proper mode switching test coverage. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start (tasks/prd-modes/_techspec.md) -- **YOU SHOULD ALWAYS** have in mind that this is a greenfield approach - no backwards compatibility required -- **MUST** complete Phase 1 (Core Config) before starting -- **MUST** complete Task 11.0 (Integration Test Migration) before starting - - - -When you need information about test organization: -- Use perplexity to find Go integration test best practices -- Reference completed Phase 1 tasks for mode constant usage - - - -- Update `test/integration/standalone/helpers.go` to use "memory" terminology -- Update `test/integration/temporal/mode_switching_test.go` for three modes -- Add test coverage for persistent mode -- Ensure all helpers use `t.Context()` for context inheritance -- Verify mode resolution works correctly in test scenarios - - -## Subtasks - -- [ ] 12.1 Update `test/integration/standalone/helpers.go` mode references -- [ ] 12.2 Rename "standalone" references to "memory" throughout helpers -- [ ] 12.3 Update `mode_switching_test.go` to test all three modes -- [ ] 12.4 Add `TestModeResolver_Persistent` test case -- [ ] 12.5 Update `TestModeResolver_Memory` (renamed from standalone) -- [ ] 12.6 Verify `TestModeResolver_Distributed` still works -- [ ] 
12.7 Add integration test for mode inheritance behavior -- [ ] 12.8 Run mode switching tests and verify all pass - -## Implementation Details - -### Objective -Update integration test infrastructure to align with new three-mode system and verify mode resolution logic works correctly. - -### Key Changes - -**File:** `test/integration/standalone/helpers.go` - -1. **Update terminology:** - - Replace "standalone" with "memory" in function names and comments - - Update documentation to reflect new mode system - - Ensure helpers configure memory mode correctly - -2. **Consider renaming directory:** - - Evaluate if `test/integration/standalone/` should be renamed to `test/integration/memory/` - - If renamed, update all import paths - -**File:** `test/integration/temporal/mode_switching_test.go` - -**Existing test structure (needs update):** -```go -func TestModeResolver_Distributed(t *testing.T) { - // ... test distributed mode -} - -func TestModeResolver_Standalone(t *testing.T) { // RENAME - // ... 
test memory mode -} -``` - -**New test structure:** -```go -func TestModeResolver_Memory(t *testing.T) { - cfg := &config.Config{Mode: "memory"} - - // Verify resolution - assert.Equal(t, "memory", config.ResolveMode(cfg, "")) - assert.Equal(t, "sqlite", cfg.EffectiveDatabaseDriver()) - assert.Equal(t, "memory", cfg.EffectiveTemporalMode()) -} - -func TestModeResolver_Persistent(t *testing.T) { - cfg := &config.Config{Mode: "persistent"} - - // Verify resolution - assert.Equal(t, "persistent", config.ResolveMode(cfg, "")) - assert.Equal(t, "sqlite", cfg.EffectiveDatabaseDriver()) - assert.Equal(t, "persistent", cfg.EffectiveTemporalMode()) -} - -func TestModeResolver_Distributed(t *testing.T) { - cfg := &config.Config{Mode: "distributed"} - - // Verify resolution - assert.Equal(t, "distributed", config.ResolveMode(cfg, "")) - assert.Equal(t, "postgres", cfg.EffectiveDatabaseDriver()) - assert.Equal(t, "remote", cfg.EffectiveTemporalMode()) -} - -func TestModeResolver_Inheritance(t *testing.T) { - cfg := &config.Config{ - Mode: "memory", - Temporal: config.TemporalConfig{ - Mode: "persistent", // Override global - }, - } - - // Verify component override - assert.Equal(t, "memory", config.ResolveMode(cfg, "")) - assert.Equal(t, "persistent", cfg.EffectiveTemporalMode()) -} -``` - -### Relevant Files - -- `test/integration/standalone/helpers.go` -- `test/integration/temporal/mode_switching_test.go` - -### Dependent Files - -- Phase 1: `pkg/config/resolver.go` (mode resolution logic) -- Task 11.0: Migrated integration tests - -## Deliverables - -- Updated helper files with correct mode terminology -- Comprehensive mode switching tests for all three modes -- Mode inheritance test coverage -- Documentation of mode resolution behavior in tests - -## Tests - -- [ ] `TestModeResolver_Memory` passes -- [ ] `TestModeResolver_Persistent` passes -- [ ] `TestModeResolver_Distributed` passes -- [ ] `TestModeResolver_Inheritance` passes (component override) -- [ ] All integration 
tests using helpers still pass -- [ ] Verify `t.Context()` usage in all test helpers -- [ ] Run `make test` to confirm no regressions - -## Success Criteria - -- All mode switching tests pass -- Helper files use correct mode terminology -- Mode inheritance behavior is tested and verified -- No regressions in existing integration tests -- Documentation clearly explains mode resolution in tests diff --git a/tasks/prd-modes/_task_13.0.md b/tasks/prd-modes/_task_13.0.md deleted file mode 100644 index a11455bc..00000000 --- a/tasks/prd-modes/_task_13.0.md +++ /dev/null @@ -1,155 +0,0 @@ -## status: pending - - -testdata -testing -golden_files -low -cli|config - - -# Task 13.0: Update Golden Test Files - -## Overview - -Regenerate golden test files to reflect new mode names (memory/persistent/distributed) and updated configuration defaults. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start (tasks/prd-modes/_techspec.md) -- **YOU SHOULD ALWAYS** have in mind that this is a greenfield approach - no backwards compatibility required -- **MUST** complete Phase 1 (Core Config) and Phase 2 (Infrastructure) before starting - - - -When you need information about golden file testing: -- Use perplexity to find Go golden file testing patterns -- Use context7 to check testify or similar framework documentation - - - -- Identify all golden files containing "standalone" mode references -- Update golden files to use "memory" mode -- Regenerate golden files using `UPDATE_GOLDEN=1` flag -- Verify all CLI config commands pass with new golden files -- Ensure golden files reflect correct default mode (memory) - - -## Subtasks - -- [ ] 13.1 Identify all golden files in `testdata/` directory -- [ ] 13.2 Find golden files containing "standalone" references -- [ ] 13.3 Update `config-diagnostics-standalone.golden` → `config-diagnostics-memory.golden` -- [ ] 13.4 Update `config-show-mixed.golden` mode 
references -- [ ] 13.5 Update `config-show-standalone.golden` → `config-show-memory.golden` -- [ ] 13.6 Regenerate golden files using `UPDATE_GOLDEN=1` -- [ ] 13.7 Run CLI config tests to verify golden file accuracy -- [ ] 13.8 Update any test code referencing old golden file names - -## Implementation Details - -### Objective -Update golden test files to reflect new mode system and ensure CLI configuration commands produce correct output. - -### Golden Files to Update - -**Location:** `testdata/` directory (typically at project root or under `cli/`) - -**Files identified from techspec:** -1. `testdata/config-diagnostics-standalone.golden` → Rename and update -2. `testdata/config-show-mixed.golden` → Update mode references -3. `testdata/config-show-standalone.golden` → Rename and update - -### Update Process - -**Step 1: Find all golden files** -```bash -find . -name "*.golden" -type f | grep -E "(config|mode|standalone)" -``` - -**Step 2: Manual updates** - -Before regenerating, update mode references: -```yaml -# BEFORE: -mode: standalone - -# AFTER: -mode: memory -``` - -**Step 3: Regenerate golden files** -```bash -# Set environment variable to update golden files -UPDATE_GOLDEN=1 go test ./cli/cmd/config/... -v - -# Verify changes -git diff testdata/ -``` - -**Step 4: Validate** -```bash -# Run tests without UPDATE_GOLDEN to verify matches -go test ./cli/cmd/config/... 
-v -``` - -### File Rename Operations - -```bash -# Rename golden files to reflect new mode -mv testdata/config-diagnostics-standalone.golden \ - testdata/config-diagnostics-memory.golden - -mv testdata/config-show-standalone.golden \ - testdata/config-show-memory.golden -``` - -### Test Code Updates - -Update test files that reference old golden file names: - -```go -// BEFORE: -goldenFile := "testdata/config-show-standalone.golden" - -// AFTER: -goldenFile := "testdata/config-show-memory.golden" -``` - -### Relevant Files - -- `testdata/config-diagnostics-standalone.golden` → rename to `config-diagnostics-memory.golden` -- `testdata/config-show-mixed.golden` → update mode references -- `testdata/config-show-standalone.golden` → rename to `config-show-memory.golden` -- Test files in `cli/cmd/config/` that reference golden files - -### Dependent Files - -- Phase 1: `pkg/config/resolver.go` (mode defaults) -- Phase 2: Infrastructure changes affecting configuration output - -## Deliverables - -- Renamed golden files with correct mode terminology -- Updated golden file content reflecting new defaults -- Passing CLI config tests with regenerated golden files -- Documentation of golden file regeneration process - -## Tests - -Validation through CLI tests: - -- [ ] Run `go test ./cli/cmd/config/... 
-v` and verify all pass -- [ ] Check `config diagnostics` command output matches new golden file -- [ ] Check `config show` command output matches updated golden files -- [ ] Verify default mode is "memory" in generated output -- [ ] Confirm mode validation accepts memory/persistent/distributed -- [ ] Check git diff shows expected changes only - -## Success Criteria - -- All CLI config tests pass with new golden files -- Golden files accurately reflect new mode system -- No references to "standalone" mode remain in golden files -- Default mode is "memory" in all generated configuration output -- Regeneration process is documented for future updates diff --git a/tasks/prd-modes/_task_14.0.md b/tasks/prd-modes/_task_14.0.md deleted file mode 100644 index 2d8eed92..00000000 --- a/tasks/prd-modes/_task_14.0.md +++ /dev/null @@ -1,98 +0,0 @@ -## status: pending - - -documentation -documentation -deployment_guides -medium -none - - -# Task 14.0: Update Deployment Documentation - -## Overview - -Rename and update deployment documentation to reflect new three-mode system (memory/persistent/distributed). This involves creating new mode-specific guides and updating existing distributed mode documentation with comparison tables. 
- - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start (tasks/prd-modes/_techspec.md Phase 4.1) -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha - - - -- Rename standalone-mode.mdx to memory-mode.mdx with updated content -- Create new persistent-mode.mdx documentation -- Update distributed-mode.mdx with mode comparison table -- All examples must be working and accurate -- Clear use case guidance for each mode -- Consistent structure across all mode documentation - - -## Subtasks - -- [ ] 14.1 Rename and update standalone-mode.mdx to memory-mode.mdx -- [ ] 14.2 Create new persistent-mode.mdx documentation -- [ ] 14.3 Update distributed-mode.mdx with comparison section -- [ ] 14.4 Verify all cross-references between mode docs - -## Implementation Details - -See `tasks/prd-modes/_techspec.md` Section 4.1 for complete implementation details. 
- -**Key Changes:** - -**Memory Mode (renamed from standalone):** -- Document in-memory SQLite, embedded Temporal, embedded Redis -- Use cases: testing, rapid development, CI/CD pipelines -- Characteristics: instant startup, no persistence, fastest execution -- Limitations: no pgvector support, write concurrency limits - -**Persistent Mode (NEW):** -- Document file-based SQLite with persistence -- Use cases: local development, debugging, small teams -- Default paths: `./.compozy/` directory structure -- Backup and recovery procedures -- Same limitations as memory mode - -**Distributed Mode (updated):** -- Add mode comparison table at top -- Highlight production readiness -- Clear guidance on when to use distributed vs other modes - -### Relevant Files - -- `docs/content/docs/deployment/standalone-mode.mdx` → rename to `memory-mode.mdx` -- `docs/content/docs/deployment/persistent-mode.mdx` (NEW) -- `docs/content/docs/deployment/distributed-mode.mdx` (UPDATE) - -### Dependent Files - -- `docs/content/docs/configuration/mode-configuration.mdx` (Task 15.0) -- `docs/content/docs/guides/mode-migration-guide.mdx` (Task 16.0) - -## Deliverables - -- [ ] `memory-mode.mdx` with updated content and use cases -- [ ] `persistent-mode.mdx` with complete configuration examples -- [ ] `distributed-mode.mdx` with mode comparison table -- [ ] All internal links updated and working -- [ ] Consistent MDX formatting and structure - -## Tests - -Documentation verification (no automated tests): -- [ ] All code examples are syntactically valid YAML -- [ ] All cross-references resolve correctly -- [ ] Mode comparison table is accurate -- [ ] Use case guidance is clear and actionable -- [ ] No references to old "standalone" naming (except in migration context) - -## Success Criteria - -- All three mode documentation pages exist and are complete -- Mode comparison table accurately reflects differences -- Clear guidance on when to use each mode -- Examples work and follow best practices -- No 
broken internal links -- Consistent structure and formatting across all mode docs diff --git a/tasks/prd-modes/_task_15.0.md b/tasks/prd-modes/_task_15.0.md deleted file mode 100644 index 20912344..00000000 --- a/tasks/prd-modes/_task_15.0.md +++ /dev/null @@ -1,98 +0,0 @@ -## status: pending - - -documentation -documentation -configuration_guides -medium -none - - -# Task 15.0: Update Configuration Documentation - -## Overview - -Update mode configuration documentation to reflect new three-mode system. Document mode resolution order, component override capabilities, and provide clear examples for each mode. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start (tasks/prd-modes/_techspec.md Phase 4.2) -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha - - - -- Update mode-configuration.mdx with new modes (memory/persistent/distributed) -- Document mode resolution order clearly -- Provide component override examples -- Link to mode-specific deployment guides -- All YAML examples must be valid and tested - - -## Subtasks - -- [ ] 15.1 Update mode options section with three modes -- [ ] 15.2 Document mode resolution order (component → global → default) -- [ ] 15.3 Add component override examples -- [ ] 15.4 Link to deployment guides for each mode -- [ ] 15.5 Verify all configuration examples - -## Implementation Details - -See `tasks/prd-modes/_techspec.md` Section 4.2 for complete implementation details. - -**Key Sections to Update:** - -**Mode Options:** -- memory (default): SQLite :memory:, embedded services, zero dependencies -- persistent: SQLite file, embedded services with persistence -- distributed: PostgreSQL, external Temporal, external Redis - -**Resolution Order:** -1. Component-specific mode (if set) -2. Global mode (if set) -3. 
Default (memory) - -**Component Override Examples:** -- Global memory mode with persistent Temporal -- Mixed mode configurations for hybrid deployments -- Per-component configuration options - -### Relevant Files - -- `docs/content/docs/configuration/mode-configuration.mdx` (PRIMARY) - -### Dependent Files - -- `docs/content/docs/deployment/memory-mode.mdx` (Task 14.0) -- `docs/content/docs/deployment/persistent-mode.mdx` (Task 14.0) -- `docs/content/docs/deployment/distributed-mode.mdx` (Task 14.0) -- `docs/content/docs/examples/memory-mode.mdx` (Task 19.0) -- `docs/content/docs/examples/persistent-mode.mdx` (Task 19.0) -- `docs/content/docs/examples/distributed-mode.mdx` (Task 19.0) - -## Deliverables - -- [ ] Updated `mode-configuration.mdx` with three-mode system -- [ ] Clear mode resolution order documentation -- [ ] Component override examples -- [ ] Working links to deployment and example pages -- [ ] Valid YAML configuration examples - -## Tests - -Documentation verification (no automated tests): -- [ ] All YAML examples are syntactically correct -- [ ] Mode resolution order is clearly explained -- [ ] Component override examples work as documented -- [ ] All internal links resolve correctly -- [ ] Default mode (memory) is clearly stated -- [ ] Examples cover common use cases - -## Success Criteria - -- Configuration documentation accurately reflects new modes -- Mode resolution order is clear and unambiguous -- Component override patterns are well-documented -- All examples are valid and tested -- Links to deployment guides work correctly -- No references to old "standalone" mode (except migration context) diff --git a/tasks/prd-modes/_task_16.0.md b/tasks/prd-modes/_task_16.0.md deleted file mode 100644 index 5306b4fe..00000000 --- a/tasks/prd-modes/_task_16.0.md +++ /dev/null @@ -1,99 +0,0 @@ -## status: pending - - -documentation -documentation -migration_guides -medium -none - - -# Task 16.0: Create Migration Guide - -## Overview - -Create 
comprehensive migration guide covering transitions between modes and migration from alpha versions. Document common issues, data export/import procedures, and provide troubleshooting guidance. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start (tasks/prd-modes/_techspec.md Phase 4.3) -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha - - - -- Rename existing migration guide to mode-migration-guide.mdx -- Document all migration paths (memory → persistent → distributed) -- Provide alpha version migration instructions (standalone → memory/persistent) -- Include data export/import procedures -- Document common issues and solutions -- Clear step-by-step instructions for each migration path - - -## Subtasks - -- [ ] 16.1 Rename and restructure existing migration guide -- [ ] 16.2 Document alpha version migration (standalone → memory/persistent) -- [ ] 16.3 Add memory → persistent migration path -- [ ] 16.4 Add persistent → distributed migration path -- [ ] 16.5 Document common issues (pgvector, concurrency limits) -- [ ] 16.6 Add data export/import procedures - -## Implementation Details - -See `tasks/prd-modes/_techspec.md` Section 4.3 for complete implementation details. 
- -**Migration Paths to Document:** - -**Alpha Version Migration:** -- Old `standalone` → New `memory` (for testing/ephemeral) -- Old `standalone` → New `persistent` (for development with persistence) -- Old `distributed` → New `distributed` (no changes) - -**Mode Transitions:** -- **Memory → Persistent**: Add persistence configuration -- **Persistent → Distributed**: Export data, update config, import data - -**Common Issues:** -- pgvector incompatibility with SQLite (solution: use Qdrant/Redis) -- Concurrent workflow limits with SQLite (solution: migrate to distributed) -- Configuration validation errors - -### Relevant Files - -- `docs/content/docs/guides/migrate-standalone-to-distributed.mdx` → rename to `mode-migration-guide.mdx` - -### Dependent Files - -- `docs/content/docs/deployment/memory-mode.mdx` (Task 14.0) -- `docs/content/docs/deployment/persistent-mode.mdx` (Task 14.0) -- `docs/content/docs/deployment/distributed-mode.mdx` (Task 14.0) -- `docs/content/docs/configuration/mode-configuration.mdx` (Task 15.0) - -## Deliverables - -- [ ] Renamed and updated `mode-migration-guide.mdx` -- [ ] Alpha version migration instructions -- [ ] All migration paths documented with examples -- [ ] Data export/import procedures -- [ ] Common issues and troubleshooting section -- [ ] Working code examples for each migration - -## Tests - -Documentation verification (no automated tests): -- [ ] All migration commands are valid and tested -- [ ] YAML examples are syntactically correct -- [ ] Data export/import procedures work -- [ ] Common issues have actionable solutions -- [ ] Migration paths are complete and sequential -- [ ] No broken references to old mode names - -## Success Criteria - -- All migration paths are clearly documented -- Alpha version migration is straightforward -- Data export/import procedures are complete -- Common issues have clear solutions -- Step-by-step instructions are easy to follow -- Examples work and are validated -- No references to 
outdated mode names (except historical context) diff --git a/tasks/prd-modes/_task_17.0.md b/tasks/prd-modes/_task_17.0.md deleted file mode 100644 index e7a54777..00000000 --- a/tasks/prd-modes/_task_17.0.md +++ /dev/null @@ -1,99 +0,0 @@ -## status: pending - - -documentation -documentation -quick_start_guides -low -none - - -# Task 17.0: Update Quick Start - -## Overview - -Update quick start documentation to reflect memory mode as the new default. Simplify getting started experience by emphasizing zero-dependency setup and provide clear guidance on when to use other modes. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start (tasks/prd-modes/_techspec.md Phase 4.4) -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha - - - -- Update quick start to emphasize memory mode as default -- Simplify getting started steps (no external dependencies) -- Provide clear "next steps" for other modes -- All examples must work with memory mode -- Brief explanation of mode options without overwhelming new users - - -## Subtasks - -- [ ] 17.1 Update installation and first run section -- [ ] 17.2 Emphasize zero-dependency default (memory mode) -- [ ] 17.3 Add brief mode selection guidance -- [ ] 17.4 Update example workflow to work in memory mode -- [ ] 17.5 Add "next steps" section with links to other modes - -## Implementation Details - -See `tasks/prd-modes/_techspec.md` Section 4.4 for complete implementation details. 
- -**Key Updates:** - -**Getting Started:** -```bash -# Install -brew install compozy - -# Start (default: memory mode, no external deps) -compozy start - -# Your first workflow -compozy workflow run examples/hello-world.yaml -``` - -**Mode Guidance (Brief):** -- **Default mode:** memory (fastest, no persistence) -- **Need persistence?** Add `mode: persistent` to config -- **Production?** Use `mode: distributed` with external services - -Keep quick start focused on getting users running immediately. Defer detailed mode discussions to deployment guides. - -### Relevant Files - -- `docs/content/docs/quick-start/index.mdx` (PRIMARY) - -### Dependent Files - -- `docs/content/docs/deployment/memory-mode.mdx` (Task 14.0) -- `docs/content/docs/deployment/persistent-mode.mdx` (Task 14.0) -- `docs/content/docs/deployment/distributed-mode.mdx` (Task 14.0) - -## Deliverables - -- [ ] Updated `quick-start/index.mdx` with memory mode as default -- [ ] Simplified getting started steps -- [ ] Brief mode selection guidance -- [ ] Working example workflow -- [ ] Clear "next steps" section with mode links - -## Tests - -Documentation verification (no automated tests): -- [ ] Installation commands are correct -- [ ] `compozy start` works without configuration -- [ ] Example workflow runs successfully -- [ ] Links to mode documentation work -- [ ] Quick start doesn't overwhelm with options -- [ ] Clear path from quick start to production deployment - -## Success Criteria - -- Quick start emphasizes simplicity (zero dependencies) -- Memory mode is clearly the default -- Getting started steps work immediately -- Mode selection guidance is brief but helpful -- Clear progression path to other modes -- No confusion about which mode to start with diff --git a/tasks/prd-modes/_task_18.0.md b/tasks/prd-modes/_task_18.0.md deleted file mode 100644 index 082939da..00000000 --- a/tasks/prd-modes/_task_18.0.md +++ /dev/null @@ -1,98 +0,0 @@ -## status: pending - - -documentation -documentation 
-cli_help -low -none - - -# Task 18.0: Update CLI Help - -## Overview - -Update CLI help documentation and inline help text to reflect new mode system. Ensure --mode flag description accurately describes all three modes and default behavior. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start (tasks/prd-modes/_techspec.md Phase 4.5) -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha - - - -- Update global flags help documentation -- Update inline CLI help for --mode flag -- Ensure environment variable documentation is accurate -- Brief but clear description of each mode -- Default mode (memory) is clearly stated - - -## Subtasks - -- [ ] 18.1 Update cli/help/global-flags.md -- [ ] 18.2 Verify CLI flag help text matches documentation -- [ ] 18.3 Update environment variable documentation (COMPOZY_MODE) -- [ ] 18.4 Ensure consistency across all help outputs - -## Implementation Details - -See `tasks/prd-modes/_techspec.md` Section 4.5 for complete implementation details. 
- -**Updated --mode Flag Description:** - -```markdown -### --mode - -Deployment mode: memory (default), persistent, or distributed - -- **memory**: In-memory SQLite, embedded services (fastest) -- **persistent**: File-based SQLite, embedded services (local dev) -- **distributed**: PostgreSQL, external services (production) - -**Default:** memory - -**Environment:** COMPOZY_MODE -``` - -**Key Points:** -- Clear one-line summary for each mode -- Default mode explicitly stated -- Use cases in parentheses -- Environment variable documented - -### Relevant Files - -- `cli/help/global-flags.md` (PRIMARY) -- CLI flag definitions (verify inline help matches docs) - -### Dependent Files - -- `pkg/config/resolver.go` (ensure default matches documentation) -- `docs/content/docs/configuration/mode-configuration.mdx` (Task 15.0) - -## Deliverables - -- [ ] Updated `cli/help/global-flags.md` with new mode descriptions -- [ ] Verified inline CLI help matches documentation -- [ ] Updated environment variable documentation -- [ ] Consistent help text across all CLI commands - -## Tests - -Documentation verification (no automated tests): -- [ ] `compozy --help` shows correct mode description -- [ ] `compozy start --help` shows correct mode flag -- [ ] Environment variable (COMPOZY_MODE) is documented -- [ ] Default mode is stated as "memory" -- [ ] All three modes are described -- [ ] Help text is concise and clear - -## Success Criteria - -- CLI help documentation is accurate and complete -- Inline help matches written documentation -- Mode descriptions are brief but informative -- Default mode is clearly stated -- Environment variable usage is documented -- No references to old "standalone" mode diff --git a/tasks/prd-modes/_task_19.0.md b/tasks/prd-modes/_task_19.0.md deleted file mode 100644 index 826aacdf..00000000 --- a/tasks/prd-modes/_task_19.0.md +++ /dev/null @@ -1,131 +0,0 @@ -## status: pending - - -documentation -documentation -examples -medium -none - - -# Task 19.0: 
Create/Update Examples - -## Overview - -Create or update example configurations for each mode (memory, persistent, distributed). Ensure examples demonstrate common patterns, best practices, and mode-specific features. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start (tasks/prd-modes/_techspec.md Phase 4) -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha - - - -- Create/update example for memory mode -- Create/update example for persistent mode -- Create/update example for distributed mode -- Examples must demonstrate mode-specific features -- All examples must be tested and working -- Clear comments explaining mode-specific configuration - - -## Subtasks - -- [ ] 19.1 Create/update memory mode example -- [ ] 19.2 Create/update persistent mode example -- [ ] 19.3 Create/update distributed mode example -- [ ] 19.4 Add inline comments explaining mode-specific features -- [ ] 19.5 Test all examples to ensure they work -- [ ] 19.6 Update examples index/navigation - -## Implementation Details - -Examples should demonstrate: - -**Memory Mode Example:** -- Minimal configuration (mode can be omitted as it's default) -- Fast startup for testing/development -- No external dependencies -- Typical use case: testing or quick experimentation - -**Persistent Mode Example:** -- File-based storage configuration -- Custom paths for database and Temporal -- Redis persistence settings -- Typical use case: local development with state preservation - -**Distributed Mode Example:** -- External PostgreSQL configuration -- External Temporal cluster -- External Redis cluster -- Production-ready settings -- Typical use case: production deployment - -**Example Structure:** -```yaml -name: example-workflow -mode: [memory|persistent|distributed] # Explicit mode - -# Mode-specific 
configuration -database: - # ... mode-appropriate settings - -temporal: - # ... mode-appropriate settings - -redis: - # ... mode-appropriate settings - -# Common workflow configuration -models: - - provider: openai - model: gpt-4o-mini - api_key: "${OPENAI_API_KEY}" - -tasks: - # ... example tasks -``` - -### Relevant Files - -- `docs/content/docs/examples/memory-mode.mdx` (CREATE or UPDATE) -- `docs/content/docs/examples/persistent-mode.mdx` (CREATE or UPDATE) -- `docs/content/docs/examples/distributed-mode.mdx` (CREATE or UPDATE) -- `examples/configs/memory-mode.yaml` (CREATE) -- `examples/configs/persistent-mode.yaml` (CREATE) -- `examples/configs/distributed-mode.yaml` (CREATE) - -### Dependent Files - -- `docs/content/docs/configuration/mode-configuration.mdx` (Task 15.0) -- `docs/content/docs/deployment/*.mdx` (Task 14.0) - -## Deliverables - -- [ ] Working memory mode example with documentation -- [ ] Working persistent mode example with documentation -- [ ] Working distributed mode example with documentation -- [ ] Inline comments explaining mode-specific settings -- [ ] Updated examples navigation/index -- [ ] All examples tested and validated - -## Tests - -Documentation verification (no automated tests): -- [ ] Memory mode example runs successfully -- [ ] Persistent mode example runs successfully -- [ ] Distributed mode example runs successfully (with required services) -- [ ] All YAML is syntactically valid -- [ ] Comments are clear and helpful -- [ ] Examples demonstrate mode-specific features -- [ ] Configuration follows best practices - -## Success Criteria - -- Three complete mode examples exist -- Each example demonstrates mode-specific features -- Examples are well-documented with inline comments -- All examples have been tested and work -- Clear use case guidance in each example -- Examples follow configuration best practices -- No references to old "standalone" mode diff --git a/tasks/prd-modes/_task_2.0.md b/tasks/prd-modes/_task_2.0.md deleted 
file mode 100644 index 14baa4a4..00000000 --- a/tasks/prd-modes/_task_2.0.md +++ /dev/null @@ -1,195 +0,0 @@ -# Task 2.0: Update Configuration Validation - - -Phase 1: Core Configuration -CRITICAL -Medium -1 day - - ---- - -## Objective - -Update configuration struct validation tags and documentation in `pkg/config/config.go` to reflect the new three-mode system (memory/persistent/distributed). - -**Impact**: Ensures configuration validation catches invalid modes and provides clear documentation for developers. - ---- - - -**MANDATORY VALIDATION:** -- Run `go test ./pkg/config -run TestConfigValidation` - MUST PASS -- Run `make lint` on pkg/config - MUST BE CLEAN -- Verify struct tags are correctly formatted - -**BREAKING CHANGE:** -- Mode validation rejects "standalone" (breaking for alpha users) -- Only accepts "memory", "persistent", "distributed" - - ---- - - - -### Configuration Changes - -**Mode Field Validation Tag:** -- OLD: `validate:"omitempty,oneof=standalone distributed"` -- NEW: `validate:"omitempty,oneof=memory persistent distributed"` - -**Mode Field Documentation:** -- Update doc comments to explain all three modes -- Include use case guidance for each mode -- Reference deployment scenarios - -**Constant Cleanup:** -- Remove `mcpProxyModeStandalone` constant (obsolete) -- Keep database driver constants - - - ---- - - -**Implementation Reference**: See `_techspec.md` Section "Phase 1.2: Update Configuration Validation" (lines 371-419) - -**Key Concepts:** -- Go struct tag validation (validator package) -- Koanf configuration binding -- Environment variable mapping -- Documentation comments for Go structs - -**Related Files:** -- `pkg/config/config.go` - Main config struct -- `pkg/config/resolver.go` - Mode resolution (Task 1.0) - - ---- - -## Subtasks - -### 2.1 Update Mode Field Validation -**File**: `pkg/config/config.go` (line 56) - -- [ ] Update `validate` struct tag to accept new modes -- [ ] Change `oneof=standalone distributed` to 
`oneof=memory persistent distributed` -- [ ] Verify all other struct tags remain unchanged (koanf, env, json, yaml, mapstructure) - -**Reference**: `_techspec.md` lines 375-381 - ---- - -### 2.2 Update Mode Documentation -**File**: `pkg/config/config.go` (lines 52-55) - -- [ ] Update Mode field doc comment -- [ ] Explain "memory" mode (default, in-memory, fastest) -- [ ] Explain "persistent" mode (file-based, local dev) -- [ ] Explain "distributed" mode (production, external services) - -**Reference**: `_techspec.md` lines 384-397 - ---- - -### 2.3 Clean Up Obsolete Constants -**File**: `pkg/config/config.go` (line 17) - -- [ ] Remove `mcpProxyModeStandalone = "standalone"` constant -- [ ] Keep `databaseDriverPostgres` constant -- [ ] Keep `databaseDriverSQLite` constant -- [ ] Verify no other code references the removed constant - -**Reference**: `_techspec.md` lines 400-413 - ---- - -## Relevant Files - -### Primary Files (Modified) -- `pkg/config/config.go` - Configuration struct and validation - -### Dependent Files (Reference Only) -- `pkg/config/resolver.go` - Mode constants (updated in Task 1.0) -- `pkg/config/definition/schema.go` - Will be updated in Task 3.0 - -### Files to Search (Verify No References) -- Grep codebase for `mcpProxyModeStandalone` usage before removing - ---- - -## Deliverables - -1. **Updated Validation Tags** - - Mode field validates against memory/persistent/distributed - - Rejects "standalone" mode - - All struct tags properly formatted - -2. **Updated Documentation** - - Clear mode field documentation - - Use case guidance for each mode - - Deployment scenario references - -3. **Cleaned Constants** - - Obsolete `mcpProxyModeStandalone` removed - - No dangling references in codebase - -4. 
**Tests Pass** - - Configuration validation tests pass - - No lint errors - ---- - -## Tests - -### Unit Tests to Verify -**File**: `pkg/config/config_test.go` (Task 4.0 will update these) - -Expected validation behavior: -- "memory" mode validates successfully -- "persistent" mode validates successfully -- "distributed" mode validates successfully -- "standalone" mode fails validation -- Invalid modes fail validation - -### Validation Commands -```bash -# Must pass before completing task -go test ./pkg/config -run TestConfigValidation -v -make lint - -# Verify no references to removed constant -grep -r "mcpProxyModeStandalone" . --include="*.go" -# Should return no results (except in git history) -``` - ---- - -## Success Criteria - -- [x] Mode field validation tag updated -- [x] Mode field documentation updated -- [x] Obsolete constant removed -- [x] No dangling references to removed constant -- [x] Code compiles without errors -- [x] No lint warnings or errors -- [x] Validation tests will pass (comprehensive after Task 4.0) - ---- - -## Dependencies - -**Blocks:** -- Task 4.0 (Configuration Tests) - needs updated validation - -**Depends On:** -- Task 1.0 (Mode Constants) - needs new mode constant definitions - ---- - -## Notes - -- Validation changes are BREAKING for alpha users -- Clear error messages important for migration experience -- Implementation details in `_techspec.md` lines 371-419 -- Keep changes focused on config.go only diff --git a/tasks/prd-modes/_task_20.0.md b/tasks/prd-modes/_task_20.0.md deleted file mode 100644 index 4adeacfb..00000000 --- a/tasks/prd-modes/_task_20.0.md +++ /dev/null @@ -1,95 +0,0 @@ -## status: pending - - -schemas -schema_update -configuration -low -none - - -# Task 20.0: Update JSON Schemas - -## Overview - -Update JSON schemas (`config.json` and `compozy.json`) to reflect the new three-mode system (memory/persistent/distributed), replacing references to the old standalone mode. 
- - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from `_techspec.md` Phase 5.1 before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha - - - -# When you need information about JSON Schema: -- use perplexity and context7 to find out how to properly define enum types and defaults -- validate against JSON Schema specification standards - - - -- Update mode enums to: ["memory", "persistent", "distributed"] -- Change default mode from "distributed" to "memory" -- Update all mode-related descriptions and help text -- Update component-level mode fields (temporal.mode, redis.mode) -- Ensure schema validation passes for all example configs - - -## Subtasks - -- [ ] 20.1 Update mode enum and default in `schemas/config.json` -- [ ] 20.2 Update mode enum and component modes in `schemas/compozy.json` -- [ ] 20.3 Update mode descriptions and help text in both schemas -- [ ] 20.4 Validate schemas against example configs - -## Implementation Details - -See `_techspec.md` Phase 5.1 for complete implementation details. 
- -### Key Changes - -**schemas/config.json:** -- Update mode enum: `["memory", "persistent", "distributed"]` -- Update default: `"memory"` -- Update description to explain each mode's purpose - -**schemas/compozy.json:** -- Update root-level mode enum and default -- Update temporal.mode enum: `["memory", "persistent", "remote"]` -- Update redis.mode enum: `["memory", "persistent", "distributed"]` -- Add inheritance description (empty = inherit from global) - -### Relevant Files - -- `schemas/config.json` -- `schemas/compozy.json` - -### Dependent Files - -- `examples/memory-mode/compozy.yaml` -- `examples/persistent-mode/compozy.yaml` -- `examples/distributed-mode/compozy.yaml` - -## Deliverables - -- Updated `schemas/config.json` with new mode system -- Updated `schemas/compozy.json` with new mode system -- Schema validation passes for all example configs -- IDE autocomplete shows correct mode options - -## Tests - -- Schema validation tests: - - [ ] Validate memory mode config against schema - - [ ] Validate persistent mode config against schema - - [ ] Validate distributed mode config against schema - - [ ] Validate component mode override configs - - [ ] Reject invalid mode values (e.g., "standalone") - - [ ] Validate mode inheritance (component inherits from global) - -## Success Criteria - -- All JSON schemas updated with new mode names -- Schema validation passes for all example configs -- Default mode is "memory" in schemas -- IDE autocomplete/validation works correctly -- No references to "standalone" mode in schemas diff --git a/tasks/prd-modes/_task_21.0.md b/tasks/prd-modes/_task_21.0.md deleted file mode 100644 index ce178b6b..00000000 --- a/tasks/prd-modes/_task_21.0.md +++ /dev/null @@ -1,102 +0,0 @@ -## status: pending - - -tooling -code_generation -metadata -low -schemas - - -# Task 21.0: Regenerate Generated Files - -## Overview - -Regenerate auto-generated files (Swagger docs, golden test files, schema-generated code) to reflect the new 
three-mode system. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from `_techspec.md` Phase 5.2 before start -- **DEPENDENCIES:** This task depends on Task 20.0 (Update JSON Schemas) being completed - - - -# When you need information about code generation: -- use perplexity to find out how Swagger generation works in Go projects -- check project Makefile for generation commands - - - -- Regenerate Swagger/OpenAPI documentation -- Regenerate golden test files with new mode names -- Regenerate any schema-generated code -- Verify all generated files are consistent with new mode system - - -## Subtasks - -- [ ] 21.1 Regenerate Swagger documentation (`make swagger`) -- [ ] 21.2 Regenerate golden test files (`UPDATE_GOLDEN=1`) -- [ ] 21.3 Regenerate schema-generated code (if applicable) -- [ ] 21.4 Verify all generated files are correct - -## Implementation Details - -See `_techspec.md` Phase 5.2 for complete implementation details. - -### Generation Commands - -**Swagger docs:** -```bash -make swagger -``` - -**Golden files:** -```bash -UPDATE_GOLDEN=1 go test ./cli/cmd/config/... 
-``` - -**Schema-generated code (if applicable):** -```bash -go run pkg/schemagen/main.go # If this exists -``` - -### Relevant Files - -**Generated files to update:** -- Swagger/OpenAPI documentation -- `testdata/config-diagnostics-standalone.golden` → update to memory -- `testdata/config-show-mixed.golden` → update mode references -- `testdata/config-show-standalone.golden` → update to memory -- Any schema-generated code files - -### Dependent Files - -- `schemas/config.json` (Task 20.0) -- `schemas/compozy.json` (Task 20.0) -- `pkg/config/config.go` -- `pkg/config/resolver.go` - -## Deliverables - -- Regenerated Swagger documentation with new modes -- Updated golden test files with memory/persistent/distributed -- Regenerated schema code (if applicable) -- All generated files pass validation - -## Tests - -- Generated file validation: - - [ ] Swagger docs contain correct mode enums - - [ ] Golden files contain updated mode names - - [ ] Golden test comparisons pass - - [ ] No "standalone" references in generated files - - [ ] All config tests pass with updated golden files - -## Success Criteria - -- All auto-generated files reflect new mode system -- Golden test files updated and passing -- Swagger documentation shows correct modes -- No "standalone" references in generated files -- All generation commands execute successfully diff --git a/tasks/prd-modes/_task_22.0.md b/tasks/prd-modes/_task_22.0.md deleted file mode 100644 index 168f36b9..00000000 --- a/tasks/prd-modes/_task_22.0.md +++ /dev/null @@ -1,125 +0,0 @@ -## status: pending - - -testing -integration -full_system -high -all_previous_phases - - -# Task 22.0: Comprehensive Testing - -## Overview - -Execute full test suite validation across all three modes, verify performance improvements, and ensure zero regressions. This is a critical validation gate before ship. 
- - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from `_techspec.md` Phase 6.1 before start -- **DEPENDENCIES:** All previous tasks (1.0-21.0) must be completed -- **BLOCKING:** This is a CRITICAL validation gate - must pass before ship - - - -# When you need information about testing: -- use perplexity to find out about Go testing best practices -- check existing test patterns in test/helpers/ - - - -- Full test suite passes (`make test`) -- Linter passes with zero warnings (`make lint`) -- Performance improvement of 50%+ in test execution time -- All three modes tested individually -- No regressions in any mode - - -## Subtasks - -- [ ] 22.1 Clean build and full test suite execution -- [ ] 22.2 Linter validation (zero warnings) -- [ ] 22.3 Memory mode testing (default behavior) -- [ ] 22.4 Persistent mode testing (with state persistence) -- [ ] 22.5 Distributed mode testing (no regressions) -- [ ] 22.6 Performance benchmarking and validation - -## Implementation Details - -See `_techspec.md` Phase 6.1 for complete implementation details. 
- -### Test Commands - -**Full test suite:** -```bash -make clean -make build -make test # Expected: All pass, 50%+ faster -``` - -**Linter:** -```bash -make lint # Expected: Zero warnings -``` - -**Mode-specific testing:** -```bash -# Memory mode (default) -compozy start -compozy workflow run examples/hello-world.yaml - -# Persistent mode -compozy start --mode persistent -# Restart to verify persistence -compozy start --mode persistent - -# Distributed mode -docker-compose up -d postgres redis temporal -compozy start --mode distributed -``` - -### Relevant Files - -**Test infrastructure:** -- `test/helpers/standalone.go` -- `test/helpers/database.go` -- `test/integration/*/` - -**Core files to validate:** -- `pkg/config/resolver.go` -- `engine/infra/cache/mod.go` -- `engine/infra/server/dependencies.go` - -### Dependent Files - -All files from Tasks 1.0-21.0 - -## Deliverables - -- Full test suite passing -- Linter clean (zero warnings) -- Performance benchmark report showing 50%+ improvement -- All three modes validated and working -- Test execution time documented (before/after) - -## Tests - -- Comprehensive validation: - - [ ] Full test suite passes (`make test`) - - [ ] No test failures or flaky tests - - [ ] Linter clean (`make lint`) - - [ ] Memory mode: server starts and executes workflows - - [ ] Persistent mode: state persists across restarts - - [ ] Distributed mode: connects to external services - - [ ] Performance: test suite 50%+ faster than baseline - - [ ] No regressions in code coverage - - [ ] All pgvector tests explicitly use distributed mode - -## Success Criteria - -- ✅ All tests pass (`make test`) -- ✅ Linter clean (`make lint`) -- ✅ Test suite 50%+ faster (baseline: 3-5 min → target: 45-90 sec) -- ✅ All three modes work correctly -- ✅ No regressions in distributed mode -- ✅ Code coverage >80% for new code diff --git a/tasks/prd-modes/_task_23.0.md b/tasks/prd-modes/_task_23.0.md deleted file mode 100644 index bcf2b575..00000000 --- 
a/tasks/prd-modes/_task_23.0.md +++ /dev/null @@ -1,123 +0,0 @@ -## status: pending - - -examples -validation -user_documentation -medium -all_previous_phases - - -# Task 23.0: Validate Examples - -## Overview - -Test all example configurations in each mode to ensure they work correctly and demonstrate proper usage patterns for users. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from `_techspec.md` Phase 6.2 before start -- **DEPENDENCIES:** Tasks 1.0-21.0 must be completed - - - -# When you need information about example validation: -- check existing example patterns in examples/ -- verify example configs are complete and runnable - - - -- Memory mode example works and starts instantly -- Persistent mode example creates .compozy/ directory structure -- Distributed mode example connects to external services -- All examples are complete and runnable -- Example READMEs are clear and accurate - - -## Subtasks - -- [ ] 23.1 Test memory mode example -- [ ] 23.2 Test persistent mode example -- [ ] 23.3 Test distributed mode example -- [ ] 23.4 Verify example directory structure -- [ ] 23.5 Validate example documentation - -## Implementation Details - -See `_techspec.md` Phase 6.2 for complete implementation details. 
- -### Example Testing - -**Memory mode:** -```bash -cd examples/memory-mode -compozy start -# Expected: Instant startup, no .compozy/ directory -``` - -**Persistent mode:** -```bash -cd examples/persistent-mode -compozy start -ls -la .compozy/ -# Expected: compozy.db, temporal.db, redis/ created -# Restart test -compozy stop -compozy start -# Expected: Previous state persists -``` - -**Distributed mode:** -```bash -cd examples/distributed-mode -docker-compose up -d -compozy start -# Expected: Connects to postgres, redis, temporal -``` - -### Relevant Files - -**Example directories:** -- `examples/memory-mode/` (renamed from standalone) -- `examples/persistent-mode/` (new) -- `examples/distributed-mode/` (updated) -- `examples/README.md` - -**Example configs:** -- `examples/memory-mode/compozy.yaml` -- `examples/persistent-mode/compozy.yaml` -- `examples/distributed-mode/compozy.yaml` - -### Dependent Files - -- `pkg/config/resolver.go` -- `engine/infra/server/server.go` - -## Deliverables - -- All example configs tested and working -- Memory mode example demonstrates instant startup -- Persistent mode example shows file structure creation -- Distributed mode example connects to external services -- Example README files are accurate and complete - -## Tests - -- Example validation: - - [ ] Memory mode example starts instantly - - [ ] Memory mode example runs workflows successfully - - [ ] Persistent mode example creates .compozy/ directory - - [ ] Persistent mode example persists state across restarts - - [ ] Distributed mode example connects to external services - - [ ] All example configs are valid YAML - - [ ] Example READMEs are clear and complete - - [ ] Examples demonstrate best practices - -## Success Criteria - -- ✅ All examples work in their respective modes -- ✅ Memory mode: instant startup, no persistence -- ✅ Persistent mode: .compozy/ directory created with db files -- ✅ Distributed mode: successful external service connections -- ✅ Example 
documentation is clear and accurate -- ✅ Examples demonstrate proper mode usage patterns diff --git a/tasks/prd-modes/_task_24.0.md b/tasks/prd-modes/_task_24.0.md deleted file mode 100644 index 830674c6..00000000 --- a/tasks/prd-modes/_task_24.0.md +++ /dev/null @@ -1,118 +0,0 @@ -## status: pending - - -performance -benchmarking -system_performance -medium -all_previous_phases - - -# Task 24.0: Performance Benchmarking - -## Overview - -Measure and validate performance improvements across all modes, particularly focusing on test suite execution speed and server startup times. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from `_techspec.md` Phase 6.3 before start -- **DEPENDENCIES:** Tasks 1.0-22.0 must be completed -- **TARGET:** 50-80% improvement in test suite execution time - - - -# When you need information about benchmarking: -- use perplexity to find out about Go benchmarking best practices -- check how to measure startup times accurately - - - -- Test suite 50%+ faster than baseline (3-5 min → 45-90 sec) -- Memory mode startup <1 second -- Persistent mode startup <2 seconds -- Distributed mode startup 5-15 seconds (external connections) -- Document all performance metrics - - -## Subtasks - -- [ ] 24.1 Benchmark test suite execution time (before/after) -- [ ] 24.2 Measure memory mode server startup time -- [ ] 24.3 Measure persistent mode server startup time -- [ ] 24.4 Measure distributed mode server startup time -- [ ] 24.5 Document performance improvements -- [ ] 24.6 Verify 50%+ improvement target met - -## Implementation Details - -See `_techspec.md` Phase 6.3 for complete implementation details. 
- -### Benchmarking Commands - -**Test suite performance:** -```bash -# Baseline (if available - with testcontainers) -time make test -# Expected baseline: 2-5 minutes - -# Current (with SQLite memory mode) -time make test -# Target: 30-90 seconds (50-80% faster) -``` - -**Server startup benchmarks:** -```bash -# Memory mode -time compozy start --timeout 10s -# Target: <1 second - -# Persistent mode -time compozy start --mode persistent --timeout 10s -# Target: <2 seconds - -# Distributed mode -time compozy start --mode distributed --timeout 30s -# Target: 5-15 seconds -``` - -### Relevant Files - -**Performance-critical code:** -- `engine/infra/server/server.go` -- `engine/infra/server/dependencies.go` -- `engine/infra/cache/mod.go` -- `test/helpers/database.go` - -### Dependent Files - -All test infrastructure from Tasks 3.1-3.5 - -## Deliverables - -- Performance benchmark report with before/after metrics -- Test suite execution time comparison -- Server startup time measurements for all modes -- Verification that 50%+ improvement target is met -- Performance documentation for users - -## Tests - -- Performance validation: - - [ ] Test suite execution time measured and documented - - [ ] 50%+ improvement in test suite speed achieved - - [ ] Memory mode startup <1 second - - [ ] Persistent mode startup <2 seconds - - [ ] Distributed mode startup 5-15 seconds - - [ ] No performance regressions in distributed mode - - [ ] Memory usage is reasonable in all modes - - [ ] Database query performance is acceptable - -## Success Criteria - -- ✅ Test suite 50-80% faster than baseline -- ✅ Memory mode: <1s startup -- ✅ Persistent mode: <2s startup -- ✅ Distributed mode: 5-15s startup (external services) -- ✅ Performance metrics documented -- ✅ No performance regressions in any mode diff --git a/tasks/prd-modes/_task_25.0.md b/tasks/prd-modes/_task_25.0.md deleted file mode 100644 index 210bdb0f..00000000 --- a/tasks/prd-modes/_task_25.0.md +++ /dev/null @@ -1,143 +0,0 @@ 
-## status: pending - - -error_handling -validation -user_experience -low -all_previous_phases - - -# Task 25.0: Error Message Validation - -## Overview - -Validate that all error messages are helpful, clear, and guide users toward correct configuration. Test error scenarios to ensure quality user experience. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from `_techspec.md` Phase 6.4 before start -- **DEPENDENCIES:** Tasks 1.0-23.0 must be completed - - - -# When you need information about error handling: -- check existing error patterns in the codebase -- verify error messages follow Go best practices - - - -- Invalid mode errors show valid options -- pgvector + SQLite error provides clear guidance -- SQLite concurrency warnings are informative -- Migration hints for "standalone" mode users -- All error messages are actionable - - -## Subtasks - -- [ ] 25.1 Test invalid mode error message -- [ ] 25.2 Test pgvector + SQLite incompatibility error -- [ ] 25.3 Test SQLite concurrency warning -- [ ] 25.4 Test "standalone" migration hint -- [ ] 25.5 Verify all error messages are clear and actionable - -## Implementation Details - -See `_techspec.md` Phase 6.4 for complete implementation details. - -### Error Scenarios to Test - -**Invalid mode:** -```bash -compozy start --mode invalid -# Expected error: -# Error: invalid mode "invalid". Valid modes: memory, persistent, distributed -``` - -**pgvector + SQLite:** -```bash -cat > test-config.yaml < test-config.yaml < test-config.yaml < -documentation -validation -user_documentation -low -all_previous_phases - - -# Task 26.0: Documentation Validation - -## Overview - -Final validation of all documentation to ensure accuracy, completeness, and quality. Verify all code examples work and all links are valid. 
- - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from `_techspec.md` Phase 6.5 before start -- **DEPENDENCIES:** All previous tasks (1.0-25.0) must be completed -- **BLOCKING:** This is the final validation gate before ship - - - -# When you need information about documentation validation: -- check if documentation site has automated link checking -- verify code examples are complete and runnable - - - -- All documentation updated with new mode names -- No broken links in documentation -- All code examples are valid and tested -- Migration guide is complete and accurate -- Documentation is clear and user-friendly - - -## Subtasks - -- [ ] 26.1 Check for broken links in documentation -- [ ] 26.2 Validate all code examples work -- [ ] 26.3 Verify no "standalone" references remain (except historical) -- [ ] 26.4 Review migration guide completeness -- [ ] 26.5 Validate CLI help text accuracy - -## Implementation Details - -See `_techspec.md` Phase 6.5 for complete implementation details. 
- -### Validation Commands - -**Check broken links:** -```bash -cd docs -npm run lint:links # If available -``` - -**Validate code examples:** -```bash -npm run test:examples # If available -# Or manually test each example -``` - -**Find remaining "standalone" references:** -```bash -grep -r "standalone" docs/ examples/ README.md --exclude-dir=.git -# Should only show historical context or migration guides -``` - -### Relevant Files - -**Documentation files:** -- `docs/content/docs/deployment/memory-mode.mdx` -- `docs/content/docs/deployment/persistent-mode.mdx` -- `docs/content/docs/deployment/distributed-mode.mdx` -- `docs/content/docs/configuration/mode-configuration.mdx` -- `docs/content/docs/guides/mode-migration-guide.mdx` -- `docs/content/docs/quick-start/index.mdx` -- `cli/help/global-flags.md` - -**Example files:** -- `examples/memory-mode/` -- `examples/persistent-mode/` -- `examples/distributed-mode/` -- `examples/README.md` - -### Dependent Files - -All documentation from Tasks 4.1-4.5 - -## Deliverables - -- All documentation validated and accurate -- No broken links in docs -- All code examples tested and working -- Migration guide complete and helpful -- CHANGELOG entry written -- Documentation is ship-ready - -## Tests - -- Documentation validation: - - [ ] All links in documentation are valid - - [ ] All code examples are syntactically correct - - [ ] All code examples execute successfully - - [ ] No "standalone" references except in migration contexts - - [ ] Migration guide covers all scenarios - - [ ] Quick start guide works as documented - - [ ] Mode comparison tables are accurate - - [ ] CLI help text matches implementation - - [ ] CHANGELOG entry is complete - -## Success Criteria - -- ✅ All documentation links are valid -- ✅ All code examples work correctly -- ✅ No inappropriate "standalone" references -- ✅ Migration guide is complete and tested -- ✅ Documentation is clear and user-friendly -- ✅ CHANGELOG entry is written -- ✅ All 
documentation is ship-ready diff --git a/tasks/prd-modes/_task_27.0.md b/tasks/prd-modes/_task_27.0.md deleted file mode 100644 index 6cf1d06e..00000000 --- a/tasks/prd-modes/_task_27.0.md +++ /dev/null @@ -1,266 +0,0 @@ -# Task 27.0: Add Mode Selection to TUI Form - - -Phase 5: Template System -CRITICAL - First Impression -Medium -0.5 days - - ---- - -## Objective - -Add a mode selection dropdown to the `compozy init` TUI form, allowing users to choose between memory, persistent, and distributed modes during project initialization. - -**Impact**: CRITICAL - This is the user's first interaction with the mode system. - ---- - - -**MANDATORY VALIDATION:** -- Run `go build ./cli` - MUST COMPILE -- Run `compozy init` - TUI form MUST show mode dropdown -- Select each mode - Generated project MUST use selected mode -- Run `make lint` - MUST BE CLEAN - -**USER EXPERIENCE:** -- Mode dropdown appears AFTER template selection -- Default selection: "memory" -- Clear help text for each mode -- Visual indicators: 🚀 memory, 💾 persistent, 🏭 distributed - - ---- - - - -### TUI Form Changes - -**Location**: `cli/cmd/init/components/project_form.go` - -**New Field**: Mode dropdown -- Position: After template selection, before Docker toggle -- Options: memory (🚀), persistent (💾), distributed (🏭) -- Default: memory -- Help text: Clear use case guidance - -**Docker Toggle Behavior**: -- Disabled for memory and persistent modes -- Enabled only for distributed mode -- Help text: "Distributed mode requires external services" - - - ---- - - -**Reference**: `tasks/prd-modes/TEMPLATE_SYSTEM_ANALYSIS.md` - -**Current TUI Structure** (`cli/cmd/init/components/project_form.go`): -- Text fields: Name, Description, Version, Author, Author URL -- Dropdown: Template selection (currently only "basic") -- Toggle: Include Docker configuration - -**Required Changes**: -1. Add mode dropdown field -2. Add mode help text -3. Conditional Docker toggle (disabled for memory/persistent) -4. 
Update form model to store mode selection - - ---- - -## Subtasks - -### 27.1 Add Mode Field to Form Model -**File**: `cli/cmd/init/components/init_model.go` - -- [ ] Add `Mode string` field to form model struct -- [ ] Initialize mode to "memory" (default) -- [ ] Add mode getter/setter methods -- [ ] Add mode to form data output - ---- - -### 27.2 Create Mode Dropdown Component -**File**: `cli/cmd/init/components/project_form.go` - -- [ ] Add mode dropdown after template selection -- [ ] Options: memory, persistent, distributed -- [ ] Default selected: memory -- [ ] Visual indicators: 🚀 memory, 💾 persistent, 🏭 distributed - -**Help Text**: -``` -Memory Mode (🚀): -- Zero dependencies, instant startup -- Perfect for tests and quick prototyping -- No persistence (data lost on restart) - -Persistent Mode (💾): -- File-based storage, state preserved -- Ideal for local development -- Still zero external dependencies - -Distributed Mode (🏭): -- External PostgreSQL, Redis, Temporal -- Production-ready, horizontal scaling -- Requires Docker or managed services -``` - ---- - -### 27.3 Conditional Docker Toggle -**File**: `cli/cmd/init/components/project_form.go` - -- [ ] Disable Docker toggle when mode is memory or persistent -- [ ] Enable Docker toggle only when mode is distributed -- [ ] Update Docker toggle help text based on mode -- [ ] Gray out Docker toggle when disabled (visual feedback) - -**Conditional Logic**: -```go -// Docker toggle only enabled for distributed mode -dockerToggle.Disabled = (mode != "distributed") - -// Update help text -if mode == "distributed" { - dockerToggle.Help = "Generate docker-compose.yaml for external services" -} else { - dockerToggle.Help = "Docker not needed for embedded mode" -} -``` - ---- - -### 27.4 Update Form Rendering -**File**: `cli/cmd/init/components/project_form.go` - -- [ ] Add mode dropdown to form layout -- [ ] Position after template, before Docker toggle -- [ ] Update form navigation (tab order) -- [ ] Add mode 
validation (must be one of three values) - ---- - -### 27.5 Pass Mode to Template Generator -**File**: `cli/cmd/init/init.go` - -- [ ] Extract mode from form data -- [ ] Pass mode to `GenerateOptions` struct (Task 28.0 will add this field) -- [ ] Log selected mode for debugging -- [ ] Validate mode before template generation - ---- - -## Relevant Files - -### Primary Files (Modified) -- `cli/cmd/init/components/project_form.go` - TUI form UI -- `cli/cmd/init/components/init_model.go` - Form data model -- `cli/cmd/init/init.go` - Command handler - -### Dependent Files (Reference Only) -- `pkg/template/types.go` - Will be updated in Task 28.0 -- `pkg/template/templates/basic/basic.go` - Will be updated in Task 29.0 - ---- - -## Deliverables - -1. **Mode Dropdown Added** - - Three options: memory, persistent, distributed - - Default: memory - - Visual indicators and help text - -2. **Conditional Docker Toggle** - - Disabled for memory/persistent - - Enabled only for distributed - - Clear visual feedback - -3. **Form Model Updated** - - Mode field added - - Mode passed to template generator - - Mode validated - -4. **User Experience** - - Clear mode selection guidance - - Intuitive form flow - - No confusion about options - ---- - -## Tests - -### Manual Testing -```bash -# Test mode selection -go build ./cli -./compozy init test-project - -# Verify: -# 1. Mode dropdown appears after template selection -# 2. Default is "memory" -# 3. All three modes selectable -# 4. Docker toggle disabled for memory/persistent -# 5. Docker toggle enabled for distributed -# 6. 
Help text clear and accurate - -# Test each mode -./compozy init memory-test --mode memory -./compozy init persistent-test --mode persistent -./compozy init distributed-test --mode distributed - -# Verify generated compozy.yaml uses correct mode -``` - -### Validation Commands -```bash -# Must pass before completing task -go build ./cli -make lint - -# Manual smoke test -./compozy init test-project -# Select mode, verify generation -``` - ---- - -## Success Criteria - -- [x] Mode dropdown added to TUI form -- [x] Mode dropdown positioned correctly (after template, before Docker) -- [x] Default mode is "memory" -- [x] All three modes selectable -- [x] Help text clear for each mode -- [x] Visual indicators show mode characteristics -- [x] Docker toggle disabled for memory/persistent -- [x] Docker toggle enabled for distributed -- [x] Mode passed to template generator -- [x] Code compiles without errors -- [x] No lint warnings or errors - ---- - -## Dependencies - -**Blocks:** -- Task 29.0 (Mode-Aware Template Generation) - needs mode selection - -**Depends On:** -- Phase 1 complete (mode constants defined) - -**Parallel With:** -- Task 28.0 (Update Template System Types) - can run in parallel - ---- - -## Notes - -- This is the **first user touchpoint** for the mode system -- Clear, helpful UX is critical for adoption -- Default to memory mode emphasizes zero-dependency experience -- Implementation reference in `TEMPLATE_SYSTEM_ANALYSIS.md` -- Keep changes focused on TUI form only diff --git a/tasks/prd-modes/_task_28.0.md b/tasks/prd-modes/_task_28.0.md deleted file mode 100644 index 00b54a4d..00000000 --- a/tasks/prd-modes/_task_28.0.md +++ /dev/null @@ -1,338 +0,0 @@ -# Task 28.0: Update Template System Types for Mode - - -Phase 5: Template System -HIGH -Low -0.5 days - - ---- - -## Objective - -Add `Mode` field to `GenerateOptions` struct in the template system, enabling mode-aware template generation. 
- -**Impact**: Enables template system to generate mode-specific configurations. - ---- - - -**MANDATORY VALIDATION:** -- Run `go build ./pkg/template` - MUST COMPILE -- Run `make lint` on pkg/template - MUST BE CLEAN -- Run `go test ./pkg/template/... -v` - ALL TESTS MUST PASS - -**BREAKING CHANGE:** -- `GenerateOptions` struct signature changes -- All template generators must handle mode field - - ---- - - - -### Type System Changes - -**File**: `pkg/template/types.go` - -**Add Mode Field**: -```go -type GenerateOptions struct { - OutputDir string - Name string - Description string - Version string - Author string - AuthorURL string - IncludeDocker bool - Mode string // NEW: memory, persistent, or distributed -} -``` - -**Default Value**: "memory" (zero-dependency default) - - - ---- - - -**Reference**: `tasks/prd-modes/TEMPLATE_SYSTEM_ANALYSIS.md` - -**Current Structure** (`pkg/template/types.go`): -- `Template` interface with `Generate(opts GenerateOptions) error` method -- `GenerateOptions` struct with project metadata fields -- Global registry for template registration - -**Required Changes**: -1. Add Mode field to GenerateOptions -2. Update all template implementations to read Mode field -3. 
Validate mode value (memory/persistent/distributed) - - ---- - -## Subtasks - -### 28.1 Add Mode Field to GenerateOptions -**File**: `pkg/template/types.go` - -- [ ] Add `Mode string` field to `GenerateOptions` struct -- [ ] Add field comment documenting valid values -- [ ] Position after `IncludeDocker` field -- [ ] Update struct documentation - -**Implementation**: -```go -// GenerateOptions contains configuration for project generation -type GenerateOptions struct { - OutputDir string // Target directory for generated files - Name string // Project name - Description string // Project description - Version string // Initial version (e.g., "0.1.0") - Author string // Author name - AuthorURL string // Author URL or email - IncludeDocker bool // Generate docker-compose.yaml - Mode string // Deployment mode: memory, persistent, or distributed -} -``` - ---- - -### 28.2 Add Mode Validation Function -**File**: `pkg/template/types.go` - -- [ ] Add `ValidateMode(mode string) error` function -- [ ] Validate mode is one of: memory, persistent, distributed -- [ ] Return helpful error for invalid modes -- [ ] Suggest correction for "standalone" mode - -**Implementation**: -```go -// ValidateMode checks if the provided mode is valid -func ValidateMode(mode string) error { - validModes := []string{"memory", "persistent", "distributed"} - - for _, valid := range validModes { - if mode == valid { - return nil - } - } - - // Provide helpful error for "standalone" - if mode == "standalone" { - return fmt.Errorf("mode 'standalone' has been replaced. Use 'memory' (no persistence) or 'persistent' (with persistence)") - } - - return fmt.Errorf("invalid mode '%s'. 
Must be one of: %s", mode, strings.Join(validModes, ", ")) -} -``` - ---- - -### 28.3 Add Default Mode Constant -**File**: `pkg/template/types.go` - -- [ ] Add `DefaultMode = "memory"` constant -- [ ] Use constant in validation and defaults -- [ ] Document constant purpose - -**Implementation**: -```go -const ( - // DefaultMode is the default deployment mode for new projects - DefaultMode = "memory" -) -``` - ---- - -### 28.4 Update Template Interface Documentation -**File**: `pkg/template/types.go` - -- [ ] Update `Template` interface comments -- [ ] Document mode field requirement -- [ ] Add example usage - -**Implementation**: -```go -// Template represents a project template that can generate files -// Implementations must: -// - Generate mode-appropriate compozy.yaml (memory/persistent/distributed) -// - Conditionally generate docker-compose.yaml (distributed mode only) -// - Create mode-specific README documentation -type Template interface { - Name() string - Description() string - Generate(opts GenerateOptions) error -} -``` - ---- - -### 28.5 Update Service Layer -**File**: `pkg/template/service.go` - -- [ ] Add mode validation in service layer -- [ ] Set default mode if not provided -- [ ] Log selected mode - -**Implementation**: -```go -func (s *Service) Generate(name string, opts GenerateOptions) error { - // Set default mode if not provided - if opts.Mode == "" { - opts.Mode = DefaultMode - } - - // Validate mode - if err := ValidateMode(opts.Mode); err != nil { - return fmt.Errorf("invalid mode: %w", err) - } - - // Log selected mode - log.Info("Generating project with mode", "mode", opts.Mode, "template", name) - - // ... 
rest of implementation -} -``` - ---- - -## Relevant Files - -### Primary Files (Modified) -- `pkg/template/types.go` - Add Mode field and validation -- `pkg/template/service.go` - Add mode validation and logging - -### Dependent Files (Reference Only) -- `pkg/template/templates/basic/basic.go` - Will use Mode field (Task 29.0) -- `cli/cmd/init/init.go` - Will pass Mode from form (Task 27.0) - ---- - -## Deliverables - -1. **Mode Field Added** - - GenerateOptions has Mode field - - Mode field documented - - Mode field validated - -2. **Validation Function** - - ValidateMode function added - - Helpful error messages - - "standalone" migration hint - -3. **Default Mode** - - DefaultMode constant added - - Default applied when Mode empty - - Default is "memory" - -4. **Service Layer Updated** - - Mode validated before generation - - Default mode applied - - Mode logged for debugging - ---- - -## Tests - -### Unit Tests to Add -**File**: `pkg/template/types_test.go` - -```go -func TestValidateMode(t *testing.T) { - tests := []struct { - name string - mode string - wantErr bool - errMsg string - }{ - { - name: "memory mode valid", - mode: "memory", - wantErr: false, - }, - { - name: "persistent mode valid", - mode: "persistent", - wantErr: false, - }, - { - name: "distributed mode valid", - mode: "distributed", - wantErr: false, - }, - { - name: "standalone rejected with hint", - mode: "standalone", - wantErr: true, - errMsg: "has been replaced", - }, - { - name: "invalid mode rejected", - mode: "invalid", - wantErr: true, - errMsg: "invalid mode", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := ValidateMode(tt.mode) - if (err != nil) != tt.wantErr { - t.Errorf("ValidateMode() error = %v, wantErr %v", err, tt.wantErr) - return - } - if err != nil && !strings.Contains(err.Error(), tt.errMsg) { - t.Errorf("ValidateMode() error = %v, want message containing %v", err, tt.errMsg) - } - }) - } -} -``` - -### Validation Commands -```bash 
-# Must pass before completing task -go test ./pkg/template/... -v -make lint -go build ./pkg/template -``` - ---- - -## Success Criteria - -- [x] Mode field added to GenerateOptions -- [x] ValidateMode function implemented -- [x] DefaultMode constant added -- [x] Service layer validates mode -- [x] Service layer applies default mode -- [x] Mode validation tests added -- [x] All tests pass -- [x] Code compiles without errors -- [x] No lint warnings or errors - ---- - -## Dependencies - -**Blocks:** -- Task 29.0 (Mode-Aware Template Generation) - needs Mode field - -**Depends On:** -- Phase 1 complete (mode constants defined) - -**Parallel With:** -- Task 27.0 (TUI Form) - can run in parallel - ---- - -## Notes - -- Simple structural change, low risk -- Foundation for mode-aware template generation -- Validation ensures type safety -- Implementation reference in `TEMPLATE_SYSTEM_ANALYSIS.md` -- Keep changes focused on types.go and service.go only diff --git a/tasks/prd-modes/_task_29.0.md b/tasks/prd-modes/_task_29.0.md deleted file mode 100644 index 484d84c4..00000000 --- a/tasks/prd-modes/_task_29.0.md +++ /dev/null @@ -1,487 +0,0 @@ -# Task 29.0: Make Template Generation Mode-Aware - - -Phase 5: Template System -CRITICAL - User Onboarding -High -1 day - - ---- - -## Objective - -Update the "basic" template to generate mode-appropriate configuration files, with docker-compose.yaml only for distributed mode and mode-specific documentation. - -**Impact**: CRITICAL - Ensures generated projects work out-of-the-box in selected mode. 
- ---- - - -**MANDATORY VALIDATION:** -- Run `go build ./pkg/template` - MUST COMPILE -- Generate project in each mode - MUST WORK -- Run generated project `compozy start` - MUST START -- Run `make lint` - MUST BE CLEAN -- Run `make test` - MUST PASS - -**USER EXPERIENCE:** -- Memory mode: Minimal config, no docker-compose, instant startup -- Persistent mode: File paths configured, no docker-compose, state persists -- Distributed mode: External services configured, docker-compose included - - ---- - - - -### Template File Changes - -**Files to Update**: -1. `pkg/template/templates/basic/compozy.yaml.tmpl` - Mode-specific config -2. `pkg/template/templates/basic/docker-compose.yaml.tmpl` - Conditional generation -3. `pkg/template/templates/basic/README.md.tmpl` - Mode-specific docs -4. `pkg/template/templates/basic/env.example.tmpl` - Mode-specific env vars -5. `pkg/template/templates/basic/basic.go` - Generation logic - -**Mode-Specific Behavior**: -- **Memory**: Minimal config, no docker-compose, quick start docs -- **Persistent**: File paths, no docker-compose, state preservation docs -- **Distributed**: External services, docker-compose, production docs - - - ---- - - -**Reference**: `tasks/prd-modes/TEMPLATE_SYSTEM_ANALYSIS.md` - -**Current Template Structure**: -- 8 embedded files using `//go:embed` -- Template rendering with sprig functions -- Docker compose always generated when `IncludeDocker` is true - -**Required Changes**: -1. Add mode-aware template logic to compozy.yaml.tmpl -2. Conditional docker-compose generation (distributed mode only) -3. Mode-specific README sections -4. 
Mode-specific environment variables - - ---- - -## Subtasks - -### 29.1 Update compozy.yaml Template -**File**: `pkg/template/templates/basic/compozy.yaml.tmpl` - -- [ ] Add mode-specific configuration sections -- [ ] Memory mode: Minimal config, explicit :memory: database -- [ ] Persistent mode: File paths for database and persistence -- [ ] Distributed mode: External service placeholders with env vars -- [ ] Add comments explaining mode-specific settings - -**Implementation**: -```yaml -# {{ .Name }} - Generated with Compozy -name: {{ .Name }} -version: {{ .Version }} -description: {{ .Description }} - -# Deployment Mode: {{ .Mode }} -{{- if eq .Mode "memory" }} -# Memory mode - zero dependencies, instant startup, no persistence -mode: memory - -database: - driver: sqlite - url: ":memory:" - -temporal: - mode: memory - namespace: {{ .Name }}-dev - -redis: - mode: memory - # Embedded miniredis, no persistence - -{{- else if eq .Mode "persistent" }} -# Persistent mode - file-based storage, state preserved -mode: persistent - -database: - driver: sqlite - url: ./.compozy/{{ .Name }}.db - -temporal: - mode: persistent - namespace: {{ .Name }}-dev - standalone: - database_file: ./.compozy/temporal.db - -redis: - mode: persistent - standalone: - persistence: - enabled: true - dir: ./.compozy/redis - -{{- else if eq .Mode "distributed" }} -# Distributed mode - production deployment with external services -mode: distributed - -database: - driver: postgres - url: ${COMPOZY_DATABASE_URL} - pool: - max_open_conns: 25 - max_idle_conns: 5 - -temporal: - mode: remote - host_port: ${TEMPORAL_HOST_PORT} - namespace: ${TEMPORAL_NAMESPACE} - -redis: - mode: distributed - distributed: - addr: ${REDIS_ADDR} - password: ${REDIS_PASSWORD} - db: 0 - -{{- end }} - -# Agent configuration (same for all modes) -agents: - main: - entrypoint: ./src/entrypoint.ts - tools: - - name: echo - type: builtin - -# Server configuration -server: - host: localhost - port: 8080 - log_level: info -``` - 
---- - -### 29.2 Conditional Docker Compose Generation -**File**: `pkg/template/templates/basic/basic.go` - -- [ ] Check if mode is "distributed" before generating docker-compose -- [ ] Skip docker-compose.yaml for memory and persistent modes -- [ ] Update file generation logic -- [ ] Log skipped files for transparency - -**Implementation**: -```go -func (t *BasicTemplate) Generate(opts GenerateOptions) error { - // ... existing file generation ... - - // Only generate docker-compose for distributed mode - if opts.Mode == "distributed" && opts.IncludeDocker { - dockerComposePath := filepath.Join(opts.OutputDir, "docker-compose.yaml") - if err := t.renderTemplate("docker-compose.yaml.tmpl", dockerComposePath, opts); err != nil { - return fmt.Errorf("failed to generate docker-compose.yaml: %w", err) - } - log.Info("Generated docker-compose.yaml for distributed mode") - } else { - log.Info("Skipping docker-compose.yaml (not needed for %s mode)", opts.Mode) - } - - return nil -} -``` - ---- - -### 29.3 Update README Template -**File**: `pkg/template/templates/basic/README.md.tmpl` - -- [ ] Add mode-specific quick start sections -- [ ] Memory mode: Instant startup instructions -- [ ] Persistent mode: State preservation notes -- [ ] Distributed mode: Docker setup instructions -- [ ] Add mode switching guide - -**Implementation**: -```markdown -# {{ .Name }} - -{{ .Description }} - -**Mode:** {{ .Mode }} - -## Quick Start - -{{- if eq .Mode "memory" }} - -### Memory Mode (Zero Dependencies) - -Start the server instantly with no external dependencies: - -\```bash -compozy start -\``` - -Server ready in <1 second! - -**Note:** All data is stored in memory and lost on restart. - -{{- else if eq .Mode "persistent" }} - -### Persistent Mode (Local Development) - -Start the server with state preservation: - -\```bash -compozy start -\``` - -Data is saved to `./.compozy/` directory and persists between restarts. 
- -{{- else if eq .Mode "distributed" }} - -### Distributed Mode (Production) - -Start external services first: - -\```bash -# Start infrastructure -docker-compose up -d - -# Start Compozy -export COMPOZY_DATABASE_URL="postgresql://..." -export TEMPORAL_HOST_PORT="localhost:7233" -export REDIS_ADDR="localhost:6379" -compozy start -\``` - -{{- end }} - -## Switching Modes - -To switch to a different mode: - -1. Update `mode` in `compozy.yaml` -2. Restart the server: `compozy restart` - -### Available Modes - -- **memory**: Zero dependencies, fastest startup, no persistence -- **persistent**: File-based storage, state preserved -- **distributed**: External services, production-ready - -## Development - -[... rest of README ...] -``` - ---- - -### 29.4 Update Environment Variables Template -**File**: `pkg/template/templates/basic/env.example.tmpl` - -- [ ] Add mode-specific environment variables -- [ ] Memory mode: Minimal env vars -- [ ] Persistent mode: Optional data directory overrides -- [ ] Distributed mode: Required external service connections - -**Implementation**: -```bash -# {{ .Name }} - Environment Variables - -# Global Configuration -COMPOZY_MODE={{ .Mode }} -COMPOZY_LOG_LEVEL=info -COMPOZY_SERVER_PORT=8080 - -{{- if eq .Mode "memory" }} - -# Memory Mode - No additional configuration needed! 
-# All services embedded, all data in-memory - -{{- else if eq .Mode "persistent" }} - -# Persistent Mode - Optional overrides -# COMPOZY_DATABASE_URL=./.compozy/{{ .Name }}.db -# TEMPORAL_DATABASE_FILE=./.compozy/temporal.db -# REDIS_PERSISTENCE_DIR=./.compozy/redis - -{{- else if eq .Mode "distributed" }} - -# Distributed Mode - External Services (REQUIRED) -COMPOZY_DATABASE_URL=postgresql://user:password@localhost:5432/{{ .Name }} -TEMPORAL_HOST_PORT=localhost:7233 -TEMPORAL_NAMESPACE={{ .Name }}-prod -REDIS_ADDR=localhost:6379 -REDIS_PASSWORD= -REDIS_DB=0 - -# Optional: TLS Configuration -# TEMPORAL_TLS_ENABLED=true -# REDIS_TLS_ENABLED=true - -{{- end }} -``` - ---- - -### 29.5 Update .gitignore Template -**File**: `pkg/template/templates/basic/gitignore.tmpl` - -- [ ] Add .compozy/ directory for persistent mode -- [ ] Mode-agnostic ignores (node_modules, .env) -- [ ] Keep minimal and clean - -**Implementation**: -```gitignore -# Dependencies -node_modules/ -bun.lockb - -# Environment -.env -.env.local - -{{- if or (eq .Mode "persistent") (eq .Mode "memory") }} - -# Compozy data directory (persistent mode) -.compozy/ - -{{- end }} - -# Build outputs -dist/ -build/ - -# Logs -*.log -``` - ---- - -## Relevant Files - -### Primary Files (Modified) -- `pkg/template/templates/basic/compozy.yaml.tmpl` - Mode-specific config -- `pkg/template/templates/basic/docker-compose.yaml.tmpl` - Conditional generation -- `pkg/template/templates/basic/README.md.tmpl` - Mode-specific docs -- `pkg/template/templates/basic/env.example.tmpl` - Mode-specific env vars -- `pkg/template/templates/basic/gitignore.tmpl` - Mode-specific ignores -- `pkg/template/templates/basic/basic.go` - Generation logic - -### Dependent Files (Reference Only) -- `pkg/template/types.go` - Mode field (Task 28.0) -- `cli/cmd/init/init.go` - Mode selection (Task 27.0) - ---- - -## Deliverables - -1. 
**Mode-Specific compozy.yaml** - - Memory mode: :memory: database, embedded services - - Persistent mode: File paths, embedded services - - Distributed mode: External services, env vars - -2. **Conditional Docker Compose** - - Generated only for distributed mode - - Skipped for memory and persistent modes - - Clear logging of generation decision - -3. **Mode-Specific Documentation** - - README quick start matches mode - - Mode switching guide included - - Environment variables documented - -4. **Generated Projects Work** - - Memory mode starts instantly - - Persistent mode creates .compozy/ directory - - Distributed mode includes docker-compose - ---- - -## Tests - -### Functional Testing -```bash -# Test memory mode generation -compozy init memory-test -cd memory-test -cat compozy.yaml # Verify mode: memory, :memory: database -ls -la # Verify NO docker-compose.yaml -compozy start # Must start in <1 second -compozy stop - -# Test persistent mode generation -compozy init persistent-test --mode persistent -cd persistent-test -cat compozy.yaml # Verify mode: persistent, file paths -ls -la # Verify NO docker-compose.yaml -compozy start # Must create .compozy/ directory -ls -la .compozy/ # Verify files created -compozy stop - -# Test distributed mode generation -compozy init distributed-test --mode distributed --docker -cd distributed-test -cat compozy.yaml # Verify mode: distributed, env vars -ls -la # Verify docker-compose.yaml EXISTS -cat docker-compose.yaml # Verify PostgreSQL, Redis, Temporal -``` - -### Validation Commands -```bash -# Must pass before completing task -go test ./pkg/template/... 
-v -make lint -make test - -# Manual validation -compozy init test-memory --mode memory -compozy init test-persistent --mode persistent -compozy init test-distributed --mode distributed --docker -``` - ---- - -## Success Criteria - -- [x] compozy.yaml template has mode-specific sections -- [x] docker-compose generated ONLY for distributed mode -- [x] README has mode-specific quick start -- [x] Environment variables match mode requirements -- [x] .gitignore includes .compozy/ for persistent mode -- [x] Generated projects compile and run -- [x] Memory mode project starts in <1 second -- [x] Persistent mode creates .compozy/ directory -- [x] Distributed mode includes docker-compose.yaml -- [x] All tests pass -- [x] Code compiles without errors -- [x] No lint warnings or errors - ---- - -## Dependencies - -**Blocks:** -- Phase 6 (Final Validation) - needs working template generation - -**Depends On:** -- Task 27.0 (TUI Form) - provides mode selection -- Task 28.0 (Template Types) - provides Mode field -- Phase 1 complete (mode constants) - ---- - -## Notes - -- This is the most complex task in Phase 5 -- Tests all three modes end-to-end -- Generated projects are users' first experience -- Documentation in generated README is critical -- Implementation reference in `TEMPLATE_SYSTEM_ANALYSIS.md` -- Keep template logic clean and maintainable diff --git a/tasks/prd-modes/_task_3.0.md b/tasks/prd-modes/_task_3.0.md deleted file mode 100644 index b74df168..00000000 --- a/tasks/prd-modes/_task_3.0.md +++ /dev/null @@ -1,203 +0,0 @@ -# Task 3.0: Update Configuration Registry - - -Phase 1: Core Configuration -CRITICAL -Medium -1 day - - ---- - -## Objective - -Update field definitions in `pkg/config/definition/schema.go` to register new mode defaults, help text, and metadata for the configuration system. - -**Impact**: Ensures CLI flags, environment variables, and config help text reflect the new three-mode system. 
- ---- - - -**MANDATORY VALIDATION:** -- Run `go test ./pkg/config/definition -v` - MUST PASS -- Run `make lint` on pkg/config/definition - MUST BE CLEAN -- Verify CLI help text shows correct modes - -**BREAKING CHANGE:** -- Default mode changes from "distributed" to "memory" in registry -- Help text updated for all mode fields - - ---- - - - -### Registry Changes - -**Global Mode Field:** -- Default: `"memory"` (changed from `"distributed"`) -- Help: Updated to explain all three modes - -**Component Mode Fields:** -- `temporal.mode`: Help text updated for memory/persistent/remote -- `redis.mode`: Help text updated for memory/persistent/distributed -- Both inherit from global mode if unset (empty default) - -**CLI Integration:** -- Flags show correct help text -- Environment variables documented -- Default values correct - - - ---- - - -**Implementation Reference**: See `_techspec.md` Section "Phase 1.3: Update Configuration Registry" (lines 421-464) - -**Key Concepts:** -- Configuration field registry pattern -- CLI flag generation from registry -- Environment variable mapping -- Help text generation - -**Related Files:** -- `pkg/config/definition/schema.go` - Field registry -- `pkg/config/resolver.go` - Mode constants (Task 1.0) -- `pkg/config/config.go` - Config struct (Task 2.0) - - ---- - -## Subtasks - -### 3.1 Update Global Mode Registration -**File**: `pkg/config/definition/schema.go` (~line 733) - -- [ ] Change `Default` from `"distributed"` to `"memory"` -- [ ] Update `Help` text to explain all three modes -- [ ] Verify `CLIFlag` is `"mode"` -- [ ] Verify `EnvVar` is `"COMPOZY_MODE"` - -**Reference**: `_techspec.md` lines 426-434 - ---- - -### 3.2 Update Temporal Mode Registration -**File**: `pkg/config/definition/schema.go` - -- [ ] Verify `Default` is `""` (empty = inherit from global) -- [ ] Update `Help` text: "Temporal deployment mode (memory/persistent/remote), inherits from global mode if unset" -- [ ] Verify `CLIFlag` is `"temporal-mode"` -- [ ] 
Verify `EnvVar` is `"TEMPORAL_MODE"` - -**Reference**: `_techspec.md` lines 437-446 - ---- - -### 3.3 Update Redis Mode Registration -**File**: `pkg/config/definition/schema.go` - -- [ ] Verify `Default` is `""` (empty = inherit from global) -- [ ] Update `Help` text: "Redis deployment mode (memory/persistent/distributed), inherits from global mode if unset" -- [ ] Verify `CLIFlag` is `"redis-mode"` -- [ ] Verify `EnvVar` is `"REDIS_MODE"` - -**Reference**: `_techspec.md` lines 449-458 - ---- - -## Relevant Files - -### Primary Files (Modified) -- `pkg/config/definition/schema.go` - Field registry - -### Dependent Files (Reference Only) -- `pkg/config/resolver.go` - Mode constants (Task 1.0) -- `pkg/config/config.go` - Config struct (Task 2.0) -- `cli/help/global-flags.md` - Will be updated in Task 18.0 - -### Generated Files (May Need Refresh) -- CLI help text (auto-generated from registry) -- Environment variable documentation - ---- - -## Deliverables - -1. **Updated Global Mode Registration** - - Default changed to "memory" - - Help text explains all three modes - - CLI flag and env var correct - -2. **Updated Component Mode Registrations** - - Temporal mode help text updated - - Redis mode help text updated - - Both inherit from global (empty default) - -3. **Verified CLI Integration** - - `compozy --help` shows correct mode help - - Environment variables documented correctly - -4. 
**Tests Pass** - - Definition tests pass - - No lint errors - ---- - -## Tests - -### Unit Tests to Verify -**File**: `pkg/config/definition/*_test.go` - -Expected behavior: -- Mode field registered with correct default -- Mode field has correct help text -- CLI flags map to correct config paths -- Environment variables map correctly - -### Validation Commands -```bash -# Must pass before completing task -go test ./pkg/config/definition -v -make lint - -# Manual verification -compozy --help | grep -A 5 "mode" -# Should show updated help text with memory/persistent/distributed -``` - ---- - -## Success Criteria - -- [x] Global mode field default changed to "memory" -- [x] Global mode help text updated for three modes -- [x] Temporal mode help text updated -- [x] Redis mode help text updated -- [x] All CLI flags correct -- [x] All environment variables correct -- [x] Code compiles without errors -- [x] No lint warnings or errors -- [x] Tests pass -- [x] CLI help shows correct information - ---- - -## Dependencies - -**Blocks:** -- Task 4.0 (Configuration Tests) - needs registry updates - -**Depends On:** -- Task 1.0 (Mode Constants) - needs new mode definitions -- Task 2.0 (Configuration Validation) - needs struct updates - ---- - -## Notes - -- Registry changes affect CLI help output -- Default value change is BREAKING for alpha users -- Implementation details in `_techspec.md` lines 421-464 -- Keep changes focused on schema.go only diff --git a/tasks/prd-modes/_task_4.0.md b/tasks/prd-modes/_task_4.0.md deleted file mode 100644 index c8f7ac98..00000000 --- a/tasks/prd-modes/_task_4.0.md +++ /dev/null @@ -1,303 +0,0 @@ -# Task 4.0: Update Configuration Tests - - -Phase 1: Core Configuration -CRITICAL -Large -2 days - - ---- - -## Objective - -Update all configuration tests in `pkg/config/*_test.go` to validate the new three-mode system (memory/persistent/distributed) and ensure comprehensive test coverage for mode resolution and validation. 
- -**Impact**: Validates Phase 1 implementation and ensures no regressions in configuration logic. - ---- - - -**MANDATORY VALIDATION:** -- Run `make lint` - MUST BE CLEAN -- Run `go test ./pkg/config/... -v` - ALL TESTS MUST PASS -- Run `make test` - FULL TEST SUITE MUST PASS - -**COMPLETION CRITERIA:** -- Cannot complete this task until ALL tests pass -- Cannot complete this task until linter is clean -- This task BLOCKS all remaining phases - -**TEST COVERAGE:** -- Mode resolution for all three modes -- Database driver selection for all modes -- Temporal mode selection for all modes -- Validation accepts valid modes -- Validation rejects invalid modes (including "standalone") - - ---- - - - -### Test File Updates - -**Files to Update:** -1. `pkg/config/resolver_test.go` - Mode resolution tests -2. `pkg/config/config_test.go` - Validation tests -3. `pkg/config/loader_test.go` - Config loading tests (if needed) - -**Test Coverage Required:** -- Mode resolution with new constants -- Database driver selection for each mode -- Temporal mode selection for each mode -- Default mode behavior (memory) -- Validation accepts memory/persistent/distributed -- Validation rejects standalone/invalid - - - ---- - - -**Implementation Reference**: See `_techspec.md` Section "Phase 1.4: Update Configuration Tests" (lines 466-532) - -**Key Test Patterns:** -- Table-driven tests for mode resolution -- Validation test cases -- Edge case handling (empty modes, nil configs) -- Error message validation - -**Related Files:** -- `pkg/config/resolver_test.go` - Primary focus -- `pkg/config/config_test.go` - Validation focus -- `pkg/config/resolver.go` - Implementation (Tasks 1.0) -- `pkg/config/config.go` - Struct (Task 2.0) - - ---- - -## Subtasks - -### 4.1 Update Mode Resolution Tests -**File**: `pkg/config/resolver_test.go` - -- [ ] Update `TestResolveMode` test cases -- [ ] Test component mode override (distributed → memory) -- [ ] Test global mode inheritance (persistent → components 
use persistent) -- [ ] Test default mode behavior (empty → memory) -- [ ] Add test for all three modes explicitly - -**Reference**: `_techspec.md` lines 470-499 - -**Example Test Structure:** -```go -func TestResolveMode(t *testing.T) { - tests := []struct { - name string - globalMode string - componentMode string - want string - }{ - { - name: "Should use component mode when set", - globalMode: "distributed", - componentMode: "memory", - want: "memory", - }, - { - name: "Should use global mode when component not set", - globalMode: "persistent", - componentMode: "", - want: "persistent", - }, - { - name: "Should default to memory when neither set", - globalMode: "", - componentMode: "", - want: "memory", // Changed from "distributed" - }, - } - // ... test implementation -} -``` - ---- - -### 4.2 Update Database Driver Selection Tests -**File**: `pkg/config/resolver_test.go` - -- [ ] Update `TestEffectiveDatabaseDriver` test cases -- [ ] Test memory mode → SQLite -- [ ] Test persistent mode → SQLite -- [ ] Test distributed mode → PostgreSQL -- [ ] Test nil config → SQLite (default changed) -- [ ] Test explicit driver override - ---- - -### 4.3 Update Temporal Mode Selection Tests -**File**: `pkg/config/resolver_test.go` - -- [ ] Update `TestEffectiveTemporalMode` test cases (if exists) -- [ ] Test memory mode → embedded temporal -- [ ] Test persistent mode → embedded temporal -- [ ] Test distributed mode → remote temporal -- [ ] Test explicit temporal mode override - ---- - -### 4.4 Update Mode Validation Tests -**File**: `pkg/config/config_test.go` - -- [ ] Update `TestModeValidation` test cases -- [ ] Test "memory" mode validates successfully -- [ ] Test "persistent" mode validates successfully -- [ ] Test "distributed" mode validates successfully -- [ ] Test "standalone" mode FAILS validation (breaking change) -- [ ] Test invalid modes FAIL validation -- [ ] Verify error messages are helpful - -**Reference**: `_techspec.md` lines 505-518 - -**Example Test 
Structure:** -```go -func TestModeValidation(t *testing.T) { - tests := []struct { - mode string - wantErr bool - }{ - {"memory", false}, - {"persistent", false}, - {"distributed", false}, - {"standalone", true}, // No longer valid - {"invalid", true}, - } - // ... test implementation -} -``` - ---- - -### 4.5 Update Config Loading Tests -**File**: `pkg/config/loader_test.go` - -- [ ] Review loader tests for mode-specific logic -- [ ] Update test configs with new mode values -- [ ] Verify mode defaults applied correctly -- [ ] Test environment variable overrides - ---- - -### 4.6 Run Full Validation -**All Test Files** - -- [ ] Run `make lint` - verify zero warnings -- [ ] Run `go test ./pkg/config/... -v` - all tests pass -- [ ] Run `make test` - full suite passes -- [ ] Verify test coverage maintained (>80%) - ---- - -## Relevant Files - -### Primary Files (Modified) -- `pkg/config/resolver_test.go` - Mode resolution tests -- `pkg/config/config_test.go` - Validation tests -- `pkg/config/loader_test.go` - Loader tests (if needed) - -### Dependent Files (Implementation) -- `pkg/config/resolver.go` - Implementation (Task 1.0) -- `pkg/config/config.go` - Struct (Task 2.0) -- `pkg/config/definition/schema.go` - Registry (Task 3.0) - ---- - -## Deliverables - -1. **Updated Test Cases** - - All mode resolution tests updated - - All validation tests updated - - All loader tests updated (if needed) - -2. **Comprehensive Coverage** - - All three modes tested - - Edge cases covered - - Error cases validated - -3. **All Tests Passing** - - `make lint` clean - - `go test ./pkg/config/...` passes - - `make test` passes - -4. 
**Helpful Error Messages** - - Invalid modes produce clear errors - - "standalone" rejection includes migration hint - ---- - -## Tests - -### Test Execution Commands -```bash -# Individual test packages -go test ./pkg/config -v -go test ./pkg/config/definition -v - -# Specific test functions -go test ./pkg/config -run TestResolveMode -v -go test ./pkg/config -run TestEffectiveDatabaseDriver -v -go test ./pkg/config -run TestModeValidation -v - -# Full validation -make lint -make test -``` - -### Expected Outcomes -- All tests pass (100%) -- Linter clean (zero warnings) -- Test coverage >80% -- No flaky tests - ---- - -## Success Criteria - -- [x] Mode resolution tests updated and passing -- [x] Database driver tests updated and passing -- [x] Temporal mode tests updated and passing -- [x] Validation tests updated and passing -- [x] Loader tests updated and passing (if needed) -- [x] All tests pass: `go test ./pkg/config/... -v` -- [x] Linter clean: `make lint` -- [x] Full suite passes: `make test` -- [x] Test coverage maintained (>80%) -- [x] Error messages are clear and helpful - ---- - -## Dependencies - -**Blocks:** -- ALL Phase 2 tasks (Infrastructure) -- ALL Phase 3 tasks (Test Infrastructure) -- ALL Phase 4 tasks (Documentation) -- ALL Phase 5 tasks (Schemas) -- ALL Phase 6 tasks (Validation) - -**Depends On:** -- Task 1.0 (Mode Constants) - implementation to test -- Task 2.0 (Configuration Validation) - validation to test -- Task 3.0 (Configuration Registry) - registry to test - ---- - -## Notes - -- This task is the **final gate for Phase 1** -- Cannot proceed to Phase 2 until ALL tests pass -- Focus on comprehensive coverage for all three modes -- Validation failures should produce helpful error messages -- Implementation details in `_techspec.md` lines 466-532 -- This is the most critical testing task in Phase 1 diff --git a/tasks/prd-modes/_task_5.0.md b/tasks/prd-modes/_task_5.0.md deleted file mode 100644 index 4a4a8bfa..00000000 --- 
a/tasks/prd-modes/_task_5.0.md +++ /dev/null @@ -1,96 +0,0 @@ -## markdown - -## status: pending # Options: pending, in-progress, completed, excluded - - -engine/infra/cache -implementation -core_feature -low -redis - - -# Task 5.0: Update Cache Layer - -## Overview - -Update cache layer (`engine/infra/cache/mod.go`) to support three modes (memory/persistent/distributed) with auto-configuration of persistence settings. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technicals docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Replace old mode constants (standalone/distributed) with new modes (memory/persistent/distributed) -- Auto-disable persistence for memory mode -- Auto-enable persistence for persistent mode with default paths -- Both memory and persistent modes use the same `setupStandaloneCache()` function -- Add informative logging for mode transitions - - -## Subtasks - -- [ ] 5.1 Update mode constants in cache/mod.go -- [ ] 5.2 Update SetupCache() switch statement with mode-specific persistence logic -- [ ] 5.3 Add logging for cache mode and persistence configuration -- [ ] 5.4 Validate cache setup works correctly in all three modes - -## 
Implementation Details - -See **Phase 2.1: Update Cache Layer** in `_techspec.md` (lines 543-609). - -**Key Changes:** -- Lines 12-15: Update mode constants from `modeStandalone`/`modeDistributed` to `modeMemory`/`modePersistent`/`modeDistributed` -- Lines 60-69: Update SetupCache() switch with auto-configuration: - - `memory`: Force persistence OFF - - `persistent`: Auto-enable persistence with default path `./.compozy/redis` - - `distributed`: Use external Redis (unchanged) - -**Key Insight:** Both memory and persistent modes use the SAME `setupStandaloneCache()` function (from Redis PRD), differentiated only by persistence settings. - -### Relevant Files - -- `engine/infra/cache/mod.go` - Cache factory and mode routing - -### Dependent Files - -- `pkg/config/resolver.go` - Mode constants and resolution -- Redis PRD infrastructure: - - `MiniredisStandalone` wrapper - - `SnapshotManager` with BadgerDB - -## Deliverables - -- Updated mode constants in cache layer -- Mode-aware persistence configuration in SetupCache() -- Clear logging showing cache mode and persistence status -- All existing cache tests passing with updated modes - -## Tests - -Unit tests mapped from `_tests.md` for cache layer: -- [ ] Test cache setup in memory mode (persistence forced OFF) -- [ ] Test cache setup in persistent mode (persistence auto-enabled) -- [ ] Test cache setup in distributed mode (external Redis) -- [ ] Test default persistence path for persistent mode -- [ ] Test explicit persistence override in persistent mode -- [ ] Verify no persistence files created in memory mode -- [ ] Verify persistence files created in persistent mode - -## Success Criteria - -- `make lint` passes with no errors -- `go test ./engine/infra/cache/... 
-v` passes all tests -- Cache initializes correctly in each mode -- Persistence behavior matches mode intent (ephemeral for memory, persisted for persistent) -- Logging clearly indicates active mode and persistence state -- No breaking changes to distributed mode behavior diff --git a/tasks/prd-modes/_task_6.0.md b/tasks/prd-modes/_task_6.0.md deleted file mode 100644 index 38fe10cc..00000000 --- a/tasks/prd-modes/_task_6.0.md +++ /dev/null @@ -1,110 +0,0 @@ -## markdown - -## status: pending # Options: pending, in-progress, completed, excluded - - -engine/infra/server -implementation -core_feature -medium -temporal - - -# Task 6.0: Update Temporal Wiring - -## Overview - -Update Temporal wiring in `engine/infra/server/dependencies.go` to support three modes with intelligent database path defaults and updated validation logic. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technicals docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Update maybeStartStandaloneTemporal() to start embedded Temporal for both memory and persistent modes -- Update standaloneEmbeddedConfig() to set intelligent database path defaults based on mode -- Update 
validateDatabaseConfig() to remove hardcoded "standalone" string references -- Memory mode should default to `:memory:` for Temporal database -- Persistent mode should default to `./.compozy/temporal.db` for Temporal database -- Add clear logging showing mode, database path, and ports - - -## Subtasks - -- [ ] 6.1 Update maybeStartStandaloneTemporal() to handle memory and persistent modes -- [ ] 6.2 Update standaloneEmbeddedConfig() with intelligent database path defaults -- [ ] 6.3 Update validateDatabaseConfig() to use mode checks instead of hardcoded strings -- [ ] 6.4 Add comprehensive logging for Temporal startup -- [ ] 6.5 Validate Temporal starts correctly in all three modes - -## Implementation Details - -See **Phase 2.2: Update Temporal Wiring** in `_techspec.md` (lines 611-755). - -**Key Changes:** - -1. **Lines 378-414** - Update `maybeStartStandaloneTemporal()`: - - Check for `mode == ModeMemory || mode == ModePersistent` to start embedded Temporal - - Distributed mode continues to use external Temporal (no change) - - Add detailed logging with mode, database path, and ports - -2. **Lines 416-430** - Update `standaloneEmbeddedConfig()`: - - If `DatabaseFile` is empty and mode is `persistent`: default to `./.compozy/temporal.db` - - If `DatabaseFile` is empty and mode is `memory`: default to `:memory:` - - Explicit config values always take precedence - -3. 
**Lines 133-160** - Update `validateDatabaseConfig()`: - - Replace hardcoded "standalone" strings with mode variable - - Add mode to warning/error log messages - - Improve guidance messages to reference correct modes - -### Relevant Files - -- `engine/infra/server/dependencies.go` - Temporal startup and database validation - -### Dependent Files - -- `pkg/config/resolver.go` - Mode constants and resolution -- `pkg/config/config.go` - Configuration structure -- Embedded Temporal package (`temporal.io/server/temporal`) - -## Deliverables - -- Updated `maybeStartStandaloneTemporal()` supporting memory and persistent modes -- Intelligent database path defaults in `standaloneEmbeddedConfig()` -- Mode-aware validation messages in `validateDatabaseConfig()` -- Clear logging showing Temporal configuration and startup status -- All existing Temporal tests passing with updated modes - -## Tests - -Unit tests mapped from `_tests.md` for Temporal layer: -- [ ] Test Temporal startup in memory mode (uses :memory: database) -- [ ] Test Temporal startup in persistent mode (uses file database) -- [ ] Test Temporal skips startup in distributed mode -- [ ] Test default database path for memory mode -- [ ] Test default database path for persistent mode -- [ ] Test explicit database path override in each mode -- [ ] Test validateDatabaseConfig() warnings with mode context -- [ ] Verify Temporal state persists in persistent mode after restart -- [ ] Verify Temporal state is ephemeral in memory mode - -## Success Criteria - -- `make lint` passes with no errors -- `go test ./engine/infra/server/... -run TestMaybeStartStandaloneTemporal -v` passes -- `go test ./engine/infra/server/... 
-run TestValidateDatabaseConfig -v` passes -- Embedded Temporal starts correctly in memory mode with :memory: database -- Embedded Temporal starts correctly in persistent mode with file database -- Distributed mode continues to use external Temporal (no regression) -- Logging clearly indicates mode, database path, and ports -- Database validation warnings include mode context diff --git a/tasks/prd-modes/_task_7.0.md b/tasks/prd-modes/_task_7.0.md deleted file mode 100644 index 50d16c9b..00000000 --- a/tasks/prd-modes/_task_7.0.md +++ /dev/null @@ -1,104 +0,0 @@ -## markdown - -## status: pending # Options: pending, in-progress, completed, excluded - - -engine/infra/server -implementation -configuration -low -none - - -# Task 7.0: Update Server Logging - -## Overview - -Update server initialization logging in `engine/infra/server/server.go` to use actual mode values instead of hardcoded "standalone" strings, ensuring clear visibility into runtime configuration. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technicals docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Find and replace any hardcoded "standalone" strings in server logging -- Use 
`cfg.Mode` to dynamically log the actual mode value -- Ensure all mode-related logging uses structured fields -- Maintain consistent log format across server initialization -- No functional changes - logging updates only - - -## Subtasks - -- [ ] 7.1 Search for hardcoded "standalone" strings in server.go logging -- [ ] 7.2 Replace with dynamic mode values from config -- [ ] 7.3 Verify structured logging format consistency -- [ ] 7.4 Test logging output in each mode - -## Implementation Details - -See **Phase 2.3: Update Server Logging** in `_techspec.md` (lines 756-783). - -**Key Change Pattern:** -```go -// BEFORE: -log.Info("Starting in standalone mode", ...) - -// AFTER: -log.Info("Starting server", "mode", cfg.Mode, ...) -``` - -**Search Strategy:** -```bash -grep -n "standalone" engine/infra/server/server.go -``` - -**Logging Best Practices:** -- Use structured fields (`"mode", cfg.Mode`) -- Include relevant context (database driver, temporal mode, redis mode) -- Keep messages concise and actionable -- Use consistent terminology (memory/persistent/distributed) - -### Relevant Files - -- `engine/infra/server/server.go` - Server initialization and logging - -### Dependent Files - -- `pkg/config/config.go` - Configuration structure with Mode field -- Logger package (`logger.FromContext(ctx)`) - -## Deliverables - -- All hardcoded "standalone" strings removed from server logging -- Mode-specific logging using dynamic config values -- Consistent structured logging format -- Clear log output showing active mode during server startup - -## Tests - -Manual validation tests: -- [ ] Run `compozy start --mode memory` and verify logs show "mode=memory" -- [ ] Run `compozy start --mode persistent` and verify logs show "mode=persistent" -- [ ] Run `compozy start --mode distributed` and verify logs show "mode=distributed" -- [ ] Verify no "standalone" strings appear in log output -- [ ] Verify structured log fields are consistent across modes -- [ ] Check log clarity and 
usefulness for debugging - -## Success Criteria - -- `make lint` passes with no errors -- No hardcoded "standalone" strings remain in server.go (except comments referencing old behavior) -- Server startup logs clearly indicate active mode for all three modes -- Log format is consistent and uses structured fields -- Manual testing confirms correct mode values in logs -- No functional changes to server behavior - purely logging updates diff --git a/tasks/prd-modes/_task_8.0.md b/tasks/prd-modes/_task_8.0.md deleted file mode 100644 index b2f487d2..00000000 --- a/tasks/prd-modes/_task_8.0.md +++ /dev/null @@ -1,165 +0,0 @@ -## markdown - -## status: pending # Options: pending, in-progress, completed, excluded - - -engine/infra/server -testing -core_feature -medium -database|temporal|redis - - -# Task 8.0: Manual Runtime Validation - -## Overview - -Perform comprehensive manual validation of runtime infrastructure behavior across all three modes to ensure correct component initialization, state persistence, and mode-specific behavior. 
- - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technicals docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Test server startup in each mode (memory/persistent/distributed) -- Verify correct infrastructure components activate per mode -- Validate state persistence behavior in persistent mode -- Verify ephemeral behavior in memory mode -- Confirm no regressions in distributed mode -- Validate error messages and warnings are clear and helpful -- Confirm default mode (memory) works without any configuration - - -## Subtasks - -- [ ] 8.1 Manual validation: memory mode -- [ ] 8.2 Manual validation: persistent mode -- [ ] 8.3 Manual validation: distributed mode -- [ ] 8.4 Verify error handling and validation messages -- [ ] 8.5 Test default mode behavior (no config) -- [ ] 8.6 Document validation results and any issues - -## Implementation Details - -See **Phase 2.3: Update Server Logging** in `_techspec.md` (lines 756-783) for validation approach. 
- -### Memory Mode Validation - -```bash -# Start in memory mode (default) -compozy start - -# Or explicitly -compozy start --mode memory - -# Verify: -# - Server starts in <1 second -# - Logs show "mode=memory" -# - Database: SQLite :memory: -# - Temporal: embedded :memory: -# - Redis: Miniredis (no persistence) -# - No .compozy/ directory created -``` - -### Persistent Mode Validation - -```bash -# Start in persistent mode -compozy start --mode persistent - -# Verify: -# - Server starts in <2 seconds -# - Logs show "mode=persistent" -# - Database: SQLite file at ./.compozy/compozy.db -# - Temporal: file at ./.compozy/temporal.db -# - Redis: BadgerDB at ./.compozy/redis/ -# - .compozy/ directory created with db files - -# Test persistence: -# 1. Run a workflow -# 2. Stop server -# 3. Restart server -# 4. Verify workflow history persists -``` - -### Distributed Mode Validation - -```bash -# Requires external services -docker-compose up -d postgres redis temporal - -# Start in distributed mode -compozy start --mode distributed - -# Verify: -# - Server starts in 5-15 seconds -# - Logs show "mode=distributed" -# - Database: PostgreSQL external -# - Temporal: external cluster -# - Redis: external cluster -# - No embedded services started -``` - -### Relevant Files - -- `engine/infra/server/server.go` - Server initialization -- `engine/infra/server/dependencies.go` - Component startup -- `engine/infra/cache/mod.go` - Cache initialization -- Examples: - - `examples/hello-world.yaml` - Simple workflow for testing - -### Dependent Files - -- All Phase 2 implementation files (Tasks 5.0-7.0) - -## Deliverables - -- Documented validation results for all three modes -- List of any issues or unexpected behaviors discovered -- Confirmation that infrastructure behaves correctly per mode -- Verification of logging clarity and helpfulness -- Evidence of state persistence in persistent mode -- Evidence of ephemeral behavior in memory mode - -## Tests - -Manual validation checklist: 
-- [ ] Memory mode: server starts <1s -- [ ] Memory mode: no persistence files created -- [ ] Memory mode: data lost on restart -- [ ] Memory mode: correct logging output -- [ ] Persistent mode: server starts <2s -- [ ] Persistent mode: .compozy/ directory created -- [ ] Persistent mode: all db files present -- [ ] Persistent mode: state persists across restarts -- [ ] Persistent mode: correct logging output -- [ ] Distributed mode: connects to external services -- [ ] Distributed mode: no embedded services started -- [ ] Distributed mode: correct logging output -- [ ] Default behavior: memory mode without config -- [ ] Error messages: clear and helpful -- [ ] Warnings: appropriate context with mode info - -## Success Criteria - -- Server successfully starts in all three modes -- Infrastructure components activate correctly per mode: - - Memory: embedded SQLite :memory:, embedded Temporal :memory:, Miniredis ephemeral - - Persistent: embedded SQLite file, embedded Temporal file, Miniredis + BadgerDB - - Distributed: external Postgres, external Temporal, external Redis -- State persistence verified in persistent mode -- Ephemeral behavior verified in memory mode -- No regressions in distributed mode behavior -- Logging clearly indicates active mode and component configuration -- Default mode (memory) works without any configuration -- Error messages and warnings are clear and actionable -- All issues documented and resolved or tracked diff --git a/tasks/prd-modes/_task_9.0.md b/tasks/prd-modes/_task_9.0.md deleted file mode 100644 index 82cdb580..00000000 --- a/tasks/prd-modes/_task_9.0.md +++ /dev/null @@ -1,100 +0,0 @@ -## status: pending - - -test/helpers -testing -test_infrastructure -low -database|sqlite - - -# Task 9.0: Update Test Helpers - -## Overview - -Update test helper utilities to default to SQLite memory mode instead of PostgreSQL testcontainers, enabling 50-80% faster test execution with zero Docker dependencies. 
- - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start (tasks/prd-modes/_techspec.md) -- **YOU SHOULD ALWAYS** have in mind that this is a greenfield approach - no backwards compatibility required - - - -When you need information about SQLite best practices: -- Use perplexity to find SQLite testing patterns and in-memory database optimization -- Use context7 for Go testing framework documentation - - - -- Default `SetupTestDatabase` to SQLite memory mode -- Remove testcontainers as default dependency -- Add explicit `SetupPostgresContainer` for tests requiring PostgreSQL -- Ensure all helper functions use `t.Context()` instead of `context.Background()` -- Maintain backwards compatibility for tests explicitly requiring PostgreSQL - - -## Subtasks - -- [ ] 9.1 Update `SetupTestDatabase` to default to SQLite :memory: -- [ ] 9.2 Add explicit `SetupPostgresContainer` helper for PostgreSQL tests -- [ ] 9.3 Update `GetSharedPostgresDB` documentation to recommend SQLite -- [ ] 9.4 Verify context inheritance patterns (t.Context() usage) -- [ ] 9.5 Run test suite to measure performance improvement - -## Implementation Details - -### Objective -Change default test database from PostgreSQL testcontainers to SQLite memory mode for dramatic speed improvements while maintaining PostgreSQL test capability for specialized tests. - -### Key Changes - -**File:** `test/helpers/database.go` - -1. **Update `SetupTestDatabase` signature and implementation:** - - Change default driver from "postgres" to "sqlite" - - Use `:memory:` as default SQLite path - - Remove testcontainers startup from default path - -2. **Add new helper:** - - Create `SetupPostgresContainer(t *testing.T)` for tests explicitly requiring PostgreSQL - - Move testcontainers logic from `SetupTestDatabase` to this new function - -3. 
**Update documentation:** - - Document when to use SQLite vs PostgreSQL in tests - - Add migration guide comments for existing tests - -### Relevant Files - -- `test/helpers/database.go` - Primary implementation -- `test/helpers/standalone.go` - May reference database helpers - -### Dependent Files - -- All integration tests using `SetupTestDatabase` -- Tests using `GetSharedPostgresDB` - -## Deliverables - -- Updated `test/helpers/database.go` with SQLite default -- New `SetupPostgresContainer` helper for PostgreSQL tests -- Documentation comments explaining when to use each helper -- Performance benchmarks showing speedup - -## Tests - -Since this task updates test infrastructure itself, validation is through: - -- [ ] Run `make test` and verify all tests pass -- [ ] Measure test suite execution time (should be 50-80% faster) -- [ ] Verify no testcontainers startup in default test runs -- [ ] Confirm PostgreSQL tests still work with explicit `SetupPostgresContainer` -- [ ] Check that all helpers use `t.Context()` for proper context inheritance - -## Success Criteria - -- All tests pass with SQLite as default -- Test suite runs 50-80% faster than before -- Zero Docker dependencies for default test runs -- PostgreSQL tests still functional with explicit opt-in -- No `context.Background()` usage in test helpers diff --git a/tasks/prd-modes/_tasks.md b/tasks/prd-modes/_tasks.md deleted file mode 100644 index f9e02fcb..00000000 --- a/tasks/prd-modes/_tasks.md +++ /dev/null @@ -1,271 +0,0 @@ -# Three-Mode Configuration System - Task Summary - -**Status**: Ready for Implementation -**Breaking Change**: Yes (acceptable in alpha) -**Estimated Duration**: 7-8 days with parallelization -**Files Affected**: ~50 files across 7 phases - ---- - -## Overview - -Replace the current two-mode system (standalone/distributed) with a three-mode system: -- **memory** (NEW DEFAULT): In-memory SQLite + embedded services (no persistence) -- **persistent**: File-based SQLite + embedded 
services (with persistence) -- **distributed**: PostgreSQL + external services (production) - -**Key Benefits**: -- 50-80% faster test suite (no testcontainers/Docker startup) -- Zero-dependency quickstart (`compozy start` just works) -- Clearer intent-based naming (mode matches use case) - -**Technical Specification**: See `_techspec.md` for detailed implementation - ---- - -## Task List (29 Tasks) - -### Phase 1: Core Configuration [CRITICAL] 🔴 - -**Blocking**: All other work depends on Phase 1 completion - -- **1.0** Update Mode Constants & Defaults [M] - 1 day -- **2.0** Update Configuration Validation [M] - 1 day -- **3.0** Update Configuration Registry [M] - 1 day -- **4.0** Update Configuration Tests [L] - 2 days - -**Duration**: 2 days (with parallelization) - ---- - -### Phase 2: Infrastructure Wiring [HIGH] 🟡 - -**Dependencies**: Phase 1 complete - -- **5.0** Update Cache Layer [M] - 1 day -- **6.0** Update Temporal Wiring [L] - 2 days -- **7.0** Update Server Logging [S] - 0.5 days -- **8.0** Manual Runtime Validation [M] - 1 day - -**Duration**: 1.5 days (with parallelization) -**Parallel Lanes**: 3 (tasks 5.0, 6.0, 7.0 can run concurrently) - ---- - -### Phase 3: Test Infrastructure [HIGH] 🟡 - -**Dependencies**: Phase 1, Phase 2 complete - -- **9.0** Update Test Helpers [M] - 1 day -- **10.0** Add Database Mode Helper [S] - 0.5 days -- **11.0** Audit & Migrate Integration Tests [XL] - 3 days -- **12.0** Update Integration Test Helpers [M] - 1 day -- **13.0** Update Golden Test Files [S] - 0.5 days - -**Duration**: 2 days (with parallelization) -**Parallel Lanes**: 4 initially (9.0, 10.0, 12.0, 13.0 can start together) - ---- - -### Phase 4: Documentation [MEDIUM] 🟢 - -**Dependencies**: Phase 1 complete (can run parallel with Phases 2-3) - -- **14.0** Update Deployment Documentation [L] - 2 days -- **15.0** Update Configuration Documentation [M] - 1 day -- **16.0** Create Migration Guide [L] - 2 days -- **17.0** Update Quick Start [S] - 0.5 days -- 
**18.0** Update CLI Help [S] - 0.5 days -- **19.0** Create/Update Examples [M] - 1 day - -**Duration**: 1 day (with parallelization) -**Parallel Lanes**: 5 (all tasks can run concurrently) - ---- - -### Phase 5: Template System [CRITICAL] 🔴 - -**Dependencies**: Phase 1 complete (can run parallel with Phases 2-3-4) - -- **27.0** Add Mode Selection to TUI Form [M] - 0.5 days -- **28.0** Update Template System Types for Mode [S] - 0.5 days -- **29.0** Make Template Generation Mode-Aware [L] - 1 day - -**Duration**: 1 day (with parallelization) -**Parallel Lanes**: 2 (tasks 27.0 and 28.0 can run in parallel, then 29.0) - -**CRITICAL**: First impression for new users, affects onboarding experience - ---- - -### Phase 6: Schemas & Metadata [MEDIUM] 🟢 - -**Dependencies**: Phase 1 complete (can run parallel with Phases 2-3-4-5) - -- **20.0** Update JSON Schemas [S] - 0.5 days -- **21.0** Regenerate Generated Files [M] - 1 day - -**Duration**: 1 day (sequential) - ---- - -### Phase 7: Final Validation [CRITICAL] 🔴 - -**Dependencies**: ALL previous phases complete - -- **22.0** Comprehensive Testing [L] - 2 days (BLOCKING) -- **23.0** Validate Examples [M] - 1 day -- **24.0** Performance Benchmarking [M] - 1 day -- **25.0** Error Message Validation [S] - 0.5 days -- **26.0** Documentation Validation [S] - 0.5 days - -**Duration**: 1 day (with parallelization after task 22.0) -**Parallel Lanes**: 3 (after comprehensive tests complete) - ---- - -## Execution Strategy - -### Critical Path (6.5 days) - -``` -Phase 1: Core Config [2 days] - BLOCKING - ↓ -Phase 2: Infrastructure [1.5 days] - ↓ -Phase 3: Tests [2 days] - ↓ -Phase 6: Validation [1 day] -``` - -### Parallel Optimization - -**Week 1 (Days 1-2): Foundation** -- All hands on Phase 1 (Core Config) - CRITICAL BLOCKING - -**Week 1 (Days 3-5): Parallel Tracks** -- **Track A**: Phase 2 (Infrastructure) - 1 developer -- **Track B**: Phase 4 (Documentation) - 1 developer -- **Track C**: Phase 5 (Schemas) - 1 developer -- **Day 
4-5**: Phase 2 completes → start Phase 3 (Tests) - -**Week 2 (Day 6): Validation & Ship** -- Phase 6 (Final Validation) - All tracks converge -- Ship readiness verification - ---- - -## Batch Plan (Git Commits) - -### Batch 1: Core Configuration -**Tasks**: 1.0, 2.0, 3.0, 4.0 -**Commit**: `feat(config): add memory/persistent/distributed modes` - -### Batch 2: Infrastructure Wiring -**Tasks**: 5.0, 6.0, 7.0 -**Commit**: `feat(infra): wire three-mode system to runtime` - -### Batch 3: Test Helpers -**Tasks**: 9.0, 10.0, 12.0, 13.0 -**Commit**: `test: migrate test infrastructure to memory mode` - -### Batch 4: Test Migration -**Tasks**: 11.0 -**Commit**: `test: migrate integration tests to SQLite` - -### Batch 5: Documentation & Templates -**Tasks**: 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 27.0, 28.0, 29.0 -**Commit**: `docs: update for three-mode system and template generation` - -### Batch 6: Schemas & Validation -**Tasks**: 20.0, 21.0, 22.0-26.0 -**Commit**: `chore: update schemas and validate ship readiness` - ---- - -## Parallelization Summary - -| Phase | Sequential | Parallel | Savings | -|-------|-----------|----------|---------| -| Phase 1 | 5 days | 2 days | 60% | -| Phase 2 | 3.5 days | 1.5 days | 57% | -| Phase 3 | 6 days | 2 days | 67% | -| Phase 4 | 5.5 days | 1 day | 82% | -| Phase 5 | 2 days | 1 day | 50% | -| Phase 6 | 1.5 days | 1 day | 33% | -| Phase 7 | 3 days | 1 day | 67% | -| **TOTAL** | **27.5 days** | **7.5 days** | **73%** | - ---- - -## Success Metrics - -### Performance Targets -- ✅ Test suite: 50-80% faster (3-5 min → 45-90 sec) -- ✅ Server startup (memory): <1 second -- ✅ Server startup (persistent): <2 seconds -- ✅ No regressions in distributed mode - -### Quality Targets -- ✅ All tests pass (`make test`) -- ✅ Linter clean (`make lint`) -- ✅ Code coverage >80% -- ✅ All examples work in each mode - -### Documentation Targets -- ✅ All mode references updated -- ✅ Migration guide complete -- ✅ No inappropriate "standalone" references -- ✅ API 
docs regenerated - ---- - -## Risk Mitigation - -### High-Risk Tasks -1. **Task 11.0** (Test Migration): XL size, touches many files - - Mitigation: Break into sub-tasks per test suite - -2. **Task 6.0** (Temporal Wiring): Complex runtime behavior - - Mitigation: Extensive manual testing (Task 8.0) - -3. **Task 22.0** (Comprehensive Testing): Blocks ship - - Mitigation: Continuous testing throughout phases - -### Breaking Change Management -- Alpha version = acceptable breakage -- Clear migration guide (Task 16.0) -- Helpful error messages (Task 25.0) -- Version bump in CHANGELOG - ---- - -## Definition of Done - -### Code Complete -- [ ] All ~40 files updated -- [ ] All tests passing (`make test`) -- [ ] Linter clean (`make lint`) -- [ ] No "standalone" references (except historical) - -### Quality Complete -- [ ] Performance benchmarked (50%+ faster) -- [ ] Examples tested in each mode -- [ ] State persists in persistent mode -- [ ] Error messages helpful and clear - -### Documentation Complete -- [ ] All mode references updated -- [ ] Migration guide written and tested -- [ ] CHANGELOG entry written -- [ ] API docs regenerated - -### Ship Ready -- [ ] Smoke tests pass in all modes -- [ ] No regressions in distributed mode -- [ ] Team reviewed and approved -- [ ] Version bumped appropriately - ---- - -**Next Steps**: Begin with Phase 1 (Core Configuration) - all developers focus here first. diff --git a/tasks/prd-modes/_techspec.md b/tasks/prd-modes/_techspec.md deleted file mode 100644 index 0a4fd129..00000000 --- a/tasks/prd-modes/_techspec.md +++ /dev/null @@ -1,2170 +0,0 @@ -# Technical Specification: Three-Mode Configuration System - -## Executive Summary - -Replace the current two-mode system (standalone/distributed) with a clearer three-mode system (memory/persistent/distributed) to improve developer experience, reduce friction for new users, and dramatically speed up test execution. 
- -**Modes:** -- **memory** (NEW DEFAULT): In-memory SQLite + embedded Temporal + embedded Redis (no persistence) -- **persistent**: File-based SQLite + embedded Temporal + embedded Redis (with persistence) -- **distributed**: PostgreSQL + external Temporal + external Redis (production) - -**Key Benefits:** -- 50-80% faster test suite (no testcontainers/Docker startup) -- Zero-dependency quickstart (`compozy start` just works) -- Clearer intent-based naming (mode matches use case) -- Simpler onboarding for new developers - -**Impact:** -- Breaking change (acceptable in alpha, no backwards compatibility) -- ~40 files to update across 6 implementation phases -- Estimated effort: 6 days with proper phasing - ---- - -## Table of Contents - -1. [Current State Analysis](#current-state-analysis) -2. [Proposed Architecture](#proposed-architecture) -3. [Implementation Plan](#implementation-plan) -4. [Phase-by-Phase Details](#phase-by-phase-details) -5. [Testing Strategy](#testing-strategy) -6. [Risk Mitigation](#risk-mitigation) -7. [Success Metrics](#success-metrics) -8. [Migration Guide](#migration-guide) - ---- - -## Current State Analysis - -### Existing Mode System - -**Mode Constants** (`pkg/config/resolver.go`): -```go -const ( - ModeStandalone = "standalone" - ModeDistributed = "distributed" - ModeRemoteTemporal = "remote" -) -``` - -**Default Mode:** `ModeDistributed` (line 26 in resolver.go) -- Requires external PostgreSQL, Redis, and Temporal -- High barrier to entry for new users -- Slow test execution (testcontainers startup overhead) - -**Database Driver Selection** (`pkg/config/resolver.go:49-65`): -```go -func (cfg *Config) EffectiveDatabaseDriver() string { - if cfg.Database.Driver != "" { - return cfg.Database.Driver - } - if cfg.Mode == ModeStandalone { - return databaseDriverSQLite - } - return databaseDriverPostgres // Default -} -``` - -### Infrastructure Components - -**1. 
Database Layer:** -- PostgreSQL: Production-ready with pgvector support -- SQLite: Full feature parity via migrations, no pgvector - -**2. Temporal Layer:** -- External: Production clusters (mode="remote") -- Embedded: In-process server (mode="standalone") - - Default DB: `:memory:` - - Configurable via `temporal.standalone.database_file` - -**3. Redis/Cache Layer:** -- External: Production Redis clusters (mode="distributed") -- Embedded: Miniredis with optional BadgerDB persistence (mode="standalone") - - Implemented in Redis PRD (Tasks 1.0-13.0) - - `MiniredisStandalone` wrapper with `SnapshotManager` - -### Test Infrastructure - -**Current Pattern:** -```go -// Most tests use testcontainers with Postgres -pool, cleanup := helpers.GetSharedPostgresDB(t) -// Spins up pgvector/pgvector:pg16 container -``` - -**Impact:** -- Slow startup time (~10-30 seconds per test run) -- Docker dependency for local development -- CI/CD resource overhead - -**Alternative Available:** -```go -// SQLite tests (faster, but not default) -provider, cleanup := helpers.SetupTestDatabase(t, "sqlite") -``` - -### Known Limitations - -**SQLite Constraints:** -1. No pgvector support (hard error if configured) -2. Write concurrency limit (~10 concurrent workflows recommended) -3. Single writer at a time (serialized writes) -4. 
Requires external vector DB for knowledge/RAG features - -**Documented Validation** (`engine/infra/server/dependencies.go:122-161`): -```go -func (s *Server) validateDatabaseConfig(cfg *config.Config) error { - if driver != driverSQLite { - return nil - } - // Validates: - // - No pgvector with SQLite - // - Warns if >10 concurrent workflows - // - Warns if no vector DB configured -} -``` - ---- - -## Proposed Architecture - -### Three-Mode System - -``` -┌─────────────────────────────────────────────────────────────┐ -│ Mode System │ -├─────────────────────────────────────────────────────────────┤ -│ │ -│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ -│ │ MEMORY │ │ PERSISTENT │ │ DISTRIBUTED │ │ -│ │ (default) │ │ │ │ │ │ -│ └─────────────┘ └─────────────┘ └─────────────┘ │ -│ │ │ │ │ -│ ▼ ▼ ▼ │ -│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ -│ │ SQLite │ │ SQLite │ │ Postgres │ │ -│ │ :memory: │ │ file │ │ external │ │ -│ └─────────────┘ └─────────────┘ └─────────────┘ │ -│ │ │ │ │ -│ ▼ ▼ ▼ │ -│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ -│ │ Temporal │ │ Temporal │ │ Temporal │ │ -│ │ embedded │ │ embedded │ │ external │ │ -│ │ :memory: │ │ file │ │ remote │ │ -│ └─────────────┘ └─────────────┘ └─────────────┘ │ -│ │ │ │ │ -│ ▼ ▼ ▼ │ -│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ -│ │ Redis │ │ Redis │ │ Redis │ │ -│ │ Miniredis │ │ Miniredis │ │ external │ │ -│ │ no persist │ │ + BadgerDB │ │ cluster │ │ -│ └─────────────┘ └─────────────┘ └─────────────┘ │ -│ │ -└─────────────────────────────────────────────────────────────┘ -``` - -### Mode Characteristics - -| Aspect | memory | persistent | distributed | -|--------|--------|-----------|-------------| -| **Database** | SQLite :memory: | SQLite file | PostgreSQL | -| **Temporal** | Embedded :memory: | Embedded file | External | -| **Redis** | Miniredis ephemeral | Miniredis + BadgerDB | External | -| **Persistence** | None | Survives restarts | Full production | -| **Startup** | Instant 
| Instant | Requires services | -| **Test Speed** | Fastest | Fast | Slow (containers) | -| **Use Case** | Tests, quick dev | Local dev, debug | Production | -| **Data Loss Risk** | On restart | On disk failure | Replicated | - -### Configuration Inheritance - -**Resolution Hierarchy:** -``` -Component Mode (explicit) - ↓ (if not set) -Global Mode (config.Mode) - ↓ (if not set) -Default (memory) ← NEW DEFAULT -``` - -**Example:** -```yaml -# Implicit memory mode (all components use memory) -name: my-workflow - -# OR explicit mode -mode: persistent -database: - # Driver auto-selected: sqlite - path: ./.compozy/compozy.db -temporal: - # Mode inherited: persistent - standalone: - database_file: ./.compozy/temporal.db -redis: - # Mode inherited: persistent - standalone: - persistence: - enabled: true # Auto-enabled - data_dir: ./.compozy/redis -``` - ---- - -## Implementation Plan - -### Overview - -``` -Phase 1: Core Config [Days 1-2] CRITICAL - ↓ -Phase 2: Infrastructure [Days 2-3] HIGH - ↓ -Phase 3: Test Migration [Days 3-4] HIGH - ↓ -Phase 4: Documentation [Days 4-5] MEDIUM - ↓ -Phase 5: Schemas [Day 5] MEDIUM - ↓ -Phase 6: Final Validation [Day 6] CRITICAL -``` - -### File Change Summary - -``` -Core Configuration (7 files): - pkg/config/resolver.go - pkg/config/config.go - pkg/config/definition/schema.go - pkg/config/loader.go - pkg/config/resolver_test.go - pkg/config/config_test.go - pkg/config/loader_test.go - -Infrastructure (4 files): - engine/infra/cache/mod.go - engine/infra/server/dependencies.go - engine/infra/server/server.go - engine/infra/server/temporal_resolver_test.go - -Test Infrastructure (6+ files): - test/helpers/standalone.go - test/helpers/database.go - test/integration/standalone/helpers.go - test/integration/temporal/mode_switching_test.go - testdata/*.golden (3 files) - + integration test updates - -Documentation (8+ files): - docs/content/docs/deployment/*.mdx - docs/content/docs/configuration/*.mdx - docs/content/docs/guides/*.mdx - 
docs/content/docs/quick-start/*.mdx - cli/help/global-flags.md - -Examples (2-3 files): - examples/memory-mode/ (renamed from standalone) - examples/persistent-mode/ (new) - examples/README.md - -Schemas (2 files): - schemas/config.json - schemas/compozy.json - -TOTAL: ~40 files -``` - ---- - -## Phase-by-Phase Details - -### Phase 1: Core Configuration [CRITICAL] - -**Priority:** BLOCKING - All other work depends on this - -**Duration:** 1-2 days - -**Goal:** Core configuration system supports three modes with proper validation and resolution - -#### 1.1 Update Mode Constants - -**File:** `pkg/config/resolver.go` - -**Changes:** - -**Lines 6-11** - Replace mode constants: -```go -// BEFORE: -const ( - ModeStandalone = "standalone" - ModeDistributed = "distributed" - ModeRemoteTemporal = "remote" -) - -// AFTER: -const ( - ModeMemory = "memory" // In-memory SQLite, fastest - ModePersistent = "persistent" // File-based SQLite - ModeDistributed = "distributed" // Postgres + external services - ModeRemoteTemporal = "remote" // Temporal-specific (unchanged) -) -``` - -**Line 26** - Change default mode: -```go -// BEFORE: -return ModeDistributed - -// AFTER: -return ModeMemory -``` - -**Line 18** - Update docstring: -```go -// BEFORE: -// 3. Default fallback ("distributed") - -// AFTER: -// 3. 
Default fallback ("memory") -``` - -**Lines 36-42** - Update EffectiveTemporalMode: -```go -func (cfg *Config) EffectiveTemporalMode() string { - mode := ResolveMode(cfg, cfg.Temporal.Mode) - if mode == ModeDistributed { - return ModeRemoteTemporal - } - // memory and persistent both use embedded Temporal - return mode -} -``` - -**Lines 49-65** - Update EffectiveDatabaseDriver: -```go -func (cfg *Config) EffectiveDatabaseDriver() string { - if cfg == nil { - return databaseDriverSQLite // Changed default - } - driver := strings.TrimSpace(cfg.Database.Driver) - if driver != "" { - return driver // Explicit override - } - mode := strings.TrimSpace(cfg.Mode) - if mode == ModeMemory || mode == ModePersistent { - return databaseDriverSQLite - } - if mode == ModeDistributed { - return databaseDriverPostgres - } - return databaseDriverSQLite // Default to SQLite -} -``` - -**Validation:** -```bash -go test ./pkg/config -run TestResolveMode -go test ./pkg/config -run TestEffectiveDatabaseDriver -``` - -#### 1.2 Update Configuration Validation - -**File:** `pkg/config/config.go` - -**Line 56** - Update Mode field validation: -```go -// BEFORE: -Mode string `koanf:"mode" env:"COMPOZY_MODE" json:"mode" yaml:"mode" mapstructure:"mode" validate:"omitempty,oneof=standalone distributed"` - -// AFTER: -Mode string `koanf:"mode" env:"COMPOZY_MODE" json:"mode" yaml:"mode" mapstructure:"mode" validate:"omitempty,oneof=memory persistent distributed"` -``` - -**Lines 52-55** - Update Mode documentation: -```go -// BEFORE: -// Mode controls global deployment model. -// -// "distributed" (default): External services required -// "standalone": Embedded services, single-process - -// AFTER: -// Mode controls global deployment model. 
-// -// "memory" (default): In-memory SQLite, embedded services, fastest for tests/dev -// "persistent": File-based SQLite, embedded services, local development with persistence -// "distributed": PostgreSQL, external Temporal/Redis, production deployments -``` - -**Line 17** - Clean up constants: -```go -// BEFORE: -const ( - mcpProxyModeStandalone = "standalone" - databaseDriverPostgres = "postgres" - databaseDriverSQLite = "sqlite" -) - -// AFTER: -const ( - databaseDriverPostgres = "postgres" - databaseDriverSQLite = "sqlite" -) -``` - -**Validation:** -```bash -go test ./pkg/config -run TestConfigValidation -``` - -#### 1.3 Update Configuration Registry - -**File:** `pkg/config/definition/schema.go` - -**Update mode field registration (~line 733):** -```go -registry.Register(&FieldDef{ - Path: "mode", - Default: "memory", // Changed from "distributed" - CLIFlag: "mode", - EnvVar: "COMPOZY_MODE", - Type: reflect.TypeOf(""), - Help: "Deployment mode: memory (default, in-memory SQLite), persistent (file SQLite), or distributed (Postgres)", -}) -``` - -**Update temporal.mode registration:** -```go -registry.Register(&FieldDef{ - Path: "temporal.mode", - Default: "", // Empty = inherit from global - CLIFlag: "temporal-mode", - EnvVar: "TEMPORAL_MODE", - Type: reflect.TypeOf(""), - Help: "Temporal deployment mode (memory/persistent/remote), inherits from global mode if unset", -}) -``` - -**Update redis.mode registration:** -```go -registry.Register(&FieldDef{ - Path: "redis.mode", - Default: "", - CLIFlag: "redis-mode", - EnvVar: "REDIS_MODE", - Type: reflect.TypeOf(""), - Help: "Redis deployment mode (memory/persistent/distributed), inherits from global mode if unset", -}) -``` - -**Validation:** -```bash -go test ./pkg/config/definition -v -``` - -#### 1.4 Update Configuration Tests - -**File:** `pkg/config/resolver_test.go` - -Update test cases for mode resolution: -```go -func TestResolveMode(t *testing.T) { - tests := []struct { - name string - globalMode 
string - componentMode string - want string - }{ - { - name: "Should use component mode when set", - globalMode: "distributed", - componentMode: "memory", - want: "memory", - }, - { - name: "Should use global mode when component not set", - globalMode: "persistent", - componentMode: "", - want: "persistent", - }, - { - name: "Should default to memory when neither set", - globalMode: "", - componentMode: "", - want: "memory", // Changed from "distributed" - }, - } - // ... test implementation -} -``` - -**File:** `pkg/config/config_test.go` - -Update validation test cases: -```go -func TestModeValidation(t *testing.T) { - tests := []struct { - mode string - wantErr bool - }{ - {"memory", false}, - {"persistent", false}, - {"distributed", false}, - {"standalone", true}, // No longer valid - {"invalid", true}, - } - // ... test implementation -} -``` - -**Validation Point:** -```bash -make lint -go test ./pkg/config/... -v -``` - -**Success Criteria:** -- All config tests pass -- Linter shows no errors -- Mode resolution works correctly -- Database driver selection works for all modes - ---- - -### Phase 2: Infrastructure Wiring [HIGH] - -**Priority:** HIGH - Runtime behavior depends on this - -**Duration:** 1-2 days - -**Goal:** Runtime systems (Cache, Temporal, Database) work correctly with new modes - -#### 2.1 Update Cache Layer - -**File:** `engine/infra/cache/mod.go` - -**Lines 12-15** - Update mode constants: -```go -// BEFORE: -const ( - modeStandalone = "standalone" - modeDistributed = "distributed" -) - -// AFTER: -const ( - modeMemory = "memory" - modePersistent = "persistent" - modeDistributed = "distributed" -) -``` - -**Lines 60-69** - Update SetupCache switch: -```go -// BEFORE: -switch mode { -case modeStandalone: - return setupStandaloneCache(ctx, cacheCfg) -case modeDistributed: - return setupDistributedCache(ctx, cacheCfg) -default: - return nil, nil, fmt.Errorf("unsupported redis mode: %s", mode) -} - -// AFTER: -switch mode { -case modeMemory: - 
// Force persistence OFF for memory mode - cacheCfg.Redis.Standalone.Persistence.Enabled = false - log.Info("Cache in memory mode (no persistence)") - return setupStandaloneCache(ctx, cacheCfg) - -case modePersistent: - // Auto-enable persistence for persistent mode - if !cacheCfg.Redis.Standalone.Persistence.Enabled { - cacheCfg.Redis.Standalone.Persistence.Enabled = true - if cacheCfg.Redis.Standalone.Persistence.DataDir == "" { - cacheCfg.Redis.Standalone.Persistence.DataDir = "./.compozy/redis" - } - log.Info("Cache in persistent mode (auto-enabled persistence)", - "data_dir", cacheCfg.Redis.Standalone.Persistence.DataDir, - ) - } - return setupStandaloneCache(ctx, cacheCfg) - -case modeDistributed: - return setupDistributedCache(ctx, cacheCfg) - -default: - return nil, nil, fmt.Errorf("unsupported redis mode: %s", mode) -} -``` - -**Key Insight:** Both `memory` and `persistent` use the SAME `setupStandaloneCache()` function (from Redis PRD), just with different persistence settings! - -**Validation:** -```bash -go test ./engine/infra/cache/... -v -``` - -#### 2.2 Update Temporal Wiring - -**File:** `engine/infra/server/dependencies.go` - -**Lines 378-414** - Update maybeStartStandaloneTemporal: -```go -// BEFORE: -func maybeStartStandaloneTemporal(ctx context.Context) (func(), error) { - cfg := config.FromContext(ctx) - if cfg == nil { - return nil, fmt.Errorf("configuration is required to start Temporal") - } - if cfg.EffectiveTemporalMode() != modeStandalone { - return nil, nil - } - // ... 
start embedded Temporal -} - -// AFTER: -func maybeStartStandaloneTemporal(ctx context.Context) (func(), error) { - cfg := config.FromContext(ctx) - if cfg == nil { - return nil, fmt.Errorf("configuration is required to start Temporal") - } - mode := cfg.EffectiveTemporalMode() - // Start embedded Temporal for both memory and persistent modes - if mode != config.ModeMemory && mode != config.ModePersistent { - return nil, nil // Distributed mode uses external Temporal - } - embeddedCfg := standaloneEmbeddedConfig(cfg) - log := logger.FromContext(ctx) - log.Info( - "Starting embedded Temporal", - "mode", mode, - "database", embeddedCfg.DatabaseFile, - "frontend_port", embeddedCfg.FrontendPort, - "ui_enabled", embeddedCfg.EnableUI, - ) - // ... rest unchanged -} -``` - -**Lines 416-430** - Update standaloneEmbeddedConfig: -```go -// BEFORE: -func standaloneEmbeddedConfig(cfg *config.Config) *embedded.Config { - standalone := cfg.Temporal.Standalone - return &embedded.Config{ - DatabaseFile: standalone.DatabaseFile, // Defaults to ":memory:" - // ... 
rest of config - } -} - -// AFTER: -func standaloneEmbeddedConfig(cfg *config.Config) *embedded.Config { - standalone := cfg.Temporal.Standalone - - // Determine database file based on mode - dbFile := standalone.DatabaseFile - if dbFile == "" { - // Set intelligent defaults based on mode - if cfg.Mode == config.ModePersistent { - dbFile = "./.compozy/temporal.db" - } else { - dbFile = ":memory:" // Default for memory mode - } - } - - return &embedded.Config{ - DatabaseFile: dbFile, - FrontendPort: standalone.FrontendPort, - BindIP: standalone.BindIP, - Namespace: standalone.Namespace, - ClusterName: standalone.ClusterName, - EnableUI: standalone.EnableUI, - RequireUI: standalone.RequireUI, - UIPort: standalone.UIPort, - LogLevel: standalone.LogLevel, - StartTimeout: standalone.StartTimeout, - } -} -``` - -**Lines 133-160** - Update validateDatabaseConfig: -```go -// Replace string references to "standalone" with mode checks -func (s *Server) validateDatabaseConfig(cfg *config.Config) error { - if cfg == nil { - return fmt.Errorf("config is required for database validation") - } - driver := strings.TrimSpace(cfg.Database.Driver) - if driver == "" { - driver = driverPostgres - } - if driver != driverSQLite { - return nil - } - - log := logger.FromContext(s.ctx) - mode := cfg.Mode // Add for logging - - // Vector DB validation - if len(cfg.Knowledge.VectorDBs) == 0 { - log.Warn("SQLite mode without vector database - knowledge features will not work", - "mode", mode, - "driver", driverSQLite, - "recommendation", "Configure Qdrant, Redis, or Filesystem vector DB", - ) - } - - // pgvector incompatibility check (unchanged) - for _, vdb := range cfg.Knowledge.VectorDBs { - provider := strings.TrimSpace(vdb.Provider) - if strings.EqualFold(provider, "pgvector") { - return fmt.Errorf( - "pgvector provider is incompatible with SQLite driver. " + - "SQLite requires an external vector database. " + - "Configure one of: Qdrant, Redis, or Filesystem. 
" + - "See documentation: docs/database/sqlite.md#vector-database-requirement", - ) - } - } - - // Concurrency warning - maxWorkflows := cfg.Worker.MaxConcurrentWorkflowExecutionSize - if maxWorkflows > recommendedSQLiteConcurrency { - log.Warn("SQLite has concurrency limitations", - "mode", mode, - "driver", driverSQLite, - "max_concurrent_workflows", maxWorkflows, - "recommended_max", recommendedSQLiteConcurrency, - "note", "Consider using mode: distributed for high-concurrency workloads", - ) - } - - return nil -} -``` - -**Validation:** -```bash -go test ./engine/infra/server/... -run TestMaybeStartStandaloneTemporal -go test ./engine/infra/server/... -run TestValidateDatabaseConfig -``` - -#### 2.3 Update Server Logging - -**File:** `engine/infra/server/server.go` - -Search for any hardcoded "standalone" strings in logging and update to use the actual mode value: - -```go -// BEFORE: -log.Info("Starting in standalone mode", ...) - -// AFTER: -log.Info("Starting server", "mode", cfg.Mode, ...) 
-``` - -**Validation:** -```bash -# Manual test -compozy start --mode memory -compozy start --mode persistent -compozy start --mode distributed -``` - -**Success Criteria:** -- Server starts successfully in each mode -- Correct infrastructure components activate per mode -- Logging clearly shows active mode -- Default mode (memory) works without any config - ---- - -### Phase 3: Test Infrastructure [HIGH] - -**Priority:** HIGH - Unblocks test suite - -**Duration:** 1-2 days - -**Goal:** Test suite runs with SQLite by default, 50%+ faster - -#### 3.1 Update Test Helpers - -**File:** `test/helpers/standalone.go` - -**Line 20** - Update constant: -```go -// BEFORE: -const testModeStandalone = "standalone" - -// AFTER: -const testModeMemory = "memory" -``` - -**Lines 51-52, 103-104, 212-213, 284-286** - Update mode assignments: -```go -// BEFORE: -cfg.Mode = testModeStandalone -cfg.Redis.Mode = testModeStandalone - -// AFTER: -cfg.Mode = testModeMemory -cfg.Redis.Mode = testModeMemory -``` - -**Validation:** -```bash -go test ./test/helpers/... -v -``` - -#### 3.2 Add Database Mode Helper - -**File:** `test/helpers/database.go` - -Add new helper function (after line 306): -```go -// SetupTestDatabaseForMode sets up database based on explicit mode. -// This helper routes to the appropriate database backend: -// - memory/persistent → SQLite (fast, no containers) -// - distributed → PostgreSQL (full features, slower) -// -// Example usage: -// provider, cleanup := helpers.SetupTestDatabaseForMode(t, "memory") -// defer cleanup() -func SetupTestDatabaseForMode(t *testing.T, mode string) (*repo.Provider, func()) { - t.Helper() - switch mode { - case "memory", "persistent": - // Use SQLite for fast in-memory testing - return SetupTestDatabase(t, "sqlite") - case "distributed": - // Use PostgreSQL for full features (pgvector, etc.) 
- return SetupTestDatabase(t, "postgres") - default: - // Default to SQLite (memory mode) - t.Logf("Unknown mode %q, defaulting to sqlite", mode) - return SetupTestDatabase(t, "sqlite") - } -} -``` - -**Validation:** -```bash -go test ./test/helpers -run TestSetupTestDatabase -``` - -#### 3.3 Audit and Migrate Tests - -**Strategy:** -1. Most tests should use SQLite (memory mode) by default -2. Explicitly mark tests requiring PostgreSQL -3. Tests requiring pgvector must use distributed mode -4. High-concurrency tests (>10 workflows) should use distributed mode - -**Find tests using Postgres:** -```bash -grep -r "GetSharedPostgresDB" test/ --include="*.go" -``` - -**Migration Pattern:** - -**For tests that CAN use SQLite:** -```go -// BEFORE: -func TestWorkflowExecution(t *testing.T) { - pool, cleanup := helpers.GetSharedPostgresDB(t) - defer cleanup() - // ... test code -} - -// AFTER: -func TestWorkflowExecution(t *testing.T) { - provider, cleanup := helpers.SetupTestDatabase(t, "sqlite") - defer cleanup() - // ... test code (no changes) -} -``` - -**For tests that NEED PostgreSQL:** -```go -func TestPgVectorEmbedding(t *testing.T) { - // Explicitly require distributed mode for pgvector - provider, cleanup := helpers.SetupTestDatabase(t, "postgres") - defer cleanup() - // ... test code -} -``` - -**Files to Audit:** -- `test/integration/store/operations_test.go` -- `test/integration/worker/*/database.go` -- `test/integration/server/executions_integration_test.go` -- `test/integration/tool/helpers.go` -- `test/integration/repo/repo_test_helpers.go` -- Any knowledge/RAG tests using pgvector - -**Validation:** -```bash -# Run tests and measure time -time make test - -# Should see significant speedup -# Before: ~2-5 minutes (with testcontainers) -# After: ~30-90 seconds (with SQLite) -``` - -#### 3.4 Update Integration Test Helpers - -**File:** `test/integration/standalone/helpers.go` - -Update any references to "standalone" mode in test setup. 
- -**File:** `test/integration/temporal/mode_switching_test.go` - -Update test cases for mode switching: -```go -func TestModeResolver_Distributed(t *testing.T) { - // ... test distributed mode -} - -func TestModeResolver_Memory(t *testing.T) { - // ... test memory mode (renamed from standalone) -} - -func TestModeResolver_Persistent(t *testing.T) { - // ... test persistent mode (new) -} -``` - -#### 3.5 Update Golden Test Files - -**Files:** -- `testdata/config-diagnostics-standalone.golden` -- `testdata/config-show-mixed.golden` -- `testdata/config-show-standalone.golden` - -Update these files to reflect new mode names: -```yaml -# BEFORE: -mode: standalone - -# AFTER: -mode: memory -``` - -**Regenerate golden files:** -```bash -# Run tests with UPDATE_GOLDEN=1 to regenerate -UPDATE_GOLDEN=1 go test ./cli/cmd/config/... -``` - -**Success Criteria:** -- Test suite passes completely -- Test execution time reduced by 50%+ -- No regressions in test coverage -- pgvector tests explicitly use distributed mode - ---- - -### Phase 4: Documentation [MEDIUM] - -**Priority:** MEDIUM - User-facing changes - -**Duration:** 1-2 days - -**Goal:** All documentation reflects new modes, clear migration guide - -#### 4.1 Update Deployment Documentation - -**File:** `docs/content/docs/deployment/standalone-mode.mdx` - -**Action:** Rename to `memory-mode.mdx` and update content: - -```mdx ---- -title: "Memory Mode Deployment" -description: "Run Compozy with in-memory SQLite for fastest testing and development" -icon: Zap ---- - - -Memory mode is optimized for tests, rapid development, and CI/CD pipelines. -All data is ephemeral and lost on restart. 
- - -## When to Use Memory Mode - - - - Run test suites 50-80% faster without Docker dependencies - - - Instant startup for rapid iteration cycles - - - Deterministic, fast builds without external services - - - -## Quick Start - -Memory mode is the default - just run: - -```bash -compozy start -``` - -All data is stored in-memory and lost on restart. - -## Configuration - -```yaml -name: my-workflow -# mode: memory # Default, can be omitted - -models: - - provider: openai - model: gpt-4o-mini - api_key: "${OPENAI_API_KEY}" -``` - -## Characteristics - -- **Database:** SQLite :memory: (ephemeral) -- **Temporal:** Embedded in-memory -- **Redis:** Miniredis without persistence -- **Startup:** Instant -- **Data:** Lost on restart -- **Use Cases:** Tests, demos, quick experimentation - -## Limitations - -- No data persistence -- No pgvector support (use Qdrant/Redis for embeddings) -- Single process only -- Write concurrency limited (~10 concurrent workflows) - -## Next Steps - -- Need persistence? Use [persistent mode](/docs/deployment/persistent-mode) -- Ready for production? See [distributed mode](/docs/deployment/distributed-mode) -``` - -**Create:** `docs/content/docs/deployment/persistent-mode.mdx` - -```mdx ---- -title: "Persistent Mode Deployment" -description: "Run Compozy with file-based SQLite for local development with data persistence" -icon: Save ---- - - -Persistent mode is ideal for local development where you need to preserve -state between restarts without managing external services. 
- - -## When to Use Persistent Mode - - - - Develop workflows with state preservation across restarts - - - Inspect database state to debug complex workflows - - - Single-instance deployment for 5-10 users - - - -## Quick Start - -```bash -compozy start --mode persistent -``` - -Or configure in `compozy.yaml`: - -```yaml -name: my-workflow -mode: persistent - -# Optional: customize paths -database: - path: ./.compozy/compozy.db - -temporal: - standalone: - database_file: ./.compozy/temporal.db - -redis: - standalone: - persistence: - data_dir: ./.compozy/redis - snapshot_interval: 5m -``` - -## Default Paths - -When not specified, persistent mode uses: -- Database: `./.compozy/compozy.db` -- Temporal: `./.compozy/temporal.db` -- Redis: `./.compozy/redis/` - -## Characteristics - -- **Database:** SQLite file -- **Temporal:** Embedded with file storage -- **Redis:** Miniredis with BadgerDB snapshots -- **Startup:** Instant -- **Data:** Persists across restarts -- **Use Cases:** Local dev, debugging, small teams - -## Backup and Recovery - -```bash -# Backup -cp -r .compozy ./backup-$(date +%Y%m%d) - -# Restore -cp -r ./backup-20250101 .compozy -``` - -## Limitations - -- Same as memory mode (no pgvector, write concurrency limits) -- Single point of failure (no replication) -- Not recommended for production - -## Next Steps - -- Need scalability? See [distributed mode](/docs/deployment/distributed-mode) -- Need high availability? 
Use [distributed mode](/docs/deployment/distributed-mode) -``` - -**Update:** `docs/content/docs/deployment/distributed-mode.mdx` - -Add comparison section at the top: - -```mdx -## Mode Comparison - -| Feature | Memory | Persistent | Distributed | -|---------|--------|-----------|-------------| -| Persistence | None | File-based | Full | -| Startup | Instant | Instant | Requires services | -| Scalability | Single process | Single process | Horizontal | -| HA | No | No | Yes | -| Production | No | No | Yes | - -Use distributed mode when you need: -- Horizontal scaling -- High availability -- pgvector embeddings -- >10 concurrent workflows -``` - -#### 4.2 Update Configuration Documentation - -**File:** `docs/content/docs/configuration/mode-configuration.mdx` - -```mdx ---- -title: "Mode Configuration" -description: "Control deployment modes: memory, persistent, or distributed" -icon: GitBranch ---- - -Compozy supports three deployment modes, each optimized for different use cases. - -## Overview - - - - Set `mode: memory|persistent|distributed` at root level - - - Override per component (temporal, redis, database) - - - No external dependencies required - - - -## Mode Options - -### memory (Default) - -Fastest mode for testing and development: -- SQLite :memory: -- Embedded Temporal (in-memory) -- Embedded Redis (no persistence) -- Zero external dependencies - -```yaml -mode: memory # Default, can be omitted -``` - -### persistent - -File-based SQLite for local development: -- SQLite file storage -- Embedded Temporal (file storage) -- Embedded Redis (BadgerDB persistence) -- State survives restarts - -```yaml -mode: persistent -database: - path: ./.compozy/compozy.db -``` - -### distributed - -Production-ready with external services: -- PostgreSQL with pgvector -- External Temporal cluster -- External Redis cluster -- Horizontal scaling - -```yaml -mode: distributed -database: - driver: postgres - host: postgres.prod.internal - port: 5432 -temporal: - mode: remote 
- host_port: temporal.prod.internal:7233 -redis: - mode: distributed - distributed: - addr: redis.prod.internal:6379 -``` - -## Component Override - -```yaml -mode: memory # Global default - -temporal: - mode: persistent # Override for Temporal only - standalone: - database_file: ./.compozy/temporal.db - -redis: - mode: memory # Explicitly use memory mode -``` - -## Resolution Order - -1. Component-specific mode (if set) -2. Global mode (if set) -3. Default (memory) - -## Examples - -See complete examples: -- [Memory mode example](/docs/examples/memory-mode) -- [Persistent mode example](/docs/examples/persistent-mode) -- [Distributed mode example](/docs/examples/distributed-mode) -``` - -#### 4.3 Update Migration Guide - -**File:** `docs/content/docs/guides/migrate-standalone-to-distributed.mdx` - -**Rename to:** `mode-migration-guide.mdx` - -```mdx ---- -title: "Mode Migration Guide" -description: "Migrate between memory, persistent, and distributed modes" -icon: ArrowRightLeft ---- - -## Migration Paths - -``` -memory (fast, ephemeral) - ↓ -persistent (fast, saved) - ↓ -distributed (production) -``` - -## Migrating from Alpha Versions - -### Old standalone → New memory/persistent - -**Before (Alpha):** -```yaml -mode: standalone -``` - -**After:** -```yaml -# For ephemeral (testing) -mode: memory - -# OR for persistence (development) -mode: persistent -``` - -**What changed:** -- `standalone` mode split into `memory` (ephemeral) and `persistent` (files) -- Default changed from `distributed` to `memory` -- Configuration is otherwise identical - -### Old distributed → New distributed - -No changes needed - distributed mode works identically. 
- -## Memory → Persistent - -Add persistence without changing infrastructure: - -```yaml -# Before -mode: memory - -# After -mode: persistent -database: - path: ./.compozy/compozy.db -temporal: - standalone: - database_file: ./.compozy/temporal.db -``` - -**Data Migration:** None (memory mode has no data to migrate) - -## Persistent → Distributed - -**Step 1:** Export data (if needed) -```bash -# Export workflows -compozy workflow list --format json > workflows.json - -# Export memory/knowledge data using API -curl http://localhost:8080/api/v0/memory > memory.json -``` - -**Step 2:** Update configuration -```yaml -# Before -mode: persistent - -# After -mode: distributed -database: - driver: postgres - host: localhost - port: 5432 - user: compozy - password: ${DB_PASSWORD} - name: compozy -temporal: - mode: remote - host_port: localhost:7233 -redis: - mode: distributed - distributed: - addr: localhost:6379 -``` - -**Step 3:** Import data -```bash -# Import workflows using API -compozy workflow import workflows.json -``` - -## Common Issues - -### pgvector Error with SQLite - -**Error:** -``` -pgvector provider is incompatible with SQLite driver -``` - -**Solution:** -Use Qdrant, Redis, or Filesystem vector DB: -```yaml -mode: persistent -knowledge: - vector_dbs: - - name: default - provider: qdrant # or redis, or filesystem - config: - host: localhost - port: 6333 -``` - -### Concurrent Workflow Limit - -**Warning:** -``` -SQLite has concurrency limitations (max_concurrent_workflows=50, recommended_max=10) -``` - -**Solution:** -Migrate to distributed mode: -```yaml -mode: distributed -database: - driver: postgres -``` -``` - -#### 4.4 Update Quick Start - -**File:** `docs/content/docs/quick-start/index.mdx` - -Update the getting started section: - -```mdx -## Quick Start - -```bash -# Install -brew install compozy - -# Start (default: memory mode, no external deps) -compozy start - -# Your first workflow -compozy workflow run examples/hello-world.yaml -``` - 
-**Default mode:** memory (fastest, no persistence) -**Need persistence?** Add `mode: persistent` to config -**Production?** Use `mode: distributed` with external services -``` - -#### 4.5 Update CLI Help - -**File:** `cli/help/global-flags.md` - -Update mode flag description: - -```markdown -### --mode - -Deployment mode: memory (default), persistent, or distributed - -- **memory**: In-memory SQLite, embedded services (fastest) -- **persistent**: File-based SQLite, embedded services (local dev) -- **distributed**: PostgreSQL, external services (production) - -**Default:** memory - -**Environment:** COMPOZY_MODE -``` - -**Success Criteria:** -- All references to "standalone" removed (except historical context) -- Clear explanation of when to use each mode -- Migration guide covers all scenarios -- Examples work in each mode - ---- - -### Phase 5: Schemas & Metadata [MEDIUM] - -**Priority:** MEDIUM - Tooling and validation - -**Duration:** 0.5-1 day - -**Goal:** Schemas reflect new modes, tooling updated - -#### 5.1 Update JSON Schemas - -**File:** `schemas/config.json` - -Update mode enum: -```json -{ - "properties": { - "mode": { - "type": "string", - "enum": ["memory", "persistent", "distributed"], - "default": "memory", - "description": "Deployment mode: memory (in-memory, fastest), persistent (file-based), or distributed (production)" - } - } -} -``` - -**File:** `schemas/compozy.json` - -Update root-level mode and component modes: -```json -{ - "properties": { - "mode": { - "type": "string", - "enum": ["memory", "persistent", "distributed"], - "default": "memory" - }, - "temporal": { - "properties": { - "mode": { - "type": "string", - "enum": ["memory", "persistent", "remote"], - "description": "Temporal mode (empty = inherit from global)" - } - } - }, - "redis": { - "properties": { - "mode": { - "type": "string", - "enum": ["memory", "persistent", "distributed"], - "description": "Redis mode (empty = inherit from global)" - } - } - } - } -} -``` - 
-**Validation:** -```bash -# Validate schemas -go run scripts/validate-schemas.go - -# Or if using JSON schema validator -jsonschema -i examples/memory-mode/compozy.yaml schemas/compozy.json -``` - -#### 5.2 Update Generated Files - -**Regenerate Swagger docs:** -```bash -make swagger -``` - -**Regenerate schemas if auto-generated:** -```bash -# If using schemagen -go run pkg/schemagen/main.go -``` - -**Update golden files:** -```bash -UPDATE_GOLDEN=1 go test ./cli/cmd/config/... -``` - -**Success Criteria:** -- Schema validation passes -- IDE autocomplete shows correct modes -- Generated docs are correct -- No validation errors for example configs - ---- - -### Phase 6: Final Validation [CRITICAL] - -**Priority:** CRITICAL - Ship readiness - -**Duration:** 1 day - -**Goal:** Production-ready quality, all validations pass - -#### 6.1 Comprehensive Testing - -**Run full test suite:** -```bash -# Clean build -make clean -make build - -# Full test suite -make test - -# Expected: All pass, 50%+ faster than before -``` - -**Run linter:** -```bash -make lint - -# Expected: Zero warnings -``` - -**Test each mode:** -```bash -# Memory mode (default) -compozy start -# In another terminal: -compozy workflow run examples/hello-world.yaml - -# Persistent mode -compozy start --mode persistent -# Restart and verify state persists -compozy start --mode persistent -# Should show previous workflows - -# Distributed mode (requires services) -docker-compose up -d postgres redis temporal -compozy start --mode distributed -``` - -#### 6.2 Validate Examples - -**Test memory mode example:** -```bash -cd examples/memory-mode -compozy start -# Should start instantly -``` - -**Test persistent mode example:** -```bash -cd examples/persistent-mode -compozy start -# Should create .compozy/ directory -ls -la .compozy/ -# Should show: compozy.db, temporal.db, redis/ -``` - -**Test distributed mode example:** -```bash -cd examples/distributed-mode -docker-compose up -d -compozy start -# Should 
connect to external services -``` - -#### 6.3 Performance Benchmarking - -**Measure test suite performance:** -```bash -# Before (with testcontainers) -time make test -# Expected: 2-5 minutes - -# After (with SQLite memory mode) -time make test -# Expected: 30-90 seconds (50-80% faster) -``` - -**Measure server startup:** -```bash -# Memory mode -time compozy start --timeout 10s -# Expected: <1 second - -# Persistent mode -time compozy start --mode persistent --timeout 10s -# Expected: <2 seconds - -# Distributed mode -time compozy start --mode distributed --timeout 30s -# Expected: 5-15 seconds (external service connection) -``` - -#### 6.4 Error Message Validation - -**Test invalid mode:** -```bash -compozy start --mode invalid -# Should show helpful error: -# Error: invalid mode "invalid". Valid modes: memory, persistent, distributed -``` - -**Test pgvector with SQLite:** -```bash -# Create config with pgvector + SQLite -cat > test-config.yaml <80% for new code - -**Performance:** -- [ ] Test suite 50%+ faster -- [ ] Server startup <1s in memory mode -- [ ] Server startup <2s in persistent mode - -**Functionality:** -- [ ] All three modes work correctly -- [ ] State persists in persistent mode -- [ ] No regressions in distributed mode - -**Documentation:** -- [ ] All mode references updated -- [ ] Migration guide complete -- [ ] Examples work in each mode -- [ ] API docs generated - -### Secondary Metrics - -**User Experience:** -- [ ] `compozy start` works with zero config -- [ ] Error messages are helpful -- [ ] Mode switching is clear -- [ ] Configuration is intuitive - -**Quality:** -- [ ] No flaky tests -- [ ] Graceful error handling -- [ ] Comprehensive logging -- [ ] Clear diagnostics - ---- - -## Migration Guide - -### For Users - -**Alpha users with existing configs:** - -```yaml -# OLD (Alpha): -mode: standalone - -# NEW: -# For ephemeral (testing): -mode: memory - -# OR for persistence (development): -mode: persistent -database: - path: 
./.compozy/compozy.db -``` - -**No changes needed for distributed mode.** - -### For Developers - -**Test Migration:** - -```go -// OLD: -func TestSomething(t *testing.T) { - pool, cleanup := helpers.GetSharedPostgresDB(t) - defer cleanup() -} - -// NEW (default): -func TestSomething(t *testing.T) { - provider, cleanup := helpers.SetupTestDatabase(t, "sqlite") - defer cleanup() -} - -// NEW (requires Postgres): -func TestWithPgVector(t *testing.T) { - provider, cleanup := helpers.SetupTestDatabase(t, "postgres") - defer cleanup() -} -``` - -**Configuration Code:** - -```go -// OLD: -if cfg.Mode == config.ModeStandalone { - // ... -} - -// NEW: -if cfg.Mode == config.ModeMemory || cfg.Mode == config.ModePersistent { - // ... embedded services -} -``` - ---- - -## CHANGELOG Entry - -```markdown -## [VERSION] - YYYY-MM-DD - -### ⚠️ BREAKING CHANGES - -**Mode System Overhaul** (#XXX) - -Replaced two-mode system (standalone/distributed) with clearer three-mode system: - -- **mode: memory** (NEW DEFAULT): In-memory SQLite, embedded services, fastest for tests -- **mode: persistent**: File-based SQLite, embedded services, local dev with persistence -- **mode: distributed**: PostgreSQL, external services, production deployments - -**Migration:** -- `mode: standalone` → `mode: memory` (ephemeral) or `mode: persistent` (with files) -- `mode: distributed` → No changes (works identically) -- Default mode changed from `distributed` to `memory` - -**Benefits:** -- 50-80% faster test suite (no testcontainers) -- Zero-dependency quickstart -- Clearer intent-based naming - -**See:** docs/guides/mode-migration-guide.mdx - -### Features - -- **config**: Add memory/persistent/distributed mode system (#XXX) -- **cache**: Auto-configure persistence based on mode (#XXX) -- **temporal**: Set database path based on mode (#XXX) -- **tests**: Migrate to SQLite for faster execution (#XXX) - -### Documentation - -- Add memory mode deployment guide -- Add persistent mode deployment guide -- 
Update mode configuration reference -- Add mode migration guide -``` - ---- - -## Definition of Done - -### Code Complete - -- [ ] All ~40 files updated -- [ ] All tests passing (`make test`) -- [ ] Linter clean (`make lint`) -- [ ] No "standalone" references remain (except historical context in docs) -- [ ] All imports updated -- [ ] No dead code - -### Quality Complete - -- [ ] Performance benchmarked (test suite 50%+ faster) -- [ ] Examples tested in each mode -- [ ] Server starts in each mode -- [ ] State persists correctly in persistent mode -- [ ] Error messages are helpful and clear -- [ ] Logging shows correct mode information - -### Documentation Complete - -- [ ] All mode references updated -- [ ] Migration guide written and tested -- [ ] Examples updated and verified -- [ ] CHANGELOG entry written -- [ ] API docs regenerated -- [ ] CLI help updated - -### Ship Ready - -- [ ] Smoke tests pass in all modes -- [ ] No regressions in distributed mode -- [ ] Clear error messages for config issues -- [ ] Team reviewed and approved -- [ ] Version bumped appropriately - ---- - -## Next Steps - -**Immediate Actions:** - -1. **Review Plan** - Team review and approval -2. **Create Branch** - `feature/three-mode-system` -3. **Start Phase 1** - Update core config system -4. **Iterative Implementation** - Follow phases in order - -**First Commits:** - -```bash -# Phase 1: Core Configuration -git checkout -b feature/three-mode-system - -# Commit 1: Update mode constants and resolution -git commit -m "feat(config): add memory/persistent/distributed modes" - -# Commit 2: Update validation and registry -git commit -m "feat(config): update validation for new modes" - -# Commit 3: Update config tests -git commit -m "test(config): update mode resolution tests" - -# Phase 2: Infrastructure -git commit -m "feat(cache): auto-configure persistence by mode" -git commit -m "feat(temporal): set database path by mode" - -# Continue through remaining phases... 
-``` - ---- - -## Appendix - -### Related Redis PRD Work - -This implementation builds on the Redis PRD infrastructure: -- `MiniredisStandalone` wrapper (Tasks 2.0) -- `SnapshotManager` with BadgerDB (Task 7.0) -- Mode-aware cache factory (Task 3.0) -- Comprehensive testing (Tasks 8.0-10.0) - -**Key Insight:** The three-mode plan is a **refactoring** of the Redis PRD work, not new infrastructure. Both `memory` and `persistent` modes use the same `MiniredisStandalone` implementation, just with different persistence settings. - -### File Reference Map - -**Core Configuration (7 files):** -- `pkg/config/resolver.go` - Mode constants, resolution, driver selection -- `pkg/config/config.go` - Validation, struct tags, documentation -- `pkg/config/definition/schema.go` - Registry defaults and help text -- `pkg/config/loader.go` - Custom validation logic -- `pkg/config/resolver_test.go` - Mode resolution tests -- `pkg/config/config_test.go` - Validation tests -- `pkg/config/loader_test.go` - Loader tests - -**Infrastructure (4 files):** -- `engine/infra/cache/mod.go` - Cache factory mode switch -- `engine/infra/server/dependencies.go` - Temporal startup and DB validation -- `engine/infra/server/server.go` - Logging and initialization -- `engine/infra/server/temporal_resolver_test.go` - Mode switching tests - -**Test Helpers (6 files):** -- `test/helpers/standalone.go` - Mode constants and helpers -- `test/helpers/database.go` - Database setup helpers -- `test/integration/standalone/helpers.go` - Integration test helpers -- `test/integration/temporal/mode_switching_test.go` - Mode tests -- `testdata/config-diagnostics-standalone.golden` - Golden file -- `testdata/config-show-*.golden` - Golden files - -**Documentation (8 files):** -- `docs/content/docs/deployment/memory-mode.mdx` - Memory mode guide -- `docs/content/docs/deployment/persistent-mode.mdx` - Persistent mode guide -- `docs/content/docs/deployment/distributed-mode.mdx` - Distributed mode guide -- 
`docs/content/docs/configuration/mode-configuration.mdx` - Mode config reference -- `docs/content/docs/guides/mode-migration-guide.mdx` - Migration guide -- `docs/content/docs/quick-start/index.mdx` - Quick start update -- `docs/content/docs/troubleshooting/common-issues.mdx` - Troubleshooting -- `cli/help/global-flags.md` - CLI help - -**Examples (3 files):** -- `examples/memory-mode/` - Memory mode example (renamed) -- `examples/persistent-mode/` - Persistent mode example (new) -- `examples/README.md` - Examples index - -**Schemas (2 files):** -- `schemas/config.json` - Configuration schema -- `schemas/compozy.json` - Root schema - ---- - -**Document Version:** 1.0 -**Status:** Ready for Implementation -**Estimated Effort:** 6 days (with proper phasing) -**Breaking Change:** Yes (acceptable in alpha) diff --git a/tasks/prd-modes/_tests.md b/tasks/prd-modes/_tests.md deleted file mode 100644 index 49299f20..00000000 --- a/tasks/prd-modes/_tests.md +++ /dev/null @@ -1,467 +0,0 @@ -# Test Plan: Three-Mode Configuration System - -**PRD Reference**: `tasks/prd-modes/_prd.md` -**Tech Spec Reference**: `tasks/prd-modes/_techspec.md` -**Status**: Planning - ---- - -## Testing Strategy - -### Goals -1. **Performance**: Achieve 50-80% faster test suite execution -2. **Coverage**: Maintain >80% test coverage across all modes -3. **Quality**: Zero regressions in existing functionality -4. **Confidence**: Comprehensive validation for breaking change - -### Testing Principles -- **Mode-agnostic tests**: Most tests should work across all modes -- **Mode-specific tests**: Validate unique behaviors per mode -- **Fast by default**: Memory mode for 90% of tests -- **PostgreSQL exceptions**: pgvector and vector search tests only - ---- - -## Test Categories - -### 1. 
Unit Tests - -**Scope**: Individual functions and components -**Mode**: Memory (fastest) -**Coverage Target**: >90% - -#### Configuration Layer Tests (`pkg/config/*_test.go`) - -**Test Suite**: Mode Resolution -- [ ] `TestResolveMode_DefaultsToMemory` - Empty config defaults to memory -- [ ] `TestResolveMode_ComponentOverridesGlobal` - Component mode takes precedence -- [ ] `TestResolveMode_GlobalModeInheritance` - Components inherit global mode -- [ ] `TestResolveMode_AllThreeModes` - Memory, persistent, distributed all resolve correctly - -**Test Suite**: Database Driver Selection -- [ ] `TestEffectiveDatabaseDriver_Memory` - Memory mode returns SQLite -- [ ] `TestEffectiveDatabaseDriver_Persistent` - Persistent mode returns SQLite -- [ ] `TestEffectiveDatabaseDriver_Distributed` - Distributed mode returns PostgreSQL -- [ ] `TestEffectiveDatabaseDriver_ExplicitOverride` - Explicit driver overrides mode default -- [ ] `TestEffectiveDatabaseDriver_NilConfig` - Nil config returns SQLite (changed default) - -**Test Suite**: Temporal Mode Selection -- [ ] `TestEffectiveTemporalMode_Memory` - Memory mode returns embedded -- [ ] `TestEffectiveTemporalMode_Persistent` - Persistent mode returns embedded -- [ ] `TestEffectiveTemporalMode_Distributed` - Distributed mode returns remote -- [ ] `TestEffectiveTemporalMode_ExplicitOverride` - Explicit mode overrides - -**Test Suite**: Configuration Validation -- [ ] `TestModeValidation_Memory` - "memory" validates successfully -- [ ] `TestModeValidation_Persistent` - "persistent" validates successfully -- [ ] `TestModeValidation_Distributed` - "distributed" validates successfully -- [ ] `TestModeValidation_Standalone_Rejected` - "standalone" fails validation -- [ ] `TestModeValidation_Invalid_Rejected` - Invalid modes fail validation -- [ ] `TestModeValidation_ErrorMessage` - Error messages suggest memory mode - -**Test Suite**: Configuration Registry -- [ ] `TestFieldDefinition_ModeDefault` - Global mode default is "memory" -- 
[ ] `TestFieldDefinition_ModeHelpText` - Help text mentions all three modes -- [ ] `TestFieldDefinition_TemporalMode` - Temporal mode inherits from global -- [ ] `TestFieldDefinition_RedisMode` - Redis mode inherits from global - ---- - -### 2. Integration Tests - -**Scope**: Component interactions -**Mode**: Memory (default), PostgreSQL (when needed) -**Coverage Target**: >80% - -#### Cache Layer Tests (`engine/infra/cache/*_test.go`) - -**Test Suite**: Cache Setup -- [ ] `TestSetupCache_MemoryMode` - Memory mode uses miniredis without persistence -- [ ] `TestSetupCache_PersistentMode` - Persistent mode uses miniredis with BadgerDB -- [ ] `TestSetupCache_DistributedMode` - Distributed mode uses external Redis -- [ ] `TestSetupCache_AutoDisablePersistence_Memory` - Persistence disabled for memory -- [ ] `TestSetupCache_AutoEnablePersistence_Persistent` - Persistence enabled for persistent - -**Test Suite**: Cache Operations (All Modes) -- [ ] `TestCache_SetGet_Memory` - Basic operations in memory mode -- [ ] `TestCache_SetGet_Persistent` - Basic operations in persistent mode -- [ ] `TestCache_Persistence_Memory` - Verify no persistence in memory mode -- [ ] `TestCache_Persistence_Persistent` - Verify state persists in persistent mode - -#### Temporal Wiring Tests (`engine/infra/server/*_test.go`) - -**Test Suite**: Temporal Startup -- [ ] `TestMaybeStartTemporal_MemoryMode` - Starts embedded Temporal with :memory: -- [ ] `TestMaybeStartTemporal_PersistentMode` - Starts embedded Temporal with file DB -- [ ] `TestMaybeStartTemporal_DistributedMode` - Does not start embedded Temporal -- [ ] `TestMaybeStartTemporal_DatabasePath_Memory` - Uses :memory: by default -- [ ] `TestMaybeStartTemporal_DatabasePath_Persistent` - Uses ./.compozy/temporal.db by default -- [ ] `TestMaybeStartTemporal_ExplicitPath` - Respects explicit database_file path - -**Test Suite**: Server Startup (All Modes) -- [ ] `TestServerStart_MemoryMode` - Server starts in <1 second -- [ ] 
`TestServerStart_PersistentMode` - Server starts in <2 seconds -- [ ] `TestServerStart_MemoryMode_NoFiles` - No files created in memory mode -- [ ] `TestServerStart_PersistentMode_FilesCreated` - .compozy/ directory created - -#### Database Layer Tests (`engine/infra/database/*_test.go`) - -**Test Suite**: Database Connection -- [ ] `TestDatabaseSetup_Memory_SQLite` - Memory mode uses :memory: -- [ ] `TestDatabaseSetup_Persistent_SQLite` - Persistent mode uses file-based SQLite -- [ ] `TestDatabaseSetup_Distributed_Postgres` - Distributed mode uses PostgreSQL -- [ ] `TestDatabaseSetup_AutoMigration` - Schema migrations run for all modes - -**Test Suite**: Database Operations (Mode-Agnostic) -- [ ] `TestDatabase_CRUD_Operations` - Create, read, update, delete work across modes -- [ ] `TestDatabase_Transactions` - Transaction support across modes -- [ ] `TestDatabase_Concurrency_SQLite` - SQLite handles write serialization correctly -- [ ] `TestDatabase_Indexes` - Indexes created correctly across modes - ---- - -### 3. 
End-to-End Tests - -**Scope**: Full system behavior -**Mode**: All three modes tested separately -**Coverage Target**: >70% - -#### Server Lifecycle Tests (`test/integration/server/*_test.go`) - -**Test Suite**: Startup and Shutdown -- [ ] `TestE2E_MemoryMode_Startup` - Server starts with memory mode -- [ ] `TestE2E_PersistentMode_Startup` - Server starts with persistent mode -- [ ] `TestE2E_DistributedMode_Startup` - Server starts with distributed mode -- [ ] `TestE2E_MemoryMode_Shutdown` - Graceful shutdown in memory mode -- [ ] `TestE2E_PersistentMode_Shutdown_DataPersists` - State survives shutdown -- [ ] `TestE2E_MemoryMode_Restart_DataLost` - Verify ephemeral nature - -**Test Suite**: Workflow Execution (All Modes) -- [ ] `TestE2E_WorkflowExecution_Memory` - Execute workflow in memory mode -- [ ] `TestE2E_WorkflowExecution_Persistent` - Execute workflow in persistent mode -- [ ] `TestE2E_WorkflowExecution_Distributed` - Execute workflow in distributed mode -- [ ] `TestE2E_WorkflowState_Persistent` - Workflow state persists across restarts - -#### Mode Switching Tests (`test/integration/temporal/mode_switching_test.go`) - -**Test Suite**: Mode Transitions -- [ ] `TestModeSwitching_MemoryToPersistent` - Switch from memory to persistent (restart required) -- [ ] `TestModeSwitching_PersistentToDistributed` - Switch from persistent to distributed -- [ ] `TestModeSwitching_DistributedToMemory` - Switch from distributed to memory -- [ ] `TestModeSwitching_ConfigUpdate` - Configuration changes respected after restart - ---- - -### 4. 
Performance Tests - -**Scope**: Measure performance improvements -**Mode**: All modes for comparison -**Target**: 50-80% improvement - -#### Test Suite Execution Time - -**Baseline Measurement** (Before Changes): -```bash -# Full test suite with testcontainers -time make test -# Expected: 3-5 minutes -``` - -**Target Measurement** (After Changes): -```bash -# Full test suite with memory mode -time make test -# Target: 45-90 seconds (60-70% improvement) -``` - -**Benchmarks**: -- [ ] `BenchmarkTestSuite_Before` - Baseline with PostgreSQL testcontainers -- [ ] `BenchmarkTestSuite_After_Memory` - With memory mode (SQLite) -- [ ] `BenchmarkTestSuite_After_Persistent` - With persistent mode -- [ ] `BenchmarkTestSuite_After_Distributed` - With distributed mode (should match baseline) - -#### Startup Time Benchmarks - -**Test Suite**: Server Startup Performance -- [ ] `BenchmarkServerStartup_Memory` - Target: <1 second -- [ ] `BenchmarkServerStartup_Persistent` - Target: <2 seconds -- [ ] `BenchmarkServerStartup_Distributed` - Target: <3 seconds -- [ ] `BenchmarkServerStartup_Memory_ColdStart` - From completely cold state - -**Test Suite**: Workflow Execution Performance -- [ ] `BenchmarkWorkflowExecution_Memory` - Baseline for memory mode -- [ ] `BenchmarkWorkflowExecution_Persistent` - Compare to memory mode -- [ ] `BenchmarkWorkflowExecution_Distributed` - Compare to embedded modes -- [ ] `BenchmarkWorkflowExecution_NoRegression` - Verify no performance degradation - ---- - -### 5. 
Regression Tests - -**Scope**: Ensure no existing functionality broken -**Mode**: All modes -**Coverage Target**: 100% of existing features - -#### Test Suite: Distributed Mode Parity -- [ ] `TestRegression_DistributedMode_AllFeatures` - All features work in distributed mode -- [ ] `TestRegression_DistributedMode_PostgreSQL` - PostgreSQL functionality unchanged -- [ ] `TestRegression_DistributedMode_ExternalRedis` - External Redis functionality unchanged -- [ ] `TestRegression_DistributedMode_ExternalTemporal` - External Temporal functionality unchanged -- [ ] `TestRegression_DistributedMode_PgVector` - pgvector tests still pass with PostgreSQL - -#### Test Suite: Feature Parity -- [ ] `TestRegression_Migrations_AllModes` - Schema migrations work in all modes -- [ ] `TestRegression_Authentication_AllModes` - Authentication works in all modes -- [ ] `TestRegression_API_AllModes` - API endpoints work in all modes -- [ ] `TestRegression_CLI_AllModes` - CLI commands work in all modes - ---- - -### 6. 
Error Handling Tests - -**Scope**: Validate error messages and failure modes -**Mode**: All modes -**Coverage Target**: >90% - -#### Test Suite: Invalid Configuration -- [ ] `TestError_InvalidMode_HelpfulMessage` - "standalone" suggests "memory" -- [ ] `TestError_DistributedMode_MissingPostgres` - Clear error when PostgreSQL unreachable -- [ ] `TestError_DistributedMode_MissingRedis` - Clear error when Redis unreachable -- [ ] `TestError_DistributedMode_MissingTemporal` - Clear error when Temporal unreachable -- [ ] `TestError_PgVector_WithSQLite` - Clear error about pgvector requiring PostgreSQL - -#### Test Suite: Data Integrity -- [ ] `TestError_ConcurrentWrites_SQLite` - SQLite write serialization handled gracefully -- [ ] `TestError_DiskFull_PersistentMode` - Graceful handling of disk space issues -- [ ] `TestError_CorruptedDatabase_PersistentMode` - Recovery or clear error on corruption - ---- - -## Test Helpers and Utilities - -### New Test Helpers (Task 9.0, 10.0) - -**File**: `test/helpers/database.go` - -**New Functions**: -```go -// SetupTestDatabase - Default to memory mode (SQLite :memory:) -func SetupTestDatabase(t *testing.T) *sql.DB - -// SetupDatabaseWithMode - Explicit mode selection -func SetupDatabaseWithMode(t *testing.T, mode string) *sql.DB - -// SetupPostgresContainer - Explicit PostgreSQL (for pgvector tests) -func SetupPostgresContainer(t *testing.T) (*dockertest.Pool, func()) -``` - -**Usage Pattern**: -```go -// Fast tests (90% of test suite) -func TestSomething(t *testing.T) { - db := helpers.SetupTestDatabase(t) // Uses :memory: by default - // ... test logic ... -} - -// Tests needing persistence -func TestStateful(t *testing.T) { - db := helpers.SetupDatabaseWithMode(t, "persistent") - // ... test logic ... -} - -// Tests requiring PostgreSQL (pgvector only) -func TestVectorSearch(t *testing.T) { - pool, cleanup := helpers.SetupPostgresContainer(t) - defer cleanup() - // ... test logic ... 
-} -``` - ---- - -## Test Migration Plan (Task 11.0) - -### Audit Existing Tests - -**Identify**: -1. Tests using `helpers.GetSharedPostgresDB(t)` → Can migrate to SQLite -2. Tests requiring pgvector → Must keep PostgreSQL -3. Tests with PostgreSQL-specific SQL → Requires compatibility check - -**Migration Strategy**: - -```go -// BEFORE (PostgreSQL testcontainers) -func TestWorkflowExecution(t *testing.T) { - pool, cleanup := helpers.GetSharedPostgresDB(t) - defer cleanup() - // ... test logic ... -} - -// AFTER (SQLite memory mode - 50-80% faster) -func TestWorkflowExecution(t *testing.T) { - db := helpers.SetupTestDatabase(t) // Defaults to :memory: - // ... test logic ... -} -``` - -**Keep PostgreSQL For**: -- `test/integration/vector/*_test.go` - pgvector functionality -- `test/integration/postgres/*_test.go` - PostgreSQL-specific features -- Any tests explicitly testing distributed mode - ---- - -## Golden Test Files (Task 13.0) - -### Files to Update - -**Test Data Files**: -- `testdata/config-diagnostics-standalone.golden` → Rename to `config-diagnostics-memory.golden` -- `testdata/config-show-mixed.golden` → Update mode values -- `testdata/config-show-standalone.golden` → Rename to `config-show-memory.golden` - -**Regeneration Process**: -```bash -# Update golden files -UPDATE_GOLDEN=1 go test ./cli/cmd/config/... -v - -# Verify changes -git diff testdata/ -``` - ---- - -## Test Execution Commands - -### Local Development - -```bash -# Fast unit tests (memory mode) -go test ./pkg/config/... -v - -# Integration tests (scoped) -gotestsum --format pkgname -- -race -parallel=4 ./engine/agent - -# Full test suite -make test - -# Linter -make lint -``` - -### CI/CD Pipeline - -```bash -# Run all tests -make test - -# Verify performance improvement -time make test # Should be 50-80% faster than before - -# Coverage report -go test -coverprofile=coverage.out ./... 
-go tool cover -html=coverage.out -``` - ---- - -## Performance Benchmarking (Task 24.0) - -### Baseline Metrics - -**Before** (Two-Mode System): -- Full test suite: 3-5 minutes -- Server startup (standalone): ~2 seconds -- Server startup (distributed): ~3 seconds - -**After** (Three-Mode System): -- Full test suite: 45-90 seconds (60-70% improvement) -- Server startup (memory): <1 second -- Server startup (persistent): <2 seconds -- Server startup (distributed): <3 seconds - -### Benchmark Script - -```bash -#!/bin/bash -# benchmark.sh - -echo "Benchmarking test suite performance..." - -# Baseline (before changes) -git checkout main -time make test > /tmp/baseline.txt 2>&1 -BASELINE_TIME=$? - -# After changes (with memory mode) -git checkout feature/three-mode-system -time make test > /tmp/optimized.txt 2>&1 -OPTIMIZED_TIME=$? - -# Calculate improvement -echo "Baseline: ${BASELINE_TIME}s" -echo "Optimized: ${OPTIMIZED_TIME}s" -echo "Improvement: $(( (BASELINE_TIME - OPTIMIZED_TIME) * 100 / BASELINE_TIME ))%" -``` - ---- - -## Success Criteria - -### Performance Targets -- [x] Test suite execution: 50-80% faster (3-5 min → 45-90 sec) -- [x] Server startup (memory): <1 second -- [x] Server startup (persistent): <2 seconds -- [x] No performance regressions in distributed mode - -### Coverage Targets -- [x] Unit test coverage: >90% -- [x] Integration test coverage: >80% -- [x] E2E test coverage: >70% -- [x] Overall coverage maintained: >80% - -### Quality Targets -- [x] All tests pass: `make test` -- [x] Linter clean: `make lint` -- [x] Zero flaky tests -- [x] No regressions in distributed mode -- [x] All three modes validated - -### Error Handling -- [x] Invalid mode error messages clear and helpful -- [x] "standalone" rejection suggests migration path -- [x] pgvector + SQLite error explains requirement -- [x] All error paths tested - ---- - -## Test Deliverables - -### Phase 3 (Test Infrastructure) - Task 11.0 -- [ ] Updated test helpers (`test/helpers/database.go`) 
-- [ ] Migrated integration tests to SQLite (90%+) -- [ ] Golden test files updated -- [ ] Performance benchmarks documented - -### Phase 6 (Final Validation) - Task 22.0-25.0 -- [ ] Comprehensive test execution report -- [ ] Performance benchmark comparison -- [ ] Coverage report (>80%) -- [ ] Error message validation report -- [ ] Regression test results - ---- - -## Implementation Notes - -- Tests MUST use `t.Context()` instead of `context.Background()` -- Tests MUST use `logger.FromContext(ctx)` for logging -- Tests MUST use `config.FromContext(ctx)` for configuration -- Tests MUST NOT use global singletons -- Test scope commands during development: - - Tests: `gotestsum --format pkgname -- -race -parallel=4 ` - - Linting: `golangci-lint run --fix --allow-parallel-runners ` -- Full validation before task completion: `make test && make lint` - -**Estimated Effort**: 2 days for test migration (Phase 3) diff --git a/tasks/prd-redis/_docs.md b/tasks/prd-redis/_docs.md deleted file mode 100644 index 1bd7dbf8..00000000 --- a/tasks/prd-redis/_docs.md +++ /dev/null @@ -1,229 +0,0 @@ -# Documentation Plan: Standalone Mode - Redis Alternatives - -## Goals - -- Document the standalone deployment mode for Compozy -- Provide clear configuration examples for mode inheritance pattern -- Create migration guides from standalone to distributed mode -- Update existing deployment documentation with mode options - -## New/Updated Pages - -### docs/content/docs/deployment/standalone-mode.mdx (new) -- Purpose: Comprehensive guide to deploying Compozy in standalone mode -- Outline: - - What is Standalone Mode - - When to Use Standalone vs Distributed - - Architecture Overview (miniredis + optional persistence) - - Quick Start Guide - - Configuration Reference - - Memory and Performance Considerations - - Limitations and Trade-offs - - Migration to Distributed Mode -- Links: - - configuration/mode-configuration.mdx - - deployment/distributed-mode.mdx - - deployment/production.mdx - 
-### docs/content/docs/configuration/mode-configuration.mdx (new) -- Purpose: Document the global mode configuration pattern and inheritance -- Outline: - - Global Mode Configuration - - Component Mode Inheritance - - Mode Resolution Priority - - Configuration Examples (full standalone, full distributed, mixed mode) - - Per-Component Override Examples - - Environment Variable Overrides - - Validation Rules -- Links: - - configuration/redis.mdx - - configuration/temporal.mdx - - configuration/mcp-proxy.mdx - - deployment/standalone-mode.mdx - -### docs/content/docs/configuration/redis.mdx (new) -- Purpose: Complete Redis/cache configuration reference -- Outline: - - Overview (distributed vs standalone) - - Distributed Mode Configuration (external Redis) - - Standalone Mode Configuration (miniredis) - - Persistence Options (BadgerDB snapshots) - - Performance Tuning - - Troubleshooting -- Links: - - configuration/mode-configuration.mdx - - deployment/standalone-mode.mdx - -### docs/content/docs/deployment/distributed-mode.mdx (update) -- Purpose: Update to clarify distributed mode is for production/scale -- Updates: - - Add comparison table: standalone vs distributed - - Add section on when to migrate from standalone - - Update prerequisites to mention mode configuration -- Links: - - deployment/standalone-mode.mdx - - configuration/mode-configuration.mdx - -### docs/content/docs/getting-started/quickstart.mdx (update) -- Purpose: Add standalone mode quick start option -- Updates: - - Add "Option 1: Standalone Mode" section before existing instructions - - Show simple `mode: standalone` config example - - Note that standalone is ideal for local development -- Links: - - deployment/standalone-mode.mdx - -### docs/content/docs/deployment/production.mdx (update) -- Purpose: Clarify production deployments should use distributed mode -- Updates: - - Add warning about standalone mode limitations - - Add decision matrix: standalone vs distributed - - Update deployment 
checklist to include mode selection -- Links: - - deployment/distributed-mode.mdx - - deployment/standalone-mode.mdx - -## Schema Docs - -### docs/content/docs/reference/config-schema.mdx (update) -- Renders `schemas/config.json` -- Notes to highlight: - - New global `mode` field (standalone | distributed) - - New `redis` configuration section with mode inheritance - - New `redis.standalone.persistence` configuration - - Mode resolution logic explanation -- Add visual diagram showing mode inheritance - -## API Docs - -No API changes required - standalone mode is transparent to API consumers. - -## CLI Docs - -### docs/content/docs/cli/start.mdx (update) -- Purpose: Document `--standalone` flag and mode configuration -- Updates: - - Add `--mode` flag documentation - - Show examples: `compozy start --mode standalone` - - Show examples: `compozy start --mode distributed` - - Note that YAML config takes precedence over flags -- Links: - - configuration/mode-configuration.mdx - - deployment/standalone-mode.mdx - -### docs/content/docs/cli/config.mdx (update) -- Purpose: Document config validation for mode settings -- Updates: - - Add mode configuration validation examples - - Show `compozy config show` output with mode fields - - Show `compozy config diagnostics` mode resolution output -- Links: - - configuration/mode-configuration.mdx - -## Cross-page Updates - -### docs/content/docs/concepts/architecture.mdx (update) -- Add section on "Deployment Modes" -- Update architecture diagrams to show both standalone and distributed options - -### docs/content/docs/configuration/temporal.mdx (update) -- Document mode inheritance from global config -- Add examples showing `temporal.mode` override - -### docs/content/docs/configuration/mcp-proxy.mdx (update) -- Document mode inheritance from global config -- Add examples showing `mcpproxy.mode` override - -### docs/content/docs/troubleshooting/common-issues.mdx (update) -- Add section: "Redis Connection Issues in Standalone 
Mode" -- Add section: "Mode Configuration Validation Errors" -- Add section: "Snapshot/Persistence Failures" - -## Navigation & Indexing - -Update `docs/source.config.ts`: - -```typescript -// Deployment section -{ - title: "Deployment", - pages: [ - "deployment/standalone-mode", // NEW - "deployment/distributed-mode", // UPDATED - "deployment/production", - "deployment/docker", - "deployment/kubernetes" - ] -} - -// Configuration section -{ - title: "Configuration", - pages: [ - "configuration/overview", - "configuration/mode-configuration", // NEW - "configuration/redis", // NEW - "configuration/temporal", - "configuration/database", - // ... existing pages - ] -} -``` - -## Migration Guide - -### docs/content/docs/guides/migrate-standalone-to-distributed.mdx (new) -- Purpose: Step-by-step guide for migrating from standalone to distributed -- Outline: - - Prerequisites (Redis, updated config) - - Step 1: Provision External Redis - - Step 2: Update Configuration (change mode) - - Step 3: Export Critical Data (if needed) - - Step 4: Restart Services - - Step 5: Verify Functionality - - Step 6: Clean Up Standalone Data - - Rollback Procedure - - Troubleshooting Common Issues -- Links: - - deployment/distributed-mode.mdx - - configuration/redis.mdx - -## Acceptance Criteria - -- [ ] All new pages exist with complete outlines and working examples -- [ ] Cross-links between standalone, distributed, and mode configuration docs are bidirectional -- [ ] Configuration schema docs render the new mode and redis fields correctly -- [ ] CLI documentation shows mode flags and configuration examples -- [ ] Navigation in `source.config.ts` includes new pages in logical order -- [ ] Migration guide provides clear, testable steps -- [ ] Docs dev server builds without warnings or missing routes -- [ ] All code examples in docs are syntactically correct and follow project standards -- [ ] Performance and limitations clearly documented for standalone mode -- [ ] Decision matrices help 
users choose appropriate deployment mode - -## Visual Assets Needed - -1. **Architecture Diagram**: Standalone vs Distributed comparison - - Location: `docs/public/images/deployment/` - - Shows: miniredis + optional BadgerDB vs external Redis - -2. **Mode Inheritance Diagram**: Configuration resolution flow - - Location: `docs/public/images/configuration/` - - Shows: Global mode → Component modes → Default fallback - -3. **Decision Matrix**: When to use standalone vs distributed - - Location: Inline in docs as table - - Criteria: Team size, workload, durability, budget, complexity - -## Documentation Review Checklist - -- [ ] Technical accuracy verified against implementation -- [ ] Configuration examples tested and validated -- [ ] Migration guide steps tested end-to-end -- [ ] Performance numbers and limitations are accurate -- [ ] Security considerations documented -- [ ] Troubleshooting section covers common issues -- [ ] Links to external resources (miniredis, BadgerDB) are current -- [ ] Code snippets follow project coding standards -- [ ] YAML examples follow configuration best practices - diff --git a/tasks/prd-redis/_examples.md b/tasks/prd-redis/_examples.md deleted file mode 100644 index a069b9d8..00000000 --- a/tasks/prd-redis/_examples.md +++ /dev/null @@ -1,394 +0,0 @@ -# Examples Plan: Standalone Mode - Redis Alternatives - -## Conventions - -- Folder prefix: `examples/standalone/*` -- Use `mode: standalone` in all examples to demonstrate standalone deployment -- Avoid secrets; use environment variable interpolation (`${VAR}`) -- Include README with prerequisites and commands -- Keep examples minimal and focused on specific use cases - -## Example Matrix - -### 1. 
examples/standalone/basic - -- **Purpose**: Simplest possible standalone deployment for local development -- **Files**: - - `compozy.yaml` - Minimal config with `mode: standalone` - - `workflows/hello-world.yaml` - Basic workflow - - `README.md` - Quick start instructions -- **Demonstrates**: - - Global mode configuration (inheritance pattern) - - Zero external dependencies (except PostgreSQL) - - In-memory operation (no persistence) -- **Walkthrough**: - ```bash - cd examples/standalone/basic - compozy start - compozy workflow run hello-world - ``` - -### 2. examples/standalone/with-persistence - -- **Purpose**: Standalone with BadgerDB snapshots for data durability -- **Files**: - - `compozy.yaml` - Standalone with persistence enabled - - `workflows/stateful-workflow.yaml` - Workflow using memory store - - `.gitignore` - Exclude `./data` directory - - `README.md` - Persistence configuration guide -- **Demonstrates**: - - Snapshot configuration (interval, on-shutdown, restore) - - Data persistence across restarts - - Memory store usage with conversation history -- **Walkthrough**: - ```bash - cd examples/standalone/with-persistence - compozy start - # Run workflow, stop server, restart - compozy start # Data restored from snapshot - ``` - -### 3. examples/standalone/mixed-mode - -- **Purpose**: Advanced - override specific components for hybrid deployment -- **Files**: - - `compozy.yaml` - Standalone with Redis override to distributed - - `workflows/hybrid-workflow.yaml` - Workflow example - - `docker-compose.yml` - External Redis for testing - - `README.md` - Mixed mode use case explanation -- **Demonstrates**: - - Per-component mode overrides - - Global mode with Redis using external instance - - Temporal and MCPProxy still embedded - - When to use mixed mode (dev + shared Redis) -- **Walkthrough**: - ```bash - cd examples/standalone/mixed-mode - docker compose up -d redis # Start external Redis - compozy start - ``` - -### 4. 
examples/standalone/edge-deployment - -- **Purpose**: Minimal footprint for edge/IoT deployments -- **Files**: - - `compozy.yaml` - Standalone with memory limits and persistence - - `workflows/edge-workflow.yaml` - Lightweight workflow - - `Dockerfile` - Optimized container image - - `README.md` - Edge deployment guide -- **Demonstrates**: - - Resource-constrained configuration - - Compact snapshot intervals - - Minimal logging and telemetry - - Single-binary deployment -- **Walkthrough**: - ```bash - cd examples/standalone/edge-deployment - docker build -t compozy-edge . - docker run -p 8080:8080 compozy-edge - ``` - -### 5. examples/standalone/migration-demo - -- **Purpose**: Demonstrate migration from standalone to distributed -- **Files**: - - `compozy-standalone.yaml` - Initial standalone config - - `compozy-distributed.yaml` - Target distributed config - - `workflows/sample-workflow.yaml` - Test workflow - - `migrate.sh` - Migration script with steps - - `docker-compose.yml` - External Redis and PostgreSQL - - `README.md` - Complete migration walkthrough -- **Demonstrates**: - - Configuration differences between modes - - Data export (if applicable) - - Service restart procedure - - Validation steps -- **Walkthrough**: - ```bash - cd examples/standalone/migration-demo - # Start with standalone - compozy start --config compozy-standalone.yaml - # Run migration script - ./migrate.sh - # Start with distributed - docker compose up -d redis - compozy start --config compozy-distributed.yaml - ``` - -## Minimal YAML Shapes - -### Basic Standalone (Full Inheritance) - -```yaml -# compozy.yaml - Minimal standalone -mode: standalone - -server: - host: 0.0.0.0 - port: 8080 - -database: - host: localhost - port: 5432 - name: compozy - -# All components (redis, temporal, mcpproxy) inherit "standalone" mode -``` - -### Standalone with Persistence - -```yaml -# compozy.yaml - With persistence -mode: standalone - -redis: - standalone: - persistence: - enabled: true - 
data_dir: ./data/redis - snapshot_interval: 5m - snapshot_on_shutdown: true - restore_on_startup: true - -server: - host: 0.0.0.0 - port: 8080 - -database: - host: localhost - port: 5432 - name: compozy -``` - -### Mixed Mode (Advanced) - -```yaml -# compozy.yaml - Mixed mode -mode: standalone # Default for all components - -# Override Redis to use external instance -redis: - mode: distributed - addr: localhost:6379 - password: ${REDIS_PASSWORD} - -# Temporal and MCPProxy inherit "standalone" -server: - host: 0.0.0.0 - port: 8080 - -database: - host: localhost - port: 5432 - name: compozy -``` - -### Full Distributed (Comparison) - -```yaml -# compozy.yaml - Distributed mode -mode: distributed - -redis: - addr: redis.prod.internal:6379 - password: ${REDIS_PASSWORD} - -temporal: - host_port: temporal.prod.internal:7233 - namespace: production - -mcpproxy: - mode: "" # Uses external MCP proxy (or configure as needed) - -server: - host: 0.0.0.0 - port: 8080 - -database: - host: postgres.prod.internal - port: 5432 - name: compozy -``` - -## Test & CI Coverage - -Add to `test/integration/examples/`: - -- `standalone_basic_test.go` - Validate basic example runs and executes workflow -- `standalone_persistence_test.go` - Verify snapshot/restore cycle -- `mixed_mode_test.go` - Validate mode overrides work correctly - -Integration test requirements: -- Use testcontainers for PostgreSQL and Redis (when needed) -- Test each example's workflow execution -- Verify mode configuration is respected -- Validate persistence (if applicable) - -## Runbooks per Example - -### basic -- **Prereqs**: PostgreSQL running locally (or via Docker) -- **Env vars**: None required -- **Commands**: - ```bash - compozy start - compozy workflow list - compozy workflow run hello-world - ``` -- **Expected**: Workflow executes successfully, server shows standalone mode logs - -### with-persistence -- **Prereqs**: PostgreSQL, writable `./data` directory -- **Env vars**: None required -- **Commands**: - 
```bash - compozy start - compozy workflow run stateful-workflow - # Stop server (Ctrl+C) - compozy start - # Verify data restored - compozy workflow list - ``` -- **Expected**: Data persists across restarts, snapshot logs visible - -### mixed-mode -- **Prereqs**: Docker, PostgreSQL -- **Env vars**: `REDIS_PASSWORD` (optional, if Redis auth enabled) -- **Commands**: - ```bash - docker compose up -d redis - compozy start - compozy config show # Verify Redis in distributed, others standalone - ``` -- **Expected**: Redis uses external instance, Temporal/MCP embedded - -### edge-deployment -- **Prereqs**: Docker -- **Env vars**: None required -- **Commands**: - ```bash - docker build -t compozy-edge . - docker run -p 8080:8080 compozy-edge - curl http://localhost:8080/health - ``` -- **Expected**: Container starts, memory footprint <512MB - -### migration-demo -- **Prereqs**: Docker, Docker Compose -- **Env vars**: `REDIS_PASSWORD` for distributed mode -- **Commands**: - ```bash - # Standalone phase - compozy start --config compozy-standalone.yaml - compozy workflow run sample-workflow - # Migration - ./migrate.sh # Starts Docker services - compozy start --config compozy-distributed.yaml - compozy workflow list # Verify migration - ``` -- **Expected**: Successful migration, workflows accessible in distributed mode - -## Example README Template - -Each example should include: - -```markdown -# [Example Name] - -## Purpose -[What this example demonstrates] - -## Prerequisites -- PostgreSQL [version] -- [Other requirements] - -## Configuration Highlights -- `mode: standalone` - [explanation] -- [Other key config points] - -## Quick Start -1. [Setup step] -2. `compozy start` -3. 
`compozy workflow run [workflow]` - -## What to Observe -- [Log messages to look for] -- [Behavior to verify] - -## Cleanup -```bash -[Cleanup commands] -``` - -## Next Steps -- Try [related example] -- Read [related docs] -``` - -## Acceptance Criteria - -- [ ] All 5 examples exist and are runnable -- [ ] Each example has a comprehensive README with commands and expected outputs -- [ ] YAML configurations follow project standards and pass validation -- [ ] Workflows in examples are minimal but demonstrate key features -- [ ] Examples are tested in CI (integration tests) -- [ ] Each example builds successfully (if Dockerfile included) -- [ ] Mixed-mode example clearly shows mode override pattern -- [ ] Migration demo provides clear before/after comparison -- [ ] All examples use environment variable interpolation for sensitive data -- [ ] `.gitignore` excludes generated data directories -- [ ] Examples are referenced in documentation (cross-links) - -## Additional Assets - -### docker-compose.yml (for examples needing external services) - -```yaml -version: '3.8' -services: - postgres: - image: postgres:15-alpine - environment: - POSTGRES_DB: compozy - POSTGRES_USER: compozy - POSTGRES_PASSWORD: compozy - ports: - - "5432:5432" - volumes: - - pgdata:/var/lib/postgresql/data - - redis: - image: redis:7-alpine - ports: - - "6379:6379" - command: redis-server --requirepass ${REDIS_PASSWORD:-compozy} - -volumes: - pgdata: -``` - -### Dockerfile (for edge-deployment example) - -```dockerfile -FROM golang:1.25-alpine AS builder -WORKDIR /build -COPY go.mod go.sum ./ -RUN go mod download -COPY . . 
-RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o compozy - -FROM alpine:3.19 -RUN apk add --no-cache ca-certificates -COPY --from=builder /build/compozy /usr/local/bin/ -COPY examples/standalone/edge-deployment/compozy.yaml /etc/compozy/ -EXPOSE 8080 -CMD ["compozy", "start", "--config", "/etc/compozy/compozy.yaml"] -``` - -## Documentation Links - -Each example README should link to: -- Configuration reference: `docs/configuration/mode-configuration.mdx` -- Standalone deployment guide: `docs/deployment/standalone-mode.mdx` -- Migration guide: `docs/guides/migrate-standalone-to-distributed.mdx` - diff --git a/tasks/prd-redis/_prd.md b/tasks/prd-redis/_prd.md deleted file mode 100644 index ef62b8af..00000000 --- a/tasks/prd-redis/_prd.md +++ /dev/null @@ -1,383 +0,0 @@ -# PRD: Standalone Mode - Redis Alternatives - -## Executive Summary - -Enable Compozy to run in standalone mode without external dependencies (Redis, etc.), targeting local development, small teams, and edge deployments. This feature implements embedded alternatives for all Redis-backed functionality while maintaining the distributed mode for production scalability. - -## Problem Statement - -### Current State -- Compozy requires external Redis for core features (caching, pub/sub, locking, memory storage) -- Complex deployment setup (Redis + PostgreSQL + Temporal + Compozy) -- High barrier to entry for new users and local development -- Over-provisioning for single-user or small team scenarios - -### Pain Points -1. **Deployment Complexity**: Users must install and configure Redis before using Compozy -2. **Resource Overhead**: Redis requires additional memory/CPU for simple use cases -3. **Development Friction**: Local development requires Docker Compose or external services -4. 
**Cost**: Cloud deployments incur Redis hosting costs even for light usage - -### Target Users -- **Developers**: Local development and testing without Docker Compose -- **Small Teams**: 1-10 users deploying on single VM or container -- **Edge Deployments**: IoT, embedded systems, air-gapped environments -- **Evaluators**: Trying Compozy without infrastructure commitment - -## Goals & Non-Goals - -### Goals -1. **Single Binary Deployment**: `compozy start --standalone` works with zero external dependencies (except PostgreSQL) -2. **Feature Parity**: All core features functional in standalone mode (agents, workflows, tasks, memory, tools) -3. **Performance Adequacy**: Acceptable performance for single-user and small team workloads (10-100 req/sec) -4. **Seamless Upgrade**: Clear migration path from standalone to distributed mode as needs grow -5. **Backward Compatibility**: Existing Redis-based deployments continue to work unchanged - -### Non-Goals -1. **Horizontal Scaling**: Standalone mode is single-process only (no distributed workloads) -2. **Production High-Availability**: No replication, failover, or clustering in standalone -3. **Performance Parity**: Standalone may be slower than Redis for high-concurrency workloads -4. 
**Hybrid Mode**: Cannot mix standalone and distributed backends within same deployment - -## Success Metrics - -### Primary Metrics -- **Installation Success Rate**: >95% of users successfully start standalone mode on first attempt -- **Feature Completeness**: 100% of core features work in standalone mode -- **Performance**: Single-user workflows complete within 1.5x of Redis time -- **Adoption**: 30%+ of new deployments use standalone mode within 3 months - -### Secondary Metrics -- **Documentation Quality**: <5% of support questions relate to standalone setup -- **Migration Success**: >90% of users successfully migrate to distributed mode when needed -- **Resource Usage**: Standalone uses <50% memory/CPU vs Redis-based deployment for equivalent workload - -## User Stories - -### US-1: Local Development -**As a** developer -**I want to** run Compozy locally without external dependencies -**So that** I can develop and test workflows quickly - -**Acceptance Criteria**: -- Run `compozy start --standalone` and server starts successfully -- All CLI commands work (agents, workflows, tools, tasks) -- Workflow execution with memory and tools functions correctly -- Data persists across restarts using local filesystem - ---- - -### US-2: Small Team Deployment -**As a** small team lead -**I want to** deploy Compozy on a single VM without managing Redis -**So that** we can start using AI workflows immediately - -**Acceptance Criteria**: -- Deploy single Docker container or binary -- Configuration through environment variables or YAML -- Multi-user access (authentication required) -- Performance adequate for 5-10 concurrent workflows - ---- - -### US-3: Migration to Production -**As a** platform engineer -**I want to** migrate from standalone to distributed mode -**So that** we can scale as usage grows - -**Acceptance Criteria**: -- Clear documentation on migration process -- Configuration changes clearly documented -- Data export/import utilities available -- No workflow 
rewrites required - ---- - -### US-4: Edge Deployment -**As an** IoT engineer -**I want to** run Compozy on resource-constrained edge devices -**So that** we can process workflows locally without cloud dependencies - -**Acceptance Criteria**: -- Binary size <100MB -- Memory footprint <512MB for idle server -- Works on ARM64 and x86_64 -- Configurable data retention policies - -## Technical Requirements - -### Functional Requirements - -#### FR-1: Embedded Redis Server (miniredis) -- Use miniredis (pure Go in-memory Redis implementation) -- 100% Redis protocol compatibility including Lua scripts -- Support all existing Redis operations without code changes -- Native Pub/Sub and transaction (TxPipeline) support - -#### FR-2: Optional Persistence Layer -- BadgerDB for periodic snapshots of miniredis state -- Configurable snapshot interval (default: 5 minutes) -- Snapshot on graceful shutdown -- Restore last snapshot on startup - -#### FR-3: Memory Store Compatibility -- All existing Lua scripts work natively (AppendAndTrimWithMetadataScript, etc.) 
-- TxPipeline operations maintain atomicity guarantees -- Conversation history consistency (messages + metadata) -- No consumer code changes required - -#### FR-4: Resource Store Compatibility -- Lua script-based optimistic locking (PutIfMatch) works natively -- TxPipeline for atomic multi-key operations (value + etag) -- Watch notifications via Redis Pub/Sub -- No consumer code changes required - -#### FR-5: Streaming Features -- Redis Pub/Sub for real-time events -- Pattern subscriptions for workflow/task events -- Native go-redis PubSub types -- No emulation complexity - -#### FR-6: Configuration Management -- Add global `mode` configuration field (standalone | distributed) -- Support per-component mode overrides for mixed deployments -- Mode inheritance: component mode > global mode > "distributed" default -- Provide standalone-specific configuration section (`redis.standalone.*`) -- Support environment variable overrides -- Validate mode-specific requirements - -### Non-Functional Requirements - -#### NFR-1: Performance -- Single-user workflow latency: <2x Redis baseline -- Throughput: Support 10-100 requests/sec -- Memory usage: <512MB for typical standalone workload -- Disk I/O: Optimize for SSD, acceptable on HDD - -#### NFR-2: Reliability -- Data durability: No data loss on graceful shutdown -- Error handling: Proper recovery from BadgerDB errors -- Graceful degradation: Inform users of limitations - -#### NFR-3: Maintainability -- Clean adapter interfaces following existing patterns -- Comprehensive unit and integration tests -- Clear separation between mode-specific code -- Documentation for future contributors - -#### NFR-4: Compatibility -- Backward compatible with existing Redis deployments -- No breaking changes to APIs or configurations -- Default mode remains "distributed" for production - -## Architecture Overview - -### Components - -#### 1. 
Miniredis Integration (`engine/infra/cache/miniredis_standalone.go`) -- Embeds miniredis v2 (pure Go Redis server) -- Starts in-memory Redis on random available port -- Standard go-redis client connects to embedded server -- Zero emulation complexity - full Redis compatibility - -#### 2. Snapshot Manager (`engine/infra/cache/snapshot_manager.go`) -- Periodically saves miniredis state to BadgerDB -- Configurable snapshot interval (default: 5min) -- Graceful snapshot on server shutdown -- Restores last snapshot on startup -- Optional (can run purely in-memory) - -#### 3. Mode-Aware Factory (`engine/infra/cache/mod.go`) -- SetupCache reads configuration from config.FromContext(ctx) -- Uses resolver pattern: cfg.EffectiveRedisMode() for mode determination -- Mode resolution: redis.mode > global mode > "distributed" default -- Constructs appropriate backend (Redis or miniredis) -- Returns unified cache.Cache interface -- Uses logger.FromContext(ctx) for all logging - -### Data Flow - -``` -User Request - ↓ -Server Dependencies (dependencies.go) - ↓ -SetupCache (mode-aware factory) - ├─ cfg.EffectiveRedisMode() [resolver] - │ ├─ Check redis.mode (explicit override) - │ ├─ Check global mode (inheritance) - │ └─ Default to "distributed" - ↓ - ├→ [distributed] External Redis Client - └→ [standalone] Embedded miniredis + go-redis Client - ↓ -Unified Cache Interface (go-redis) - ↓ -Domain Services (memory, resources, tasks) - ↓ - [standalone only] - ↓ -Periodic Snapshot Manager → BadgerDB (optional persistence) -``` - -### Configuration Schema - -```yaml -# Global deployment mode (applies to all components by default) -mode: standalone # or "distributed" - -# Component-specific mode overrides (optional) -redis: - mode: "" # empty = inherit from global; "standalone" | "distributed" = explicit override - addr: localhost:6379 # used when mode = "distributed" - standalone: - # Optional persistence (can run purely in-memory) - persistence: - enabled: true - data_dir: 
./compozy-data - snapshot_interval: 5m # Save state periodically - snapshot_on_shutdown: true # Save on graceful exit - restore_on_startup: true # Restore last snapshot - -temporal: - mode: "" # empty = inherit from global - host_port: localhost:7233 - -mcpproxy: - mode: "" # empty = inherit from global - host: 127.0.0.1 - port: 6001 -``` - -**Mode Resolution**: -- Component mode takes precedence if explicitly set -- Otherwise inherits from global `mode` -- Default fallback is "distributed" if neither is set - -## Implementation Phases - -### Phase 1: Core Integration (Day 1-2) -1. Add miniredis dependency to go.mod -2. Create MiniredisStandalone wrapper -3. Implement mode-aware factory in SetupCache -4. Add configuration schema for standalone mode -5. Basic integration tests - -### Phase 2: Optional Persistence (Day 3-4) -6. Create SnapshotManager for BadgerDB integration -7. Implement periodic snapshot logic -8. Implement graceful shutdown snapshot -9. Implement startup restore logic -10. Snapshot lifecycle tests - -### Phase 3: Testing & Validation (Day 5-6) -11. Verify all Lua scripts work (memory store) -12. Verify TxPipeline operations (resource store) -13. Verify Pub/Sub for streaming -14. End-to-end integration tests -15. Performance benchmarking - -### Phase 4: Documentation & Polish (Day 7) -16. User documentation (deployment guide) -17. Migration guide (standalone → distributed) -18. CLI improvements (`--standalone` flag) -19. 
Example configurations - -**Total Timeline: 1-2 weeks (vs original 9-10 weeks)** - -## Risks & Mitigations - -### Risk 1: Data Loss on Crash (Between Snapshots) -**Risk**: Miniredis is in-memory; data since last snapshot lost on unexpected crash -**Impact**: Potential loss of recent workflow state or messages -**Mitigation**: -- Default 5-minute snapshot interval minimizes exposure window -- Graceful shutdown always saves snapshot -- Target use cases (dev, small teams) can tolerate 5min data loss -- Document this limitation clearly -- Production deployments use distributed mode - -### Risk 2: Memory Growth -**Risk**: Long-running standalone instances accumulate data in memory -**Impact**: Increased memory usage over time -**Mitigation**: -- Configure TTLs on all cached data (already in place) -- Document memory expectations for typical workloads -- Add optional memory limit configuration for miniredis -- Monitor memory usage metrics - -### Risk 3: Snapshot Performance Impact -**Risk**: Large snapshots may cause brief latency spikes -**Impact**: Slow request processing during snapshot -**Mitigation**: -- Snapshot operation is non-blocking (background goroutine) -- Use BadgerDB streaming writes to minimize memory -- Make snapshot interval configurable -- Skip snapshots if persistence disabled - -### Risk 4: Migration Complexity -**Risk**: Cannot migrate data from miniredis snapshots to external Redis -**Impact**: Must rebuild state when switching modes -**Mitigation**: -- Document that mode switch requires clean start -- Provide export/import utilities for workflows and configs -- Agent memory can be re-initialized -- Configuration files remain unchanged - -## Dependencies - -### External Libraries -- **miniredis v2** (github.com/alicebob/miniredis/v2) - MIT license, pure Go Redis server -- **BadgerDB v4** (github.com/dgraph-io/badger/v4) - MPL-2.0 license, for optional persistence - -### Internal Dependencies -- engine/infra/cache - Core cache abstraction layer -- 
engine/infra/server - Dependency injection and initialization -- pkg/config - Configuration management -- engine/memory/store - Memory store interfaces -- engine/resources - Resource store interfaces - -### No Breaking Changes -- All existing code continues to work -- Redis remains default backend -- New code uses existing interfaces - -## Open Questions - -1. **Vector Database**: Should Qdrant be required in standalone mode, or make it optional? -2. **Snapshot Frequency**: What's the optimal default snapshot interval (5min, 10min, 15min)? -3. **Snapshot Size Limits**: Should we enforce max snapshot size to prevent BadgerDB bloat? -4. **Memory Limits**: Should miniredis have configurable memory limits in standalone mode? -5. **Monitoring**: Do we need separate metrics for standalone vs distributed? - -## Future Considerations - -### Post-MVP Enhancements -- Automatic mode switching based on workload detection -- Hybrid mode (local cache + remote Redis) -- Replication support for standalone high-availability -- S3-backed persistence for BadgerDB -- Kubernetes operator with automatic mode selection - -### Alternative Libraries -- Pebble (CockroachDB) - Higher performance alternative to BadgerDB -- NutsDB - Redis-like data structures in Go -- SQLite - Universal embedded database (less performant for cache) - -## Glossary - -- **Standalone Mode**: Single-process deployment with embedded dependencies -- **Distributed Mode**: Multi-instance deployment with external Redis -- **Adapter**: Implementation of cache interfaces for specific backend -- **BadgerDB**: LSM-tree based embedded key-value store -- **TTL**: Time-to-live, automatic key expiration -- **Atomic Operation**: Multiple operations guaranteed to execute together or not at all - ---- - -**Document Version**: 1.0 -**Created**: 2025-01-27 -**Last Updated**: 2025-01-27 -**Status**: Approved -**Stakeholders**: Engineering, Product, DevOps - diff --git a/tasks/prd-redis/_task_01.md b/tasks/prd-redis/_task_01.md 
deleted file mode 100644 index c99614e7..00000000 --- a/tasks/prd-redis/_task_01.md +++ /dev/null @@ -1,253 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -pkg/config, engine/infra/cache -implementation -core_feature, configuration -medium -none - - -# Task 1.0: Global Mode Configuration & Resolver - -## Overview - -Implement the global mode configuration system with component inheritance pattern. This establishes the foundation for all standalone mode functionality by adding a top-level `mode` field and per-component mode overrides with a resolver pattern. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility -- **MUST** use `config.FromContext(ctx)` - never store config -- **MUST** use `logger.FromContext(ctx)` - never pass logger as parameter -- **NEVER** use `context.Background()` in tests, use `t.Context()` instead - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Add global `mode` field to Config struct ("standalone" | "distributed") -- Add `RedisConfig.Mode` field with same options (empty string = inherit) -- Create `pkg/config/resolver.go` with mode resolution logic -- Implement 
`EffectiveRedisMode()` helper method -- Implement `EffectiveTemporalMode()` helper method (normalizes "distributed" → "remote") -- Implement `EffectiveMCPProxyMode()` helper method -- Add validation for mode field values -- Default mode must be "distributed" for backward compatibility -- Support component-level mode overrides - - -## Subtasks - -- [x] 1.1 Add global `mode` field to Config struct in `pkg/config/config.go` -- [x] 1.2 Add `RedisConfig` struct with mode, addr, password, and standalone sections -- [x] 1.3 Add `RedisStandaloneConfig` and `RedisPersistenceConfig` structs -- [x] 1.4 Create `pkg/config/resolver.go` with `ResolveMode()` function -- [x] 1.5 Implement `EffectiveRedisMode()` method on Config -- [x] 1.6 Implement `EffectiveTemporalMode()` method on Config -- [x] 1.7 Implement `EffectiveMCPProxyMode()` method on Config -- [x] 1.8 Add validation rules in `pkg/config/loader.go` for mode fields -- [x] 1.9 Update config tests to verify mode resolution logic - -## Implementation Details - -### Configuration Schema - -Add to `pkg/config/config.go`: - -```go -type Config struct { - // ... existing fields ... 
- - // Mode controls global deployment model - // "distributed" (default): External services required - // "standalone": Embedded services, single-process - Mode string `koanf:"mode" json:"mode" yaml:"mode" mapstructure:"mode" validate:"omitempty,oneof=standalone distributed"` - - // Redis cache configuration - Redis RedisConfig `koanf:"redis" json:"redis" yaml:"redis" mapstructure:"redis"` -} - -type RedisConfig struct { - // Mode controls Redis deployment model - // "" (empty): Inherit from global Config.Mode - // "distributed": Use external Redis (explicit override) - // "standalone": Use embedded miniredis (explicit override) - Mode string `koanf:"mode" json:"mode" yaml:"mode" mapstructure:"mode" validate:"omitempty,oneof=standalone distributed"` - - Addr string `koanf:"addr" json:"addr" yaml:"addr" mapstructure:"addr"` - Password config.SensitiveString `koanf:"password" json:"password" yaml:"password" mapstructure:"password" sensitive:"true"` - - Standalone RedisStandaloneConfig `koanf:"standalone" json:"standalone" yaml:"standalone" mapstructure:"standalone"` -} - -type RedisStandaloneConfig struct { - Persistence RedisPersistenceConfig `koanf:"persistence" json:"persistence" yaml:"persistence" mapstructure:"persistence"` -} - -type RedisPersistenceConfig struct { - Enabled bool `koanf:"enabled" json:"enabled" yaml:"enabled" mapstructure:"enabled"` - DataDir string `koanf:"data_dir" json:"data_dir" yaml:"data_dir" mapstructure:"data_dir"` - SnapshotInterval time.Duration `koanf:"snapshot_interval" json:"snapshot_interval" yaml:"snapshot_interval" mapstructure:"snapshot_interval"` - SnapshotOnShutdown bool `koanf:"snapshot_on_shutdown" json:"snapshot_on_shutdown" yaml:"snapshot_on_shutdown" mapstructure:"snapshot_on_shutdown"` - RestoreOnStartup bool `koanf:"restore_on_startup" json:"restore_on_startup" yaml:"restore_on_startup" mapstructure:"restore_on_startup"` -} -``` - -### Mode Resolver - -Create `pkg/config/resolver.go`: - -```go -package config - -// 
ResolveMode determines the effective deployment mode for a component. -// -// Resolution priority: -// 1. Component mode (if explicitly set) -// 2. Global mode (if set in Config.Mode) -// 3. Default fallback ("distributed") -func ResolveMode(cfg *Config, componentMode string) string { - if componentMode != "" { - return componentMode - } - if cfg.Mode != "" { - return cfg.Mode - } - return "distributed" -} - -// EffectiveRedisMode returns the resolved Redis deployment mode. -func (cfg *Config) EffectiveRedisMode() string { - return ResolveMode(cfg, cfg.Redis.Mode) -} - -// EffectiveTemporalMode returns the resolved Temporal deployment mode. -// Normalizes "distributed" → "remote" for Temporal. -func (cfg *Config) EffectiveTemporalMode() string { - mode := ResolveMode(cfg, cfg.Temporal.Mode) - if mode == "distributed" { - return "remote" - } - return mode -} - -// EffectiveMCPProxyMode returns the resolved MCPProxy deployment mode. -func (cfg *Config) EffectiveMCPProxyMode() string { - return ResolveMode(cfg, cfg.MCPProxy.Mode) -} -``` - -### Relevant Files - -- `pkg/config/config.go` - Add Config structs -- `pkg/config/resolver.go` - NEW - Mode resolution logic -- `pkg/config/loader.go` - Add validation rules - -### Dependent Files - -None - this is the foundation task with no dependencies - -## Deliverables - -- [x] Global `mode` field added to Config struct -- [x] RedisConfig struct with mode and standalone sections -- [x] RedisStandaloneConfig with persistence configuration -- [x] RedisPersistenceConfig with all snapshot settings -- [x] `pkg/config/resolver.go` created with ResolveMode function -- [x] EffectiveRedisMode() method implemented -- [x] EffectiveTemporalMode() method implemented with "distributed" → "remote" normalization -- [x] EffectiveMCPProxyMode() method implemented -- [x] Validation rules for mode fields -- [x] Default mode is "distributed" for backward compatibility - -## Tests - -Unit tests mapped from `_tests.md` for this feature: - -### 
pkg/config/resolver_test.go (NEW) - -- [ ] Should return component mode when explicitly set - ```go - func TestResolveMode_ExplicitComponentMode(t *testing.T) { - t.Run("Should return component mode when explicitly set", func(t *testing.T) { - cfg := &Config{ - Mode: "standalone", - Redis: RedisConfig{Mode: "distributed"}, - } - result := cfg.EffectiveRedisMode() - assert.Equal(t, "distributed", result) - }) - } - ``` - -- [ ] Should inherit from global mode when component mode is empty - ```go - t.Run("Should inherit from global mode", func(t *testing.T) { - cfg := &Config{ - Mode: "standalone", - Redis: RedisConfig{Mode: ""}, - } - result := cfg.EffectiveRedisMode() - assert.Equal(t, "standalone", result) - }) - ``` - -- [ ] Should default to "distributed" when both modes are empty - ```go - t.Run("Should default to distributed", func(t *testing.T) { - cfg := &Config{ - Mode: "", - Redis: RedisConfig{Mode: ""}, - } - result := cfg.EffectiveRedisMode() - assert.Equal(t, "distributed", result) - }) - ``` - -- [ ] Should normalize "distributed" to "remote" for Temporal - ```go - func TestEffectiveTemporalMode_Normalization(t *testing.T) { - t.Run("Should normalize distributed to remote for Temporal", func(t *testing.T) { - cfg := &Config{Mode: "distributed"} - result := cfg.EffectiveTemporalMode() - assert.Equal(t, "remote", result) - }) - - t.Run("Should pass through standalone for Temporal", func(t *testing.T) { - cfg := &Config{Mode: "standalone"} - result := cfg.EffectiveTemporalMode() - assert.Equal(t, "standalone", result) - }) - } - ``` - -- [ ] Should validate mode values against allowed enums -- [ ] Should handle mixed mode configurations correctly -- [ ] Should resolve effective modes for all components (Redis, Temporal, MCPProxy) - -### pkg/config/loader_test.go (UPDATE) - -- [ ] Should validate global mode field (standalone | distributed) -- [ ] Should validate component mode fields -- [ ] Should reject invalid mode values -- [ ] Should allow empty mode 
values (inheritance) -- [ ] Should validate Redis persistence configuration - -## Success Criteria - -- [ ] All resolver tests pass (`go test ./pkg/config/...`) -- [ ] Mode resolution logic handles all scenarios (explicit, inherit, default) -- [ ] Temporal mode normalization works correctly -- [ ] Configuration validation rejects invalid mode values -- [ ] Default mode is "distributed" for backward compatibility -- [ ] All helper methods (EffectiveRedisMode, EffectiveTemporalMode, EffectiveMCPProxyMode) work correctly -- [ ] `make lint` passes with zero warnings -- [ ] Code follows project standards (context patterns, error handling) diff --git a/tasks/prd-redis/_task_02.md b/tasks/prd-redis/_task_02.md deleted file mode 100644 index 213f9298..00000000 --- a/tasks/prd-redis/_task_02.md +++ /dev/null @@ -1,343 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -engine/infra/cache -implementation -core_feature -low -miniredis v2, Task 1.0 - - -# Task 2.0: MiniredisStandalone Wrapper - -## Overview - -Create a lightweight wrapper around miniredis v2 that manages the embedded Redis server lifecycle and provides a standard go-redis client. This wrapper starts miniredis on a random port, creates a go-redis client connection, and handles graceful shutdown with optional snapshot support. 
- - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility -- **MUST** use `config.FromContext(ctx)` - never store config -- **MUST** use `logger.FromContext(ctx)` - never pass logger as parameter -- **NEVER** use `context.Background()` in tests, use `t.Context()` instead - - - -# When you need information about miniredis or go-redis: -- use perplexity to find miniredis v2 API documentation -- use context7 for go-redis client patterns -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Create `engine/infra/cache/miniredis_standalone.go` wrapper -- Add miniredis v2 dependency to go.mod (`github.com/alicebob/miniredis/v2`) -- Start miniredis on random available port -- Create standard go-redis client connected to embedded server -- Test connection with Ping before returning -- Use atomic.Bool for thread-safe Close tracking -- Support graceful shutdown with optional snapshot -- Use `logger.FromContext(ctx)` for all logging -- Use `config.FromContext(ctx)` for configuration access -- Handle cleanup of miniredis server on Close - - -## Subtasks - -- [x] 2.1 Add miniredis v2 dependency (`go get github.com/alicebob/miniredis/v2`) -- [x] 2.2 Create `engine/infra/cache/miniredis_standalone.go` -- [x] 2.3 Implement MiniredisStandalone struct with server, client, and snapshot fields -- [x] 2.4 Implement NewMiniredisStandalone constructor -- [x] 2.5 Implement Client() method to expose go-redis client 
-- [x] 2.6 Implement Close() method with graceful shutdown -- [x] 2.7 Add thread-safe close protection with atomic.Bool -- [x] 2.8 Create unit tests in `engine/infra/cache/miniredis_standalone_test.go` - -## Implementation Details - -### MiniredisStandalone Structure - -Create `engine/infra/cache/miniredis_standalone.go`: - -```go -package cache - -import ( - "context" - "fmt" - "sync/atomic" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - - "github.com/compozy/compozy/pkg/config" - "github.com/compozy/compozy/pkg/logger" -) - -type MiniredisStandalone struct { - server *miniredis.Miniredis - client *redis.Client - snapshot *SnapshotManager - closed atomic.Bool -} - -// NewMiniredisStandalone creates and starts an embedded Redis server -func NewMiniredisStandalone(ctx context.Context) (*MiniredisStandalone, error) { - log := logger.FromContext(ctx) - cfg := config.FromContext(ctx) - - // Start embedded Redis server - mr := miniredis.NewMiniRedis() - if err := mr.Start(); err != nil { - return nil, fmt.Errorf("start miniredis: %w", err) - } - - log.Info("Started embedded Redis server", - "addr", mr.Addr(), - "mode", "standalone", - ) - - // Create standard go-redis client - client := redis.NewClient(&redis.Options{ - Addr: mr.Addr(), - }) - - // Test connection - if err := client.Ping(ctx).Err(); err != nil { - mr.Close() - return nil, fmt.Errorf("ping miniredis: %w", err) - } - - standalone := &MiniredisStandalone{ - server: mr, - client: client, - } - - // Initialize optional snapshot manager - if cfg.Redis.Standalone.Persistence.Enabled { - log.Info("Initializing persistence layer", - "data_dir", cfg.Redis.Standalone.Persistence.DataDir, - "snapshot_interval", cfg.Redis.Standalone.Persistence.SnapshotInterval, - ) - - snapshot, err := NewSnapshotManager(ctx, mr, cfg.Redis.Standalone.Persistence) - if err != nil { - standalone.Close(ctx) - return nil, fmt.Errorf("create snapshot manager: %w", err) - } - standalone.snapshot = snapshot - - 
// Restore last snapshot if exists - if cfg.Redis.Standalone.Persistence.RestoreOnStartup { - if err := snapshot.Restore(ctx); err != nil { - log.Warn("Failed to restore snapshot", "error", err) - } else { - log.Info("Restored last snapshot") - } - } - - // Start periodic snapshots - snapshot.StartPeriodicSnapshots(ctx) - } - - return standalone, nil -} - -// Client returns the go-redis client connected to the embedded server -func (m *MiniredisStandalone) Client() *redis.Client { - return m.client -} - -// Close gracefully shuts down the embedded Redis server -func (m *MiniredisStandalone) Close(ctx context.Context) error { - if !m.closed.CompareAndSwap(false, true) { - return nil // Already closed - } - - log := logger.FromContext(ctx) - cfg := config.FromContext(ctx) - - // Snapshot before shutdown if enabled - if m.snapshot != nil && cfg.Redis.Standalone.Persistence.SnapshotOnShutdown { - log.Info("Taking final snapshot before shutdown") - if err := m.snapshot.Snapshot(ctx); err != nil { - log.Error("Failed to snapshot on shutdown", "error", err) - } - m.snapshot.Stop() - } - - // Close connections - if err := m.client.Close(); err != nil { - log.Warn("Failed to close Redis client", "error", err) - } - - m.server.Close() - log.Info("Closed embedded Redis server") - - return nil -} -``` - -### Relevant Files - -- `engine/infra/cache/miniredis_standalone.go` - NEW - MiniredisStandalone wrapper -- `go.mod` - Add miniredis v2 dependency - -### Dependent Files - -- `pkg/config/config.go` - Uses RedisConfig from Task 1.0 -- `pkg/config/resolver.go` - Uses config from Task 1.0 -- `engine/infra/cache/snapshot_manager.go` - Will be created in Task 7.0 (optional dependency) - -## Deliverables - -- [x] miniredis v2 dependency added to go.mod -- [x] MiniredisStandalone struct created -- [x] NewMiniredisStandalone constructor implemented -- [x] Client() method returns go-redis client -- [x] Close() method with graceful shutdown -- [x] Thread-safe close protection with 
atomic.Bool -- [x] Support for optional snapshot manager integration -- [x] All logging uses `logger.FromContext(ctx)` -- [x] All config access uses `config.FromContext(ctx)` -- [x] Connection tested with Ping before returning - -## Tests - -Unit tests in `engine/infra/cache/miniredis_standalone_test.go`: - -### Lifecycle Tests - -- [x] Should start and stop embedded Redis server - ```go - func TestMiniredisStandalone_Lifecycle(t *testing.T) { - t.Run("Should start embedded Redis server", func(t *testing.T) { - ctx := t.Context() - cfg := testConfigWithStandaloneMode(false) // persistence disabled - ctx = config.ContextWithManager(ctx, cfg) - - mr, err := NewMiniredisStandalone(ctx) - require.NoError(t, err) - defer mr.Close(ctx) - - // Verify connection works - err = mr.Client().Ping(ctx).Err() - assert.NoError(t, err) - }) - } - ``` - -- [x] Should close cleanly without errors - ```go - t.Run("Should close cleanly without errors", func(t *testing.T) { - ctx := t.Context() - cfg := testConfigWithStandaloneMode(false) - ctx = config.ContextWithManager(ctx, cfg) - - mr, err := NewMiniredisStandalone(ctx) - require.NoError(t, err) - - err = mr.Close(ctx) - assert.NoError(t, err) - - // Verify double close is safe - err = mr.Close(ctx) - assert.NoError(t, err) - }) - ``` - -- [x] Should handle startup errors gracefully - ```go - t.Run("Should handle startup errors gracefully", func(t *testing.T) { - // Test error handling (e.g., invalid config) - }) - ``` - -### Basic Operations Tests - -- [x] Should support Get/Set operations - ```go - func TestMiniredisStandalone_BasicOperations(t *testing.T) { - t.Run("Should support Get/Set operations", func(t *testing.T) { - ctx := t.Context() - mr := setupMiniredisForTest(ctx, t) - defer mr.Close(ctx) - - // Test Set - err := mr.Client().Set(ctx, "key", "value", 0).Err() - require.NoError(t, err) - - // Test Get - val, err := mr.Client().Get(ctx, "key").Result() - require.NoError(t, err) - assert.Equal(t, "value", val) - }) - } 
- ``` - -- [x] Should support Eval (Lua scripts) - ```go - t.Run("Should support Lua scripts", func(t *testing.T) { - ctx := t.Context() - mr := setupMiniredisForTest(ctx, t) - defer mr.Close(ctx) - - script := `return redis.call('SET', KEYS[1], ARGV[1])` - result, err := mr.Client().Eval(ctx, script, []string{"test-key"}, "test-value").Result() - require.NoError(t, err) - assert.NotNil(t, result) - - // Verify value was set - val, err := mr.Client().Get(ctx, "test-key").Result() - require.NoError(t, err) - assert.Equal(t, "test-value", val) - }) - ``` - -- [x] Should support TxPipeline operations - ```go - t.Run("Should support TxPipeline operations", func(t *testing.T) { - ctx := t.Context() - mr := setupMiniredisForTest(ctx, t) - defer mr.Close(ctx) - - pipe := mr.Client().TxPipeline() - pipe.Set(ctx, "key1", "value1", 0) - pipe.Set(ctx, "key2", "value2", 0) - - _, err := pipe.Exec(ctx) - require.NoError(t, err) - - // Verify both keys set - val1, _ := mr.Client().Get(ctx, "key1").Result() - val2, _ := mr.Client().Get(ctx, "key2").Result() - assert.Equal(t, "value1", val1) - assert.Equal(t, "value2", val2) - }) - ``` - -### Persistence Integration Tests (Optional) - -- [ ] Should initialize snapshot manager when persistence enabled -- [ ] Should skip snapshot manager when persistence disabled -- [ ] Should restore snapshot on startup when configured -- [ ] Should snapshot on shutdown when configured - -## Success Criteria - -- [ ] miniredis v2 dependency added successfully -- [ ] MiniredisStandalone starts and stops cleanly -- [ ] go-redis client successfully connects to embedded server -- [ ] Basic Redis operations (Get, Set) work correctly -- [ ] Lua scripts execute successfully -- [ ] TxPipeline operations work correctly -- [ ] Close() is thread-safe and idempotent -- [ ] All tests pass (`go test ./engine/infra/cache/...`) -- [ ] `make lint` passes with zero warnings -- [ ] All logging uses logger.FromContext(ctx) -- [ ] All config access uses 
config.FromContext(ctx) -- [ ] No context.Background() used in tests diff --git a/tasks/prd-redis/_task_03.md b/tasks/prd-redis/_task_03.md deleted file mode 100644 index 54f3f4bc..00000000 --- a/tasks/prd-redis/_task_03.md +++ /dev/null @@ -1,412 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -engine/infra/cache, engine/infra/server -implementation, integration -core_feature -low -Task 1.0, Task 2.0 - - -# Task 3.0: Mode-Aware Cache Factory - -## Overview - -Update the cache factory (`SetupCache`) to use mode resolution and construct the appropriate backend (external Redis or embedded miniredis) based on configuration. Also update Temporal and MCPProxy factories to use the new resolver pattern for unified mode inheritance. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility -- **MUST** use `config.FromContext(ctx)` - never store config -- **MUST** use `logger.FromContext(ctx)` - never pass logger as parameter -- **NEVER** use `context.Background()` in tests, use `t.Context()` instead - - - -# When you need information about the existing cache setup: -- Read existing `engine/infra/cache/mod.go` to understand current SetupCache pattern -- Read `engine/infra/server/dependencies.go` to see how cache is currently initialized -- Read `engine/infra/server/temporal.go` for maybeStartStandaloneTemporal pattern -- Read `engine/infra/server/mcp.go` for shouldEmbedMCPProxy pattern - - - -- Update `SetupCache()` in `engine/infra/cache/mod.go` to check effective mode -- Use `cfg.EffectiveRedisMode()` for mode determination -- Create 
external Redis client when mode is "distributed" -- Create MiniredisStandalone when mode is "standalone" -- Return unified cache.Cache interface for both backends -- Update `maybeStartStandaloneTemporal()` to use `cfg.EffectiveTemporalMode()` -- Update `shouldEmbedMCPProxy()` to use `cfg.EffectiveMCPProxyMode()` -- Ensure cleanup functions work for both backends -- Maintain backward compatibility (default "distributed") - - -## Subtasks - -- [x] 3.1 Update SetupCache() to read config from context -- [x] 3.2 Add mode resolution using cfg.EffectiveRedisMode() -- [x] 3.3 Implement distributed mode branch (existing external Redis) -- [x] 3.4 Implement standalone mode branch (new MiniredisStandalone) -- [x] 3.5 Update maybeStartStandaloneTemporal() to use cfg.EffectiveTemporalMode() -- [x] 3.6 Update shouldEmbedMCPProxy() to use cfg.EffectiveMCPProxyMode() -- [x] 3.7 Create unit tests in `engine/infra/cache/mod_test.go` -- [x] 3.8 Create unit tests for Temporal factory pattern -- [x] 3.9 Create unit tests for MCPProxy factory pattern - -## Implementation Details - -### Update SetupCache Factory - -Update `engine/infra/cache/mod.go`: - -```go -package cache - -import ( - "context" - "fmt" - - "github.com/compozy/compozy/pkg/config" - "github.com/compozy/compozy/pkg/logger" -) - -// SetupCache creates a mode-aware cache backend -func SetupCache(ctx context.Context) (Cache, func(), error) { - log := logger.FromContext(ctx) - cfg := config.FromContext(ctx) - - mode := cfg.EffectiveRedisMode() - log.Info("Initializing cache backend", "mode", mode) - - switch mode { - case "standalone": - return setupStandaloneCache(ctx) - case "distributed": - return setupDistributedCache(ctx) - default: - return nil, nil, fmt.Errorf("unsupported redis mode: %s", mode) - } -} - -// setupStandaloneCache creates embedded miniredis backend -func setupStandaloneCache(ctx context.Context) (Cache, func(), error) { - log := logger.FromContext(ctx) - - // Start embedded Redis server - standalone, 
err := NewMiniredisStandalone(ctx) - if err != nil { - return nil, nil, fmt.Errorf("create miniredis standalone: %w", err) - } - - // Create unified cache with miniredis client - cache := &Redis{ - client: standalone.Client(), - } - - lockManager := NewRedisLockManager(standalone.Client()) - notificationSystem := NewRedisNotificationSystem(standalone.Client()) - - cleanup := func() { - if err := standalone.Close(ctx); err != nil { - log.Error("Failed to close standalone cache", "error", err) - } - } - - log.Info("Standalone cache initialized", - "persistence", cfg.Redis.Standalone.Persistence.Enabled, - ) - - return &Cache{ - Redis: cache, - LockManager: lockManager, - NotificationSystem: notificationSystem, - }, cleanup, nil -} - -// setupDistributedCache creates external Redis backend -func setupDistributedCache(ctx context.Context) (Cache, func(), error) { - log := logger.FromContext(ctx) - cfg := config.FromContext(ctx) - - // Connect to external Redis - opts := &redis.Options{ - Addr: cfg.Redis.Addr, - Password: string(cfg.Redis.Password), - } - - client := redis.NewClient(opts) - - // Test connection - if err := client.Ping(ctx).Err(); err != nil { - return nil, nil, fmt.Errorf("connect to redis: %w", err) - } - - cache := &Redis{ - client: client, - } - - lockManager := NewRedisLockManager(client) - notificationSystem := NewRedisNotificationSystem(client) - - cleanup := func() { - if err := client.Close(); err != nil { - log.Error("Failed to close redis client", "error", err) - } - } - - log.Info("Distributed cache initialized", "addr", cfg.Redis.Addr) - - return &Cache{ - Redis: cache, - LockManager: lockManager, - NotificationSystem: notificationSystem, - }, cleanup, nil -} -``` - -### Update Temporal Factory - -Update `engine/infra/server/temporal.go` (or wherever `maybeStartStandaloneTemporal` is defined): - -```go -func maybeStartStandaloneTemporal(ctx context.Context) (*temporalite.Server, error) { - cfg := config.FromContext(ctx) - log := 
logger.FromContext(ctx) - - mode := cfg.EffectiveTemporalMode() - if mode != "standalone" { - log.Debug("Temporal mode is remote, skipping embedded server") - return nil, nil - } - - log.Info("Starting embedded Temporal server", "mode", "standalone") - // ... existing implementation ... -} -``` - -### Update MCPProxy Factory - -Update `engine/infra/server/mcp.go` (or wherever `shouldEmbedMCPProxy` is defined): - -```go -func shouldEmbedMCPProxy(ctx context.Context) bool { - cfg := config.FromContext(ctx) - mode := cfg.EffectiveMCPProxyMode() - return mode == "standalone" -} -``` - -### Relevant Files - -- `engine/infra/cache/mod.go` - UPDATE - Mode-aware factory -- `engine/infra/server/dependencies.go` - UPDATE - Temporal factory usage -- `engine/infra/server/mcp.go` - UPDATE - MCPProxy factory usage - -### Dependent Files - -- `pkg/config/config.go` - Uses Config from Task 1.0 -- `pkg/config/resolver.go` - Uses resolver from Task 1.0 -- `engine/infra/cache/miniredis_standalone.go` - Uses MiniredisStandalone from Task 2.0 - -## Deliverables - -- [x] SetupCache() updated to use mode resolution -- [x] setupStandaloneCache() function created for miniredis backend -- [x] setupDistributedCache() function created for external Redis backend -- [x] Both backends return unified Cache interface -- [x] maybeStartStandaloneTemporal() uses cfg.EffectiveTemporalMode() -- [x] shouldEmbedMCPProxy() uses cfg.EffectiveMCPProxyMode() -- [x] Cleanup functions work for both backends -- [x] All logging uses logger.FromContext(ctx) -- [x] All config access uses config.FromContext(ctx) - -## Tests - -Unit tests in `engine/infra/cache/mod_test.go`: - -### Mode-Aware Factory Tests - -- [ ] Should create external Redis in distributed mode - ```go - func TestSetupCache_ModeAware(t *testing.T) { - t.Run("Should create external Redis in distributed mode", func(t *testing.T) { - ctx := t.Context() - cfg := &config.Config{ - Mode: "distributed", - Redis: config.RedisConfig{ - Addr: 
"localhost:6379", - }, - } - ctx = config.ContextWithManager(ctx, cfg) - - cache, cleanup, err := SetupCache(ctx) - require.NoError(t, err) - defer cleanup() - - assert.NotNil(t, cache) - assert.NotNil(t, cache.Redis) - assert.NotNil(t, cache.LockManager) - assert.NotNil(t, cache.NotificationSystem) - }) - } - ``` - -- [ ] Should create miniredis in standalone mode - ```go - t.Run("Should create miniredis in standalone mode", func(t *testing.T) { - ctx := t.Context() - cfg := &config.Config{ - Mode: "standalone", - Redis: config.RedisConfig{ - Standalone: config.RedisStandaloneConfig{ - Persistence: config.RedisPersistenceConfig{ - Enabled: false, - }, - }, - }, - } - ctx = config.ContextWithManager(ctx, cfg) - - cache, cleanup, err := SetupCache(ctx) - require.NoError(t, err) - defer cleanup() - - assert.NotNil(t, cache) - assert.NotNil(t, cache.Redis) - - // Verify it's working by testing basic operation - err = cache.Redis.Set(ctx, "test-key", "test-value", 0).Err() - assert.NoError(t, err) - }) - ``` - -- [ ] Should respect component mode override - ```go - t.Run("Should respect component mode override", func(t *testing.T) { - ctx := t.Context() - cfg := &config.Config{ - Mode: "distributed", // Global mode - Redis: config.RedisConfig{ - Mode: "standalone", // Component override - }, - } - ctx = config.ContextWithManager(ctx, cfg) - - cache, cleanup, err := SetupCache(ctx) - require.NoError(t, err) - defer cleanup() - - assert.NotNil(t, cache) - // Should be miniredis due to override - }) - ``` - -- [ ] Should handle startup errors for both modes - ```go - t.Run("Should handle Redis connection errors", func(t *testing.T) { - ctx := t.Context() - cfg := &config.Config{ - Mode: "distributed", - Redis: config.RedisConfig{ - Addr: "invalid:9999", // Invalid address - }, - } - ctx = config.ContextWithManager(ctx, cfg) - - _, _, err := SetupCache(ctx) - assert.Error(t, err) - }) - ``` - -- [ ] Should return proper cleanup functions - ```go - t.Run("Should cleanup 
standalone cache", func(t *testing.T) { - ctx := t.Context() - cfg := testConfigStandalone() - ctx = config.ContextWithManager(ctx, cfg) - - cache, cleanup, err := SetupCache(ctx) - require.NoError(t, err) - assert.NotNil(t, cache) - - // Cleanup should not error - cleanup() - }) - ``` - -### Temporal Factory Tests - -- [ ] Should start embedded Temporal when mode is standalone - ```go - func TestMaybeStartStandaloneTemporal(t *testing.T) { - t.Run("Should start embedded Temporal in standalone mode", func(t *testing.T) { - ctx := t.Context() - cfg := &config.Config{Mode: "standalone"} - ctx = config.ContextWithManager(ctx, cfg) - - server, err := maybeStartStandaloneTemporal(ctx) - require.NoError(t, err) - assert.NotNil(t, server) - defer server.Stop() - }) - } - ``` - -- [ ] Should skip embedded Temporal when mode is remote - ```go - t.Run("Should skip embedded Temporal in remote mode", func(t *testing.T) { - ctx := t.Context() - cfg := &config.Config{Mode: "distributed"} - ctx = config.ContextWithManager(ctx, cfg) - - server, err := maybeStartStandaloneTemporal(ctx) - require.NoError(t, err) - assert.Nil(t, server) - }) - ``` - -### MCPProxy Factory Tests - -- [ ] Should embed MCPProxy when mode is standalone - ```go - func TestShouldEmbedMCPProxy(t *testing.T) { - t.Run("Should embed MCPProxy in standalone mode", func(t *testing.T) { - ctx := t.Context() - cfg := &config.Config{Mode: "standalone"} - ctx = config.ContextWithManager(ctx, cfg) - - result := shouldEmbedMCPProxy(ctx) - assert.True(t, result) - }) - } - ``` - -- [ ] Should skip MCPProxy when mode is distributed - ```go - t.Run("Should skip MCPProxy in distributed mode", func(t *testing.T) { - ctx := t.Context() - cfg := &config.Config{Mode: "distributed"} - ctx = config.ContextWithManager(ctx, cfg) - - result := shouldEmbedMCPProxy(ctx) - assert.False(t, result) - }) - ``` - -## Success Criteria - -- [ ] SetupCache() correctly routes to appropriate backend based on mode -- [ ] Both backends 
(distributed and standalone) work correctly -- [ ] Unified Cache interface returned for both modes -- [ ] Temporal factory uses cfg.EffectiveTemporalMode() -- [ ] MCPProxy factory uses cfg.EffectiveMCPProxyMode() -- [ ] Cleanup functions work for both backends -- [ ] All factory tests pass (`go test ./engine/infra/cache/... ./engine/infra/server/...`) -- [ ] `make lint` passes with zero warnings -- [ ] No context.Background() used in tests -- [ ] All logging uses logger.FromContext(ctx) -- [ ] All config access uses config.FromContext(ctx) diff --git a/tasks/prd-redis/_task_04.md b/tasks/prd-redis/_task_04.md deleted file mode 100644 index 6c3b2dee..00000000 --- a/tasks/prd-redis/_task_04.md +++ /dev/null @@ -1,354 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -engine/memory/store, test/integration/standalone -integration, testing -core_feature -medium -Task 1.0, Task 2.0, Task 3.0 - - -# Task 4.0: Memory Store Integration - -## Overview - -Verify that the memory store works seamlessly with miniredis by testing all Lua script operations, concurrent message appends, metadata preservation, and conversation history consistency. This task validates FR-3 from the PRD (Memory Store Compatibility). 
- - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility -- **MUST** use `config.FromContext(ctx)` - never store config -- **MUST** use `logger.FromContext(ctx)` - never pass logger as parameter -- **NEVER** use `context.Background()` in tests, use `t.Context()` instead -- **NEVER** modify memory store implementation - only write tests to verify compatibility - - - -# When you need information about memory store implementation: -- Read `engine/memory/store/redis.go` to understand the RedisMemoryStore -- Read `engine/memory/store/scripts.go` for Lua script definitions -- Identify AppendAndTrimWithMetadataScript and other Lua scripts -- Review existing memory store tests for patterns - - - -- Verify memory store works with miniredis (zero code changes to memory store) -- Test AppendAndTrimWithMetadataScript Lua script execution -- Test concurrent message appends -- Test message metadata preservation -- Test conversation history trim at max length -- Test conversation history consistency -- Create integration tests in `test/integration/standalone/memory_store_test.go` -- All tests must use `t.Context()` and follow project standards - - -## Subtasks - -- [x] 4.1 Read existing memory store implementation to understand operations -- [x] 4.2 Identify all Lua scripts used by memory store -- [x] 4.3 Create test/integration/standalone/memory_store_test.go -- [x] 4.4 Create test helper to setup memory store with miniredis -- [x] 4.5 Write test for AppendAndTrimWithMetadataScript execution -- [x] 4.6 Write test for concurrent message appends -- [x] 4.7 Write test for message metadata 
preservation -- [x] 4.8 Write test for conversation history consistency -- [x] 4.9 Write test for conversation history trimming -- [x] 4.10 Write test for message retrieval with pagination - -## Implementation Details - -### Memory Store Test Structure - -Create `test/integration/standalone/memory_store_test.go`: - -```go -package standalone_test - -import ( - "context" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/compozy/compozy/engine/infra/cache" - "github.com/compozy/compozy/engine/memory/store" - "github.com/compozy/compozy/pkg/config" - "github.com/compozy/compozy/test/helpers" -) - -// setupMemoryStoreWithMiniredis creates a memory store backed by miniredis -func setupMemoryStoreWithMiniredis(ctx context.Context, t *testing.T) store.MemoryStore { - t.Helper() - - // Setup standalone config - cfg := &config.Config{ - Mode: "standalone", - Redis: config.RedisConfig{ - Standalone: config.RedisStandaloneConfig{ - Persistence: config.RedisPersistenceConfig{ - Enabled: false, // No persistence for tests - }, - }, - }, - } - ctx = config.ContextWithManager(ctx, cfg) - - // Create miniredis backend - standalone, err := cache.NewMiniredisStandalone(ctx) - require.NoError(t, err) - t.Cleanup(func() { - standalone.Close(ctx) - }) - - // Create memory store with miniredis client - memoryStore := store.NewRedisMemoryStore(standalone.Client()) - return memoryStore -} - -func TestMemoryStore_MiniredisCompatibility(t *testing.T) { - t.Run("Should execute Lua scripts natively", func(t *testing.T) { - ctx := t.Context() - ms := setupMemoryStoreWithMiniredis(ctx, t) - - agentID := "test-agent" - message := &store.Message{ - Role: "user", - Content: "Hello, world!", - Metadata: map[string]interface{}{ - "timestamp": "2025-01-27T10:00:00Z", - "session": "test-session", - }, - } - - // Test AppendAndTrimWithMetadataScript - err := ms.AppendMessage(ctx, agentID, message) - require.NoError(t, err) - - // 
Verify message stored with metadata - messages, err := ms.GetMessages(ctx, agentID) - require.NoError(t, err) - assert.Len(t, messages, 1) - assert.Equal(t, message.Role, messages[0].Role) - assert.Equal(t, message.Content, messages[0].Content) - assert.Equal(t, message.Metadata["timestamp"], messages[0].Metadata["timestamp"]) - assert.Equal(t, message.Metadata["session"], messages[0].Metadata["session"]) - }) - - t.Run("Should handle concurrent message appends", func(t *testing.T) { - ctx := t.Context() - ms := setupMemoryStoreWithMiniredis(ctx, t) - - agentID := "concurrent-test-agent" - numMessages := 50 - var wg sync.WaitGroup - - // Append messages concurrently - for i := 0; i < numMessages; i++ { - wg.Add(1) - go func(idx int) { - defer wg.Done() - message := &store.Message{ - Role: "user", - Content: fmt.Sprintf("Message %d", idx), - } - err := ms.AppendMessage(ctx, agentID, message) - assert.NoError(t, err) - }(i) - } - - wg.Wait() - - // Verify all messages stored - messages, err := ms.GetMessages(ctx, agentID) - require.NoError(t, err) - assert.Len(t, messages, numMessages) - }) - - t.Run("Should trim conversation history at max length", func(t *testing.T) { - ctx := t.Context() - ms := setupMemoryStoreWithMiniredis(ctx, t) - - agentID := "trim-test-agent" - maxLength := 10 - - // Append more than max messages - for i := 0; i < maxLength+5; i++ { - message := &store.Message{ - Role: "user", - Content: fmt.Sprintf("Message %d", i), - } - err := ms.AppendMessage(ctx, agentID, message) - require.NoError(t, err) - } - - // Verify only max messages retained - messages, err := ms.GetMessages(ctx, agentID) - require.NoError(t, err) - assert.LessOrEqual(t, len(messages), maxLength) - - // Verify newest messages retained - lastMessage := messages[len(messages)-1] - assert.Contains(t, lastMessage.Content, "Message") - }) - - t.Run("Should preserve message metadata across operations", func(t *testing.T) { - ctx := t.Context() - ms := 
setupMemoryStoreWithMiniredis(ctx, t) - - agentID := "metadata-test-agent" - metadata := map[string]interface{}{ - "timestamp": "2025-01-27T10:00:00Z", - "session": "test-session", - "user_id": "user-123", - "ip_address": "192.168.1.1", - } - - message := &store.Message{ - Role: "user", - Content: "Test message", - Metadata: metadata, - } - - err := ms.AppendMessage(ctx, agentID, message) - require.NoError(t, err) - - // Retrieve and verify metadata - messages, err := ms.GetMessages(ctx, agentID) - require.NoError(t, err) - require.Len(t, messages, 1) - - retrieved := messages[0] - assert.Equal(t, metadata["timestamp"], retrieved.Metadata["timestamp"]) - assert.Equal(t, metadata["session"], retrieved.Metadata["session"]) - assert.Equal(t, metadata["user_id"], retrieved.Metadata["user_id"]) - assert.Equal(t, metadata["ip_address"], retrieved.Metadata["ip_address"]) - }) - - t.Run("Should maintain conversation history consistency", func(t *testing.T) { - ctx := t.Context() - ms := setupMemoryStoreWithMiniredis(ctx, t) - - agentID := "consistency-test-agent" - - // Append multiple messages - messages := []*store.Message{ - {Role: "user", Content: "Question 1"}, - {Role: "assistant", Content: "Answer 1"}, - {Role: "user", Content: "Question 2"}, - {Role: "assistant", Content: "Answer 2"}, - } - - for _, msg := range messages { - err := ms.AppendMessage(ctx, agentID, msg) - require.NoError(t, err) - } - - // Verify conversation order maintained - retrieved, err := ms.GetMessages(ctx, agentID) - require.NoError(t, err) - require.Len(t, retrieved, len(messages)) - - for i, msg := range messages { - assert.Equal(t, msg.Role, retrieved[i].Role) - assert.Equal(t, msg.Content, retrieved[i].Content) - } - }) - - t.Run("Should support message retrieval with pagination", func(t *testing.T) { - ctx := t.Context() - ms := setupMemoryStoreWithMiniredis(ctx, t) - - agentID := "pagination-test-agent" - totalMessages := 25 - - // Append messages - for i := 0; i < totalMessages; i++ { 
- message := &store.Message{ - Role: "user", - Content: fmt.Sprintf("Message %d", i), - } - err := ms.AppendMessage(ctx, agentID, message) - require.NoError(t, err) - } - - // Test pagination (if supported) - messages, err := ms.GetMessages(ctx, agentID) - require.NoError(t, err) - assert.Len(t, messages, totalMessages) - }) -} -``` - -### Relevant Files - -- `test/integration/standalone/memory_store_test.go` - NEW - Memory store integration tests -- `engine/memory/store/redis.go` - VERIFY ONLY - Memory store implementation (no changes) -- `engine/memory/store/scripts.go` - VERIFY ONLY - Lua scripts (no changes) - -### Dependent Files - -- `engine/infra/cache/miniredis_standalone.go` - Uses MiniredisStandalone from Task 2.0 -- `pkg/config/config.go` - Uses Config from Task 1.0 - -## Deliverables - -- [x] test/integration/standalone/memory_store_test.go created -- [x] setupMemoryStoreWithMiniredis() helper function -- [x] Test for Lua script execution (AppendAndTrimWithMetadataScript) -- [x] Test for concurrent message appends -- [x] Test for message metadata preservation -- [x] Test for conversation history trimming -- [x] Test for conversation history consistency -- [x] Test for message retrieval with pagination -- [x] All tests use t.Context() (no context.Background()) -- [x] All tests follow "Should..." naming convention - -## Tests - -All tests are defined in the implementation section above. 
Summary of test coverage: - -### Lua Script Tests -- [x] Should execute Lua scripts natively (AppendAndTrimWithMetadataScript) -- [x] Should handle script errors gracefully - -### Concurrent Operation Tests -- [x] Should handle concurrent message appends without data loss -- [x] Should maintain message ordering under concurrent writes - -### Metadata Tests -- [x] Should preserve message metadata across operations -- [x] Should preserve complex metadata structures (nested objects, arrays) - -### Conversation History Tests -- [x] Should trim conversation history at max length -- [x] Should maintain conversation history consistency -- [x] Should preserve message order in conversation history - -### Pagination Tests -- [x] Should support message retrieval with pagination (if applicable) -- [x] Should handle edge cases (empty history, single message) - -### Edge Cases -- [x] Should handle empty conversation history -- [x] Should handle messages with no metadata -- [x] Should handle messages with large content -- [x] Should handle special characters in message content - -## Success Criteria - -- [x] All memory store integration tests pass -- [x] Lua scripts execute successfully in miniredis -- [x] Concurrent appends work without data loss -- [x] Message metadata preserved correctly -- [x] Conversation history maintains consistency -- [x] Conversation trimming works at max length -- [x] Zero changes required to memory store implementation -- [x] All tests use t.Context() (no context.Background()) -- [x] `go test ./test/integration/standalone/...` passes -- [x] `make lint` passes with zero warnings -- [x] Test coverage demonstrates miniredis compatibility with memory store diff --git a/tasks/prd-redis/_task_05.md b/tasks/prd-redis/_task_05.md deleted file mode 100644 index 76e2193f..00000000 --- a/tasks/prd-redis/_task_05.md +++ /dev/null @@ -1,308 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -engine/resources 
-testing -integration_testing -medium -miniredis|cache_adapter - - -# Task 5.0: Resource Store Integration - -## Overview - -Verify that the resource store works correctly with miniredis backend, ensuring full compatibility with atomic operations, optimistic locking, ETag consistency, and concurrent resource updates. This task validates that miniredis provides identical behavior to external Redis for all resource store operations including TxPipeline atomicity, Lua script-based locking (PutIfMatch), and watch notifications via Pub/Sub. - - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Resource store MUST work identically with miniredis and external Redis -- TxPipeline operations MUST maintain atomicity guarantees (value + etag together) -- Optimistic locking (PutIfMatch) MUST function via native Lua scripts -- ETag consistency MUST be maintained across all operations -- Concurrent resource updates MUST handle race conditions correctly -- Watch notifications MUST work via native Redis Pub/Sub -- All tests MUST use t.Context() (never context.Background()) -- All tests MUST follow "Should..." 
naming convention with testify assertions -- MUST use real miniredis (no mocks) with temp directories - - -## Subtasks - -- [x] 5.1 Create test/integration/standalone/resource_store_test.go with test suite -- [x] 5.2 Verify TxPipeline atomic operations (value + etag stored atomically) -- [x] 5.3 Verify optimistic locking via Lua scripts (PutIfMatch works natively) -- [x] 5.4 Verify ETag consistency across all resource operations -- [x] 5.5 Verify concurrent resource update handling (race conditions) -- [x] 5.6 Verify watch notifications via Pub/Sub -- [x] 5.7 Add test fixtures and helpers in test/helpers/standalone.go -- [x] 5.8 Run full test suite and ensure >80% coverage for integration code - -## Implementation Details - -This task verifies that the resource store, which relies on Redis TxPipeline for atomic multi-key operations and Lua scripts for optimistic locking, works identically with miniredis. - -### Relevant Files - -- `engine/resources/redis_store.go` - Resource store implementation using cache.RedisInterface -- `engine/infra/cache/miniredis_standalone.go` - MiniredisStandalone wrapper (created in Task 2.0) -- `engine/infra/cache/mod.go` - Mode-aware factory (updated in Task 3.0) -- `test/integration/standalone/resource_store_test.go` - NEW: Integration tests for resource store -- `test/helpers/standalone.go` - NEW: Test environment helpers - -### Dependent Files - -- `engine/infra/cache/redis.go` - Cache interface used by resource store -- `pkg/config/config.go` - Configuration structs for mode selection -- `pkg/config/resolver.go` - Mode resolution logic (created in Task 1.0) - -### Key Technical Details from Tech Spec - -**Resource Store Uses**: -- TxPipeline for atomic operations: Store resource value and ETag together atomically -- Lua scripts for optimistic locking: PutIfMatch checks ETag before updating -- Pub/Sub for watch notifications: Notify subscribers when resources change - -**Miniredis Compatibility**: -- Miniredis natively supports 
TxPipeline operations -- Miniredis natively executes Lua scripts (no emulation needed) -- Miniredis natively supports Redis Pub/Sub -- Zero consumer code changes required - -## Deliverables - -- `test/integration/standalone/resource_store_test.go` - Full integration test suite -- Test fixtures for resources in `test/fixtures/standalone/` -- Helper functions in `test/helpers/standalone.go` for resource store testing -- Updated CI pipeline in `.github/workflows/test.yml` (if needed) -- Documentation of any discovered edge cases or limitations - -## Tests - -Integration tests mapped from `_tests.md`: - -- [ ] Should store and retrieve resources atomically via TxPipeline - - Test: Create resource, verify value and ETag stored together - - Test: Update resource, verify old value not visible before ETag updated - -- [ ] Should support optimistic locking via PutIfMatch Lua script - - Test: Update with correct ETag succeeds - - Test: Update with incorrect ETag fails - - Test: Concurrent updates with stale ETag properly rejected - -- [ ] Should maintain ETag consistency across operations - - Test: ETag changes on every resource update - - Test: ETag retrieved matches last stored ETag - - Test: Concurrent reads see consistent ETags - -- [ ] Should handle concurrent resource updates correctly - - Test: Multiple goroutines updating same resource - - Test: Last writer wins with proper ETag verification - - Test: No lost updates due to race conditions - -- [ ] Should publish watch notifications via Pub/Sub - - Test: Subscribe to resource watch channel - - Test: Publish notification on resource update - - Test: Multiple subscribers receive notifications - - Test: Pattern subscriptions work correctly - -- [ ] Should handle error cases gracefully - - Test: Missing resource returns proper error - - Test: ETag mismatch returns conflict error - - Test: Pub/Sub connection failures handled - -### Test Structure Example - -```go -// test/integration/standalone/resource_store_test.go - 
-func TestResourceStore_MiniredisCompatibility(t *testing.T) { - t.Run("Should support TxPipeline atomic operations", func(t *testing.T) { - ctx := t.Context() - env := setupResourceStoreWithMiniredis(ctx, t) - defer env.Cleanup() - - resource := generateTestResource() - - // Store resource (atomic: value + etag) - err := env.Store.Put(ctx, resource) - require.NoError(t, err) - - // Retrieve and verify atomicity - retrieved, err := env.Store.Get(ctx, resource.ID) - require.NoError(t, err) - assert.Equal(t, resource.Value, retrieved.Value) - assert.Equal(t, resource.ETag, retrieved.ETag) - }) - - t.Run("Should handle optimistic locking via PutIfMatch", func(t *testing.T) { - ctx := t.Context() - env := setupResourceStoreWithMiniredis(ctx, t) - defer env.Cleanup() - - // Create initial resource - resource := generateTestResource() - err := env.Store.Put(ctx, resource) - require.NoError(t, err) - - // Update with correct ETag should succeed - resource.Value = "updated" - err = env.Store.PutIfMatch(ctx, resource, resource.ETag) - require.NoError(t, err) - - // Update with stale ETag should fail - staleResource := resource - staleResource.Value = "should-fail" - err = env.Store.PutIfMatch(ctx, staleResource, "stale-etag") - assert.Error(t, err) - assert.Contains(t, err.Error(), "conflict") - }) - - t.Run("Should handle concurrent resource updates", func(t *testing.T) { - ctx := t.Context() - env := setupResourceStoreWithMiniredis(ctx, t) - defer env.Cleanup() - - // Create initial resource - resource := generateTestResource() - err := env.Store.Put(ctx, resource) - require.NoError(t, err) - - // Concurrent updates - var wg sync.WaitGroup - errors := make([]error, 10) - for i := 0; i < 10; i++ { - wg.Add(1) - go func(idx int) { - defer wg.Done() - r := resource - r.Value = fmt.Sprintf("update-%d", idx) - errors[idx] = env.Store.PutIfMatch(ctx, r, resource.ETag) - }(i) - } - wg.Wait() - - // Only one update should succeed - successCount := 0 - for _, err := range errors { 
- if err == nil { - successCount++ - } - } - assert.Equal(t, 1, successCount, "Only one concurrent update should succeed") - }) - - t.Run("Should publish watch notifications via Pub/Sub", func(t *testing.T) { - ctx := t.Context() - env := setupResourceStoreWithMiniredis(ctx, t) - defer env.Cleanup() - - // Subscribe to notifications - notifications := make(chan string, 10) - err := env.Store.Watch(ctx, "resource:*", notifications) - require.NoError(t, err) - - // Update resource - resource := generateTestResource() - err = env.Store.Put(ctx, resource) - require.NoError(t, err) - - // Verify notification received - select { - case notif := <-notifications: - assert.Contains(t, notif, resource.ID) - case <-time.After(5 * time.Second): - t.Fatal("Watch notification not received") - } - }) -} - -// Helper functions -func setupResourceStoreWithMiniredis(ctx context.Context, t *testing.T) *ResourceStoreTestEnv { - // Setup miniredis via mode-aware factory - // Create resource store with miniredis client - // Return test environment with cleanup -} - -func generateTestResource() *resources.Resource { - // Generate sample resource with ID, value, ETag -} -``` - -## Success Criteria - -- [ ] All integration tests pass with miniredis backend -- [ ] TxPipeline operations maintain atomicity (value + etag together) -- [ ] Optimistic locking (PutIfMatch) works via native Lua scripts -- [ ] ETag consistency maintained across all operations -- [ ] Concurrent updates handle race conditions correctly -- [ ] Watch notifications work via native Pub/Sub -- [ ] Test coverage >80% for integration code -- [ ] `make test` passes with no failures -- [ ] All tests use `t.Context()` (no `context.Background()`) -- [ ] All tests follow "Should..." 
naming convention -- [ ] Test output clearly shows miniredis backend being tested -- [ ] No behavioral differences between miniredis and external Redis -- [ ] Documentation updated with any edge cases discovered - -## Dependencies - -- **Blocks**: Task 9.0 (End-to-End Workflow Tests) - requires resource store validation -- **Blocked By**: Task 3.0 (Mode-Aware Cache Factory) - requires factory to create miniredis clients - -## Estimated Effort - -**Size**: M (Medium - 1 day) - -**Breakdown**: -- Test suite creation: 3 hours -- TxPipeline atomicity tests: 2 hours -- Optimistic locking tests: 2 hours -- Concurrent update tests: 2 hours -- Watch notification tests: 1 hour -- Edge case testing and documentation: 2 hours - -**Total**: ~12 hours (1 day) - -## Risk Assessment - -**Risks**: -1. TxPipeline behavior differences between miniredis and Redis -2. Lua script execution differences -3. Pub/Sub notification delivery differences -4. Race condition test flakiness - -**Mitigations**: -1. Run identical test suite against both miniredis and external Redis (contract tests) -2. Use deterministic test data and proper synchronization -3. Add retry logic for Pub/Sub tests with reasonable timeouts -4. 
Document any discovered behavioral differences - -## Validation Checklist - -Before marking this task complete: - -- [ ] All subtasks completed -- [ ] All tests in "Tests" section implemented and passing -- [ ] Test coverage verified (>80%) -- [ ] `make lint` passes with no warnings -- [ ] `make test` passes with no failures -- [ ] Integration tests added to CI pipeline -- [ ] Code follows `.cursor/rules/test-standards.mdc` -- [ ] All uses of context follow patterns (t.Context() in tests) -- [ ] Test fixtures and helpers properly organized -- [ ] Documentation updated if edge cases discovered diff --git a/tasks/prd-redis/_task_06.md b/tasks/prd-redis/_task_06.md deleted file mode 100644 index 19297fe4..00000000 --- a/tasks/prd-redis/_task_06.md +++ /dev/null @@ -1,365 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -engine/infra/server -testing -integration_testing -medium -miniredis|pub_sub - - -# Task 6.0: Streaming & Pub/Sub Integration - -## Overview - -Verify that streaming and Pub/Sub functionality work correctly with miniredis backend, ensuring full compatibility with event publishing, pattern subscriptions, multiple subscribers, and event delivery reliability. This task validates that miniredis provides native Redis Pub/Sub support for workflow and task event notifications without emulation complexity. 
- - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Streaming MUST work identically with miniredis and external Redis -- Pub/Sub MUST support publish/subscribe operations natively -- Pattern subscriptions (workflow:*, task:*) MUST work correctly -- Multiple concurrent subscribers MUST receive all events -- Event delivery MUST be reliable (no lost events) -- Native go-redis PubSub types MUST be used (no emulation) -- All tests MUST use t.Context() (never context.Background()) -- All tests MUST follow "Should..." 
naming convention with testify assertions -- MUST use real miniredis (no mocks) with temp directories - - -## Subtasks - -- [x] 6.1 Create test/integration/standalone/streaming_test.go with test suite -- [x] 6.2 Verify basic publish/subscribe functionality -- [x] 6.3 Verify pattern subscriptions (wildcard channels) -- [x] 6.4 Verify multiple subscribers receive events -- [x] 6.5 Verify event delivery reliability (no lost events) -- [x] 6.6 Verify subscription lifecycle (subscribe, unsubscribe, cleanup) -- [x] 6.7 Add test fixtures and event generators -- [x] 6.8 Run full test suite and ensure >80% coverage for integration code - -## Implementation Details - -This task verifies that the streaming/pub-sub functionality, which uses Redis Pub/Sub for real-time workflow and task event notifications, works identically with miniredis. - -### Relevant Files - -- `engine/infra/server/dependencies.go` - Sets up streaming/pub-sub connections -- `engine/infra/cache/miniredis_standalone.go` - MiniredisStandalone wrapper (created in Task 2.0) -- `engine/infra/cache/mod.go` - Mode-aware factory (updated in Task 3.0) -- `test/integration/standalone/streaming_test.go` - NEW: Integration tests for streaming -- `test/helpers/standalone.go` - NEW: Test environment helpers for pub-sub - -### Dependent Files - -- `engine/infra/cache/redis.go` - Cache interface with Pub/Sub methods -- `pkg/config/config.go` - Configuration structs for mode selection -- `pkg/config/resolver.go` - Mode resolution logic (created in Task 1.0) - -### Key Technical Details from Tech Spec - -**Streaming Features Use**: -- Redis Pub/Sub for real-time event notifications -- Pattern subscriptions for workflow/task events (e.g., `workflow:*`, `task:*`) -- Native go-redis PubSub types (no custom implementation) -- Multiple concurrent subscribers supported - -**Miniredis Compatibility**: -- Miniredis natively supports Redis Pub/Sub protocol -- Pattern subscriptions work identically to external Redis -- Multiple 
subscribers work without emulation -- Zero consumer code changes required - -## Deliverables - -- `test/integration/standalone/streaming_test.go` - Full integration test suite -- Event generators and test fixtures in `test/fixtures/standalone/` -- Helper functions in `test/helpers/standalone.go` for pub-sub testing -- Updated CI pipeline in `.github/workflows/test.yml` (if needed) -- Documentation of any discovered edge cases or limitations - -## Tests - -Integration tests mapped from `_tests.md`: - -- [ ] Should publish and subscribe to events - - Test: Publish event to channel, verify subscriber receives it - - Test: Multiple events published in sequence - - Test: Event payload integrity maintained - -- [ ] Should support pattern subscriptions (wildcards) - - Test: Subscribe to `workflow:*` pattern - - Test: Receive events for `workflow:123`, `workflow:456`, etc. - - Test: Pattern subscriptions don't match unrelated channels - -- [ ] Should support multiple concurrent subscribers - - Test: Multiple subscribers to same channel - - Test: All subscribers receive same events - - Test: Subscribers don't interfere with each other - -- [ ] Should deliver events reliably - - Test: No events lost under normal conditions - - Test: Events delivered in order published - - Test: Large event payloads delivered correctly - -- [ ] Should handle subscription lifecycle correctly - - Test: Subscribe, receive events, unsubscribe cleanly - - Test: Re-subscribe to same channel after unsubscribe - - Test: Cleanup on context cancellation - -- [ ] Should handle error cases gracefully - - Test: Subscribe to invalid channel pattern - - Test: Publish to channel with no subscribers (no error) - - Test: Subscriber disconnection handling - -### Test Structure Example - -```go -// test/integration/standalone/streaming_test.go - -func TestStreaming_MiniredisCompatibility(t *testing.T) { - t.Run("Should publish and subscribe to events", func(t *testing.T) { - ctx := t.Context() - env := 
setupStreamingWithMiniredis(ctx, t) - defer env.Cleanup() - - // Subscribe to channel - events := make(chan string, 10) - err := env.Subscribe(ctx, "test-channel", events) - require.NoError(t, err) - - // Publish event - testEvent := "test-event-payload" - err = env.Publish(ctx, "test-channel", testEvent) - require.NoError(t, err) - - // Verify event received - select { - case evt := <-events: - assert.Equal(t, testEvent, evt) - case <-time.After(5 * time.Second): - t.Fatal("Event not received within timeout") - } - }) - - t.Run("Should support pattern subscriptions", func(t *testing.T) { - ctx := t.Context() - env := setupStreamingWithMiniredis(ctx, t) - defer env.Cleanup() - - // Subscribe to pattern - events := make(chan string, 10) - err := env.SubscribePattern(ctx, "workflow:*", events) - require.NoError(t, err) - - // Publish to matching channels - channels := []string{"workflow:123", "workflow:456", "workflow:789"} - for _, ch := range channels { - err = env.Publish(ctx, ch, "event-data") - require.NoError(t, err) - } - - // Verify all events received - receivedCount := 0 - timeout := time.After(5 * time.Second) - for receivedCount < len(channels) { - select { - case <-events: - receivedCount++ - case <-timeout: - t.Fatalf("Only received %d of %d events", receivedCount, len(channels)) - } - } - assert.Equal(t, len(channels), receivedCount) - }) - - t.Run("Should support multiple subscribers", func(t *testing.T) { - ctx := t.Context() - env := setupStreamingWithMiniredis(ctx, t) - defer env.Cleanup() - - // Create multiple subscribers - numSubscribers := 5 - subscribers := make([]chan string, numSubscribers) - for i := 0; i < numSubscribers; i++ { - subscribers[i] = make(chan string, 10) - err := env.Subscribe(ctx, "broadcast-channel", subscribers[i]) - require.NoError(t, err) - } - - // Publish event - testEvent := "broadcast-event" - err := env.Publish(ctx, "broadcast-channel", testEvent) - require.NoError(t, err) - - // Verify all subscribers received 
event - for i, sub := range subscribers { - select { - case evt := <-sub: - assert.Equal(t, testEvent, evt, "Subscriber %d didn't receive event", i) - case <-time.After(5 * time.Second): - t.Fatalf("Subscriber %d didn't receive event", i) - } - } - }) - - t.Run("Should deliver events reliably", func(t *testing.T) { - ctx := t.Context() - env := setupStreamingWithMiniredis(ctx, t) - defer env.Cleanup() - - // Subscribe - events := make(chan string, 100) - err := env.Subscribe(ctx, "test-channel", events) - require.NoError(t, err) - - // Publish multiple events - numEvents := 50 - for i := 0; i < numEvents; i++ { - err = env.Publish(ctx, "test-channel", fmt.Sprintf("event-%d", i)) - require.NoError(t, err) - } - - // Verify all events received - receivedCount := 0 - timeout := time.After(10 * time.Second) - for receivedCount < numEvents { - select { - case <-events: - receivedCount++ - case <-timeout: - t.Fatalf("Only received %d of %d events", receivedCount, numEvents) - } - } - assert.Equal(t, numEvents, receivedCount, "Some events were lost") - }) - - t.Run("Should handle subscription lifecycle", func(t *testing.T) { - ctx := t.Context() - env := setupStreamingWithMiniredis(ctx, t) - defer env.Cleanup() - - // Subscribe - events := make(chan string, 10) - sub := env.SubscribeRaw(ctx, "test-channel") - - // Receive event - err := env.Publish(ctx, "test-channel", "event-1") - require.NoError(t, err) - - msg, err := sub.ReceiveMessage(ctx) - require.NoError(t, err) - assert.Equal(t, "event-1", msg.Payload) - - // Unsubscribe - err = sub.Unsubscribe(ctx, "test-channel") - require.NoError(t, err) - - // Close - err = sub.Close() - require.NoError(t, err) - - // Re-subscribe should work - events2 := make(chan string, 10) - err = env.Subscribe(ctx, "test-channel", events2) - require.NoError(t, err) - - err = env.Publish(ctx, "test-channel", "event-2") - require.NoError(t, err) - - select { - case evt := <-events2: - assert.Equal(t, "event-2", evt) - case <-time.After(5 * 
time.Second): - t.Fatal("Event not received after re-subscribe") - } - }) -} - -// Helper functions -func setupStreamingWithMiniredis(ctx context.Context, t *testing.T) *StreamingTestEnv { - // Setup miniredis via mode-aware factory - // Create pub-sub connections - // Return test environment with cleanup -} -``` - -## Success Criteria - -- [x] All integration tests pass with miniredis backend -- [x] Basic publish/subscribe functionality works correctly -- [x] Pattern subscriptions (wildcards) work identically to Redis -- [x] Multiple subscribers receive all events -- [x] Event delivery is reliable (no lost events) -- [x] Subscription lifecycle (subscribe/unsubscribe/cleanup) works correctly -- [x] Test coverage >80% for integration code -- [x] `make test` passes with no failures -- [x] All tests use `t.Context()` (no `context.Background()`) -- [x] All tests follow "Should..." naming convention -- [x] Test output clearly shows miniredis backend being tested -- [x] No behavioral differences between miniredis and external Redis -- [x] Documentation updated with any edge cases discovered - -## Dependencies - -- **Blocks**: Task 9.0 (End-to-End Workflow Tests) - requires streaming validation -- **Blocked By**: Task 3.0 (Mode-Aware Cache Factory) - requires factory to create miniredis clients - -## Estimated Effort - -**Size**: M (Medium - 1 day) - -**Breakdown**: -- Test suite creation: 3 hours -- Basic publish/subscribe tests: 2 hours -- Pattern subscription tests: 2 hours -- Multiple subscriber tests: 2 hours -- Reliability and lifecycle tests: 2 hours -- Edge case testing and documentation: 1 hour - -**Total**: ~12 hours (1 day) - -## Risk Assessment - -**Risks**: -1. Pub/Sub behavior differences between miniredis and Redis -2. Pattern subscription matching differences -3. Event delivery timing issues causing flaky tests -4. Subscription cleanup not working correctly - -**Mitigations**: -1. 
Run identical test suite against both miniredis and external Redis (contract tests) -2. Use deterministic test patterns and reasonable timeouts -3. Add retry logic for timing-sensitive tests -4. Ensure proper cleanup in all test cases with t.Cleanup() -5. Document any discovered behavioral differences - -## Validation Checklist - -Before marking this task complete: - -- [x] All subtasks completed -- [x] All tests in "Tests" section implemented and passing -- [x] Test coverage verified (>80%) -- [x] `make lint` passes with no warnings -- [x] `make test` passes with no failures -- [x] Integration tests added to CI pipeline -- [x] Code follows `.cursor/rules/test-standards.mdc` -- [x] All uses of context follow patterns (t.Context() in tests) -- [x] Test fixtures and helpers properly organized -- [x] No flaky tests (all tests deterministic) -- [x] Documentation updated if edge cases discovered diff --git a/tasks/prd-redis/_task_07.md b/tasks/prd-redis/_task_07.md deleted file mode 100644 index e03479d9..00000000 --- a/tasks/prd-redis/_task_07.md +++ /dev/null @@ -1,505 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -engine/infra/cache -implementation -persistence_layer -medium -miniredis|badgerdb - - -# Task 7.0: Snapshot Manager Implementation - -## Overview - -Implement the optional persistence layer for standalone mode using BadgerDB to create periodic snapshots of miniredis state. This provides optional durability for standalone deployments while maintaining the simplicity of in-memory Redis. The snapshot manager runs in the background, taking periodic snapshots at configurable intervals and ensuring a final snapshot on graceful shutdown. 
- - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility -- **MUST** use `logger.FromContext(ctx)` for all logging - never pass logger as parameter -- **MUST** use `config.FromContext(ctx)` to read persistence configuration -- **NEVER** use `context.Background()` in runtime code - always inherit context - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Snapshot manager MUST be optional (only used when persistence.enabled = true) -- MUST use BadgerDB v4 for snapshot storage -- Periodic snapshots MUST run at configurable interval (default 5 minutes) -- Graceful shutdown MUST trigger final snapshot when configured -- Startup restore MUST load last snapshot when configured -- Snapshot operations MUST run in background (non-blocking) -- MUST use `logger.FromContext(ctx)` for all logging -- MUST use `config.FromContext(ctx)` for reading configuration -- MUST use proper goroutine lifecycle management (start/stop) -- MUST handle BadgerDB errors gracefully -- All code MUST follow `.cursor/rules/go-coding-standards.mdc` -- All code MUST follow context patterns from `.cursor/rules/global-config.mdc` - - -## Subtasks - -- [x] 7.1 Create 
engine/infra/cache/snapshot_manager.go with SnapshotManager struct -- [x] 7.2 Implement NewSnapshotManager constructor with context patterns -- [x] 7.3 Implement Snapshot() method for creating snapshots -- [x] 7.4 Implement Restore() method for loading snapshots -- [x] 7.5 Implement StartPeriodicSnapshots() with background goroutine -- [x] 7.6 Implement Stop() method for graceful shutdown -- [x] 7.7 Add snapshot metrics (duration, size, count) -- [x] 7.8 Create unit tests in snapshot_manager_test.go -- [x] 7.9 Test periodic snapshot functionality -- [x] 7.10 Test graceful shutdown snapshot -- [x] 7.11 Test snapshot restore on startup -- [x] 7.12 Test error handling (corrupt snapshots, disk full, etc.) -- [x] 7.13 Run full test suite and ensure >80% coverage - -## Implementation Details - -Implement the snapshot manager as a separate component that wraps miniredis and provides optional persistence. The manager should be non-blocking and use proper goroutine lifecycle management. - -### Relevant Files - -- `engine/infra/cache/snapshot_manager.go` - NEW: SnapshotManager implementation -- `engine/infra/cache/snapshot_manager_test.go` - NEW: Unit tests -- `engine/infra/cache/miniredis_standalone.go` - UPDATE: Integrate snapshot manager -- `pkg/config/config.go` - Configuration already added in Task 1.0 - -### Dependent Files - -- `engine/infra/cache/miniredis_standalone.go` - Uses snapshot manager when persistence enabled -- `pkg/config/config.go` - RedisPersistenceConfig struct -- `pkg/config/resolver.go` - Mode resolution logic - -### Key Technical Details from Tech Spec - -**SnapshotManager Responsibilities**: -- Create periodic snapshots of miniredis state to BadgerDB -- Restore last snapshot on startup -- Snapshot on graceful shutdown -- Non-blocking operations (background goroutine) -- Configurable snapshot interval - -**BadgerDB Integration**: -- Store snapshots as key-value pairs (miniredis key → value) -- Use BadgerDB transactions for atomicity -- Store 
metadata (timestamp, snapshot version) -- Handle BadgerDB lifecycle (open, close) - -**Configuration**: -```go -type RedisPersistenceConfig struct { - Enabled bool // Enable/disable persistence - DataDir string // Directory for BadgerDB storage - SnapshotInterval time.Duration // How often to snapshot (default 5m) - SnapshotOnShutdown bool // Snapshot on graceful shutdown - RestoreOnStartup bool // Restore last snapshot on startup -} -``` - -### Implementation Skeleton - -```go -// engine/infra/cache/snapshot_manager.go - -package cache - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/alicebob/miniredis/v2" - "github.com/dgraph-io/badger/v4" - - "github.com/compozy/compozy3/pkg/config" - "github.com/compozy/compozy3/pkg/logger" -) - -type SnapshotManager struct { - miniredis *miniredis.Miniredis - db *badger.DB - stopCh chan struct{} - wg sync.WaitGroup - mu sync.RWMutex -} - -// ✅ CORRECT: No config stored, retrieved from context -func NewSnapshotManager(ctx context.Context, mr *miniredis.Miniredis, cfg config.RedisPersistenceConfig) (*SnapshotManager, error) { - log := logger.FromContext(ctx) // ✅ MUST use context pattern - - // Open BadgerDB - opts := badger.DefaultOptions(cfg.DataDir) - db, err := badger.Open(opts) - if err != nil { - return nil, fmt.Errorf("open badger: %w", err) - } - - log.Info("Opened BadgerDB for snapshots", "data_dir", cfg.DataDir) - - return &SnapshotManager{ - miniredis: mr, - db: db, - stopCh: make(chan struct{}), - }, nil -} - -// Snapshot creates a snapshot of current miniredis state -func (sm *SnapshotManager) Snapshot(ctx context.Context) error { - log := logger.FromContext(ctx) // ✅ MUST use context pattern - - start := time.Now() - - // Get all keys from miniredis - keys := sm.miniredis.Keys() - - // Write to BadgerDB in transaction - err := sm.db.Update(func(txn *badger.Txn) error { - // Store metadata - metadata := map[string]string{ - "timestamp": time.Now().Format(time.RFC3339), - "version": "1.0", - } - - // 
Write metadata - for k, v := range metadata { - if err := txn.Set([]byte("_meta:"+k), []byte(v)); err != nil { - return err - } - } - - // Write all keys - for _, key := range keys { - value, _ := sm.miniredis.Get(key) - if err := txn.Set([]byte(key), []byte(value)); err != nil { - return err - } - } - - return nil - }) - - if err != nil { - log.Error("Snapshot failed", "error", err) - return fmt.Errorf("snapshot transaction: %w", err) - } - - duration := time.Since(start) - log.Info("Snapshot completed", - "duration_ms", duration.Milliseconds(), - "keys", len(keys), - ) - - return nil -} - -// Restore loads the last snapshot into miniredis -func (sm *SnapshotManager) Restore(ctx context.Context) error { - log := logger.FromContext(ctx) // ✅ MUST use context pattern - - start := time.Now() - keyCount := 0 - - err := sm.db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions - it := txn.NewIterator(opts) - defer it.Close() - - for it.Rewind(); it.Valid(); it.Next() { - item := it.Item() - key := string(item.Key()) - - // Skip metadata keys - if strings.HasPrefix(key, "_meta:") { - continue - } - - err := item.Value(func(val []byte) error { - sm.miniredis.Set(key, string(val)) - keyCount++ - return nil - }) - if err != nil { - return err - } - } - return nil - }) - - if err != nil { - log.Error("Restore failed", "error", err) - return fmt.Errorf("restore transaction: %w", err) - } - - duration := time.Since(start) - log.Info("Restore completed", - "duration_ms", duration.Milliseconds(), - "keys", keyCount, - ) - - return nil -} - -// StartPeriodicSnapshots starts background goroutine for periodic snapshots -func (sm *SnapshotManager) StartPeriodicSnapshots(ctx context.Context) { - cfg := config.FromContext(ctx) // ✅ MUST use context pattern - log := logger.FromContext(ctx) // ✅ MUST use context pattern - - interval := cfg.Redis.Standalone.Persistence.SnapshotInterval - - log.Info("Starting periodic snapshots", "interval", interval) - - 
sm.wg.Add(1) - go func() { - defer sm.wg.Done() - - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - if err := sm.Snapshot(ctx); err != nil { - log.Error("Periodic snapshot failed", "error", err) - } - case <-sm.stopCh: - log.Info("Stopping periodic snapshots") - return - } - } - }() -} - -// Stop gracefully stops the snapshot manager -func (sm *SnapshotManager) Stop() { - close(sm.stopCh) - sm.wg.Wait() - sm.db.Close() -} -``` - -## Deliverables - -- `engine/infra/cache/snapshot_manager.go` - Complete SnapshotManager implementation -- `engine/infra/cache/snapshot_manager_test.go` - Comprehensive unit tests -- Updated `engine/infra/cache/miniredis_standalone.go` - Integration with snapshot manager -- Snapshot metrics added to `engine/infra/cache/metrics.go` -- Test fixtures and helpers for snapshot testing -- Documentation of snapshot format and recovery procedures - -## Tests - -Unit tests mapped from `_tests.md`: - -- [ ] Should create snapshots of miniredis state - - Test: Create snapshot, verify BadgerDB contains all keys - - Test: Snapshot includes metadata (timestamp, version) - - Test: Large datasets snapshot correctly - -- [ ] Should restore snapshots to miniredis - - Test: Restore snapshot, verify all keys present in miniredis - - Test: Restored values match original values - - Test: Metadata properly restored - -- [ ] Should handle snapshot failures gracefully - - Test: BadgerDB write failure doesn't crash - - Test: Partial snapshot is rolled back - - Test: Errors logged with proper context - -- [ ] Should run periodic snapshots at configured interval - - Test: Snapshots created at correct intervals - - Test: Interval configurable via config - - Test: Goroutine doesn't leak - -- [ ] Should stop periodic snapshots on manager close - - Test: Stop() terminates goroutine cleanly - - Test: No goroutine leaks after Stop() - - Test: WaitGroup properly synchronized - -- [ ] Should create snapshot directory if 
missing - - Test: DataDir created if doesn't exist - - Test: Proper file permissions set - -- [ ] Should handle corrupt snapshots gracefully - - Test: Corrupt BadgerDB detected and handled - - Test: Restore fails gracefully with error - - Test: System remains operational after restore failure - -- [ ] Should track snapshot metrics - - Test: Duration metric recorded - - Test: Size metric updated - - Test: Success/failure count tracked - -### Test Structure Example - -```go -// engine/infra/cache/snapshot_manager_test.go - -func TestSnapshotManager_Lifecycle(t *testing.T) { - t.Run("Should snapshot and restore miniredis state", func(t *testing.T) { - ctx := t.Context() - tempDir := t.TempDir() - - // Setup miniredis with data - mr := miniredis.NewMiniRedis() - require.NoError(t, mr.Start()) - defer mr.Close() - - mr.Set("key1", "value1") - mr.Set("key2", "value2") - - // Create snapshot manager - cfg := testPersistenceConfig(tempDir) - ctx = config.ContextWithConfig(ctx, &config.Config{ - Redis: config.RedisConfig{ - Standalone: config.RedisStandaloneConfig{ - Persistence: cfg, - }, - }, - }) - - sm, err := NewSnapshotManager(ctx, mr, cfg) - require.NoError(t, err) - defer sm.Stop() - - // Take snapshot - err = sm.Snapshot(ctx) - require.NoError(t, err) - - // Create new miniredis - mr2 := miniredis.NewMiniRedis() - require.NoError(t, mr2.Start()) - defer mr2.Close() - - // Restore snapshot - sm2, err := NewSnapshotManager(ctx, mr2, cfg) - require.NoError(t, err) - defer sm2.Stop() - - err = sm2.Restore(ctx) - require.NoError(t, err) - - // Verify data restored - val1, _ := mr2.Get("key1") - assert.Equal(t, "value1", val1) - - val2, _ := mr2.Get("key2") - assert.Equal(t, "value2", val2) - }) -} - -func TestSnapshotManager_Periodic(t *testing.T) { - t.Run("Should take periodic snapshots", func(t *testing.T) { - ctx := t.Context() - tempDir := t.TempDir() - - mr := setupMiniredis(t) - defer mr.Close() - - // Short interval for testing - cfg := 
testPersistenceConfig(tempDir) - cfg.SnapshotInterval = 1 * time.Second - - ctx = config.ContextWithConfig(ctx, &config.Config{ - Redis: config.RedisConfig{ - Standalone: config.RedisStandaloneConfig{ - Persistence: cfg, - }, - }, - }) - - sm, err := NewSnapshotManager(ctx, mr, cfg) - require.NoError(t, err) - defer sm.Stop() - - // Start periodic snapshots - sm.StartPeriodicSnapshots(ctx) - - // Wait for at least 2 snapshots - time.Sleep(2500 * time.Millisecond) - - // Verify snapshots were created (check BadgerDB or metrics) - // TODO: Add verification logic - }) -} -``` - -## Success Criteria - -- [ ] SnapshotManager implementation complete and tested -- [ ] Snapshot creation works correctly (saves to BadgerDB) -- [ ] Snapshot restore works correctly (loads from BadgerDB) -- [ ] Periodic snapshots run at configured interval -- [ ] Graceful shutdown triggers final snapshot -- [ ] Goroutine lifecycle managed properly (no leaks) -- [ ] Error handling works for all failure modes -- [ ] Snapshot metrics tracked and exposed -- [ ] Test coverage >80% for snapshot manager code -- [ ] `make lint` passes with no warnings -- [ ] `make test` passes with no failures -- [ ] All code follows context patterns (logger, config from context) -- [ ] All tests use `t.Context()` (no `context.Background()`) -- [ ] Integration with MiniredisStandalone complete -- [ ] Documentation updated with snapshot procedures - -## Dependencies - -- **Blocks**: Task 8.0 (Persistence Integration Tests) - requires snapshot manager implementation -- **Blocked By**: Task 2.0 (MiniredisStandalone Wrapper) - requires miniredis to be available - -## Estimated Effort - -**Size**: M (Medium - 1-2 days) - -**Breakdown**: -- SnapshotManager struct and constructor: 2 hours -- Snapshot() implementation: 3 hours -- Restore() implementation: 3 hours -- Periodic snapshot goroutine: 2 hours -- Error handling and metrics: 2 hours -- Unit tests: 4 hours -- Integration and edge case testing: 2 hours - -**Total**: ~18 
hours (1-2 days) - -## Risk Assessment - -**Risks**: -1. BadgerDB corruption or write failures -2. Large snapshot operations blocking miniredis -3. Goroutine leaks on improper shutdown -4. Snapshot interval too aggressive causing performance issues - -**Mitigations**: -1. Proper BadgerDB error handling and transaction rollback -2. Snapshots run in background goroutine (non-blocking) -3. Proper WaitGroup and channel-based shutdown -4. Default 5-minute interval, configurable for tuning -5. Metrics to monitor snapshot performance - -## Validation Checklist - -Before marking this task complete: - -- [ ] All subtasks completed -- [ ] All tests in "Tests" section implemented and passing -- [ ] Test coverage verified (>80%) -- [ ] `make lint` passes with no warnings -- [ ] `make test` passes with no failures -- [ ] Code follows `.cursor/rules/go-coding-standards.mdc` -- [ ] Context patterns followed (logger, config from context) -- [ ] Goroutine lifecycle properly managed -- [ ] No goroutine leaks (verified with tests) -- [ ] BadgerDB integration working correctly -- [ ] Metrics properly tracked and exposed -- [ ] Documentation updated with snapshot procedures diff --git a/tasks/prd-redis/_task_08.md b/tasks/prd-redis/_task_08.md deleted file mode 100644 index fe1d6ab1..00000000 --- a/tasks/prd-redis/_task_08.md +++ /dev/null @@ -1,476 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -test/integration/standalone -testing -integration_testing -medium -snapshot_manager|badgerdb - - -# Task 8.0: Persistence Integration Tests - -## Overview - -Create comprehensive integration tests for the persistence layer, validating the full snapshot/restore cycle, data persistence across restarts, snapshot failure handling, and corrupt snapshot recovery. These tests ensure that the optional BadgerDB persistence layer works correctly in real-world scenarios and handles edge cases gracefully. 
- - -- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Integration tests MUST verify full snapshot/restore cycle -- MUST test data persistence across simulated restarts -- MUST test snapshot failure handling and recovery -- MUST test corrupt snapshot detection and recovery -- MUST test periodic snapshot behavior under load -- MUST test graceful shutdown snapshot creation -- All tests MUST use t.Context() (never context.Background()) -- All tests MUST follow "Should..." 
naming convention with testify assertions -- MUST use real miniredis and real BadgerDB (no mocks) -- Tests MUST use temp directories (t.TempDir()) -- Tests MUST clean up resources with t.Cleanup() - - -## Subtasks - -- [x] 8.1 Create test/integration/standalone/persistence_test.go with test suite -- [x] 8.2 Test full snapshot/restore cycle (complete data persistence) -- [x] 8.3 Test data persistence across simulated restarts -- [x] 8.4 Test snapshot failure handling (BadgerDB error simulation) -- [x] 8.5 Test corrupt snapshot detection and recovery -- [x] 8.6 Test periodic snapshot behavior under concurrent load -- [x] 8.7 Test graceful shutdown snapshot creation -- [x] 8.8 Test snapshot restore on startup (cold start scenario) -- [x] 8.9 Add test fixtures and data generators -- [x] 8.10 Run full test suite and ensure >80% coverage for integration code - -## Implementation Details - -This task creates integration tests that validate the snapshot manager's behavior in real-world scenarios, including edge cases and failure modes. 
- -### Relevant Files - -- `test/integration/standalone/persistence_test.go` - NEW: Integration tests for persistence -- `engine/infra/cache/snapshot_manager.go` - Created in Task 7.0, tested here -- `engine/infra/cache/miniredis_standalone.go` - MiniredisStandalone with persistence -- `test/helpers/standalone.go` - Test environment helpers - -### Dependent Files - -- `engine/infra/cache/snapshot_manager.go` - Snapshot manager implementation -- `engine/infra/cache/miniredis_standalone.go` - MiniredisStandalone wrapper -- `pkg/config/config.go` - RedisPersistenceConfig - -### Key Technical Details from Tech Spec - -**Persistence Testing Focus**: -- Full lifecycle: start → populate data → snapshot → shutdown → restart → verify data -- Edge cases: corrupt snapshots, disk full, BadgerDB errors -- Concurrency: snapshots under load, concurrent reads/writes -- Configuration: different snapshot intervals, restore options - -**Test Environment Requirements**: -- Temp directories for BadgerDB (t.TempDir()) -- Ability to simulate restarts (close and re-open) -- Ability to inject failures (corrupt files, disk errors) -- Proper cleanup (t.Cleanup()) - -## Deliverables - -- `test/integration/standalone/persistence_test.go` - Full integration test suite -- Test fixtures and data generators in `test/fixtures/standalone/` -- Helper functions in `test/helpers/standalone.go` for persistence testing -- Documentation of test scenarios and expected behaviors -- Updated CI pipeline in `.github/workflows/test.yml` (if needed) - -## Tests - -Integration tests mapped from `_tests.md`: - -- [ ] Should persist and restore data across full cycle - - Test: Create miniredis, populate data, snapshot, close - - Test: Create new miniredis, restore snapshot, verify data identical - - Test: Large datasets (1000+ keys) persist correctly - -- [ ] Should persist data across simulated restarts - - Test: Phase 1 - start, populate, snapshot, graceful shutdown - - Test: Phase 2 - restart, restore, verify 
data persisted - - Test: Multiple restart cycles maintain data integrity - -- [ ] Should handle snapshot failures gracefully - - Test: Disk full during snapshot (mock filesystem full) - - Test: BadgerDB write error during snapshot - - Test: Snapshot manager continues working after failure - - Test: Next snapshot succeeds after previous failure - -- [ ] Should detect and recover from corrupt snapshots - - Test: Corrupt BadgerDB file (truncate, corrupt bytes) - - Test: Restore fails gracefully with error - - Test: System remains operational with empty state - - Test: Fresh snapshot can be created after corruption - -- [ ] Should handle periodic snapshots under load - - Test: Continuous writes during periodic snapshots - - Test: No data loss between snapshots - - Test: Snapshots don't block operations - - Test: Performance acceptable during snapshots - -- [ ] Should create snapshot on graceful shutdown - - Test: Configure snapshot_on_shutdown: true - - Test: Trigger shutdown, verify final snapshot created - - Test: Restored data includes all data up to shutdown - -- [ ] Should restore snapshot on startup when configured - - Test: Configure restore_on_startup: true - - Test: Start with existing snapshot, verify data restored - - Test: Start without snapshot, system initializes empty - -### Test Structure Example - -```go -// test/integration/standalone/persistence_test.go - -func TestPersistence_FullCycle(t *testing.T) { - t.Run("Should persist and restore data across full cycle", func(t *testing.T) { - ctx := t.Context() - tempDir := t.TempDir() - - // Phase 1: Create data and snapshot - testData := map[string]string{ - "user:1": "alice", - "user:2": "bob", - "count": "42", - } - - { - env := setupStandaloneWithPersistence(ctx, t, tempDir) - - // Populate data - for k, v := range testData { - err := env.Client.Set(ctx, k, v, 0).Err() - require.NoError(t, err) - } - - // Trigger snapshot - err := env.SnapshotManager.Snapshot(ctx) - require.NoError(t, err) - - // Clean 
shutdown - env.Shutdown(ctx) - } - - // Phase 2: Restore and verify - { - env := setupStandaloneWithPersistence(ctx, t, tempDir) - defer env.Shutdown(ctx) - - // Restore snapshot - err := env.SnapshotManager.Restore(ctx) - require.NoError(t, err) - - // Verify all data restored - for k, expectedVal := range testData { - val, err := env.Client.Get(ctx, k).Result() - require.NoError(t, err) - assert.Equal(t, expectedVal, val, "Key %s value mismatch", k) - } - } - }) - - t.Run("Should persist data across multiple restarts", func(t *testing.T) { - ctx := t.Context() - tempDir := t.TempDir() - - // Multiple restart cycles - for cycle := 1; cycle <= 3; cycle++ { - env := setupStandaloneWithPersistence(ctx, t, tempDir) - - // Add data in this cycle - key := fmt.Sprintf("cycle:%d", cycle) - err := env.Client.Set(ctx, key, fmt.Sprintf("data-%d", cycle), 0).Err() - require.NoError(t, err) - - // Snapshot and shutdown - err = env.SnapshotManager.Snapshot(ctx) - require.NoError(t, err) - env.Shutdown(ctx) - } - - // Final restore - verify all cycles' data present - env := setupStandaloneWithPersistence(ctx, t, tempDir) - defer env.Shutdown(ctx) - - err := env.SnapshotManager.Restore(ctx) - require.NoError(t, err) - - for cycle := 1; cycle <= 3; cycle++ { - key := fmt.Sprintf("cycle:%d", cycle) - val, err := env.Client.Get(ctx, key).Result() - require.NoError(t, err) - assert.Equal(t, fmt.Sprintf("data-%d", cycle), val) - } - }) -} - -func TestPersistence_FailureHandling(t *testing.T) { - t.Run("Should handle snapshot failures gracefully", func(t *testing.T) { - ctx := t.Context() - tempDir := t.TempDir() - - env := setupStandaloneWithPersistence(ctx, t, tempDir) - defer env.Shutdown(ctx) - - // Populate data - env.Client.Set(ctx, "key1", "value1", 0) - - // Simulate disk full by making directory read-only - err := os.Chmod(tempDir, 0444) - require.NoError(t, err) - - // Snapshot should fail - err = env.SnapshotManager.Snapshot(ctx) - assert.Error(t, err) - - // Restore write 
permissions - err = os.Chmod(tempDir, 0755) - require.NoError(t, err) - - // Next snapshot should succeed - env.Client.Set(ctx, "key2", "value2", 0) - err = env.SnapshotManager.Snapshot(ctx) - assert.NoError(t, err) - }) - - t.Run("Should recover from corrupt snapshot", func(t *testing.T) { - ctx := t.Context() - tempDir := t.TempDir() - - // Phase 1: Create snapshot - { - env := setupStandaloneWithPersistence(ctx, t, tempDir) - env.Client.Set(ctx, "key1", "value1", 0) - err := env.SnapshotManager.Snapshot(ctx) - require.NoError(t, err) - env.Shutdown(ctx) - } - - // Corrupt the snapshot (truncate BadgerDB files) - files, err := os.ReadDir(tempDir) - require.NoError(t, err) - if len(files) > 0 { - filePath := filepath.Join(tempDir, files[0].Name()) - err = os.Truncate(filePath, 0) - require.NoError(t, err) - } - - // Phase 2: Restore should fail gracefully - { - env := setupStandaloneWithPersistence(ctx, t, tempDir) - defer env.Shutdown(ctx) - - err := env.SnapshotManager.Restore(ctx) - assert.Error(t, err, "Restore should fail with corrupt snapshot") - - // System should remain operational (empty state) - _, err = env.Client.Get(ctx, "key1").Result() - assert.Error(t, err) // Key not found (empty state) - - // Should be able to create new data and snapshot - env.Client.Set(ctx, "key2", "value2", 0) - err = env.SnapshotManager.Snapshot(ctx) - assert.NoError(t, err, "Should create new snapshot after corruption") - } - }) -} - -func TestPersistence_PeriodicSnapshots(t *testing.T) { - t.Run("Should take periodic snapshots under load", func(t *testing.T) { - ctx := t.Context() - tempDir := t.TempDir() - - // Short interval for testing - env := setupStandaloneWithPeriodicSnapshots(ctx, t, tempDir, 2*time.Second) - defer env.Shutdown(ctx) - - // Start periodic snapshots - env.SnapshotManager.StartPeriodicSnapshots(ctx) - - // Continuous writes - stopCh := make(chan struct{}) - var writeCount atomic.Int64 - - go func() { - for { - select { - case <-stopCh: - return - 
default: - count := writeCount.Add(1) - key := fmt.Sprintf("key:%d", count) - env.Client.Set(ctx, key, fmt.Sprintf("value:%d", count), 0) - time.Sleep(10 * time.Millisecond) - } - } - }() - - // Wait for at least 2 periodic snapshots - time.Sleep(5 * time.Second) - close(stopCh) - - // Final snapshot - err := env.SnapshotManager.Snapshot(ctx) - require.NoError(t, err) - - finalCount := writeCount.Load() - t.Logf("Wrote %d keys during periodic snapshots", finalCount) - - // Shutdown and restore - env.Shutdown(ctx) - - env2 := setupStandaloneWithPersistence(ctx, t, tempDir) - defer env2.Shutdown(ctx) - - err = env2.SnapshotManager.Restore(ctx) - require.NoError(t, err) - - // Verify data restored (check sample keys) - for i := int64(1); i <= finalCount; i += 100 { - key := fmt.Sprintf("key:%d", i) - _, err := env2.Client.Get(ctx, key).Result() - assert.NoError(t, err, "Key %s should exist", key) - } - }) -} - -func TestPersistence_GracefulShutdown(t *testing.T) { - t.Run("Should snapshot on graceful shutdown", func(t *testing.T) { - ctx := t.Context() - tempDir := t.TempDir() - - // Phase 1: Create data and shutdown (should auto-snapshot) - { - env := setupStandaloneWithPersistence(ctx, t, tempDir) - - // Populate data - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key:%d", i) - env.Client.Set(ctx, key, fmt.Sprintf("value:%d", i), 0) - } - - // Graceful shutdown (should trigger final snapshot) - env.Shutdown(ctx) - } - - // Phase 2: Restore and verify shutdown snapshot was created - { - env := setupStandaloneWithPersistence(ctx, t, tempDir) - defer env.Shutdown(ctx) - - err := env.SnapshotManager.Restore(ctx) - require.NoError(t, err) - - // Verify all data from shutdown snapshot - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key:%d", i) - val, err := env.Client.Get(ctx, key).Result() - require.NoError(t, err) - assert.Equal(t, fmt.Sprintf("value:%d", i), val) - } - } - }) -} - -// Helper functions -func setupStandaloneWithPersistence(ctx context.Context, t 
*testing.T, dataDir string) *PersistenceTestEnv { - // Create config with persistence enabled - // Setup miniredis with snapshot manager - // Return test environment with cleanup -} - -func setupStandaloneWithPeriodicSnapshots(ctx context.Context, t *testing.T, dataDir string, interval time.Duration) *PersistenceTestEnv { - // Setup with custom snapshot interval for testing -} -``` - -## Success Criteria - -- [ ] All integration tests pass with persistence layer -- [ ] Full snapshot/restore cycle works correctly -- [ ] Data persists across simulated restarts -- [ ] Snapshot failures handled gracefully (system remains operational) -- [ ] Corrupt snapshots detected and recovered from -- [ ] Periodic snapshots work under concurrent load -- [ ] Graceful shutdown creates final snapshot -- [ ] Startup restore works when configured -- [ ] Test coverage >80% for integration code -- [ ] `make test` passes with no failures -- [ ] All tests use `t.Context()` (no `context.Background()`) -- [ ] All tests follow "Should..." naming convention -- [ ] Tests are deterministic (no flaky tests) -- [ ] Test output clearly shows persistence behavior -- [ ] Documentation updated with test scenarios - -## Dependencies - -- **Blocks**: Task 9.0 (End-to-End Workflow Tests) - requires persistence validation -- **Blocked By**: Task 7.0 (Snapshot Manager Implementation) - requires snapshot manager - -## Estimated Effort - -**Size**: M (Medium - 1 day) - -**Breakdown**: -- Test suite setup: 2 hours -- Full cycle tests: 2 hours -- Restart simulation tests: 2 hours -- Failure handling tests: 2 hours -- Corruption recovery tests: 2 hours -- Load testing and periodic snapshots: 2 hours -- Documentation and cleanup: 1 hour - -**Total**: ~13 hours (1 day) - -## Risk Assessment - -**Risks**: -1. Tests may be flaky due to timing issues with periodic snapshots -2. File system operations may behave differently across platforms -3. Large test data may slow down test suite -4. 
Cleanup failures may leave temp files - -**Mitigations**: -1. Use deterministic delays and synchronization (WaitGroups, channels) -2. Test on multiple platforms (CI covers Linux, macOS, Windows) -3. Use reasonable dataset sizes (balance coverage vs speed) -4. Always use t.TempDir() and t.Cleanup() for automatic cleanup -5. Add timeout protections to prevent hanging tests - -## Validation Checklist - -Before marking this task complete: - -- [ ] All subtasks completed -- [ ] All tests in "Tests" section implemented and passing -- [ ] Test coverage verified (>80%) -- [ ] `make lint` passes with no warnings -- [ ] `make test` passes with no failures -- [ ] Integration tests added to CI pipeline -- [ ] Code follows `.cursor/rules/test-standards.mdc` -- [ ] All uses of context follow patterns (t.Context() in tests) -- [ ] Test fixtures and helpers properly organized -- [ ] No flaky tests (all tests deterministic) -- [ ] Tests run successfully on CI (multiple platforms) -- [ ] Documentation updated with test scenarios -- [ ] Cleanup verified (no leaked temp files or goroutines) diff --git a/tasks/prd-redis/_task_09.md b/tasks/prd-redis/_task_09.md deleted file mode 100644 index f6d4d00c..00000000 --- a/tasks/prd-redis/_task_09.md +++ /dev/null @@ -1,156 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -test/integration/standalone -testing -integration_validation -medium -cache|memory_store|resource_store|streaming|persistence - - -# Task 9.0: End-to-End Workflow Tests [Size: M - 1-2 days] - -## Overview - -Create comprehensive end-to-end integration tests that validate complete workflow execution in standalone mode. These tests verify that all components (cache, memory store, resource store, streaming, persistence) work together correctly using miniredis as the cache backend. 
- - -- **ALWAYS READ** @.cursor/rules/test-standards.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility -- **MUST** use `t.Context()` in all tests - NEVER `context.Background()` -- **MUST** follow `t.Run("Should ...")` naming convention -- **MUST** use testify assertions (require/assert) - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Complete workflow execution in standalone mode must work identically to distributed mode -- Multi-agent workflows must execute correctly -- Workflows with memory and tools must function properly -- Concurrent workflow execution must be supported -- Workflow state persistence must work across snapshots -- All tests must be deterministic and parallelizable where safe -- Test coverage >80% for integration test scenarios - - -## Subtasks - -- [x] 9.1 Create test environment helper for standalone mode -- [x] 9.2 Implement end-to-end workflow execution tests -- [x] 9.3 Implement multi-agent workflow tests -- [x] 9.4 Implement workflows with memory and tools tests -- [x] 9.5 Implement concurrent workflow execution tests -- [x] 9.6 Implement workflow state persistence tests -- [x] 9.7 Add performance benchmarks for workflow execution - -## Implementation Details - -### Test 
Structure - -Create integration tests under `test/integration/standalone/` that validate complete workflow execution scenarios. Use real miniredis (no mocks) with test fixtures and helper functions. - -### Relevant Files - -**New Files:** -- `test/integration/standalone/workflow_test.go` - End-to-end workflow execution tests -- `test/integration/standalone/helpers.go` - Test environment setup helpers -- `test/fixtures/standalone/workflows/test-workflow.yaml` - Sample workflow fixture -- `test/fixtures/standalone/workflows/stateful-workflow.yaml` - Workflow with memory fixture - -### Dependent Files - -- `engine/infra/cache/miniredis_standalone.go` - Miniredis wrapper (Task 2.0) -- `engine/infra/cache/snapshot_manager.go` - Snapshot manager (Task 7.0) -- `engine/memory/store/redis.go` - Memory store (Task 4.0) -- `engine/resources/redis_store.go` - Resource store (Task 5.0) -- `engine/infra/server/dependencies.go` - Server dependencies setup - -## Deliverables - -- Test environment helper (`SetupStandaloneTestEnv`) for integration tests -- End-to-end workflow execution tests with complete lifecycle -- Multi-agent workflow tests with agent coordination -- Memory and tools integration tests with state management -- Concurrent workflow execution tests (10+ workflows) -- State persistence tests with snapshot/restore cycles -- Performance benchmarks comparing standalone vs distributed mode -- Test fixtures and sample workflows -- All tests passing with `make test` - -## Tests - -Unit tests mapped from `_tests.md` for this feature: - -### End-to-End Workflow Tests (`test/integration/standalone/workflow_test.go`) - -- [x] Should execute complete workflow with agent, tasks, and tools in standalone mode -- [ ] Should persist conversation history across workflow steps -- [ ] Should handle workflow state correctly during execution -- [x] Should execute multiple workflows concurrently (10+ workflows) -- [ ] Should handle workflow errors and retries gracefully -- [ ] Should 
maintain workflow isolation (no cross-workflow interference) -- [ ] Should cleanup resources after workflow completion - -### Multi-Agent Workflows - -- [ ] Should coordinate multiple agents in single workflow -- [ ] Should maintain separate conversation histories per agent -- [ ] Should share resources between agents correctly -- [ ] Should handle agent failures without affecting other agents - -### Workflows with Memory and Tools - -- [ ] Should persist agent memory across workflow steps -- [ ] Should execute tool calls correctly -- [ ] Should maintain tool state across invocations -- [ ] Should handle tool errors gracefully - -### Concurrent Execution - -- [ ] Should execute 10+ workflows concurrently without interference -- [ ] Should maintain correct state for each workflow -- [ ] Should handle concurrent cache operations correctly -- [ ] Should not exceed memory limits under load - -### State Persistence - -- [ ] Should persist workflow state to snapshots -- [ ] Should restore workflow state after restart -- [ ] Should handle snapshot failures gracefully -- [ ] Should continue execution after restore - -### Performance Benchmarks - -- [ ] Should complete workflow within 1.5x of Redis time -- [ ] Should handle 100+ cache operations per second -- [ ] Should use <512MB memory for typical workload -- [ ] Should complete snapshots within 5 seconds - -### Edge Cases - -- [ ] Should handle empty workflows -- [ ] Should handle workflows with no memory or tools -- [ ] Should handle long-running workflows (>1 hour) -- [ ] Should recover from miniredis errors - -## Success Criteria - -- All workflow execution tests pass (`go test -v -race ./test/integration/standalone/`) -- Workflows execute correctly in standalone mode with miniredis -- Multi-agent workflows coordinate properly -- Memory and tools work as expected -- Concurrent workflows execute without interference (10+ concurrent) -- State persistence works across restarts -- Performance benchmarks meet NFRs (<1.5x 
Redis time, 100+ ops/sec) -- Test coverage >80% for integration scenarios -- No flaky tests in the test suite -- All tests follow project test standards (naming, context, assertions) diff --git a/tasks/prd-redis/_task_10.md b/tasks/prd-redis/_task_10.md deleted file mode 100644 index e8d19546..00000000 --- a/tasks/prd-redis/_task_10.md +++ /dev/null @@ -1,192 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -test/integration/cache -testing -contract_validation -medium -cache|miniredis - - -# Task 10.0: Contract Tests & Validation [Size: M - 1 day] - -## Overview - -Create comprehensive contract tests that verify miniredis adapter behaves identically to external Redis adapter. These tests validate that all 48 methods of the `cache.RedisInterface` work correctly with both backends, ensuring complete behavioral parity. - - -- **ALWAYS READ** @.cursor/rules/test-standards.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility -- **MUST** use `t.Context()` in all tests - NEVER `context.Background()` -- **MUST** follow `t.Run("Should ...")` naming convention -- **MUST** use testify assertions (require/assert) -- **MUST** test ALL 48 RedisInterface methods - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id 
and them you will check what you want - - - -- All 48 RedisInterface methods must be tested with both adapters -- Behavioral equivalence must be verified (same inputs → same outputs) -- Error cases must behave identically -- Lua scripts must execute identically -- TxPipeline operations must behave identically -- Pub/Sub must work identically -- Mode switching between adapters must work correctly -- Contract tests must be comprehensive and exhaustive - - -## Subtasks - -- [x] 10.1 Create contract test framework for cache adapters -- [x] 10.2 Implement basic operations contract tests (Get, Set, Del, Exists) -- [x] 10.3 Implement Lua script contract tests (Eval, EvalSha) -- [x] 10.4 Implement TxPipeline contract tests (transactions, atomicity) -- [x] 10.5 Implement Pub/Sub contract tests (Subscribe, Publish, patterns) -- [x] 10.6 Implement data structure contract tests (Hash, List, Set, ZSet) -- [x] 10.7 Implement error handling contract tests -- [x] 10.8 Implement mode switching tests - -## Implementation Details - -### Contract Test Pattern - -Create a test suite that runs the same tests against both Redis and miniredis adapters. This ensures behavioral parity between the two implementations. - -### Relevant Files - -**New/Updated Files:** -- `test/integration/cache/adapter_contract_test.go` - Cache adapter contract tests -- `test/integration/cache/helpers.go` - Test helpers for adapter setup - -### Dependent Files - -- `engine/infra/cache/redis.go` - Redis interface definition -- `engine/infra/cache/miniredis_standalone.go` - Miniredis implementation (Task 2.0) -- `engine/infra/cache/mod.go` - Cache factory (Task 3.0) - -## Deliverables - -- Contract test framework that runs same tests against both adapters -- Basic operations tests (Get, Set, Del, Exists, TTL, etc.) 
-- Lua script tests (Eval, EvalSha with all memory store scripts) -- TxPipeline tests (atomic multi-key operations) -- Pub/Sub tests (Subscribe, Publish, PSubscribe, pattern matching) -- Data structure tests (Hash, List, Set, Sorted Set operations) -- Error handling tests (connection errors, invalid operations) -- Mode switching tests (config-based adapter selection) -- Test report documenting all 48 methods tested -- All tests passing with `make test` - -## Tests - -Unit tests mapped from `_tests.md` for this feature: - -### Cache Adapter Contract Tests (`test/integration/cache/adapter_contract_test.go`) - -**Framework:** -- [ ] Should run same test suite against both Redis and miniredis -- [ ] Should verify identical behavior for all operations -- [ ] Should compare outputs byte-for-byte where possible -- [ ] Should validate error types and messages match - -**Basic Operations (String Commands):** -- [ ] Should satisfy cache.RedisInterface contract -- [ ] Get, Set, Del, Exists should behave identically -- [ ] SetNX, SetEX, GetSet should behave identically -- [ ] Incr, Decr, IncrBy, DecrBy should behave identically -- [ ] MGet, MSet should behave identically -- [ ] TTL, Expire, ExpireAt, Persist should behave identically - -**Lua Scripts:** -- [ ] Eval should execute scripts identically -- [ ] EvalSha should work with script caching -- [ ] AppendAndTrimWithMetadataScript should execute correctly -- [ ] PutIfMatch script should execute correctly -- [ ] All memory store Lua scripts should produce same results - -**TxPipeline (Transactions):** -- [ ] TxPipeline should support atomic operations -- [ ] Multi-key operations should be atomic -- [ ] Watch should detect concurrent modifications -- [ ] Pipeline commands should batch correctly -- [ ] Rollback on error should work identically - -**Pub/Sub:** -- [ ] Subscribe should receive published messages -- [ ] PSubscribe should match patterns correctly -- [ ] Publish should deliver to all subscribers -- [ ] Unsubscribe 
should stop receiving messages -- [ ] Multiple subscribers should all receive messages - -**Hash Operations:** -- [ ] HGet, HSet, HDel, HExists should behave identically -- [ ] HMGet, HMSet should behave identically -- [ ] HGetAll, HKeys, HVals, HLen should behave identically -- [ ] HIncrBy should behave identically - -**List Operations:** -- [ ] LPush, RPush, LPop, RPop should behave identically -- [ ] LLen, LRange, LIndex should behave identically -- [ ] LTrim should behave identically - -**Set Operations:** -- [ ] SAdd, SRem, SMembers should behave identically -- [ ] SIsMember, SCard should behave identically -- [ ] SInter, SUnion, SDiff should behave identically - -**Sorted Set Operations:** -- [ ] ZAdd, ZRem, ZScore should behave identically -- [ ] ZRange, ZRevRange should behave identically -- [ ] ZCard, ZCount should behave identically -- [ ] ZIncrBy should behave identically - -**Error Handling:** -- [ ] Invalid operations should return same error types -- [ ] Connection errors should be handled identically -- [ ] Type errors should be handled identically -- [ ] Script errors should propagate identically - -**Mode Switching:** -- [ ] Should create correct adapter based on config mode -- [ ] Should switch between adapters on config change -- [ ] Should handle invalid mode configurations -- [ ] Should respect mode overrides - -### Edge Cases - -- [ ] Empty values should behave identically -- [ ] Nil returns should behave identically -- [ ] Concurrent operations should behave identically -- [ ] Large values (>1MB) should behave identically -- [ ] Special characters in keys should behave identically - -### Coverage Requirements - -- [ ] All 48 RedisInterface methods tested -- [ ] Test coverage >95% for contract tests -- [ ] Document any behavioral differences found - -## Success Criteria - -- Contract test framework implemented and working -- All 48 RedisInterface methods have contract tests -- All tests pass for both Redis and miniredis adapters -- Zero 
behavioral differences detected between adapters -- Lua scripts execute identically on both backends -- TxPipeline operations are atomic on both backends -- Pub/Sub works identically on both backends -- Error handling is consistent across adapters -- Mode switching works correctly based on configuration -- Test report documents complete method coverage -- Tests pass with `go test -v -race ./test/integration/cache/` -- No flaky tests in contract test suite -- All tests follow project test standards diff --git a/tasks/prd-redis/_task_11.md b/tasks/prd-redis/_task_11.md deleted file mode 100644 index 8cc6ee83..00000000 --- a/tasks/prd-redis/_task_11.md +++ /dev/null @@ -1,172 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -pkg/config|cli/cmd -implementation -configuration|cli -low -config_loader|cli - - -# Task 11.0: Configuration Validation & CLI [Size: S - ≤ half-day] - -## Overview - -Add validation rules for mode configuration and update CLI commands to support the new mode configuration. This includes validation in the config loader, updates to CLI flags, and enhancements to config display and diagnostics commands. 
- - -- **ALWAYS READ** @.cursor/rules/go-coding-standards.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility -- **MUST** use `config.FromContext(ctx)` - never store config -- **MUST** use `logger.FromContext(ctx)` - never pass logger as parameter - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Mode field must be validated (standalone | distributed | empty) -- Component mode fields must be validated -- Invalid mode configurations must be rejected with clear error messages -- CLI `--mode` flag must be added to `compozy start` -- `compozy config show` must display mode configuration -- `compozy config diagnostics` must show effective mode resolution -- All validation errors must be helpful and actionable - - -## Subtasks - -- [x] 11.1 Add mode validation rules to config loader -- [x] 11.2 Add `--mode` flag to `compozy start` command -- [x] 11.3 Update `compozy config show` to display mode configuration -- [x] 11.4 Update `compozy config diagnostics` to show mode resolution -- [x] 11.5 Add CLI tests with golden files -- [x] 11.6 Add validation error message tests - -## Implementation Details - -### Validation Rules - -Add validation to `pkg/config/loader.go` that checks: -- Global `mode` field is empty 
or one of: "standalone", "distributed" -- Component `mode` fields are empty or one of: "standalone", "distributed" -- Redis standalone persistence config is valid when enabled -- Mode-specific requirements are met (e.g., Redis address when distributed) - -### CLI Updates - -Update CLI commands to support mode configuration: -- Add `--mode` flag to `compozy start` command -- Display mode in config show/diagnostics output -- Show effective mode resolution for each component - -### Relevant Files - -**Files to Update:** -- `pkg/config/loader.go` - Add mode validation rules -- `cli/cmd/start/start.go` - Add `--mode` flag -- `cli/cmd/config/show.go` - Display mode configuration -- `cli/cmd/config/diagnostics.go` - Show mode resolution - -**Files to Create:** -- `cli/cmd/config/config_test.go` - CLI tests with goldens -- `testdata/config-show-standalone.golden` - Expected output for standalone config -- `testdata/config-show-mixed.golden` - Expected output for mixed mode config - -### Dependent Files - -- `pkg/config/config.go` - Config structs (Task 1.0) -- `pkg/config/resolver.go` - Mode resolution logic (Task 1.0) - -## Deliverables - -- Mode validation rules in config loader with clear error messages -- `--mode` flag added to `compozy start` command -- `compozy config show` displays global and component modes -- `compozy config diagnostics` shows effective mode resolution for all components -- CLI tests with golden files for mode-related output -- Validation error tests for invalid mode configurations -- Help text and documentation for CLI flags -- All changes passing `make lint` and `make test` - -## Tests - -Unit tests mapped from `_tests.md` for this feature: - -### Configuration Validation Tests (`pkg/config/loader_test.go`) - -- [x] Should validate global mode field (standalone | distributed | empty) -- [x] Should validate component mode fields -- [x] Should reject invalid mode values with clear error message -- [x] Should allow empty mode values (inheritance) 
-- [x] Should validate Redis persistence configuration when enabled -- [x] Should validate mode-specific requirements (Redis addr when distributed) -- [x] Should validate snapshot interval is positive duration -- [x] Should validate data directory path is valid -- [x] Should accept valid standalone configurations -- [x] Should accept valid distributed configurations -- [x] Should accept valid mixed mode configurations - -### CLI Flag Tests (`cli/cmd/start/start_test.go`) - -- [x] Should accept `--mode standalone` flag -- [x] Should accept `--mode distributed` flag -- [x] Should reject invalid `--mode` values -- [x] Should prioritize config file over CLI flags -- [x] Should merge CLI flags with config file correctly -- [x] Should display mode in startup logs - -### Config Show Tests (`cli/cmd/config/config_test.go`) - -- [x] Should show global mode in output -- [x] Should show component modes in output -- [x] Should show Redis standalone persistence config -- [x] Should format mode configuration clearly -- [x] Should match golden file for standalone config -- [x] Should match golden file for mixed mode config - -### Config Diagnostics Tests (`cli/cmd/config/config_test.go`) - -- [x] Should display effective mode resolution for Redis -- [x] Should display effective mode resolution for Temporal -- [x] Should display effective mode resolution for MCPProxy -- [x] Should show mode inheritance clearly -- [x] Should highlight mode overrides -- [x] Should show default fallback mode - -### Error Message Tests - -- [x] Should provide helpful error for invalid global mode -- [x] Should provide helpful error for invalid component mode -- [x] Should provide helpful error for missing Redis address in distributed mode -- [x] Should provide helpful error for invalid persistence config -- [x] Should provide helpful error for invalid snapshot interval - -### Golden File Tests - -- [x] `testdata/config-show-standalone.golden` - Standalone config output -- [x] 
`testdata/config-show-mixed.golden` - Mixed mode config output -- [x] `testdata/config-diagnostics-standalone.golden` - Diagnostics output -- [x] Golden files should be regenerated with `--update-golden` flag - -## Success Criteria - -- All validation rules implemented and working correctly -- Invalid mode configurations are rejected with clear, actionable error messages -- `--mode` flag works correctly in `compozy start` command -- Config file mode takes precedence over CLI flag -- `compozy config show` displays all mode configuration clearly -- `compozy config diagnostics` shows effective mode resolution for all components -- CLI tests with golden files pass -- Golden files accurately represent expected output -- All validation tests pass with `make test` -- All lint checks pass with `make lint` -- Help text is clear and accurate -- Error messages are helpful and guide users to fixes diff --git a/tasks/prd-redis/_task_12.md b/tasks/prd-redis/_task_12.md deleted file mode 100644 index 2cb39007..00000000 --- a/tasks/prd-redis/_task_12.md +++ /dev/null @@ -1,214 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -docs/content/docs -documentation -user_documentation -medium -none - - -# Task 12.0: User Documentation [Size: M - 1-2 days] - -## Overview - -Create comprehensive user-facing documentation for standalone mode, including deployment guides, configuration references, and migration guides. This documentation should help users understand when to use standalone vs distributed mode, how to configure it, and how to migrate between modes. 
- - -- **ALWAYS READ** @.cursor/rules/go-coding-standards.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility -- Documentation must be accurate, complete, and beginner-friendly -- All code examples must be tested and work correctly -- Configuration examples must be valid YAML - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Create deployment guide for standalone mode -- Create mode configuration reference guide -- Create Redis-specific configuration reference -- Create migration guide from standalone to distributed -- Update existing docs to reference standalone mode -- Update navigation to include new docs -- All examples must be tested and working -- Documentation must follow project style and conventions - - -## Subtasks - -- [x] 12.1 Create standalone deployment guide -- [x] 12.2 Create mode configuration guide -- [x] 12.3 Create Redis configuration reference -- [x] 12.4 Create migration guide (standalone → distributed) -- [x] 12.5 Update getting started quickstart guide -- [x] 12.6 Update distributed mode deployment guide -- [x] 12.7 Update architecture documentation -- [x] 12.8 Update configuration overview -- [x] 12.9 Update troubleshooting guide -- [x] 12.10 Update FAQ -- [x] 12.11 Update 
navigation structure -- [x] 12.12 Validate all code examples and configuration samples - -## Implementation Details - -### Documentation Structure - -Create new documentation pages under `docs/content/docs/` following the existing structure and style. Update existing pages to reference standalone mode where relevant. - -### Relevant Files - -**New Documentation Files:** -- `docs/content/docs/deployment/standalone-mode.mdx` - Standalone deployment guide -- `docs/content/docs/configuration/mode-configuration.mdx` - Mode configuration guide -- `docs/content/docs/configuration/redis.mdx` - Redis configuration reference -- `docs/content/docs/guides/migrate-standalone-to-distributed.mdx` - Migration guide - -**Files to Update:** -- `docs/content/docs/deployment/distributed-mode.mdx` - Add comparison with standalone -- `docs/content/docs/getting-started/quickstart.mdx` - Add standalone quick start -- `docs/content/docs/architecture/overview.mdx` - Explain both modes -- `docs/content/docs/configuration/overview.mdx` - Reference mode configuration -- `docs/content/docs/troubleshooting/common-issues.mdx` - Add standalone troubleshooting -- `docs/content/docs/faq.mdx` - Add standalone mode FAQs -- `docs/meta.json` or navigation config - Add new pages to nav - -### Dependent Files - -- Task 3.0 deliverables - Mode-aware cache factory for examples -- Examples from Task 13.0 - Reference in documentation - -## Deliverables - -### New Documentation Pages - -**1. Standalone Deployment Guide** (`deployment/standalone-mode.mdx`): -- When to use standalone mode (use cases, benefits, limitations) -- Requirements (Go 1.25+, PostgreSQL, optional dependencies) -- Quick start installation -- Configuration examples -- Running the server -- Verifying the setup -- Performance expectations -- Troubleshooting common issues - -**2. 
Mode Configuration Guide** (`configuration/mode-configuration.mdx`): -- Overview of deployment modes (standalone vs distributed) -- Global mode configuration -- Component-specific mode overrides -- Mode resolution and inheritance rules -- Configuration examples (pure standalone, pure distributed, mixed) -- Best practices for mode selection -- Configuration validation - -**3. Redis Configuration Reference** (`configuration/redis.mdx`): -- Redis configuration structure -- Distributed mode settings (addr, password, TLS) -- Standalone mode settings (persistence config) -- Persistence options (snapshot interval, data directory) -- Mode resolution for Redis -- Performance tuning -- Monitoring and metrics -- Troubleshooting - -**4. Migration Guide** (`guides/migrate-standalone-to-distributed.mdx`): -- When to migrate (scaling triggers, use cases) -- Prerequisites (Redis setup, infrastructure) -- Step-by-step migration process -- Configuration changes required -- Data export/import (if applicable) -- Rollback procedures -- Testing and validation -- Common migration issues - -### Updated Documentation Pages - -**5. Distributed Mode Guide** - Add comparison with standalone mode -**6. Quickstart Guide** - Add standalone quick start option -**7. Architecture Overview** - Explain both deployment modes -**8. Configuration Overview** - Reference mode configuration -**9. Troubleshooting Guide** - Add standalone-specific issues -**10. 
FAQ** - Add standalone mode questions - -### Navigation Updates - -- Update docs navigation to include new pages in appropriate sections -- Ensure logical flow from getting started → deployment → configuration → guides - -## Tests - -Documentation validation checklist: - -### Content Quality - -- [ ] All documentation is accurate and complete -- [ ] Technical details match implementation -- [ ] Use cases and benefits are clearly explained -- [ ] Limitations and trade-offs are honestly presented -- [ ] Examples are relevant and helpful -- [ ] Troubleshooting covers common issues -- [ ] Links to related documentation work correctly - -### Code Examples - -- [ ] All YAML configuration examples are valid -- [ ] All CLI commands are correct and tested -- [ ] All code snippets compile and work -- [ ] Configuration examples cover common scenarios -- [ ] Examples follow project conventions - -### Configuration Examples Validation - -- [ ] Minimal standalone config works -- [ ] Standalone with persistence config works -- [ ] Mixed mode config works -- [ ] Distributed mode config works (existing, unchanged) -- [ ] Invalid configs are rejected with helpful errors - -### User Experience - -- [ ] Documentation is beginner-friendly -- [ ] Navigation is logical and intuitive -- [ ] Search finds relevant pages -- [ ] Cross-references are helpful -- [ ] Formatting is consistent with existing docs - -### Migration Guide Validation - -- [ ] Migration steps are clear and complete -- [ ] Prerequisites are listed -- [ ] Configuration changes are accurate -- [ ] Rollback procedure is provided -- [ ] Common issues are addressed - -### Completeness - -- [ ] All new features are documented -- [ ] All configuration options are documented -- [ ] All CLI flags are documented -- [ ] All error messages are explained -- [ ] All limitations are disclosed - -## Success Criteria - -- All 4 new documentation pages created and published -- All 6 existing pages updated with standalone mode references 
-- Navigation updated to include new pages -- All code examples and configuration samples tested and working -- Documentation follows project style and conventions -- Documentation is clear, accurate, and beginner-friendly -- Migration guide provides complete migration path -- Troubleshooting covers common standalone mode issues -- FAQ answers key questions about standalone mode -- Documentation builds successfully with docs tooling -- No broken links in documentation -- Search functionality finds new pages -- Peer review completed and feedback addressed diff --git a/tasks/prd-redis/_task_13.md b/tasks/prd-redis/_task_13.md deleted file mode 100644 index b80c55ea..00000000 --- a/tasks/prd-redis/_task_13.md +++ /dev/null @@ -1,289 +0,0 @@ -## markdown - -## status: completed # Options: pending, in-progress, completed, excluded - - -examples/standalone -documentation -examples_runbooks -medium -cache|config - - -# Task 13.0: Examples & Runbooks [Size: M - 1-2 days] - -## Overview - -Create comprehensive example projects and runbooks that demonstrate standalone mode usage in various scenarios. These examples should be runnable, well-documented, and cover common use cases from basic deployment to edge computing. 
- - -- **ALWAYS READ** @.cursor/rules/go-coding-standards.mdc before start -- **ALWAYS READ** the technical docs from this PRD before start -- **YOU SHOULD ALWAYS** have in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and support old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility -- All examples must be tested and working -- All configuration files must be valid -- All Docker Compose files must work correctly -- All README files must be complete and accurate - - - -# When you need information about a library or external API: -- use perplexity and context7 to find out how to properly fix/resolve this -- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase, the same for the topic param of context7 -- for context7 to use the mcp is two steps, one you will find out the library id and them you will check what you want - - - -- Create 5 example projects covering different use cases -- Each example must have complete README with setup instructions -- Each example must be independently runnable -- Docker Compose files must be provided where needed -- Integration tests must verify examples work correctly -- Examples must follow project conventions and best practices -- All examples must be documented in main docs - - -## Subtasks - -- [x] 13.1 Create basic standalone example -- [x] 13.2 Create standalone with persistence example -- [x] 13.3 Create mixed mode example -- [x] 13.4 Create edge deployment example -- [x] 13.5 Create migration demo example -- [x] 13.6 Create integration tests for all examples -- [x] 13.7 Add examples index to documentation - -## Implementation Details - -### Example Project Structure - -Each example should follow this structure: -``` -examples/standalone// -├── 
README.md # Complete setup and usage guide -├── compozy.yaml # Compozy configuration -├── docker-compose.yml # Docker Compose (if needed) -├── .env.example # Environment variables template -├── workflows/ # Sample workflows -│ └── example-workflow.yaml -└── test/ # Integration tests - └── example_test.go -``` - -### Relevant Files - -**New Example Projects:** -- `examples/standalone/basic/` - Minimal standalone deployment -- `examples/standalone/with-persistence/` - Standalone with BadgerDB snapshots -- `examples/standalone/mixed-mode/` - Hybrid deployment example -- `examples/standalone/edge-deployment/` - Edge/IoT deployment -- `examples/standalone/migration-demo/` - Migration walkthrough - -**Integration Tests:** -- `examples/standalone/test/examples_test.go` - Test all examples work - -**Documentation:** -- `docs/content/docs/examples/standalone-examples.mdx` - Examples index - -### Dependent Files - -- Task 3.0 deliverables - Mode-aware cache factory -- Task 7.0 deliverables - Snapshot manager for persistence example -- Task 12.0 deliverables - Documentation to reference examples - -## Deliverables - -### Example 1: Basic Standalone (`examples/standalone/basic/`) - -**Purpose**: Minimal standalone deployment for local development - -**Contents**: -- `compozy.yaml` - Minimal config with `mode: standalone` -- `README.md` - Setup and usage instructions -- `workflows/hello-world.yaml` - Simple workflow example -- `.env.example` - Environment variables template - -**Features Demonstrated**: -- Zero external dependencies (PostgreSQL in Docker Compose) -- Quick start for local development -- Basic agent and workflow execution - -### Example 2: With Persistence (`examples/standalone/with-persistence/`) - -**Purpose**: Standalone with BadgerDB snapshots for data persistence - -**Contents**: -- `compozy.yaml` - Config with persistence enabled -- `README.md` - Setup including persistence configuration -- `workflows/stateful-workflow.yaml` - Workflow using agent memory -- 
`docker-compose.yml` - PostgreSQL only - -**Features Demonstrated**: -- Snapshot configuration -- Data persistence across restarts -- Periodic snapshots and graceful shutdown -- State recovery after restart - -### Example 3: Mixed Mode (`examples/standalone/mixed-mode/`) - -**Purpose**: Hybrid deployment with standalone cache but external Temporal - -**Contents**: -- `compozy.yaml` - Mixed mode configuration -- `docker-compose.yml` - External Temporal server -- `README.md` - Setup for hybrid deployment -- `workflows/distributed-workflow.yaml` - Workflow leveraging Temporal - -**Features Demonstrated**: -- Global mode with component overrides -- Standalone Redis + External Temporal -- When to use mixed mode deployments -- Configuration flexibility - -### Example 4: Edge Deployment (`examples/standalone/edge-deployment/`) - -**Purpose**: Resource-constrained edge/IoT deployment - -**Contents**: -- `compozy.yaml` - Optimized for low resources -- `README.md` - Edge deployment guide -- `Dockerfile.edge` - Minimal Docker image -- `workflows/edge-workflow.yaml` - Lightweight workflow - -**Features Demonstrated**: -- Memory limits and resource constraints -- Minimal configuration -- ARM64 and x86_64 support -- Configurable retention policies -- Running without Docker - -### Example 5: Migration Demo (`examples/standalone/migration-demo/`) - -**Purpose**: Step-by-step migration from standalone to distributed - -**Contents**: -- `phase1-standalone/compozy.yaml` - Initial standalone config -- `phase2-distributed/compozy.yaml` - Final distributed config -- `phase2-distributed/docker-compose.yml` - Add Redis -- `README.md` - Complete migration walkthrough -- `migrate.sh` - Migration helper script - -**Features Demonstrated**: -- Migration triggers (when to migrate) -- Configuration changes required -- Testing migration without downtime -- Rollback procedures -- Data considerations - -### Integration Tests - -**Test File**: `examples/standalone/test/examples_test.go` - -Test 
each example project: -- Setup environment -- Start Compozy with example config -- Execute sample workflows -- Verify expected behavior -- Cleanup resources - -### Examples Documentation - -**Documentation**: `docs/content/docs/examples/standalone-examples.mdx` - -Create index page linking to all examples with: -- Overview of each example -- Use cases and target audience -- Quick start links -- Prerequisites -- Learning objectives - -## Tests - -Integration tests mapped from `_tests.md` for this feature: - -### Example 1: Basic Standalone Tests - -- [ ] Should start Compozy with basic standalone config -- [ ] Should execute hello-world workflow successfully -- [ ] Should complete workflow without external dependencies -- [ ] Should use <256MB memory for basic workload -- [ ] Should start in <5 seconds - -### Example 2: With Persistence Tests - -- [ ] Should start with persistence enabled -- [ ] Should create snapshot directory -- [ ] Should execute stateful workflow with memory -- [ ] Should persist agent memory across restarts -- [ ] Should restore state after restart -- [ ] Should take periodic snapshots -- [ ] Should snapshot on graceful shutdown - -### Example 3: Mixed Mode Tests - -- [ ] Should start with mixed mode configuration -- [ ] Should use standalone cache and external Temporal -- [ ] Should execute distributed workflow correctly -- [ ] Should honor component mode overrides -- [ ] Should connect to external Temporal server - -### Example 4: Edge Deployment Tests - -- [ ] Should start with minimal resource configuration -- [ ] Should run with <512MB memory limit -- [ ] Should execute lightweight workflow -- [ ] Should work on ARM64 architecture (if available) -- [ ] Should start without Docker (binary only) -- [ ] Should enforce retention policies - -### Example 5: Migration Demo Tests - -- [ ] Should run phase 1 (standalone) successfully -- [ ] Should export configuration from phase 1 -- [ ] Should run phase 2 (distributed) successfully -- [ ] Should 
execute same workflows in both phases -- [ ] Migration script should handle config transformation -- [ ] Should document required Redis setup - -### Documentation Tests - -- [ ] Examples index page should list all examples -- [ ] All example links should work -- [ ] All prerequisites should be accurate -- [ ] All setup instructions should be complete -- [ ] All examples should be discoverable via search - -### Quality Checks - -- [ ] All example configs are valid YAML -- [ ] All Docker Compose files work correctly -- [ ] All README files are complete and accurate -- [ ] All commands in README are tested and work -- [ ] All environment variables are documented -- [ ] All examples follow project conventions -- [ ] All examples use project-standard structure - -### Cross-Platform Testing - -- [ ] Examples work on Linux (amd64) -- [ ] Examples work on macOS (arm64/amd64) -- [ ] Edge example works on ARM64 -- [ ] Docker examples work on all platforms - -## Success Criteria - -- All 5 example projects created and working -- Each example has complete, accurate README -- Each example is independently runnable -- Docker Compose files work correctly where provided -- Integration tests pass for all examples -- All examples follow project conventions -- Examples documentation created and published -- All code examples are tested and working -- All configuration files are valid -- Examples cover diverse use cases (dev, prod, edge, migration) -- Examples are discoverable in main documentation -- Cross-platform compatibility verified -- Tests pass with `go test -v ./examples/standalone/test/` -- Examples can be used as templates for real deployments -- Migration demo provides clear, actionable migration path diff --git a/tasks/prd-redis/_tasks.md b/tasks/prd-redis/_tasks.md deleted file mode 100644 index c594f5e3..00000000 --- a/tasks/prd-redis/_tasks.md +++ /dev/null @@ -1,252 +0,0 @@ -# Redis Standalone Mode Implementation Task Summary - -## Relevant Files - -### Core 
Implementation Files - -- `pkg/config/config.go` - Add global mode and RedisConfig structs -- `pkg/config/resolver.go` - Mode resolution logic and helper methods (NEW) -- `pkg/config/loader.go` - Mode validation rules -- `engine/infra/cache/miniredis_standalone.go` - MiniredisStandalone wrapper (NEW) -- `engine/infra/cache/snapshot_manager.go` - BadgerDB snapshot persistence (NEW) -- `engine/infra/cache/mod.go` - Mode-aware cache factory (UPDATE) - -### Integration Points - -- `engine/infra/server/dependencies.go` - Update Temporal factory to use resolver -- `engine/infra/server/mcp.go` - Update MCPProxy factory to use resolver -- `engine/memory/store/redis.go` - Memory store (no changes, verify compatibility) -- `engine/resources/redis_store.go` - Resource store (no changes, verify compatibility) -- `engine/infra/server/dependencies.go` - Streaming setup (no changes, verify compatibility) - -### Documentation Files - -- `docs/content/docs/deployment/standalone-mode.mdx` - Standalone deployment guide (NEW) -- `docs/content/docs/configuration/mode-configuration.mdx` - Mode configuration guide (NEW) -- `docs/content/docs/configuration/redis.mdx` - Redis configuration reference (NEW) -- `docs/content/docs/guides/migrate-standalone-to-distributed.mdx` - Migration guide (NEW) -- `docs/content/docs/deployment/distributed-mode.mdx` - Update with comparison (UPDATE) -- `docs/content/docs/getting-started/quickstart.mdx` - Add standalone quick start (UPDATE) - -### Examples - -- `examples/standalone/basic/*` - Minimal standalone deployment -- `examples/standalone/with-persistence/*` - Standalone with BadgerDB snapshots -- `examples/standalone/mixed-mode/*` - Hybrid deployment example -- `examples/standalone/edge-deployment/*` - Edge/IoT deployment -- `examples/standalone/migration-demo/*` - Migration walkthrough - -## Tasks - -- [x] 1.0 Global Mode Configuration & Resolver (M - 1-2 days) -- [x] 2.0 MiniredisStandalone Wrapper (S - ≤ half-day) -- [x] 3.0 Mode-Aware Cache 
Factory (S - ≤ half-day) -- [x] 4.0 Memory Store Integration (M - 1 day) -- [x] 5.0 Resource Store Integration (M - 1 day) -- [x] 6.0 Streaming & Pub/Sub Integration (M - 1 day) -- [x] 7.0 Snapshot Manager Implementation (M - 1-2 days) -- [x] 8.0 Persistence Integration Tests (M - 1 day) -- [x] 9.0 End-to-End Workflow Tests (M - 1-2 days) -- [x] 10.0 Contract Tests & Validation (M - 1 day) -- [x] 11.0 Configuration Validation & CLI (S - ≤ half-day) -- [x] 12.0 User Documentation (M - 1-2 days) -- [x] 13.0 Examples & Runbooks (M - 1-2 days) - -Notes on sizing: -- S = Small (≤ half-day) -- M = Medium (1–2 days) -- L = Large (3+ days) - -## Task Design Rules - -- Each parent task is a closed deliverable: independently shippable and reviewable -- Do not split one deliverable across multiple parent tasks; avoid cross-task coupling -- Each parent task must include unit test subtasks derived from `_tests.md` for this feature -- Each generated `/_task_.md` must contain explicit Deliverables and Tests sections - -## Execution Plan - -### Critical Path (5-7 days) -Task 1.0 → Task 2.0 → Task 3.0 → Task 4.0 → Task 9.0 → Task 10.0 - -### Parallel Execution Lanes - -**Lane 1 (Config - 2 days):** -- Task 1.0 → Task 11.0 - -**Lane 2 (Core - 2-3 days):** -- Task 1.0 → Task 2.0 → Task 3.0 - -**Lane 3 (Memory Store - 3-4 days):** -- Task 3.0 → Task 4.0 → Task 9.0 - -**Lane 4 (Resource Store - 3-4 days):** -- Task 3.0 → Task 5.0 → Task 9.0 - -**Lane 5 (Streaming - 3-4 days):** -- Task 3.0 → Task 6.0 → Task 9.0 - -**Lane 6 (Persistence - 3-4 days):** -- Task 2.0 → Task 7.0 → Task 8.0 → Task 9.0 - -**Lane 7 (Validation - 1-2 days):** -- Task 9.0 → Task 10.0 - -**Lane 8 (Documentation - 1-2 days):** -- Task 3.0 → Task 12.0 - -**Lane 9 (Examples - 1-2 days):** -- Task 3.0 → Task 13.0 - -### Team Allocation Suggestion - -**With 3 developers (5-7 days total):** - -Developer 1 (Backend - Critical Path): -- Lane 1: Task 1.0 → Task 11.0 -- Lane 2: Task 2.0 → Task 3.0 -- Lane 7: Task 9.0 → Task 
10.0 - -Developer 2 (Domain Integration): -- Lane 3: Task 4.0 -- Lane 4: Task 5.0 -- Lane 5: Task 6.0 - -Developer 3 (Persistence + Content): -- Lane 6: Task 7.0 → Task 8.0 -- Lane 8: Task 12.0 -- Lane 9: Task 13.0 - -**With 2 developers (7-10 days total):** -- Dev 1: Lanes 1, 2, 7 -- Dev 2: Lanes 3, 4, 5, 6, 8, 9 - -Notes: -- All runtime code MUST use `logger.FromContext(ctx)` and `config.FromContext(ctx)` -- Run `make lint` and `make test` before marking any task as completed -- Each task includes its own tests - no separate testing phase -- Tasks 4, 5, 6 can run 100% in parallel after Task 3.0 completes -- Documentation and examples can start as soon as Task 3.0 completes - -## Batch Plan (Grouped Commits) - -### Batch 1 — Configuration Foundation -- [x] Task 1.0: Global Mode Configuration & Resolver -- [ ] Task 11.0: Configuration Validation & CLI - -**Commit Message**: `feat(config): add global mode configuration with component inheritance` - -**Why grouped**: Configuration foundation that enables all other work. Single logical feature. - ---- - -### Batch 2 — Core Miniredis Integration -- [ ] Task 2.0: MiniredisStandalone Wrapper -- [ ] Task 3.0: Mode-Aware Cache Factory - -**Commit Message**: `feat(cache): add miniredis standalone backend with mode-aware factory` - -**Why grouped**: Core cache backend implementation. Completes the miniredis integration. - ---- - -### Batch 3 — Domain Store Compatibility (Parallel) -- [ ] Task 4.0: Memory Store Integration -- [ ] Task 5.0: Resource Store Integration -- [ ] Task 6.0: Streaming & Pub/Sub Integration - -**Commit Message**: `test(cache): verify miniredis compatibility across all domain stores` - -**Why grouped**: All integration tests proving miniredis works with existing stores. Can be one commit or split into 3 if preferred. 
- ---- - -### Batch 4 — Persistence Layer -- [ ] Task 7.0: Snapshot Manager Implementation -- [ ] Task 8.0: Persistence Integration Tests - -**Commit Message**: `feat(cache): add BadgerDB snapshot persistence for standalone mode` - -**Why grouped**: Complete persistence feature with tests. Single logical enhancement. - ---- - -### Batch 5 — End-to-End Validation -- [ ] Task 9.0: End-to-End Workflow Tests -- [ ] Task 10.0: Contract Tests & Validation - -**Commit Message**: `test(standalone): add comprehensive integration and contract tests` - -**Why grouped**: Final validation suite. Ensures feature completeness. - ---- - -### Batch 6 — Documentation -- [ ] Task 12.0: User Documentation - -**Commit Message**: `docs: add standalone mode deployment and configuration guides` - -**Why grouped**: All user-facing documentation in one commit. - ---- - -### Batch 7 — Examples -- [ ] Task 13.0: Examples & Runbooks - -**Commit Message**: `examples: add standalone mode example projects and runbooks` - -**Why grouped**: All example projects together for easier review. - ---- - -## Risk Mitigation - -### High Priority Risks - -1. **Miniredis Lua Script Compatibility** - - **Risk**: Lua scripts may behave differently in miniredis - - **Mitigation**: Task 4.0 explicitly tests all Lua scripts used by memory store - - **Contingency**: Contract tests in Task 10.0 verify behavioral parity - -2. **TxPipeline Atomicity** - - **Risk**: Resource store relies on TxPipeline for atomic operations - - **Mitigation**: Task 5.0 has dedicated tests for TxPipeline and optimistic locking - - **Contingency**: Contract tests verify exact Redis behavior - -3. **Snapshot Performance Impact** - - **Risk**: Large snapshots may block operations - - **Mitigation**: Task 7.0 uses background goroutines for non-blocking snapshots - - **Validation**: Task 8.0 tests snapshot operations under load - -### Medium Priority Risks - -4. 
**Mode Configuration Confusion** - - **Risk**: Users may misconfigure mode inheritance - - **Mitigation**: Task 1.0 includes comprehensive validation; Task 12.0 provides clear docs - - **Validation**: Task 11.0 adds helpful CLI diagnostics - -5. **Data Loss Between Snapshots** - - **Risk**: In-memory data lost if crash happens between snapshots - - **Mitigation**: Documented in Task 12.0 as expected behavior; configurable intervals - - **Validation**: Task 8.0 tests edge cases and recovery scenarios - -## Success Metrics - -- [ ] All 13 tasks completed and tests passing -- [ ] `make lint` passes with zero warnings -- [ ] `make test` passes with >80% coverage for new code -- [ ] All PRD acceptance criteria met -- [ ] Memory store, resource store, and streaming work identically with miniredis -- [ ] Documentation published and examples runnable -- [ ] Migration path validated and documented - -## Dependencies - -### External Libraries (Added) -- `github.com/alicebob/miniredis/v2` - In-memory Redis server -- `github.com/dgraph-io/badger/v4` - BadgerDB for snapshot persistence - -### No Breaking Changes -- All existing Redis-based deployments continue to work -- Default mode is "distributed" for backward compatibility -- Consumer code requires ZERO changes diff --git a/tasks/prd-redis/_techspec.md b/tasks/prd-redis/_techspec.md deleted file mode 100644 index d077edbf..00000000 --- a/tasks/prd-redis/_techspec.md +++ /dev/null @@ -1,755 +0,0 @@ -# Technical Specification: Standalone Mode - Redis Alternatives - -## Executive Summary - -This specification details the implementation of standalone mode for Compozy, enabling single-process deployments without external Redis dependencies. The solution embeds **miniredis** (pure Go Redis server) for 100% Redis compatibility, with optional BadgerDB persistence layer for snapshots. This approach provides full feature parity including Lua scripts, TxPipeline operations, and Pub/Sub, with zero consumer code changes. 
- -**Key Technical Decisions:** -- **Storage Backend**: miniredis v2 (in-memory Redis server in pure Go) -- **Persistence Layer**: BadgerDB v4 for optional periodic snapshots -- **Lua Scripts**: Native support via miniredis (AppendAndTrimWithMetadataScript, etc.) -- **Transactions**: Native TxPipeline support (memory store, resource store atomicity) -- **Pub/Sub**: Native Redis Pub/Sub for streaming features -- **Mode Selection**: Factory pattern in SetupCache based on configuration from context - -## System Architecture - -### Domain Placement - -**Primary Domain**: `engine/infra/cache/` -- New adapters and providers live alongside existing Redis implementation -- Follows established package structure and naming conventions - -**Affected Domains**: -- `engine/infra/server/` - Dependency injection and mode selection logic -- `engine/memory/store/` - Memory store uses cache adapter -- `engine/resources/` - Resource store uses cache adapter -- `engine/task/services/` - Task config store uses cache adapter -- `pkg/config/` - Configuration schema additions -- `pkg/mcp-proxy/` - MCP proxy storage uses cache adapter - -**Testing Infrastructure**: -- `test/integration/cache/` - Cache adapter contract tests -- `test/integration/standalone/` - End-to-end standalone mode tests - -### Component Overview - -#### 1. MiniredisStandalone (`engine/infra/cache/miniredis_standalone.go`) -**Responsibility**: Embed and manage miniredis server lifecycle - -**Key Features**: -- Starts miniredis on random available port -- Creates go-redis client pointing to embedded server -- Full Redis protocol compatibility (Lua, TxPipeline, Pub/Sub) -- Zero emulation complexity - native Redis behavior -- Graceful shutdown with optional snapshot - -#### 2. 
SnapshotManager (`engine/infra/cache/snapshot_manager.go`) -**Responsibility**: Optional persistence layer for miniredis state - -**Key Features**: -- Periodic snapshots to BadgerDB (configurable interval) -- Snapshot on graceful shutdown -- Restore last snapshot on startup -- Non-blocking snapshot operations (background goroutine) -- Configurable via `standalone.persistence.*` config - -#### 3. Mode-Aware Factory (`engine/infra/cache/mod.go` - refactored) -**Responsibility**: Construct appropriate cache backend based on configuration - -**Key Relationships**: -- Reads configuration from `config.FromContext(ctx)` -- Uses resolver pattern: `cfg.EffectiveRedisMode()` for mode determination -- Mode resolution priority: `redis.mode` > global `mode` > "distributed" default -- Uses `logger.FromContext(ctx)` for all logging -- Returns cache.Cache with mode-appropriate implementations -- Handles cleanup and lifecycle management - -**CRITICAL PATTERN COMPLIANCE**: -- ✅ MUST use `config.FromContext(ctx)` - never store config -- ✅ MUST use `logger.FromContext(ctx)` - never pass logger as parameter -- ✅ Follow `.cursor/rules/global-config.mdc` and `.cursor/rules/logger-config.mdc` - -### Data Flow - -``` -Server Startup - ↓ -SetupCache(ctx) reads config.FromContext(ctx) - ↓ -cfg.EffectiveRedisMode() [Resolver] - ├─ Check redis.mode (explicit override) - ├─ Check global mode (inheritance) - └─ Default to "distributed" - ↓ -SetupCache(ctx) [Factory] - ├─ [mode=distributed] - │ ├─ Connect to external Redis - │ ├─ NewRedis() → cache.Redis - │ ├─ NewRedisLockManager() - │ └─ NewRedisNotificationSystem() - │ - └─ [mode=standalone] - ├─ Start miniredis (embedded Redis server) - ├─ Create go-redis client → localhost:randomPort - ├─ NewRedis() → cache.Redis (same type!) - ├─ NewRedisLockManager() (same!) - ├─ NewRedisNotificationSystem() (same!) 
- └─ Optional: NewSnapshotManager() for persistence - ↓ -Unified cache.Cache Interface (identical for both modes) - ↓ -Domain Services (ZERO changes - same go-redis client) - ├─ Memory Store (Lua scripts work natively) - ├─ Resource Store (TxPipeline works natively) - └─ Task Store, Webhook Store (all unchanged) -``` - -## Implementation Design - -### Key Insight: No Interface Changes Needed - -**The breakthrough**: Miniredis implements the Redis protocol. Consumer code already uses `cache.RedisInterface`, which is just the go-redis client interface. We simply point the go-redis client at an embedded miniredis server instead of an external Redis server. - -```go -// NO NEW INTERFACES - We use existing cache.RedisInterface -// engine/infra/cache/redis.go (already exists) - -type RedisInterface interface { - // Already defined with ~48 methods including: - Get(ctx context.Context, key string) *redis.StringCmd - Set(ctx context.Context, key string, value any, ttl time.Duration) *redis.StatusCmd - Eval(ctx context.Context, script string, keys []string, args ...any) *redis.Cmd - Subscribe(ctx context.Context, channels ...string) *redis.PubSub - TxPipeline() redis.Pipeliner - // ... all Redis commands -} - -// Consumer code is UNCHANGED -// engine/memory/store/redis.go -type RedisMemoryStore struct { - client cache.RedisInterface // Same interface, different backend! 
-} - -// Works identically whether client points to: -// - External Redis server (distributed mode) -// - Embedded miniredis (standalone mode) -``` - -### MiniredisStandalone Implementation (CORRECT PATTERNS) - -```go -// engine/infra/cache/miniredis_standalone.go - -type MiniredisStandalone struct { - server *miniredis.Miniredis - client *redis.Client - snapshot *SnapshotManager - closed atomic.Bool -} - -// ✅ CORRECT: No config stored, retrieved from context -func NewMiniredisStandalone(ctx context.Context) (*MiniredisStandalone, error) { - log := logger.FromContext(ctx) // ✅ MUST use context pattern - cfg := config.FromContext(ctx) // ✅ MUST use context pattern - - // Start embedded Redis server - mr := miniredis.NewMiniRedis() - if err := mr.Start(); err != nil { - return nil, fmt.Errorf("start miniredis: %w", err) - } - - log.Info("Started embedded Redis server", - "addr", mr.Addr(), - "mode", "standalone", - ) - - // Create standard go-redis client pointing to embedded server - client := redis.NewClient(&redis.Options{ - Addr: mr.Addr(), - }) - - // Test connection - if err := client.Ping(ctx).Err(); err != nil { - mr.Close() - return nil, fmt.Errorf("ping miniredis: %w", err) - } - - standalone := &MiniredisStandalone{ - server: mr, - client: client, - } - - // Initialize optional snapshot manager - if cfg.Redis.Standalone.Persistence.Enabled { - log.Info("Initializing persistence layer", - "data_dir", cfg.Redis.Standalone.Persistence.DataDir, - "snapshot_interval", cfg.Redis.Standalone.Persistence.SnapshotInterval, - ) - - snapshot, err := NewSnapshotManager(ctx, mr, cfg.Redis.Standalone.Persistence) - if err != nil { - standalone.Close() - return nil, fmt.Errorf("create snapshot manager: %w", err) - } - standalone.snapshot = snapshot - - // Restore last snapshot if exists - if cfg.Redis.Standalone.Persistence.RestoreOnStartup { - if err := snapshot.Restore(ctx); err != nil { - log.Warn("Failed to restore snapshot", "error", err) - } else { - 
log.Info("Restored last snapshot") - } - } - - // Start periodic snapshots - snapshot.StartPeriodicSnapshots(ctx) - } - - return standalone, nil -} - -func (m *MiniredisStandalone) Client() *redis.Client { - return m.client -} - -func (m *MiniredisStandalone) Close(ctx context.Context) error { - if !m.closed.CompareAndSwap(false, true) { - return nil - } - - log := logger.FromContext(ctx) // ✅ MUST use context pattern - cfg := config.FromContext(ctx) // ✅ MUST use context pattern - - // Snapshot before shutdown if enabled - if m.snapshot != nil && cfg.Redis.Standalone.Persistence.SnapshotOnShutdown { - log.Info("Taking final snapshot before shutdown") - if err := m.snapshot.Snapshot(ctx); err != nil { - log.Error("Failed to snapshot on shutdown", "error", err) - } - m.snapshot.Stop() - } - - // Close connections - if err := m.client.Close(); err != nil { - log.Warn("Failed to close Redis client", "error", err) - } - - m.server.Close() - log.Info("Closed embedded Redis server") - - return nil -} -``` - -### Data Models - -#### Configuration Schema - -```go -// pkg/config/config.go - ADD - -type Config struct { - // ... existing fields ... 
- - // Mode controls global deployment model (applies to all components by default) - // "distributed" (default): External services required - // "standalone": Embedded services, single-process - // Components can override with their own mode field - Mode string `koanf:"mode" json:"mode" yaml:"mode" mapstructure:"mode" validate:"omitempty,oneof=standalone distributed"` - - // Redis cache configuration - Redis RedisConfig `koanf:"redis" json:"redis" yaml:"redis" mapstructure:"redis"` -} - -type RedisConfig struct { - // Mode controls Redis deployment model - // "" (empty): Inherit from global Config.Mode - // "distributed": Use external Redis (explicit override) - // "standalone": Use embedded miniredis (explicit override) - Mode string `koanf:"mode" json:"mode" yaml:"mode" mapstructure:"mode" validate:"omitempty,oneof=standalone distributed"` - - // Addr is the Redis server address (used when mode = "distributed") - Addr string `koanf:"addr" json:"addr" yaml:"addr" mapstructure:"addr"` - - // Password for Redis authentication - Password config.SensitiveString `koanf:"password" json:"password" yaml:"password" mapstructure:"password" sensitive:"true"` - - // Standalone configuration (used when mode = "standalone") - Standalone RedisStandaloneConfig `koanf:"standalone" json:"standalone" yaml:"standalone" mapstructure:"standalone"` -} - -type RedisStandaloneConfig struct { - // Persistence configuration for optional BadgerDB snapshots - Persistence RedisPersistenceConfig `koanf:"persistence" json:"persistence" yaml:"persistence" mapstructure:"persistence"` -} - -type RedisPersistenceConfig struct { - // Enabled controls whether snapshots are taken - Enabled bool `koanf:"enabled" json:"enabled" yaml:"enabled" mapstructure:"enabled"` - - // DataDir is the directory for BadgerDB snapshot storage - DataDir string `koanf:"data_dir" json:"data_dir" yaml:"data_dir" mapstructure:"data_dir"` - - // SnapshotInterval controls how often snapshots are taken - SnapshotInterval 
time.Duration `koanf:"snapshot_interval" json:"snapshot_interval" yaml:"snapshot_interval" mapstructure:"snapshot_interval"` - - // SnapshotOnShutdown controls whether to snapshot during graceful shutdown - SnapshotOnShutdown bool `koanf:"snapshot_on_shutdown" json:"snapshot_on_shutdown" yaml:"snapshot_on_shutdown" mapstructure:"snapshot_on_shutdown"` - - // RestoreOnStartup controls whether to restore last snapshot on startup - RestoreOnStartup bool `koanf:"restore_on_startup" json:"restore_on_startup" yaml:"restore_on_startup" mapstructure:"restore_on_startup"` -} -``` - -#### Mode Resolution Logic - -```go -// pkg/config/resolver.go - NEW FILE - -package config - -// ResolveMode determines the effective deployment mode for a component. -// -// Resolution priority: -// 1. Component mode (if explicitly set) -// 2. Global mode (if set in Config.Mode) -// 3. Default fallback ("distributed") -func ResolveMode(cfg *Config, componentMode string) string { - if componentMode != "" { - return componentMode // Explicit component override - } - if cfg.Mode != "" { - return cfg.Mode // Inherit from global - } - return "distributed" // Default fallback -} - -// EffectiveRedisMode returns the resolved Redis deployment mode. -// Returns "standalone" or "distributed" -func (cfg *Config) EffectiveRedisMode() string { - return ResolveMode(cfg, cfg.Redis.Mode) -} - -// EffectiveTemporalMode returns the resolved Temporal deployment mode. -// Returns "standalone" or "remote" (normalizes "distributed" -> "remote") -func (cfg *Config) EffectiveTemporalMode() string { - mode := ResolveMode(cfg, cfg.Temporal.Mode) - if mode == "distributed" { - return "remote" // Temporal uses "remote" not "distributed" - } - return mode -} - -// EffectiveMCPProxyMode returns the resolved MCPProxy deployment mode. 
-// Returns "standalone" or "distributed" -func (cfg *Config) EffectiveMCPProxyMode() string { - return ResolveMode(cfg, cfg.MCPProxy.Mode) -} -``` - -### API Endpoints - -No new API endpoints required. Existing endpoints work transparently with either backend. The go-redis client interface is identical regardless of whether it connects to external Redis or embedded miniredis. - -## Integration Points - -### External Libraries Assessment - -#### miniredis v2 -- **Repository**: github.com/alicebob/miniredis/v2 -- **License**: MIT (permissive) -- **Stars**: 1k+ GitHub stars -- **Maintenance**: Actively maintained -- **Maturity**: Production-grade, widely used for testing Redis applications -- **Performance**: In-memory, ~100k+ ops/sec -- **Pros**: - - **100% Redis compatibility** (Lua, TxPipeline, Pub/Sub, all data structures) - - Pure Go, no external dependencies - - Zero emulation complexity - native Redis protocol - - Well-tested, used by thousands of projects -- **Cons**: In-memory only (mitigated by optional BadgerDB snapshots) -- **Integration Fit**: **PERFECT** - drop-in replacement for external Redis - -#### BadgerDB v4 (For Persistence Only) -- **Repository**: github.com/dgraph-io/badger/v4 -- **License**: MPL-2.0 (permissive) -- **Stars**: 13k+ GitHub stars -- **Maintenance**: Actively maintained by Dgraph team -- **Maturity**: Production-grade -- **Usage**: Optional snapshot storage only (not primary storage) -- **Pros**: Pure Go, ACID transactions, proven reliability -- **Integration Fit**: Excellent for snapshot persistence - -**Build vs Buy Decision**: **BUY** - miniredis eliminates 8-12 weeks of complex emulation work. It's a production-ready library that provides full Redis compatibility with zero implementation complexity. 
- -### Migration Considerations - -**From Redis to BadgerDB**: Data export/import utilities (future work) -**From BadgerDB to Redis**: Configuration change only (data not portable) - -## Impact Analysis - -| Affected Component | Type of Impact | Description & Risk Level | Required Action | -|-------------------|----------------|--------------------------|----------------| -| `engine/infra/cache/` | New Code | Add miniredis wrapper and snapshot manager. **Very Low risk** - simple integration. | Create 2 new files | -| `engine/infra/server/dependencies.go` | Logic Change | Add mode-aware cache setup. **Very Low risk** - small conditional. | Update SetupCache | -| `pkg/config/` | Schema Addition | Add standalone config section. **Very Low risk** - new fields only. | Update Config struct | -| `engine/memory/store/` | **No Change** | Already uses cache.RedisInterface. **Zero risk**. | **None** - works with miniredis automatically | -| `engine/resources/` | **No Change** | Already uses cache.RedisInterface. **Zero risk**. | **None** - works with miniredis automatically | -| `engine/task/services/` | **No Change** | Already uses cache.RedisInterface. **Zero risk**. | **None** - works with miniredis automatically | -| `pkg/mcp-proxy/` | **No Change** | Storage already abstracted. **Zero risk**. | **None** - transparent | -| Documentation | New Content | Add standalone mode guides. **Very Low risk**. | Write user documentation | -| Tests | New Tests | Verify Lua scripts, TxPipeline, Pub/Sub work. **Low risk**. | Integration tests only | - -**Performance Impact**: Standalone mode expected to have ~similar performance to Redis for in-memory operations. Snapshot operations may cause brief latency spikes but are configurable. - -## Testing Approach - -### Unit Tests - -**Critical Test Scenarios**: -1. MiniredisStandalone starts and stops cleanly -2. Snapshot manager creates and restores snapshots -3. Configuration validation works correctly -4. 
Graceful shutdown triggers final snapshot -5. Periodic snapshots run without blocking - -**Mock Requirements**: None - use real miniredis and temp directories for BadgerDB - -**Test Structure**: -```go -// engine/infra/cache/miniredis_standalone_test.go - -func TestMiniredisStandalone_Lifecycle(t *testing.T) { - t.Run("Should start embedded Redis server", func(t *testing.T) { - ctx := t.Context() // ✅ Use t.Context() in tests - mr, err := NewMiniredisStandalone(ctx) - require.NoError(t, err) - defer mr.Close(ctx) - - // Verify connection works - err = mr.Client().Ping(ctx).Err() - assert.NoError(t, err) - }) -} - -func TestSnapshotManager_Persistence(t *testing.T) { - t.Run("Should snapshot and restore miniredis state", func(t *testing.T) { - // Test snapshot/restore cycle - }) -} -``` - -### Integration Tests - -**Test Location**: `test/integration/standalone/` - -**Test Scenarios**: -1. **End-to-End Workflow Execution**: Run complete workflow in standalone mode -2. **Lua Scripts Work**: Verify AppendAndTrimWithMetadataScript executes (memory store) -3. **TxPipeline Works**: Verify atomic multi-key operations (resource store) -4. **Pub/Sub Works**: Test workflow and task event notifications (streaming) -5. **Snapshot Persistence**: Verify state survives restarts -6. **Mode Switching**: Start standalone, switch to distributed (config only) - -**Test Data Requirements**: -- Sample workflows with agents, tasks, and tools -- Conversation history with multiple messages -- Resource configurations with versioning - -**Validation Tests**: Verify miniredis behaves identically to external Redis for all consumer operations - -## Development Sequencing - -### Build Order (1-2 Weeks Total) - -#### Phase 1: Core Integration (Days 1-2) -1. **Add miniredis Dependency** - Add to go.mod (`go get github.com/alicebob/miniredis/v2`) -2. **Add Configuration Schema** - Add global `mode`, `RedisConfig` with mode and standalone sections -3. 
**Create Mode Resolver** - Implement `pkg/config/resolver.go` with resolution logic and helper methods -4. **Create MiniredisStandalone** - Wrapper for embedded Redis server (~100 lines) -5. **Update SetupCache Factory** - Use `cfg.EffectiveRedisMode()` for mode detection -6. **Update Temporal Factory** - Use `cfg.EffectiveTemporalMode()` in `maybeStartStandaloneTemporal()` -7. **Update MCPProxy Factory** - Use `cfg.EffectiveMCPProxyMode()` in `shouldEmbedMCPProxy()` -8. **Basic Integration Test** - Verify miniredis works and mode inheritance functions correctly - -**Why First**: Establishes foundation; proves miniredis compatibility and unified mode inheritance pattern immediately - -#### Phase 2: Persistence Layer (Days 3-4) -6. **Create SnapshotManager** - BadgerDB integration for periodic snapshots -7. **Implement Snapshot Logic** - Save/restore miniredis state -8. **Add Graceful Shutdown** - Snapshot before exit -9. **Test Snapshot Lifecycle** - Verify persistence across restarts - -**Why Second**: Optional feature; can be developed/tested independently - -#### Phase 3: Validation (Days 5-6) -10. **Verify Lua Scripts** - Test AppendAndTrimWithMetadataScript (memory store) -11. **Verify TxPipeline** - Test atomic operations (resource store) -12. **Verify Pub/Sub** - Test event notifications (streaming) -13. **End-to-End Tests** - Complete workflow execution in standalone mode -14. **Performance Benchmarking** - Compare standalone vs distributed - -**Why Third**: Validates that no consumer code changes are needed - -#### Phase 4: Documentation & Polish (Day 7) -15. **User Documentation** - Deployment guide, configuration examples -16. **Migration Guide** - Document standalone → distributed transition -17. **CLI Improvements** - Add `--standalone` flag and error messages -18. **Example Configurations** - Sample compozy.yaml files - -**Why Last**: User-facing polish after implementation is proven - -### Technical Dependencies - -**Blocking Dependencies**: -1. 
None - standalone mode is additive, doesn't break existing code -2. miniredis v2 must be added to go.mod -3. BadgerDB v4 for optional persistence (only if snapshots enabled) - -**Optional Dependencies**: -- Qdrant for vector search (already optional in current architecture) - -### Critical Path - -``` -Config Schema + Resolver (0.5d) → MiniredisStandalone (1d) → - Factory Pattern Update (0.5d) → Update Temporal/MCP Factories (0.5d) → - Basic Integration Test (0.5d) → SnapshotManager (1d) → Validation Tests (1d) → - Documentation (1d) - -Total: ~6-8 days (1-2 weeks with buffer) -``` - -### Parallel Workstreams - -**Stream A (Core)**: Days 1-2 -- Config schema (global mode + RedisConfig) -- Mode resolver (pkg/config/resolver.go) -- MiniredisStandalone wrapper -- Factory updates (cache, temporal, mcpproxy) -- Basic tests - -**Stream B (Persistence - Optional)**: Days 3-4 -- SnapshotManager -- BadgerDB integration -- Snapshot tests - -**Stream C (Validation)**: Days 5-6 -- Lua script testing -- TxPipeline testing -- Pub/Sub testing -- E2E integration - -**Stream D (Documentation)**: Day 7 (can start anytime) -- User guides -- Migration documentation -- Example configurations - -## Monitoring & Observability - -### Metrics (Prometheus Format) - -```go -// engine/infra/cache/metrics.go - -var ( - cacheOperations = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "compozy_cache_operations_total", - Help: "Total cache operations by backend and operation type", - }, - []string{"backend", "operation", "status"}, // backend=miniredis|redis - ) - - cacheOperationDuration = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "compozy_cache_operation_duration_seconds", - Help: "Cache operation latency", - Buckets: prometheus.DefBuckets, - }, - []string{"backend", "operation"}, - ) - - standaloneSnapshotDuration = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Name: "compozy_standalone_snapshot_duration_seconds", - Help: "Snapshot operation 
duration", - }, - ) - - standaloneSnapshotSize = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "compozy_standalone_snapshot_size_bytes", - Help: "Last snapshot size in bytes", - }, - ) -) -``` - -### Key Logs (Using Context Patterns) - -```go -// ✅ CORRECT: Always use logger.FromContext(ctx) -log := logger.FromContext(ctx) - -log.Info("Cache backend initialized", - "backend", "miniredis", - "mode", "standalone", - "persistence", cfg.Redis.Standalone.Persistence.Enabled) - -log.Info("Taking periodic snapshot", - "interval", cfg.Redis.Standalone.Persistence.SnapshotInterval) - -log.Warn("Snapshot operation slow", - "duration_ms", duration.Milliseconds(), - "size_mb", sizeMB) - -log.Error("Snapshot failed", - "error", err, - "operation", "periodic_snapshot") -``` - -### Grafana Dashboard Updates - -Add panel to existing dashboards: -- **Cache Operations by Backend**: Line graph showing ops/sec for miniredis vs redis -- **Snapshot Operations**: Success/failure rate over time -- **Snapshot Duration**: Histogram of snapshot operation latency -- **Snapshot Size**: Gauge showing last snapshot size - -## Technical Considerations - -### Key Decisions - -**Decision 1: miniredis over BadgerDB Emulation** -- **Rationale**: - - 100% Redis compatibility (Lua, TxPipeline, Pub/Sub all work natively) - - Production-proven library used by thousands of projects - - Eliminates 8-12 weeks of complex emulation work - - Zero risk of behavioral differences from Redis -- **Trade-offs**: In-memory only (mitigated by optional BadgerDB snapshots) -- **Alternatives Rejected**: - - BadgerDB with emulation: 8-12 weeks, high complexity, emulation bugs likely - - Hybrid approach: Still 4-6 weeks, partial emulation needed - -**Decision 2: Optional Persistence Layer** -- **Rationale**: Target users (dev, small teams) can tolerate brief data loss between snapshots -- **Trade-offs**: Not suitable for strict durability requirements (use distributed mode instead) -- **Alternatives Rejected**: - - 
WAL (Write-Ahead Log): Adds complexity, reduces performance - - Synchronous snapshots: Block operations, poor user experience - -**Decision 3: Global Mode with Component Inheritance** -- **Rationale**: - - Simple UX: Set `mode: standalone` once at top level - - Flexible: Per-component overrides for mixed deployments - - Non-breaking: Existing configs without global mode continue working - - Follows Go composition patterns (not inheritance) -- **Trade-offs**: Requires mode resolver pattern (minimal complexity) -- **Alternatives Rejected**: - - Per-component only: Repetitive configuration, easy to misconfigure - - Deployment profiles: Overkill for simple mode selection - -**Decision 4: Context-First Patterns (MANDATORY)** -- **Rationale**: Project standards require `config.FromContext(ctx)` and `logger.FromContext(ctx)` -- **Trade-offs**: None - this is the established project pattern -- **Compliance**: All code examples follow `.cursor/rules/global-config.mdc` and `.cursor/rules/logger-config.mdc` - -### Known Risks - -**Risk 1: Data Loss Between Snapshots** -- **Challenge**: In-memory storage means data since last snapshot lost on crash -- **Mitigation**: - - Default 5-minute snapshot interval minimizes exposure - - Graceful shutdown always saves snapshot - - Target users (dev, small teams) can tolerate this - - Production deployments use distributed mode - - Document this limitation clearly -- **Monitoring**: Track snapshot success rate, alert on failures - -**Risk 2: Memory Growth Over Time** -- **Challenge**: Long-running instances may accumulate data in memory -- **Mitigation**: - - Existing TTL configuration on cached data - - Document expected memory usage for typical workloads - - Add optional memory limit configuration for miniredis - - Monitor memory metrics -- **Monitoring**: Alert when memory usage > 80% of configured limit - -**Risk 3: Snapshot Performance Impact** -- **Challenge**: Large snapshots may briefly impact performance -- **Mitigation**: - - 
Snapshots run in background goroutine (non-blocking) - - Use streaming writes to BadgerDB to minimize memory - - Configurable snapshot interval - - Skip snapshots if persistence disabled -- **Monitoring**: Track snapshot duration, alert if > 5 seconds - -### Special Requirements - -**Performance Requirements**: -- Single-user workflow latency: Similar to external Redis (in-memory) -- Throughput: Support 50+ concurrent workflows (standalone target workload) -- Memory: Baseline + ~500MB for miniredis data -- Disk: Only for optional snapshots (~1-2GB for typical deployment) - -**Security Considerations**: -- BadgerDB encryption at rest for snapshots (optional, via EncryptionKey) -- File permissions: Ensure data directory is readable only by process user -- No network exposure beyond localhost (miniredis binds to 127.0.0.1) - -### Standards Compliance - -**Architecture Principles** (from .cursor/rules/architecture.mdc): -- ✅ **SOLID**: Wrapper pattern (OCP), Interface reuse (ISP), Context injection (DIP) -- ✅ **Clean Architecture**: Domain layer unchanged, wrapper in infrastructure layer -- ✅ **DRY**: Reuse existing RedisInterface, zero duplication - -**Go Coding Standards** (from .cursor/rules/go-coding-standards.mdc): -- ✅ **Error Handling**: Context-aware errors, proper wrapping -- ✅ **Context Propagation**: context.Context as first parameter everywhere -- ✅ **Resource Cleanup**: Defer patterns, cleanup functions -- ✅ **Concurrency**: Proper goroutine lifecycle for snapshot manager - -**Critical Pattern Compliance** (MANDATORY): -- ✅ **Config Access**: MUST use `config.FromContext(ctx)` - never store config -- ✅ **Logger Access**: MUST use `logger.FromContext(ctx)` - never pass as parameter -- ✅ **Test Context**: MUST use `t.Context()` in tests, never `context.Background()` - -**Testing Standards** (from .cursor/rules/test-standards.mdc): -- ✅ **Unit Tests**: `t.Run("Should...")` pattern, testify assertions -- ✅ **Integration Tests**: `test/integration/` directory, 
cleanup in t.Cleanup() -- ✅ **No Mocks**: Use real miniredis with temp directories for BadgerDB - -**No Breaking Changes**: -- ✅ Existing Redis deployments continue to work unchanged -- ✅ Default mode is "distributed" for backward compatibility -- ✅ Consumer code ZERO changes (same go-redis client interface) - -## Libraries Assessment Summary - -| Library | License | Stars | Maintenance | Decision | Rationale | -|---------|---------|-------|-------------|----------|-----------| -| **miniredis v2** | MIT | 1k+ | Active | **✅ ADOPT** | **Primary choice** - 100% Redis compatibility, eliminates 8-12 weeks emulation work, production-proven | -| BadgerDB v4 | MPL-2.0 | 13k+ | Active (Dgraph) | **Adopt** | Optional persistence layer for snapshots only | -| Qdrant | Apache-2.0 | 20k+ | Active | **Keep Optional** | Vector DB already optional, works in standalone mode | - -**License Compatibility**: All licenses are permissive and compatible with Compozy's BSL-1.1 license. - -**Implementation Complexity Comparison**: -- **miniredis approach**: 1-2 weeks, ~200 lines of code, zero emulation -- **BadgerDB emulation approach** (rejected): 8-12 weeks, ~5,000+ lines, high complexity - ---- - -**Technical Specification Version**: 2.1 -**Created**: 2025-01-27 -**Updated**: 2025-10-28 (Major revision: miniredis approach + global mode configuration) -**Zen MCP Analysis**: Completed with Gemini 2.5 Pro -**Expert Review**: Validated miniredis approach, confirmed elimination of critical issues -**Configuration Pattern**: Global mode with component inheritance (composition over inheritance) -**Status**: ✅ Ready for Implementation with Global Mode Configuration Pattern - diff --git a/tasks/prd-redis/_tests.md b/tasks/prd-redis/_tests.md deleted file mode 100644 index b515a85e..00000000 --- a/tasks/prd-redis/_tests.md +++ /dev/null @@ -1,672 +0,0 @@ -# Tests Plan: Standalone Mode - Redis Alternatives - -## Guiding Principles - -- Follow `.cursor/rules/test-standards.mdc` and project testing 
rules -- Use `t.Run("Should …")` naming convention with testify assertions -- Use `t.Context()` for test contexts (never `context.Background()`) -- No mocks for internal components - use real miniredis and temp directories -- Mock external services only when necessary -- Ensure all tests are deterministic and can run in parallel where safe - -## Coverage Matrix - -Map PRD acceptance criteria to test files: - -| PRD Criterion | Test File | Test Type | -|---------------|-----------|-----------| -| FR-1: Embedded Redis (miniredis) | `engine/infra/cache/miniredis_standalone_test.go` | Unit | -| FR-2: Optional Persistence | `engine/infra/cache/snapshot_manager_test.go` | Unit | -| FR-3: Memory Store Compatibility | `engine/memory/store/redis_test.go` | Integration | -| FR-4: Resource Store Compatibility | `engine/resources/redis_store_test.go` | Integration | -| FR-5: Streaming Features | `test/integration/standalone/streaming_test.go` | Integration | -| FR-6: Configuration Management | `pkg/config/resolver_test.go` | Unit | -| Mode Resolution Logic | `pkg/config/resolver_test.go` | Unit | -| Factory Pattern | `engine/infra/cache/mod_test.go` | Unit | -| End-to-End Workflow | `test/integration/standalone/workflow_test.go` | Integration | - -## Unit Tests - -### pkg/config/resolver_test.go (NEW) -**Purpose**: Test mode resolution logic and helper methods - -- Should return component mode when explicitly set -- Should return global mode when component mode is empty -- Should return "distributed" default when both are empty -- Should normalize "distributed" to "remote" for Temporal -- Should validate mode values against allowed enums -- Should handle mixed mode configurations correctly -- Should resolve effective modes for all components (Redis, Temporal, MCPProxy) - -**Test Structure**: -```go -func TestResolveMode(t *testing.T) { - t.Run("Should return component mode when explicitly set", func(t *testing.T) { - cfg := &Config{ - Mode: "standalone", - Redis: 
RedisConfig{Mode: "distributed"}, - } - result := cfg.EffectiveRedisMode() - assert.Equal(t, "distributed", result) - }) - - t.Run("Should inherit from global mode", func(t *testing.T) { - cfg := &Config{ - Mode: "standalone", - Redis: RedisConfig{Mode: ""}, - } - result := cfg.EffectiveRedisMode() - assert.Equal(t, "standalone", result) - }) - - t.Run("Should default to distributed", func(t *testing.T) { - cfg := &Config{ - Mode: "", - Redis: RedisConfig{Mode: ""}, - } - result := cfg.EffectiveRedisMode() - assert.Equal(t, "distributed", result) - }) -} - -func TestEffectiveTemporalMode(t *testing.T) { - t.Run("Should normalize distributed to remote for Temporal", func(t *testing.T) { - cfg := &Config{Mode: "distributed"} - result := cfg.EffectiveTemporalMode() - assert.Equal(t, "remote", result) - }) -} -``` - -### engine/infra/cache/miniredis_standalone_test.go (NEW) -**Purpose**: Test MiniredisStandalone lifecycle and operations - -- Should start embedded Redis server successfully -- Should create working go-redis client connected to miniredis -- Should handle startup errors gracefully -- Should close cleanly without errors -- Should support all Redis operations (Get, Set, Eval, TxPipeline) -- Should initialize snapshot manager when persistence enabled -- Should skip snapshot manager when persistence disabled -- Should restore snapshot on startup when configured -- Should snapshot on shutdown when configured - -**Test Structure**: -```go -func TestMiniredisStandalone_Lifecycle(t *testing.T) { - t.Run("Should start and stop embedded Redis server", func(t *testing.T) { - ctx := t.Context() - // Setup test config with persistence disabled - cfg := testConfig(false) - ctx = config.ContextWithManager(ctx, cfg) - - mr, err := NewMiniredisStandalone(ctx) - require.NoError(t, err) - defer mr.Close(ctx) - - // Verify client works - err = mr.Client().Ping(ctx).Err() - assert.NoError(t, err) - }) -} - -func TestMiniredisStandalone_Operations(t *testing.T) { - 
t.Run("Should support basic Redis operations", func(t *testing.T) { - ctx := t.Context() - mr := setupMiniredis(ctx, t) - defer mr.Close(ctx) - - // Test Set/Get - err := mr.Client().Set(ctx, "key", "value", 0).Err() - require.NoError(t, err) - - val, err := mr.Client().Get(ctx, "key").Result() - require.NoError(t, err) - assert.Equal(t, "value", val) - }) - - t.Run("Should support Lua scripts", func(t *testing.T) { - // Test Eval operation - }) - - t.Run("Should support TxPipeline", func(t *testing.T) { - // Test transaction pipeline - }) -} -``` - -### engine/infra/cache/snapshot_manager_test.go (NEW) -**Purpose**: Test BadgerDB snapshot and restore operations - -- Should create snapshots of miniredis state -- Should restore snapshots to miniredis -- Should handle snapshot failures gracefully -- Should run periodic snapshots at configured interval -- Should stop periodic snapshots on manager close -- Should create snapshot directory if missing -- Should handle corrupt snapshots gracefully -- Should track snapshot metrics (size, duration, count) - -**Test Structure**: -```go -func TestSnapshotManager_Lifecycle(t *testing.T) { - t.Run("Should snapshot and restore miniredis state", func(t *testing.T) { - ctx := t.Context() - tempDir := t.TempDir() - - // Create miniredis with data - mr := setupMiniredisWithData(ctx, t) - - // Create snapshot manager - sm, err := NewSnapshotManager(ctx, mr, persistenceConfig(tempDir)) - require.NoError(t, err) - defer sm.Close() - - // Take snapshot - err = sm.Snapshot(ctx) - require.NoError(t, err) - - // Create new miniredis - mr2 := setupMiniredis(ctx, t) - defer mr2.Close(ctx) - - // Restore snapshot - sm2, _ := NewSnapshotManager(ctx, mr2, persistenceConfig(tempDir)) - err = sm2.Restore(ctx) - require.NoError(t, err) - - // Verify data restored - verifyDataRestored(ctx, t, mr2) - }) -} - -func TestSnapshotManager_Periodic(t *testing.T) { - t.Run("Should take periodic snapshots", func(t *testing.T) { - // Test with short interval 
(1s for testing) - // Verify multiple snapshots created - }) - - t.Run("Should stop periodic snapshots on close", func(t *testing.T) { - // Test cleanup - }) -} -``` - -### engine/infra/cache/mod_test.go (UPDATE) -**Purpose**: Test mode-aware factory pattern - -- Should create external Redis client when mode is distributed -- Should create miniredis client when mode is standalone -- Should respect mode resolution from config -- Should initialize snapshot manager for standalone with persistence -- Should skip snapshot manager for standalone without persistence -- Should return proper cleanup functions -- Should handle startup errors for both modes - -**Test Structure**: -```go -func TestSetupCache_ModeAware(t *testing.T) { - t.Run("Should create external Redis in distributed mode", func(t *testing.T) { - ctx := t.Context() - cfg := configWithMode("distributed") - ctx = config.ContextWithManager(ctx, cfg) - - cache, cleanup, err := SetupCache(ctx) - require.NoError(t, err) - defer cleanup() - - assert.NotNil(t, cache) - // Verify it's external Redis - }) - - t.Run("Should create miniredis in standalone mode", func(t *testing.T) { - ctx := t.Context() - cfg := configWithMode("standalone") - ctx = config.ContextWithManager(ctx, cfg) - - cache, cleanup, err := SetupCache(ctx) - require.NoError(t, err) - defer cleanup() - - assert.NotNil(t, cache) - // Verify it's miniredis - }) -} -``` - -### pkg/config/loader_test.go (UPDATE) -**Purpose**: Test configuration validation for modes - -- Should validate global mode field (standalone | distributed) -- Should validate component mode fields -- Should reject invalid mode values -- Should allow empty mode values (inheritance) -- Should validate Redis persistence configuration -- Should validate mode-specific requirements (e.g., MCPProxy port in standalone) - -## Integration Tests - -### test/integration/standalone/workflow_test.go (NEW) -**Purpose**: End-to-end workflow execution in standalone mode - -- Should execute complete 
workflow with agent, tasks, and tools -- Should persist conversation history across workflow steps -- Should handle workflow state correctly -- Should execute multiple workflows concurrently -- Should handle workflow errors and retries - -**Test Structure**: -```go -func TestStandaloneWorkflow_EndToEnd(t *testing.T) { - t.Run("Should execute workflow in standalone mode", func(t *testing.T) { - ctx := t.Context() - - // Setup test environment with standalone config - env := setupStandaloneTestEnv(ctx, t) - defer env.Cleanup() - - // Load and execute workflow - result, err := env.ExecuteWorkflow(ctx, "test-workflow") - require.NoError(t, err) - assert.NotNil(t, result) - - // Verify workflow completed successfully - assert.Equal(t, "completed", result.Status) - }) -} -``` - -### test/integration/standalone/memory_store_test.go (NEW) -**Purpose**: Verify memory store compatibility with miniredis - -- Should append messages to conversation history -- Should trim conversation history at max length -- Should preserve message metadata -- Should handle concurrent message appends -- Should execute Lua scripts (AppendAndTrimWithMetadataScript) correctly -- Should maintain consistency across operations - -**Test Structure**: -```go -func TestMemoryStore_MiniredisCompatibility(t *testing.T) { - t.Run("Should execute Lua scripts natively", func(t *testing.T) { - ctx := t.Context() - store := setupMemoryStoreWithMiniredis(ctx, t) - - // Test AppendAndTrimWithMetadataScript - err := store.AppendMessage(ctx, agentID, message) - require.NoError(t, err) - - // Verify message stored with metadata - messages, err := store.GetMessages(ctx, agentID) - require.NoError(t, err) - assert.Len(t, messages, 1) - }) -} -``` - -### test/integration/standalone/resource_store_test.go (NEW) -**Purpose**: Verify resource store compatibility with miniredis - -- Should store and retrieve resources atomically -- Should handle optimistic locking (PutIfMatch) via Lua scripts -- Should support TxPipeline 
for multi-key operations -- Should publish watch notifications via Pub/Sub -- Should maintain ETags correctly -- Should handle concurrent resource updates - -**Test Structure**: -```go -func TestResourceStore_MiniredisCompatibility(t *testing.T) { - t.Run("Should support TxPipeline operations", func(t *testing.T) { - ctx := t.Context() - store := setupResourceStoreWithMiniredis(ctx, t) - - // Test atomic multi-key operation - err := store.PutWithETag(ctx, resource) - require.NoError(t, err) - - // Verify ETag stored atomically - retrieved, err := store.Get(ctx, resource.ID) - require.NoError(t, err) - assert.Equal(t, resource.ETag, retrieved.ETag) - }) - - t.Run("Should publish watch notifications", func(t *testing.T) { - // Test Pub/Sub notifications - }) -} -``` - -### test/integration/standalone/streaming_test.go (NEW) -**Purpose**: Verify streaming and Pub/Sub functionality - -- Should publish task events via Redis Pub/Sub -- Should subscribe to workflow events -- Should support pattern subscriptions -- Should handle multiple subscribers -- Should deliver events reliably - -**Test Structure**: -```go -func TestStreaming_MiniredisCompatibility(t *testing.T) { - t.Run("Should publish and subscribe to events", func(t *testing.T) { - ctx := t.Context() - publisher := setupPublisherWithMiniredis(ctx, t) - subscriber := setupSubscriberWithMiniredis(ctx, t) - - // Subscribe to channel - events := make(chan Event, 10) - err := subscriber.Subscribe(ctx, "workflow:*", events) - require.NoError(t, err) - - // Publish event - err = publisher.Publish(ctx, "workflow:123", testEvent) - require.NoError(t, err) - - // Verify event received - select { - case evt := <-events: - assert.Equal(t, testEvent, evt) - case <-time.After(5 * time.Second): - t.Fatal("Event not received") - } - }) -} -``` - -### test/integration/standalone/persistence_test.go (NEW) -**Purpose**: Test snapshot persistence across restarts - -- Should persist data to BadgerDB snapshots -- Should restore data 
from snapshots on startup -- Should handle graceful shutdown snapshots -- Should handle periodic snapshots -- Should recover from snapshot failures - -**Test Structure**: -```go -func TestPersistence_SnapshotRestore(t *testing.T) { - t.Run("Should persist and restore data across restarts", func(t *testing.T) { - ctx := t.Context() - tempDir := t.TempDir() - - // Phase 1: Create data and snapshot - { - env := setupStandaloneWithPersistence(ctx, t, tempDir) - - // Store data - storeTestData(ctx, t, env) - - // Trigger snapshot - env.TriggerSnapshot(ctx) - - // Clean shutdown - env.Shutdown(ctx) - } - - // Phase 2: Restore and verify - { - env := setupStandaloneWithPersistence(ctx, t, tempDir) - defer env.Shutdown(ctx) - - // Verify data restored - verifyTestDataRestored(ctx, t, env) - } - }) -} -``` - -### test/integration/standalone/mode_switching_test.go (NEW) -**Purpose**: Test switching between modes (config-only, no data migration) - -- Should start in standalone mode -- Should start in distributed mode -- Should handle invalid mode configurations -- Should respect mode overrides - -## Fixtures & Testdata - -Add fixtures under `test/fixtures/standalone/`: - -- `minimal-config.yaml` - Minimal standalone configuration -- `with-persistence-config.yaml` - Standalone with persistence -- `mixed-mode-config.yaml` - Mixed mode configuration -- `workflows/test-workflow.yaml` - Sample workflow for integration tests -- `workflows/stateful-workflow.yaml` - Workflow with memory usage - -## Mocks & Stubs - -**No mocks needed for internal components** - use real implementations: -- Use real miniredis (in-memory, fast) -- Use real BadgerDB with temp directories -- Use real memory/resource stores - -**Mock external services only**: -- LLM providers (use test providers from `test/helpers/`) -- External APIs called by tools -- External MCP servers (if testing MCP integration) - -## Contract Tests - -### Cache Adapter Contract Tests -Location: 
`test/integration/cache/adapter_contract_test.go` (UPDATE) - -Add test cases for miniredis adapter: -- Should satisfy cache.RedisInterface contract -- Should support all 48 interface methods -- Should behave identically to external Redis adapter -- Should handle error cases consistently - -**Test Structure**: -```go -// Run the same test suite against both Redis and miniredis -func TestCacheAdapter_Contract(t *testing.T) { - adapters := []struct { - name string - setup func(t *testing.T) cache.RedisInterface - }{ - {"ExternalRedis", setupExternalRedis}, - {"Miniredis", setupMiniredis}, - } - - for _, adapter := range adapters { - t.Run(adapter.name, func(t *testing.T) { - client := adapter.setup(t) - - // Run same tests against both - testBasicOperations(t, client) - testLuaScripts(t, client) - testTxPipeline(t, client) - testPubSub(t, client) - }) - } -} -``` - -## Observability Assertions - -### Metrics Presence Tests -Location: `engine/infra/cache/metrics_test.go` (NEW) - -- Should increment cache operation counters (by backend and operation) -- Should record operation duration histograms -- Should track snapshot duration and size metrics -- Should label metrics with correct backend ("miniredis" vs "redis") - -### Log Output Tests -Location: Integration tests - -- Should log "Started embedded Redis server" with address -- Should log "Initializing persistence layer" when enabled -- Should log "Taking periodic snapshot" at intervals -- Should log "Taking final snapshot before shutdown" -- Should log errors with proper context - -### Trace Span Tests (if applicable) -- Should create spans for cache operations -- Should propagate context through cache calls -- Should record span attributes (backend, operation, keys) - -## Performance & Limits - -### Performance Tests -Location: `test/integration/standalone/performance_test.go` (NEW) - -- Should handle 100+ ops/sec in standalone mode -- Should complete workflow within 1.5x of Redis time -- Should use <512MB memory 
for typical workload -- Should complete snapshots within 5 seconds (warn threshold) - -**Test Structure**: -```go -func TestPerformance_Standalone(t *testing.T) { - if testing.Short() { - t.Skip("Skipping performance test in short mode") - } - - t.Run("Should handle 100 ops/sec", func(t *testing.T) { - ctx := t.Context() - env := setupStandaloneTestEnv(ctx, t) - defer env.Cleanup() - - // Run 1000 operations - start := time.Now() - for i := 0; i < 1000; i++ { - env.Cache.Set(ctx, fmt.Sprintf("key%d", i), "value", 0) - } - duration := time.Since(start) - - opsPerSec := 1000 / duration.Seconds() - assert.GreaterOrEqual(t, opsPerSec, 100.0) - }) -} -``` - -### Memory Limits Tests -- Should not exceed configured memory limits -- Should enforce TTLs correctly -- Should clean up expired keys - -## CLI Tests (Goldens) - -### Config Commands -Location: `cli/cmd/config/config_test.go` (UPDATE) - -- Should show mode field in `compozy config show` -- Should validate mode configurations in `compozy config validate` -- Should display mode resolution in `compozy config diagnostics` - -**Golden Files**: -- `testdata/config-show-standalone.golden` - Expected output for standalone config -- `testdata/config-show-mixed.golden` - Expected output for mixed mode config - -### Start Command -Location: `cli/cmd/start/start_test.go` (UPDATE) - -- Should accept `--mode standalone` flag -- Should accept `--mode distributed` flag -- Should prioritize config file over CLI flags -- Should show mode in startup logs - -## Exit Criteria - -- [ ] All unit tests exist and pass (`make test`) -- [ ] All integration tests exist and pass (`make test-all`) -- [ ] Contract tests verify miniredis behaves identically to Redis -- [ ] Performance tests validate NFRs (100 ops/sec, <1.5x latency) -- [ ] Memory tests verify <512MB footprint -- [ ] Metrics, logs, and traces are properly emitted -- [ ] CLI tests with goldens are updated -- [ ] All tests use `t.Context()` (no `context.Background()`) -- [ ] All 
tests follow `t.Run("Should ...")` naming convention -- [ ] Test coverage >80% for new code -- [ ] CI pipeline updated to run standalone integration tests -- [ ] Flaky tests identified and fixed -- [ ] All tests are deterministic and parallelizable where safe - -## CI/CD Integration - -Update `.github/workflows/test.yml`: - -```yaml -- name: Run Standalone Integration Tests - run: | - go test -v -race -tags=integration ./test/integration/standalone/... - env: - POSTGRES_HOST: localhost - POSTGRES_PORT: 5432 -``` - -No Docker Compose needed for standalone tests (uses miniredis in-memory). - -## Test Environment Helpers - -Create `test/helpers/standalone.go` (NEW): - -```go -// SetupStandaloneTestEnv creates a complete test environment in standalone mode -func SetupStandaloneTestEnv(ctx context.Context, t *testing.T) *TestEnv { - // Setup config with mode: standalone - // Initialize cache with miniredis - // Setup database (testcontainers or in-memory) - // Return configured environment -} - -// SetupMiniredisWithData creates miniredis pre-populated with test data -func SetupMiniredisWithData(ctx context.Context, t *testing.T) *MiniredisStandalone { - // Create miniredis - // Populate with test data - // Return configured miniredis -} -``` - -## Test Data Generators - -Create `test/fixtures/generators.go` (NEW): - -```go -// GenerateTestWorkflow creates a minimal test workflow -func GenerateTestWorkflow() *workflow.Config { ... } - -// GenerateTestMessages creates sample conversation messages -func GenerateTestMessages(count int) []*memory.Message { ... } - -// GenerateTestResources creates sample resources with ETags -func GenerateTestResources(count int) []*resources.Resource { ... 
} -``` - -## Test Cleanup - -All tests must use `t.Cleanup()` for resource cleanup: - -```go -func TestExample(t *testing.T) { - tempDir := t.TempDir() // Auto-cleanup - - mr, _ := NewMiniredisStandalone(ctx) - t.Cleanup(func() { - mr.Close(ctx) - }) - - // Test code -} -``` - -## Acceptance Criteria Summary - -- [ ] 100% of PRD acceptance criteria have corresponding tests -- [ ] All tests pass locally and in CI -- [ ] Test coverage >80% for `engine/infra/cache/` new code -- [ ] Mode resolution logic has 100% coverage -- [ ] MiniredisStandalone lifecycle fully tested -- [ ] SnapshotManager fully tested with edge cases -- [ ] Memory/resource store compatibility verified -- [ ] Streaming Pub/Sub compatibility verified -- [ ] End-to-end workflow execution tested -- [ ] Performance benchmarks meet NFRs -- [ ] CLI tests updated with mode flags -- [ ] Contract tests prove behavioral parity with Redis -- [ ] All tests follow project standards (naming, assertions, context usage) -- [ ] No flaky tests in standalone test suite - diff --git a/tasks/prd-tools/_task_1.md b/tasks/prd-tools/_task_1.md new file mode 100644 index 00000000..e6bafb07 --- /dev/null +++ b/tasks/prd-tools/_task_1.md @@ -0,0 +1,81 @@ +## markdown + +## status: completed # Options: pending, in-progress, completed, excluded + + +engine/tool +implementation +core_feature +high +http_server + + +# Task 1.0: Go-Native Tooling Infrastructure + +## Overview + +Introduce first-class Go-native tool support across the SDK and engine so handlers implemented in Go can register, resolve, and execute alongside existing runtime-based tools. 
+ 
+
+- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before starting
+- **ALWAYS READ** the technical docs from this PRD before starting
+- **YOU SHOULD ALWAYS** keep in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and supporting old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility
+
+
+
+# When you need information about a library or external API:
+- use perplexity and context7 to find out how to properly fix/resolve this
+- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase; the same applies to the topic param of context7
+- using the context7 mcp takes two steps: first you will find out the library id, and then you will check what you want
+
+
+
+- Add `Implementation` (or equivalent) field to `engine/tool.Config` with validation and serialization
+- Provide SDK ergonomics for registering Go handlers (`tool.WithNativeHandler`, runtime acceptance of `"go"`)
+- Build a native handler registry that maps tool IDs to in-process handlers with concurrency safety
+- Update LLM service/registry to execute native handlers with schema validation, panic recovery, and context-first logging
+- Preserve existing cp__ builtin registration and resource-store persistence semantics
+
+
+## Subtasks
+
+- [x] 1.1 Extend tool configuration model and SDK constructors for native mode
+- [x] 1.2 Implement native handler registry and integrate with LLM tool registry adapters
+- [x] 1.3 Add regression coverage (unit + integration) ensuring native tools execute and surface errors
+
+## Implementation Details (**FOR LLM READING THIS: KEEP THIS BRIEF AND HIGH-LEVEL, THE IMPLEMENTATION ALREADY EXISTS IN THE TECHSPEC**)
+
+Focus on tech spec §5.1 (Go-Native Tool Execution Path) and §6 (Data Model & API Adjustments). 
Ensure adapters in `engine/llm/service.go` branch on implementation type and wrap handler invocation with input/output schema checks. + +### Relevant Files + +- `engine/tool/config.go` +- `engine/tool/nativeuser/registry.go` (new) +- `sdk/tool/constructor.go` +- `engine/llm/service.go` +- `engine/llm/tool_registry.go` + +### Dependent Files + +- `engine/tool/router/*` +- `sdk/compozy/engine_registration.go` + +## Deliverables + +- Updated tool config schema and generated Go SDK options for native handlers +- Native handler registry package with documentation and tests +- Modified LLM registration pipeline executing Go handlers with telemetry +- Passing unit/integration tests covering native execution path + +## Tests + +- Unit tests mapped from `_tests.md` for this feature: + - [x] Native registry concurrency + registration failures + - [x] Native adapter input/output validation and panic recovery + - [x] SDK constructor enforcing native handler requirements + +## Success Criteria + +- Go-native tools execute via engine without relying on Bun runtime +- All new and affected tests (`go test ./...` scope) pass alongside `make lint` +- Tool resolver returns consistent configs for both native and runtime tools diff --git a/tasks/prd-tools/_task_2.md b/tasks/prd-tools/_task_2.md new file mode 100644 index 00000000..d69c203c --- /dev/null +++ b/tasks/prd-tools/_task_2.md @@ -0,0 +1,81 @@ +## markdown + +## status: completed # Options: pending, in-progress, completed, excluded + + +engine/runtime +implementation +core_feature +high +http_server + + +# Task 2.0: Inline TypeScript Execution Pipeline + +## Overview + +Materialize `tool.Config.Code` into the runtime by generating inline tool modules, composing an entrypoint, and wiring the Bun manager to consume the generated output automatically. 
+ 
+
+- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before starting
+- **ALWAYS READ** the technical docs from this PRD before starting
+- **YOU SHOULD ALWAYS** keep in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and supporting old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility
+
+
+
+# When you need information about a library or external API:
+- use perplexity and context7 to find out how to properly fix/resolve this
+- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase; the same applies to the topic param of context7
+- using the context7 mcp takes two steps: first you will find out the library id, and then you will check what you want
+
+
+
+- Build inline code manager that syncs `tool.Config.Code` to `.compozy/runtime/inline/` using atomic writes
+- Generate composite entrypoint merging user-provided exports with inline modules
+- Subscribe to tool resource changes (store watcher) to trigger regeneration
+- Update runtime configuration to point Bun manager at generated entrypoint when inline tools exist
+- Preserve security constraints (permissions, path normalization) defined in tech spec §5.2
+
+
+## Subtasks
+
+- [x] 2.1 Implement inline manager (file emission, entrypoint template, watcher integration)
+- [x] 2.2 Update engine bootstrap + runtime wiring to activate inline manager and adjust Bun manager behaviour
+- [x] 2.3 Add automated tests validating code generation, regeneration on updates, and Bun execution success
+
+## Implementation Details (**FOR LLM READING THIS: KEEP THIS BRIEF AND HIGH-LEVEL, THE IMPLEMENTATION ALREADY EXISTS IN THE TECHSPEC**)
+
+Follow tech spec §5.2 (Inline TypeScript Code Integration) and §5.3 (Combined Execution Flow). 
Ensure generated entrypoint imports user entrypoint when configured and overlays inline exports without mutating existing runtime behaviour. + +### Relevant Files + +- `engine/tool/inline/manager.go` (new package) +- `engine/runtime/bun_manager.go` +- `engine/runtime/bun/worker.tpl.ts` +- `sdk/compozy/engine.go` + +### Dependent Files + +- `engine/resources/store.go` +- `engine/tool/router/*` +- `sdk/examples/05_runtime_native_tools.go` + +## Deliverables + +- Inline manager module with documented API and internal tests +- Updated Bun runtime configuration writing composite entrypoint automatically +- Regenerated or updated examples demonstrating inline execution without manual entrypoint +- Passing unit/integration tests including runtime execution of inline tools + +## Tests + +- Unit tests mapped from `_tests.md` for this feature: + - [x] Inline manager emits deterministic files and handles concurrent updates + - [x] Resource watcher triggers regeneration on PUT/DELETE + - [x] Bun runtime executes generated entrypoint successfully + +## Success Criteria + +- Inline TypeScript tools execute end-to-end using generated entrypoint +- Regeneration occurs automatically when tool configs change +- `make lint` and scoped integration tests covering runtime path pass without flakiness diff --git a/tasks/prd-tools/_task_3.md b/tasks/prd-tools/_task_3.md new file mode 100644 index 00000000..22e880bf --- /dev/null +++ b/tasks/prd-tools/_task_3.md @@ -0,0 +1,79 @@ +## markdown + +## status: pending # Options: pending, in-progress, completed, excluded + + +sdk/tool +testing +configuration +medium +http_server + + +# Task 3.0: Validation, Testing, and Documentation Hardening + +## Overview + +Consolidate end-to-end coverage, ensure validation rules are enforced across APIs, and update documentation/examples to reflect Go-native and inline tooling capabilities. 
+ 
+
+- **ALWAYS READ** @.cursor/rules/critical-validation.mdc before starting
+- **ALWAYS READ** the technical docs from this PRD before starting
+- **YOU SHOULD ALWAYS** keep in mind that this should be done in a greenfield approach, we don't need to care about backwards compatibility since the project is in alpha, and supporting old and new stuff just introduces more complexity in the project; never sacrifice quality because of backwards compatibility
+
+
+
+# When you need information about a library or external API:
+- use perplexity and context7 to find out how to properly fix/resolve this
+- when using perplexity mcp, you can pass a prompt to the query param with more description about what you want to know, you don't need to pass a query-style search phrase; the same applies to the topic param of context7
+- using the context7 mcp takes two steps: first you will find out the library id, and then you will check what you want
+
+
+
+- Add REST-layer validation preventing native implementation payloads without registered handlers
+- Expand SDK + runtime documentation to describe new tooling modes and usage patterns
+- Update examples to demonstrate hybrid (Go + inline TS) workflow and ensure sample compiles
+- Provide regression tests covering API contracts, end-to-end workflow execution, and lint/test automation
+
+
+## Subtasks
+
+- [ ] 3.1 Harden validation paths (tool router/usecases, SDK guardrails, telemetry)
+- [ ] 3.2 Update docs/examples (`sdk/tool/README.md`, `sdk/examples/05_runtime_native_tools.go`, site docs)
+- [ ] 3.3 Add end-to-end regression suite exercising hybrid tooling and CI commands
+
+## Implementation Details (**FOR LLM READING THIS: KEEP THIS BRIEF AND HIGH-LEVEL, THE IMPLEMENTATION ALREADY EXISTS IN THE TECHSPEC**)
+
+Reference tech spec §6 (Data Model & API Adjustments), §7.5 (Validation & Tooling), and §8 (Testing & Validation Strategy). Focus on ensuring user-facing surfaces communicate capabilities and automated tests guard regressions. 
+ +### Relevant Files + +- `engine/tool/router/*` +- `sdk/tool/README.md` +- `docs/content/docs/core/tools/runtime-environment.mdx` +- `sdk/examples/05_runtime_native_tools.go` +- `Makefile`/CI scripts if adjustments are required + +### Dependent Files + +- `engine/tool/uc/*` +- `pkg/config/native_tools.go` + +## Deliverables + +- Validation logic and tests preventing inconsistent tool definitions through REST/SDK +- Updated documentation and examples reflecting new tooling workflows +- Expanded automated test suite (unit/integration/end-to-end) covering hybrid scenarios + +## Tests + +- Unit tests mapped from `_tests.md` for this feature: + - [ ] Router/usecase validation rejecting invalid implementation states + - [ ] Example workflow execution test combining Go and inline tools + - [ ] Documentation snippet tests or doctests (if applicable) + +## Success Criteria + +- Users can follow docs to create Go-native and inline tools without ambiguity +- Regression suite ensures mixed tooling workflows remain functional +- `make lint` and `make test` pass with new coverage included in CI diff --git a/tasks/prd-tools/_tasks.md b/tasks/prd-tools/_tasks.md new file mode 100644 index 00000000..e5a7b129 --- /dev/null +++ b/tasks/prd-tools/_tasks.md @@ -0,0 +1,61 @@ +# [Feature] Tooling Refactor Implementation Task Summary + +## Relevant Files + +### Core Implementation Files + +- `engine/tool/config.go` - Extend tool configuration model with implementation modes and validation +- `engine/llm/service.go` - Register native Go tools and runtime-backed adapters +- `engine/llm/tool_registry.go` - Add adapters for native handlers and maintain execution flow +- `engine/runtime/bun_manager.go` - Wire generated entrypoint for inline code execution +- `sdk/tool/constructor.go` - Expose SDK options for native handler registration and inline code + +### Integration Points + +- `sdk/compozy/engine.go` - Initialize inline manager and attach to engine lifecycle +- 
`engine/resources/store.go` - Ensure tool watcher hooks subscribe for inline sync +- `engine/tool/router` - Validate REST payloads against new implementation field + +### Documentation Files + +- `sdk/tool/README.md` - Document Go-native tooling usage +- `docs/content/docs/core/tools/runtime-environment.mdx` - Update runtime tooling guidance + +### Examples (if applicable) + +- `sdk/examples/05_runtime_native_tools.go` - Expand to cover Go-native and inline TypeScript coexistence + +## Tasks + +- [x] 1.0 Go-Native Tooling Infrastructure (L) +- [x] 2.0 Inline TypeScript Execution Pipeline (L) +- [ ] 3.0 Validation, Testing, and Documentation Hardening (M) + +Notes on sizing: + +- S = Small (≤ half-day) +- M = Medium (1–2 days) +- L = Large (3+ days) + +## Task Design Rules + +- Each parent task is a closed deliverable: independently shippable and reviewable +- Do not split one deliverable across multiple parent tasks; avoid cross-task coupling +- Each parent task must include unit test subtasks derived from `_tests.md` for this feature +- Each generated `/_task_.md` must contain explicit Deliverables and Tests sections + +## Execution Plan + +- Critical Path: 1.0 → 2.0 → 3.0 +- Parallel Track A (after 2.0): Targeted docs/SDK polish from 3.0 may begin once inline pipeline stabilizes +- Parallel Track B: None identified + +Notes + +- All runtime code MUST use `logger.FromContext(ctx)` and `config.FromContext(ctx)` +- Run `make fmt && make lint && make test` before marking any task as completed + +## Batch Plan (Grouped Commits) + +- [x] Batch 1 — Native Foundations: 1.0 +- [ ] Batch 2 — Runtime & Quality: 2.0, 3.0 diff --git a/tasks/prd-tools/_techspec.md b/tasks/prd-tools/_techspec.md new file mode 100644 index 00000000..f3724919 --- /dev/null +++ b/tasks/prd-tools/_techspec.md @@ -0,0 +1,243 @@ +# Compozy Tooling Refactor: Architecture, Issues, and Upgrade Plan + +## 1. 
Purpose and Scope +- Enable **Go-native tools** to coexist with the current TypeScript/Bun runtime so SDK users can author tools directly in Go. +- Integrate the `tool.Config.Code` field into the execution pipeline so inline TypeScript code is runnable without manual entrypoint stitching. +- Stay aligned with project rules (context-first logger/config, function size limits, no backwards-compat constraints) and Go 1.25+ features. + +Deliverable: A clear blueprint covering current architecture, gaps, design decisions, validation strategy, and phased plan for implementation. + +## 2. Current Architecture Snapshot + +``` +Go SDK (tool.New) ──► engine/tool.Config ──► resources.Store ──► appstate + │ + ▼ + LLM Service + │ + ▼ + ┌────────── Tool Registry (engine/llm/tool_registry.go:538) ─────────┐ + │ │ + native.Definitions() (Go built-ins) runtimeAdapter.ExecuteTool() + │ │ + builtin.Handler (cp__ tools) runtime.Runtime (Bun) executes entrypoint +``` + +Key components and references: +- **SDK constructor** trims and validates runtime/code but only accepts `"bun"` (`sdk/tool/constructor.go:15-118`). +- **Tool configs** store metadata plus `Runtime`, `Code`, schemas (`engine/tool/config.go:35-358`). +- **Registration** persists tool configs via `compozy.Engine` into the resource store (`sdk/compozy/engine_registration.go:111-157`). +- **Execution pipeline**: + - `engine/llm/service.go:118-189` registers cp__ built-ins then wraps `tool.Config` as `localToolAdapter`. + - `localToolAdapter` delegates to `runtimeAdapter.ExecuteTool`, which invokes `runtime.Runtime.ExecuteTool` (`engine/llm/tool_registry.go:538-576`). + - Bun worker (`engine/runtime/bun/worker.tpl.ts`) loads exports from a project entrypoint configured through `runtime.Config.EntrypointPath` (`engine/runtime/bun_manager.go:700-747`). 
+- **Built-in Go tooling** (cp__ prefixed) is managed through `native.Definitions` + `builtin.RegisterBuiltins` (`engine/tool/native/catalog.go`, `engine/tool/builtin/definition.go`). + +### Observations +- Go-only tooling is restricted to hard-coded cp__ built-ins; user-defined Go handlers are absent. +- Inline TypeScript strings validated at SDK level never reach the runtime; no pipeline writes or evaluates `Code`. +- Runtime assumes an existing entrypoint file, typically crafted manually or via template scaffolding. + +## 3. Problem Statement +1. **Gap:** No path for programmatic Go tools in SDK (`Runtime=go` unsupported, handlers cannot be registered). +2. **Gap:** `tool.Config.Code` is validated (`sdk/tool/constructor.go:103-111`) but ignored downstream; examples such as `sdk/examples/05_runtime_native_tools.go` rely on this placeholder. +3. **Operational friction:** Maintaining manual entrypoint files for inline code conflicts with SDK ergonomics. +4. **Schema + validation:** Input/output schemas exist but are enforced only inside runtime worker; Go handlers don’t benefit from the same pipeline today. + +## 4. Requirements + +### Functional +1. Allow SDK consumers to register Go-native tools with: + - Context-first handler signature (`context.Context`, input map, config map). + - Optional schemas and metadata reused by LLM function calling. +2. Inline TypeScript (`tool.Config.Code`) must execute without manual entrypoint authoring. +3. Agents must mix Go-native and TypeScript tools transparently. +4. Tool definitions still persist to the resource store, but non-serialisable handler state must be reattached programmatically per process. + +### Non-Functional +1. Preserve context propagation rules (`logger.FromContext`, `config.FromContext`). +2. Keep functions under 50 lines, use Go 1.25+ idioms (e.g., `sync.WaitGroup.Go` where relevant). +3. No backwards-compatibility support required; we can break legacy patterns. +4. 
Maintain deterministic tool IDs and avoid cp__ collisions. +5. Resilient file generation (atomic writes into `.compozy`), safe for concurrent updates. + +## 5. Proposed Architecture + +### 5.1 Go-Native Tool Execution Path + +#### 5.1.1 New Concepts +- **Tool Mode:** Extend `tool.Config` with an enum-like field (e.g., `Implementation` or `Kind`) defaulting to `runtime`. `native` indicates Go handler. +- **Native Tool Registry:** New package, e.g., `engine/tool/nativeuser`, storing `Definition{ID, Description, InputSchema, OutputSchema, Handler}` registered at runtime. Backed by `sync.Map` + `Register/Lookup`. +- **Handler Signature:** `func(ctx context.Context, input map[string]any, cfg map[string]any) (map[string]any, error)`. Wrapper will convert to `core.Output`. + +#### 5.1.2 SDK Surface (example) +```go +handler := func(ctx context.Context, input map[string]any, cfg map[string]any) (map[string]any, error) { + log := logger.FromContext(ctx) + log.Info("native tool invoked", "tool", "weather-brief") + // … business logic … + return map[string]any{"summary": "sunny"}, nil +} + +cfg, err := tool.New( + ctx, + "weather-brief", + tool.WithName("Weather Brief"), + tool.WithDescription("Summarises weather from in-memory data"), + tool.WithNativeHandler(handler, tool.WithInputSchema(schema), tool.WithOutputSchema(schema)), +) +``` +- `WithNativeHandler` registers the handler inside the native registry and mutates the config to `Runtime="go"` (or `Implementation=native`). +- The handler is not serialised; registry must be repopulated each process start. + +#### 5.1.3 Engine Integration +- **Registration:** `compozy.Engine.registerTool` continues to persist config for ID tracking, but when `Implementation=native`, the resource store record omits runtime-only handler state. +- **Execution:** Update `registerRuntimeTools` (`engine/llm/service.go:170-189`) to branch: + - If `cfg.Runtime` empty or `"bun"` ⇒ existing `localToolAdapter`. 
+ - If `cfg.Runtime`/`Implementation` == `"go"` ⇒ register `nativeToolAdapter`. +- **nativeToolAdapter (new):** + - Retrieves registered handler from `nativeuser.Lookup`. + - Runs validation using `cfg.ValidateInput` and `cfg.ValidateOutput`. + - Wraps handler panics to `core.NewError`. + - Provides metrics/logging parity with JS path. +- **Telemetry:** Extend `logNativeTools` to list user native tool IDs alongside cp__ built-ins. + +#### 5.1.4 Tool Resolution +- `ToolResolver` already clones tool configs; no change needed. Ensure deep copies do not lose Implementation flag. +- For REST-created tools, reject `Implementation=native` payloads with validation errors to avoid handler-less configs. + +#### 5.1.5 Concurrency & Safety +- Registry operations protected with `sync.RWMutex` or `sync.Once`. +- Optional `context.WithCancel` pipeline inside handler to manage timeouts (surfaced via config). + +### 5.2 Inline TypeScript Code Integration + +#### 5.2.1 Design Options Reviewed +| Option | Summary | Pros | Cons | +| --- | --- | --- | --- | +| A | Generate files under `.compozy/runtime/inline/` and build a composite entrypoint | Leverages existing Bun worker (no runtime protocol change), supports multiple tools | Requires file IO + watcher infrastructure | +| B | Send `Code` with each ExecuteTool request, evaluate dynamically in worker | No disk writes, immediate updates | Bun worker changes, runtime must compile TS per request, potential perf hit | +| C | Pre-bundle into shared library via CLI | Aligns with distribution flows | Requires new build step, complicates hot reload | + +**Chosen:** Option A (file generation + composite entrypoint) for predictable runtime and reuse of existing Bun executor. + +#### 5.2.2 Inline Tool Manager +- New component `engine/tool/inline` started during engine boot. +- Responsibilities: + 1. **Sync Files:** Iterate tool configs, for each with `Code` write `./.compozy/runtime/inline/.ts` (stable naming via slug/hash). 
Use atomic rename to avoid partial files. + 2. **Generate Composite Entrypoint:** Template merges user entrypoint (if configured) and inline exports: + ```ts + import * as userExports from "{{userEntrypointRel}}"; // optional + import tool_weather from "./inline/weather-brief.ts"; + export default { + ...(userExports.default ?? userExports), + "weather-brief": tool_weather, + }; + ``` + 3. **Watch Store:** Subscribe to `ResourceStore.Watch(project, ResourceTool)`; re-sync on PUT/DELETE. +- Update runtime wiring: if inline manager detects at least one inline tool, set `runtime.Config.EntrypointPath` to generated file. Preserve user-specified entrypoint (import + merge). +- Ensure `.compozy` directory uses permissions from `runtime.Config.WorkerFilePerm`. + +#### 5.2.3 Validation / Error Handling +- Invalid TypeScript should surface as compilation failure during Bun execution; propagate errors via existing `ToolExecutionError`. +- Add lint check in manager to ensure `Code` is non-empty (already enforced at SDK) and optionally run `bun build --check` during sync (guarded by config flag). + +### 5.3 Combined Execution Flow (after changes) + +``` +Agent call ─► ToolResolver ─► Tool Registry + │ + ├─ Native Tool Adapter ─► nativeuser.Handler (Go) + └─ Runtime Tool Adapter ─► Bun Runtime + │ + ├─ Generated inline entrypoint exports inline code + └─ User entrypoint exports legacy modules +``` + +## 6. Data Model & API Adjustments +| Area | Change | +| --- | --- | +| `engine/tool.Config` | Add `Implementation string` (json/yaml `implementation,omitempty`), default `runtime`. Keep `Runtime` for backwards compatibility but deprecate once implementation is present. | +| SDK | Add options: `WithNativeHandler`, `WithImplementation`, `WithEntrypointAlias` (optional for custom names). Runtime validation accepts `"bun"` or `"go"`. | +| REST API | Reject `implementation=native` to avoid handler-less configs; allow `code` updates. Document new field. 
| +| Runtime Config | Inline manager sets `EntrypointPath` to generated file when needed; keep user path in metadata. | + +## 7. Implementation Phases + +### Phase 1 – Discovery Hardening (already in progress) +- Finalise code references, confirm resource store behaviour (completed by this document). + +### Phase 2 – Foundations +1. Add `Implementation` field and update serializers (`engine/tool/config.go`, schema updates). +2. Implement native registry (`engine/tool/nativeuser/registry.go`) with unit tests. +3. Extend SDK options and ensure unit coverage (`sdk/tool/constructor_test.go`). + +### Phase 3 – Execution Adapters +1. Implement `nativeToolAdapter` and update `registerRuntimeTools`. +2. Update telemetry/logging and ensure cp__ built-ins unaffected (`engine/llm/service.go`, `engine/tool/builtin/registry.go`). +3. Add panic recovery + schema validation in adapter. + +### Phase 4 – Inline Code Manager +1. Introduce `inline.Manager` with sync + watcher logic (new package). +2. Update engine startup to instantiate manager once runtime + store available (probably in `sdk/compozy/engine.go` after store initialisation). +3. Modify runtime config builder to consume generated entrypoint. +4. Add integration tests verifying Bun worker loads generated file (`engine/runtime/bun_manager_test.go`). + +### Phase 5 – Validation & Tooling +1. Unit tests for native registry, inline manager, runtime adapters. +2. Integration test using SDK example: Go native tool + inline TS tool in same workflow. +3. Update docs (SDK README, examples) to showcase Go handler usage and inline code support. + +## 8. Testing & Validation Strategy +- **Unit**: native registry concurrency, inline manager file output, adapter behaviour with schema validation, error propagation. +- **Integration**: + - Run workflow executing Go tool and JS tool sequentially. + - Ensure `tool.Config.Code` is honoured by verifying generated file contents. 
+ - Watcher test: Update tool via store stub, expect regenerated entrypoint. +- **End-to-end**: Expand `sdk/examples/05_runtime_native_tools.go` to include Go handler scenario and assert output via `gotestsum`. +- **Performance**: Benchmark inline manager sync on large tool sets; ensure Bun worker initialisation unaffected. +- **Lint/Test gates**: `make lint`, `make test` mandatory before completion per project rules. + +## 9. Risks & Mitigations +| Risk | Mitigation | +| --- | --- | +| Handler registry not populated before tool usage | Require `WithNativeHandler` to register during construction; add runtime check raising descriptive error if handler missing. | +| File system race during regeneration | Use atomic temp-file writes + `os.Rename`; guard sync with mutex. | +| User-provided entrypoint conflicts with generated exports | Merge order ensures inline exports override duplicates; document precedence. | +| REST clients attempt to create native tools | Validate in router usecases (e.g., `tooluc.Upsert`) and reject gracefully. | +| Context misuse inside handlers | Provide helper utilities enforcing `logger.FromContext`, `config.FromContext` usage and document expectation. | + +## 10. Open Questions +1. Should we allow YAML-defined inline code (from CLI) or keep SDK-only? Decision impacts security posture. +2. Do we need hot reload for inline code (e.g., `compozy dev` watching SDK files)? If so, integrate manager with existing dev loop. +3. Should Bun worker cache compiled inline modules across executions for performance? Evaluate after baseline. + +## 11. Relevant Files & References +- `sdk/tool/constructor.go` – runtime validation and code trimming logic. +- `sdk/examples/05_runtime_native_tools.go` – demonstrates placeholder inline code usage. +- `sdk/compozy/engine_registration.go` – tool persistence flow. +- `engine/tool/config.go` – current config schema and validation (no enforcement of runtime/code). 
+- `engine/llm/service.go` (`registerNativeBuiltins`, `registerRuntimeTools`) – tool registration pipeline.
+- `engine/llm/tool_registry.go` (`localToolAdapter`) – runtime adapter logic.
+- `engine/runtime/bun_manager.go` & `engine/runtime/bun/worker.tpl.ts` – Bun execution path, entrypoint expectations.
+- `engine/tool/native/catalog.go` & `engine/tool/builtin/definition.go` – cp__ built-in infrastructure to emulate.
+- `pkg/config/native_tools.go` – telemetry + config gating for native/built-in tooling.
+- `engine/resources/store.go` – resource persistence contract (clarifies why handlers must be re-registered in-process).
+
+## 12. Sources Consulted
+- `sdk/tool/constructor.go`
+- `sdk/tool/options_generated.go`
+- `sdk/examples/05_runtime_native_tools.go`
+- `sdk/compozy/engine_registration.go`
+- `engine/tool/config.go`
+- `engine/tool/router/dto.go`
+- `engine/tool/native/catalog.go`
+- `engine/tool/builtin/definition.go`
+- `engine/llm/service.go`
+- `engine/llm/tool_registry.go`
+- `engine/runtime/bun_manager.go`
+- `engine/runtime/bun/worker.tpl.ts`
+- `pkg/config/native_tools.go`
+- `engine/resources/store.go`
+
+The above references are all within the current repository (`/Users/pedronauck/Dev/compozy/compozy`). 
diff --git a/test/integration/memory/token_counting_test.go b/test/integration/memory/token_counting_test.go index e56c5b01..81e4a6f6 100644 --- a/test/integration/memory/token_counting_test.go +++ b/test/integration/memory/token_counting_test.go @@ -168,6 +168,7 @@ func TestTokenCountingConsistency(t *testing.T) { err := instance.Append(ctx, msg) require.NoError(t, err) } + require.NoError(t, waitForTokenCountStabilization(ctx, instance, 0, 100*time.Millisecond)) // Get initial counts health1, err := instance.GetMemoryHealth(ctx) require.NoError(t, err) diff --git a/test/integration/temporal/standalone_test.go b/test/integration/temporal/standalone_test.go index 5320d16c..325edf4e 100644 --- a/test/integration/temporal/standalone_test.go +++ b/test/integration/temporal/standalone_test.go @@ -4,9 +4,11 @@ import ( "context" "encoding/json" "fmt" + "math/rand" "net" "os" "path/filepath" + "strconv" "strings" "testing" "time" @@ -29,6 +31,12 @@ const ( workflowTimeout = 30 * time.Second ) +func init() { + if os.Getenv("TEMPORAL_TEST_PORT_BASE") == "" { + _ = os.Setenv("TEMPORAL_TEST_PORT_BASE", "45000") + } +} + type workflowExecution struct { WorkflowID string RunID string @@ -124,13 +132,42 @@ func TestStandaloneWorkflowExecution(t *testing.T) { func startStandaloneServer(ctx context.Context, t *testing.T, cfg *embedded.Config) *embedded.Server { t.Helper() - server, err := embedded.NewServer(ctx, cfg) - require.NoError(t, err) - require.NoError(t, server.Start(ctx)) - t.Cleanup(func() { - stopTemporalServer(ctx, t, server) - }) - return server + var lastErr error + for attempts := 0; attempts < 5; attempts++ { + server, err := embedded.NewServer(ctx, cfg) + if isAddressInUseErr(err) { + cfg.FrontendPort = findAvailablePortRange(ctx, t, 4) + lastErr = err + continue + } + require.NoError(t, err) + startErr := server.Start(ctx) + if isAddressInUseErr(startErr) { + lastErr = startErr + stopCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 5*time.Second) 
+ server.Stop(stopCtx) // ignore error; attempting retry + cancel() + cfg.FrontendPort = findAvailablePortRange(ctx, t, 4) + continue + } + require.NoError(t, startErr) + t.Cleanup(func() { + stopTemporalServer(ctx, t, server) + }) + return server + } + if lastErr != nil { + require.FailNow(t, "failed to start embedded temporal server after retries", lastErr) + } + require.FailNow(t, "failed to start embedded temporal server after retries") + return nil +} + +func isAddressInUseErr(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), "address already in use") } func stopTemporalServer(ctx context.Context, t *testing.T, server *embedded.Server) { @@ -264,37 +301,79 @@ func loadWorkflowInput(t *testing.T) workflowInput { func findAvailablePortRange(ctx context.Context, t *testing.T, size int) int { t.Helper() - for port := 15000; port < 25000; port++ { - if !portsAvailable(ctx, port, size) { - continue + start := temporalPortSearchStart() + deadline := time.Now().Add(5 * time.Second) + maxSequential := 60000 + maxPort := 65535 + + tryReserve := func(port int) bool { + if port <= 0 || port+size+1000 > maxPort { + return false } - // Ensure auxiliary port at +1000 offset is available for Temporal UI when enabled - if !portAvailable(ctx, port+1000) { - continue + if !reservePorts(ctx, port, size) { + return false + } + if !reservePorts(ctx, port+1000, 1) { + return false + } + return true + } + + // First attempt sequential scan to keep behavior deterministic. + for port := start; port < maxSequential; port++ { + if time.Now().After(deadline) { + break + } + if tryReserve(port) { + return port + } + } + + // Fallback to random probing before giving up. + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + for attempts := 0; attempts < 200; attempts++ { + if time.Now().After(deadline) { + break + } + candidate := start + rng.Intn(maxPort-start-size-1000) + // Align candidate to avoid wrapping when checking contiguous ports. 
+ candidate -= candidate % size + if tryReserve(candidate) { + return candidate } - return port } - t.Fatalf("no available port range found") + + t.Fatalf("no available port range found after retries (start=%d, size=%d)", start, size) return 0 } -func portsAvailable(ctx context.Context, start int, size int) bool { +func temporalPortSearchStart() int { + raw := strings.TrimSpace(os.Getenv("TEMPORAL_TEST_PORT_BASE")) + if raw == "" { + return 15000 + } + base, err := strconv.Atoi(raw) + if err != nil || base < 1024 || base > 60000 { + return 15000 + } + return base +} + +func reservePorts(ctx context.Context, start int, size int) bool { + listeners := make([]net.Listener, 0, size) for offset := 0; offset < size; offset++ { - if !portAvailable(ctx, start+offset) { + ln, err := (&net.ListenConfig{}).Listen(ctx, "tcp", fmt.Sprintf("127.0.0.1:%d", start+offset)) + if err != nil { + for _, listener := range listeners { + _ = listener.Close() + } return false } + listeners = append(listeners, ln) } - return true -} - -func portAvailable(ctx context.Context, port int) bool { - dialCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) - ln, err := (&net.ListenConfig{}).Listen(dialCtx, "tcp", fmt.Sprintf("127.0.0.1:%d", port)) - cancel() - if err != nil { - return false + for _, listener := range listeners { + _ = listener.Close() } - _ = ln.Close() return true }