diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index 63a331f..1f6cba8 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -59,7 +59,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: "1.24" + go-version: "1.25" cache: true - name: Build & Vet (Go) @@ -95,4 +95,4 @@ jobs: tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=gha - cache-to: type=gha,mode=max \ No newline at end of file + cache-to: type=gha,mode=max diff --git a/.github/workflows/chaos.yml b/.github/workflows/chaos.yml new file mode 100644 index 0000000..08fec15 --- /dev/null +++ b/.github/workflows/chaos.yml @@ -0,0 +1,125 @@ +name: Chaos Tests + +on: + workflow_dispatch: + inputs: + experiment: + description: 'Experiment to run' + required: true + default: all + type: choice + options: + - all + - pod-kill + - kafka-consumer-pause + - redis-outage + - projection-lag + - network-partition + namespace: + description: 'Target namespace' + required: true + default: grainguard-dev + schedule: + # Run full suite every Saturday at 02:00 UTC (off-peak) + - cron: '0 2 * * 6' + +env: + NAMESPACE: ${{ github.event.inputs.namespace || 'grainguard-dev' }} + +jobs: + chaos: + name: Chaos — ${{ github.event.inputs.experiment || 'all' }} + runs-on: ubuntu-latest + timeout-minutes: 30 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Configure kubectl + uses: azure/setup-kubectl@v3 + with: + version: 'v1.29.0' + + - name: Set kubeconfig + run: | + mkdir -p "$HOME/.kube" + echo "${{ secrets.KUBECONFIG_DEV }}" | base64 -d > "$HOME/.kube/config" + chmod 600 "$HOME/.kube/config" + + - name: Install Chaos Toolkit + run: | + pip install --quiet \ + chaostoolkit==1.19.0 \ + chaostoolkit-kubernetes==0.26.4 \ + chaostoolkit-verification==0.3.0 + + - name: Make scripts executable + run: chmod +x tests/chaos/*.sh + + - name: Run — all experiments + if: ${{ github.event.inputs.experiment == 'all' || 
github.event_name == 'schedule' }} + env: + NAMESPACE: ${{ env.NAMESPACE }} + KAFKA_BOOTSTRAP: kafka:9092 + GATEWAY_URL: ${{ secrets.CHAOS_GATEWAY_URL }} + PROMETHEUS_URL: ${{ secrets.CHAOS_PROMETHEUS_URL }} + TEST_JWT: ${{ secrets.CHAOS_TEST_JWT }} + run: bash tests/chaos/run-all.sh + + - name: Run — pod-kill + if: ${{ github.event.inputs.experiment == 'pod-kill' }} + env: + NAMESPACE: ${{ env.NAMESPACE }} + run: chaos run tests/chaos/pod-kill.yaml + + - name: Run — kafka-consumer-pause + if: ${{ github.event.inputs.experiment == 'kafka-consumer-pause' }} + env: + NAMESPACE: ${{ env.NAMESPACE }} + KAFKA_BOOTSTRAP: kafka:9092 + run: bash tests/chaos/kafka-consumer-pause.sh + + - name: Run — redis-outage + if: ${{ github.event.inputs.experiment == 'redis-outage' }} + env: + NAMESPACE: ${{ env.NAMESPACE }} + GATEWAY_URL: ${{ secrets.CHAOS_GATEWAY_URL }} + TEST_JWT: ${{ secrets.CHAOS_TEST_JWT }} + run: bash tests/chaos/redis-outage.sh + + - name: Run — projection-lag + if: ${{ github.event.inputs.experiment == 'projection-lag' }} + env: + NAMESPACE: ${{ env.NAMESPACE }} + KAFKA_BOOTSTRAP: kafka:9092 + PROMETHEUS_URL: ${{ secrets.CHAOS_PROMETHEUS_URL }} + STRICT_ALERT_CHECK: "1" + run: bash tests/chaos/projection-lag.sh + + - name: Run — network-partition + if: ${{ github.event.inputs.experiment == 'network-partition' }} + env: + NAMESPACE: ${{ env.NAMESPACE }} + run: chaos run tests/chaos/network-partition.yaml + + - name: Upload chaos logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: chaos-results-${{ github.run_number }} + path: tests/chaos/results/ + retention-days: 30 + if-no-files-found: ignore + + - name: Notify Slack on failure + if: failure() + uses: slackapi/slack-github-action@v1.26.0 + with: + payload: | + { + "text": ":fire: Chaos experiment *${{ github.event.inputs.experiment || 'all' }}* FAILED on `${{ env.NAMESPACE }}` — <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|View run>" + } + env: + 
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_CHAOS_WEBHOOK }} + SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2e8350d..30cd6b5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,7 +20,7 @@ jobs: - uses: actions/setup-go@v5 with: - go-version: "1.24" + go-version: "1.25" cache: true - name: golangci-lint @@ -37,7 +37,7 @@ jobs: - uses: actions/setup-go@v5 with: - go-version: "1.24" + go-version: "1.25" cache: true - name: Download deps diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml new file mode 100644 index 0000000..4538804 --- /dev/null +++ b/.github/workflows/e2e.yml @@ -0,0 +1,74 @@ +name: E2E Tests + +on: + workflow_dispatch: + pull_request: + branches: [master] + +jobs: + e2e: + name: Playwright E2E + runs-on: ubuntu-latest + timeout-minutes: 20 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: "20" + cache: npm + cache-dependency-path: apps/dashboard/package-lock.json + + - name: Install dashboard deps + run: npm ci + working-directory: apps/dashboard + + - name: Install E2E deps + run: npm install --save-dev @playwright/test typescript ts-node + working-directory: tests/e2e + + - name: Install Playwright browsers + run: npx playwright install --with-deps chromium firefox + working-directory: tests/e2e + + - name: Build dashboard + run: npm run build + working-directory: apps/dashboard + env: + VITE_AUTH0_DOMAIN: ${{ secrets.VITE_AUTH0_DOMAIN }} + VITE_AUTH0_CLIENT_ID: ${{ secrets.VITE_AUTH0_CLIENT_ID }} + VITE_AUTH0_AUDIENCE: ${{ secrets.VITE_AUTH0_AUDIENCE }} + VITE_BFF_URL: ${{ secrets.E2E_BFF_URL }} + VITE_GATEWAY_URL: ${{ secrets.E2E_GATEWAY_URL }} + + - name: Serve dashboard + run: npx serve -s dist -l 5173 & + working-directory: apps/dashboard + + - name: Wait for server + run: npx wait-on http://localhost:5173 --timeout 30000 + + - name: Run Playwright tests + run: npx playwright 
test --config playwright.config.ts + working-directory: tests/e2e + env: + E2E_BASE_URL: http://localhost:5173 + VITE_AUTH0_CLIENT_ID: ${{ secrets.VITE_AUTH0_CLIENT_ID }} + VITE_AUTH0_AUDIENCE: ${{ secrets.VITE_AUTH0_AUDIENCE }} + + - name: Upload Playwright report + uses: actions/upload-artifact@v4 + if: always() + with: + name: playwright-report-${{ github.run_number }} + path: tests/e2e/playwright-report/ + retention-days: 14 + + - name: Upload test results (JUnit) + uses: actions/upload-artifact@v4 + if: always() + with: + name: playwright-results-${{ github.run_number }} + path: tests/e2e/playwright-results.xml diff --git a/.github/workflows/perf.yml b/.github/workflows/perf.yml new file mode 100644 index 0000000..17804f6 --- /dev/null +++ b/.github/workflows/perf.yml @@ -0,0 +1,129 @@ +name: Performance Budget + +on: + pull_request: + branches: [master] + paths: + - "apps/gateway/**" + - "apps/bff/**" + - "scripts/load-tests/**" + +jobs: + perf: + name: k6 Performance Budget + runs-on: ubuntu-latest + timeout-minutes: 15 + + services: + # Spin up the gateway and BFF as Docker Compose services + # so k6 can hit them without needing a live cluster + postgres: + image: postgres:16-alpine + ports: + - 5432:5432 + env: + POSTGRES_USER: grainguard + POSTGRES_PASSWORD: grainguard + POSTGRES_DB: grainguard + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + redis: + image: redis:7-alpine + ports: + - 6379:6379 + options: >- + --health-cmd "redis-cli ping" + --health-interval 10s + --health-retries 5 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: "20" + cache: npm + cache-dependency-path: apps/gateway/package-lock.json + + - name: Install gateway deps + run: npm ci + working-directory: apps/gateway + + - name: Install BFF deps + run: npm ci + working-directory: apps/bff + + - name: Start gateway in background + run: npx ts-node 
src/server.ts & + working-directory: apps/gateway + env: + PORT: 3000 + DATABASE_URL: postgres://grainguard:grainguard@localhost:5432/grainguard + REDIS_URL: redis://localhost:6379 + JWKS_URL: ${{ secrets.PERF_JWKS_URL }} + JWT_ISSUER: ${{ secrets.PERF_JWT_ISSUER }} + JWT_AUDIENCE: ${{ secrets.PERF_JWT_AUDIENCE }} + ALLOWED_ORIGINS: http://localhost:5173 + STRIPE_SECRET_KEY: sk_test_placeholder + STRIPE_WEBHOOK_SECRET: whsec_placeholder + STRIPE_PRICE_STARTER: price_placeholder + STRIPE_PRICE_PROFESSIONAL: price_placeholder + STRIPE_PRICE_ENTERPRISE: price_placeholder + DASHBOARD_URL: http://localhost:5173 + AUTH0_DOMAIN: placeholder.auth0.com + AUTH0_MANAGEMENT_CLIENT_ID: placeholder + AUTH0_MANAGEMENT_CLIENT_SECRET: placeholder + + - name: Start BFF in background + run: npx ts-node src/server.ts & + working-directory: apps/bff + env: + PORT: 4000 + POSTGRES_HOST: localhost + POSTGRES_PORT: 5432 + POSTGRES_USER: grainguard + POSTGRES_PASSWORD: grainguard + POSTGRES_DB: grainguard + REDIS_HOST: localhost + REDIS_PORT: 6379 + ELASTICSEARCH_URL: http://localhost:9200 + CASSANDRA_HOST: localhost + CASSANDRA_PORT: 9042 + AUTH0_DOMAIN: placeholder.auth0.com + AUTH0_AUDIENCE: placeholder + AUTH0_ORG_CLAIM: org_id + ALLOWED_ORIGINS: http://localhost:5173 + JWT_SECRET: dev-secret + + - name: Wait for gateway + run: npx wait-on http://localhost:3000/health --timeout 30000 + + - name: Wait for BFF + run: npx wait-on http://localhost:4000/graphql --timeout 30000 + + - name: Install k6 + run: | + curl -L https://github.com/grafana/k6/releases/download/v0.51.0/k6-v0.51.0-linux-amd64.tar.gz | tar xz + sudo mv k6-v0.51.0-linux-amd64/k6 /usr/local/bin/k6 + + - name: Run performance budget + run: | + k6 run \ + --env GATEWAY_URL=http://localhost:3000 \ + --env BFF_URL=http://localhost:4000 \ + scripts/load-tests/performance-budget.js + # k6 exits 99 if thresholds are breached — this step fails and blocks the PR + + - name: Upload performance results + uses: 
actions/upload-artifact@v4 + if: always() + with: + name: perf-results-${{ github.run_number }} + path: scripts/load-tests/results/ + retention-days: 30 + if-no-files-found: ignore diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml index cd5014d..2a81b4c 100644 --- a/.github/workflows/security.yml +++ b/.github/workflows/security.yml @@ -14,6 +14,10 @@ jobs: trivy-scan: name: Trivy Image Scan runs-on: ubuntu-latest + permissions: + contents: read + actions: read + security-events: write strategy: matrix: service: @@ -35,17 +39,25 @@ jobs: apps/${{ matrix.service }} - name: Run Trivy vulnerability scan - uses: aquasecurity/trivy-action@master + uses: aquasecurity/trivy-action@0.28.0 with: image-ref: grainguard/${{ matrix.service }}:${{ github.sha }} format: sarif output: trivy-${{ matrix.service }}.sarif severity: CRITICAL,HIGH ignore-unfixed: true + exit-code: "0" trivy-config: infra/security/trivy.yaml + - name: Ensure Trivy SARIF exists + if: always() + run: | + if [ ! -f "trivy-${{ matrix.service }}.sarif" ]; then + printf '{"version":"2.1.0","runs":[]}\n' > "trivy-${{ matrix.service }}.sarif" + fi + - name: Upload Trivy results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@v4 if: always() with: sarif_file: trivy-${{ matrix.service }}.sarif @@ -55,21 +67,33 @@ jobs: trivy-fs-scan: name: Trivy Filesystem Scan runs-on: ubuntu-latest + permissions: + contents: read + actions: read + security-events: write steps: - uses: actions/checkout@v4 - name: Scan filesystem for secrets and misconfigs - uses: aquasecurity/trivy-action@master + uses: aquasecurity/trivy-action@0.28.0 with: scan-type: fs scan-ref: . format: sarif output: trivy-fs.sarif severity: CRITICAL,HIGH + exit-code: "0" trivy-config: infra/security/trivy.yaml + - name: Ensure Trivy filesystem SARIF exists + if: always() + run: | + if [ ! 
-f trivy-fs.sarif ]; then + printf '{"version":"2.1.0","runs":[]}\n' > trivy-fs.sarif + fi + - name: Upload results - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@v4 if: always() with: sarif_file: trivy-fs.sarif @@ -90,7 +114,7 @@ jobs: - uses: actions/checkout@v4 - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@v4 with: languages: ${{ matrix.language }} queries: security-extended @@ -99,7 +123,8 @@ jobs: if: matrix.language == 'go' uses: actions/setup-go@v5 with: - go-version: '1.24' + go-version: '1.25' + cache: false - name: Setup Node if: matrix.language == 'javascript' @@ -114,10 +139,10 @@ jobs: python-version: '3.12' - name: Autobuild - uses: github/codeql-action/autobuild@v3 + uses: github/codeql-action/autobuild@v4 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@v4 with: category: codeql-${{ matrix.language }} @@ -167,7 +192,8 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: '1.24' + go-version: '1.25' + cache: false - name: Audit Go dependencies (govulncheck) run: | diff --git a/.github/workflows/terraform.yml b/.github/workflows/terraform.yml new file mode 100644 index 0000000..780c8f3 --- /dev/null +++ b/.github/workflows/terraform.yml @@ -0,0 +1,117 @@ +name: Terraform + +on: + pull_request: + branches: [master] + paths: + - "infra/terraform/**" + push: + branches: [master] + paths: + - "infra/terraform/**" + +env: + TF_VERSION: "1.7.5" + AWS_REGION: "us-east-1" + +jobs: + # ── Plan — runs on every PR that touches terraform/ ──────────────────────── + plan: + name: Terraform Plan (${{ matrix.env }}) + runs-on: ubuntu-latest + if: github.event_name == 'pull_request' + strategy: + matrix: + env: [dev, prod] + defaults: + run: + working-directory: infra/terraform/environments/${{ matrix.env }} + + permissions: + id-token: write # for OIDC auth to AWS (no long-lived keys) + contents: 
read + pull-requests: write # to post plan output as PR comment + + steps: + - uses: actions/checkout@v4 + + - name: Configure AWS credentials (OIDC) + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_TF_ROLE_ARN }} + aws-region: ${{ env.AWS_REGION }} + + - name: Set up Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: ${{ env.TF_VERSION }} + + - name: Terraform Init + run: terraform init -input=false + + - name: Terraform Format check + run: terraform fmt -check -recursive + + - name: Terraform Validate + run: terraform validate + + - name: Terraform Plan + id: plan + env: + TF_VAR_db_password: ${{ secrets.TF_VAR_DB_PASSWORD }} + run: terraform plan -input=false -no-color -out=tfplan 2>&1 | tee plan.txt + continue-on-error: true # we post the plan even if it fails + + - name: Post plan as PR comment + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const plan = fs.readFileSync('infra/terraform/environments/${{ matrix.env }}/plan.txt', 'utf8'); + const body = `## Terraform Plan — \`${{ matrix.env }}\`\n\`\`\`hcl\n${plan.slice(0, 65000)}\n\`\`\``; + github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + body, + }); + + - name: Fail if plan errored + if: steps.plan.outcome == 'failure' + run: exit 1 + + # ── Apply — runs only on push to master (after PR merged) ────────────────── + apply: + name: Terraform Apply (dev) + runs-on: ubuntu-latest + if: github.event_name == 'push' && github.ref == 'refs/heads/master' + environment: dev # requires manual approval in GitHub Environments + defaults: + run: + working-directory: infra/terraform/environments/dev + + permissions: + id-token: write + contents: read + + steps: + - uses: actions/checkout@v4 + + - name: Configure AWS credentials (OIDC) + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_TF_ROLE_ARN }} + 
aws-region: ${{ env.AWS_REGION }} + + - name: Set up Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: ${{ env.TF_VERSION }} + + - name: Terraform Init + run: terraform init -input=false + + - name: Terraform Apply + env: + TF_VAR_db_password: ${{ secrets.TF_VAR_DB_PASSWORD }} + run: terraform apply -input=false -auto-approve diff --git a/Makefile b/Makefile index 0ab4143..731c690 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,11 @@ -.PHONY: up down restart logs build seed test lint clean help +.PHONY: up down restart logs build seed test lint clean help ci # ============================================ # GrainGuard — Developer Makefile # ============================================ +COMPOSE := docker compose -f infra/docker/docker-compose.yml + help: ## Show this help @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' @@ -12,28 +14,31 @@ help: ## Show this help # ============================================ up: ## Start all services - docker compose -f infra/docker/docker-compose.yml up -d + $(COMPOSE) up -d down: ## Stop all services - docker compose -f infra/docker/docker-compose.yml down + $(COMPOSE) down restart: ## Restart all services - docker compose -f infra/docker/docker-compose.yml restart + $(COMPOSE) restart logs: ## Tail logs for all services - docker compose -f infra/docker/docker-compose.yml logs -f + $(COMPOSE) logs -f logs-gateway: ## Tail gateway logs - docker compose -f infra/docker/docker-compose.yml logs -f gateway + $(COMPOSE) logs -f gateway logs-bff: ## Tail BFF logs - docker compose -f infra/docker/docker-compose.yml logs -f bff + $(COMPOSE) logs -f bff logs-kafka: ## Tail Kafka logs - docker compose -f infra/docker/docker-compose.yml logs -f kafka + $(COMPOSE) logs -f kafka + +logs-ingest: ## Tail ingest-service logs + $(COMPOSE) logs -f ingest-service build: ## Build all Docker images - docker compose -f 
infra/docker/docker-compose.yml build + $(COMPOSE) build # ============================================ # Database @@ -50,16 +55,22 @@ migrate: ## Run database migrations # Testing # ============================================ -test: ## Run all tests +test: ## Run all tests (Go + TS) $(MAKE) test-go - $(MAKE) test-react + $(MAKE) test-gateway + $(MAKE) test-dashboard + +test-go: ## Run Go tests with race detector + go test -race -count=1 ./... -test-go: ## Run Go tests - go test ./... +test-gateway: ## Run Gateway unit tests + cd apps/gateway && npx jest --passWithNoTests --forceExit -test-react: ## Run React/Node tests - cd apps/dashboard && npm test -- --run - cd apps/bff && npm test -- --run +test-dashboard: ## Run Dashboard Vitest tests + cd apps/dashboard && npx vitest run + +test-e2e: ## Run Playwright E2E tests + cd apps/dashboard && npx playwright test test-load: ## Run k6 load tests k6 run scripts/load-tests/gateway-load-test.js @@ -69,35 +80,78 @@ test-load: ## Run k6 load tests # Linting # ============================================ -lint: ## Lint all services +lint: ## Lint all services (Go + TS) $(MAKE) lint-go $(MAKE) lint-ts -lint-go: ## Lint Go services +lint-go: ## Lint Go services with golangci-lint golangci-lint run ./... -lint-ts: ## Lint TypeScript services +lint-ts: ## Lint all TypeScript services cd apps/gateway && npm run lint cd apps/bff && npm run lint + cd apps/jobs-worker && npm run lint cd apps/dashboard && npm run lint +typecheck: ## Typecheck all TypeScript services + cd apps/gateway && npm run typecheck + cd apps/bff && npm run typecheck + cd apps/dashboard && npm run build + +lint-fix: ## Auto-fix lint issues + golangci-lint run --fix ./... 
+ cd apps/gateway && npm run lint:fix + cd apps/bff && npm run lint:fix + cd apps/jobs-worker && npm run lint:fix + cd apps/dashboard && npm run lint -- --fix + +# ============================================ +# CI (local mirror of GitHub Actions) +# ============================================ + +ci: ## Run full CI locally (lint + test + build) + @echo "=== Go build ===" + go build ./... + @echo "=== Go vet ===" + go vet ./... + @echo "=== Go lint ===" + golangci-lint run ./... || echo "Install: brew install golangci-lint" + @echo "=== Go test ===" + go test -race -count=1 ./... + @echo "=== TS lint ===" + $(MAKE) lint-ts + @echo "=== Gateway tests ===" + $(MAKE) test-gateway + @echo "=== Dashboard tests ===" + $(MAKE) test-dashboard + @echo "=== Dashboard build ===" + cd apps/dashboard && npm run build + @echo "" + @echo "CI passed" + # ============================================ # Cleanup # ============================================ clean: ## Remove all containers and volumes - docker compose -f infra/docker/docker-compose.yml down -v --remove-orphans + $(COMPOSE) down -v --remove-orphans clean-cache: ## Clear Redis cache - docker compose -f infra/docker/docker-compose.yml exec redis redis-cli FLUSHALL + $(COMPOSE) exec redis redis-cli FLUSHALL # ============================================ # Status # ============================================ ps: ## Show running services - docker compose -f infra/docker/docker-compose.yml ps + $(COMPOSE) ps health: ## Check health of all services - curl -s http://localhost:8086/health | jq - curl -s http://localhost:4000/health | jq \ No newline at end of file + @echo "=== Gateway ===" + @curl -sf http://localhost:3000/health | jq . || echo "Gateway: DOWN" + @echo "=== Gateway Readiness ===" + @curl -sf http://localhost:3000/health/ready | jq . || echo "Gateway readiness: DOWN" + @echo "=== BFF ===" + @curl -sf http://localhost:4000/health | jq . 
|| echo "BFF: DOWN" + @echo "=== Ingest Service ===" + @curl -sf http://localhost:3001/health | jq . || echo "Ingest: DOWN" \ No newline at end of file diff --git a/apps/bff/package-lock.json b/apps/bff/package-lock.json index eeb5575..e52bf80 100644 --- a/apps/bff/package-lock.json +++ b/apps/bff/package-lock.json @@ -28,13 +28,16 @@ "zod": "^4.3.6" }, "devDependencies": { + "@eslint/js": "^9.39.1", "@types/cors": "^2.8.19", "@types/graphql-depth-limit": "^1.1.6", "@types/node": "^20.11.5", "@types/pg": "^8.11.2", "@types/ws": "^8.18.1", + "eslint": "^9.39.1", "ts-node-dev": "^2.0.0", - "typescript": "^5.3.3" + "typescript": "^5.3.3", + "typescript-eslint": "^8.48.0" } }, "node_modules/@apollo/cache-control-types": { @@ -391,6 +394,213 @@ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "license": "MIT" }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + 
"node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.2", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.2.tgz", + "integrity": "sha512-nJl2KGTlrf9GjLimgIru+V/mzgSK0ABCDQRvxw5BjURL7WfH5uoWmizbH7QB6MmnMBd8cIC9uceWnezL1VZWWw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.5" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-array/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@eslint/config-array/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0" + }, + "engines": { + 
"node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.5.tgz", + "integrity": "sha512-4IlJx0X0qftVsN5E+/vGujTRIFtwuLbNsVUe7TO6zYPDR1O6nFwvwhIKEKSrl6dZchmYBITazxKoUYOjdtjlRg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.14.0", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.1", + "minimatch": "^3.1.5", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@eslint/eslintrc/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@eslint/eslintrc/node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": 
"https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/js": { + "version": "9.39.4", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.4.tgz", + "integrity": "sha512-nE7DEIchvtiFTwBw4Lfbu59PG+kCofhjsKaCWzxTpt4lfRjRMqG6uMBzKXuEcyXhOHoUp9riAm7/aWYGhXZ9cw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "node_modules/@graphql-tools/merge": { "version": "9.1.7", "resolved": "https://registry.npmjs.org/@graphql-tools/merge/-/merge-9.1.7.tgz", @@ -451,6 +661,58 @@ "graphql": "^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" } }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": 
"sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, "node_modules/@jridgewell/resolve-uri": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", @@ -713,6 +975,13 @@ "@types/node": "*" } }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, 
"node_modules/@types/express": { "version": "4.17.25", "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.25.tgz", @@ -767,6 +1036,13 @@ "integrity": "sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==", "license": "MIT" }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/long": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz", @@ -859,21 +1135,403 @@ "dev": true, "license": "MIT" }, - "node_modules/@types/strip-json-comments": { - "version": "0.0.30", - "resolved": "https://registry.npmjs.org/@types/strip-json-comments/-/strip-json-comments-0.0.30.tgz", - "integrity": "sha512-7NQmHra/JILCd1QqpSzl8+mJRc8ZHz3uDm8YV1Ks9IhK0epEiTw8aIErbvH9PI+6XbqhyIQy3462nEsn7UVzjQ==", + "node_modules/@types/strip-json-comments": { + "version": "0.0.30", + "resolved": "https://registry.npmjs.org/@types/strip-json-comments/-/strip-json-comments-0.0.30.tgz", + "integrity": "sha512-7NQmHra/JILCd1QqpSzl8+mJRc8ZHz3uDm8YV1Ks9IhK0epEiTw8aIErbvH9PI+6XbqhyIQy3462nEsn7UVzjQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/ws": { + "version": "8.18.1", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", + "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.57.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.57.2.tgz", + "integrity": "sha512-NZZgp0Fm2IkD+La5PR81sd+g+8oS6JwJje+aRWsDocxHkjyRw0J5L5ZTlN3LI1LlOcGL7ph3eaIUmTXMIjLk0w==", 
+ "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.12.2", + "@typescript-eslint/scope-manager": "8.57.2", + "@typescript-eslint/type-utils": "8.57.2", + "@typescript-eslint/utils": "8.57.2", + "@typescript-eslint/visitor-keys": "8.57.2", + "ignore": "^7.0.5", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.57.2", + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.57.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.57.2.tgz", + "integrity": "sha512-30ScMRHIAD33JJQkgfGW1t8CURZtjc2JpTrq5n2HFhOefbAhb7ucc7xJwdWcrEtqUIYJ73Nybpsggii6GtAHjA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/scope-manager": "8.57.2", + "@typescript-eslint/types": "8.57.2", + "@typescript-eslint/typescript-estree": "8.57.2", + "@typescript-eslint/visitor-keys": "8.57.2", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/debug": { + "version": "4.4.3", + "resolved": 
"https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.57.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.57.2.tgz", + "integrity": "sha512-FuH0wipFywXRTHf+bTTjNyuNQQsQC3qh/dYzaM4I4W0jrCqjCVuUh99+xd9KamUfmCGPvbO8NDngo/vsnNVqgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.57.2", + "@typescript-eslint/types": "^8.57.2", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/project-service/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/project-service/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", 
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.57.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.57.2.tgz", + "integrity": "sha512-snZKH+W4WbWkrBqj4gUNRIGb/jipDW3qMqVJ4C9rzdFc+wLwruxk+2a5D+uoFcKPAqyqEnSb4l2ULuZf95eSkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.57.2", + "@typescript-eslint/visitor-keys": "8.57.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.57.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.57.2.tgz", + "integrity": "sha512-3Lm5DSM+DCowsUOJC+YqHHnKEfFh5CoGkj5Z31NQSNF4l5wdOwqGn99wmwN/LImhfY3KJnmordBq/4+VDe2eKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.57.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.57.2.tgz", + "integrity": "sha512-Co6ZCShm6kIbAM/s+oYVpKFfW7LBc6FXoPXjTRQ449PPNBY8U0KZXuevz5IFuuUj2H9ss40atTaf9dlGLzbWZg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.57.2", + "@typescript-eslint/typescript-estree": "8.57.2", + "@typescript-eslint/utils": "8.57.2", + "debug": "^4.4.3", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/types": { + "version": "8.57.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.57.2.tgz", + "integrity": "sha512-/iZM6FnM4tnx9csuTxspMW4BOSegshwX5oBDznJ7S4WggL7Vczz5d2W11ecc4vRrQMQHXRSxzrCsyG5EsPPTbA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.57.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.57.2.tgz", + "integrity": "sha512-2MKM+I6g8tJxfSmFKOnHv2t8Sk3T6rF20A1Puk0svLK+uVapDZB/4pfAeB7nE83uAZrU6OxW+HmOd5wHVdXwXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/project-service": "8.57.2", + "@typescript-eslint/tsconfig-utils": "8.57.2", + "@typescript-eslint/types": "8.57.2", + "@typescript-eslint/visitor-keys": "8.57.2", + "debug": "^4.4.3", + "minimatch": 
"^10.2.2", + "semver": "^7.7.3", + "tinyglobby": "^0.2.15", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/balanced-match": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", + "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz", + "integrity": "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^4.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "10.2.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.4.tgz", + "integrity": "sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg==", + "dev": true, + "license": "BlueOak-1.0.0", + 
"dependencies": { + "brace-expansion": "^5.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.57.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.57.2.tgz", + "integrity": "sha512-krRIbvPK1ju1WBKIefiX+bngPs+odIQUtR7kymzPfo1POVw3jlF+nLkmexdSSd4UCbDcQn+wMBATOOmpBbqgKg==", "dev": true, - "license": "MIT" + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.9.1", + "@typescript-eslint/scope-manager": "8.57.2", + "@typescript-eslint/types": "8.57.2", + "@typescript-eslint/typescript-estree": "8.57.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.0.0" + } }, - "node_modules/@types/ws": { - "version": "8.18.1", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", - "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==", + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.57.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.57.2.tgz", + "integrity": "sha512-zhahknjobV2FiD6Ee9iLbS7OV9zi10rG26odsQdfBO/hjSzUQbkIYgda+iNKK1zNiW2ey+Lf8MU5btN17V3dUw==", "dev": true, "license": "MIT", "dependencies": { - "@types/node": "*" + "@typescript-eslint/types": "8.57.2", + "eslint-visitor-keys": "^5.0.0" + }, + "engines": { + "node": 
"^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-5.0.1.tgz", + "integrity": "sha512-tD40eHxA35h0PEIZNeIjkHoDR4YjjJp34biM0mDvplBe//mB+IHCqHDGV7pxF+7MklTvighcCPPZC7ynWyjdTA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24" + }, + "funding": { + "url": "https://opencollective.com/eslint" } }, "node_modules/@whatwg-node/promise-helpers": { @@ -923,6 +1581,16 @@ "node": ">=0.4.0" } }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, "node_modules/acorn-walk": { "version": "8.3.5", "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.5.tgz", @@ -945,6 +1613,23 @@ "node": ">=12.0" } }, + "node_modules/ajv": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz", + "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, "node_modules/ansi-styles": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", @@ -1016,6 +1701,13 @@ "dev": true, "license": "MIT" }, + "node_modules/argparse": { + "version": "2.0.1", + 
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, "node_modules/array-back": { "version": "6.2.2", "resolved": "https://registry.npmjs.org/array-back/-/array-back-6.2.2.tgz", @@ -1207,6 +1899,16 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/cassandra-driver": { "version": "4.8.0", "resolved": "https://registry.npmjs.org/cassandra-driver/-/cassandra-driver-4.8.0.tgz", @@ -1454,6 +2156,21 @@ "node": ">=16.0.0" } }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", @@ -1463,6 +2180,13 @@ "ms": "2.0.0" } }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, "node_modules/define-data-property": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", @@ -1620,6 +2344,211 @@ "integrity": 
"sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", "license": "MIT" }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.39.4", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.4.tgz", + "integrity": "sha512-XoMjdBOwe/esVgEvLmNsD3IRHkm7fbKIUGvrleloJXUZgDHig2IPWNniv+GwjyJXzuNqVjlr5+4yVUZjycJwfQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.2", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.5", + "@eslint/js": "9.39.4", + "@eslint/plugin-kit": "^0.4.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.14.0", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.5", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": 
"https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/eslint/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/eslint/node_modules/ms": { + "version": 
"2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": 
"sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/etag": { "version": "1.8.1", "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", @@ -1675,6 +2604,40 @@ "url": "https://opencollective.com/express" } }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, "node_modules/fill-range": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", @@ -1723,12 +2686,50 @@ } } }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": 
"sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, "node_modules/flatbuffers": { "version": "25.9.23", "resolved": "https://registry.npmjs.org/flatbuffers/-/flatbuffers-25.9.23.tgz", "integrity": "sha512-MI1qs7Lo4Syw0EOzUl0xjs2lsoeqFku44KpngfIduHBYvzm8h2+7K8YMQh1JtVVVrUvhLpNwqVi4DERegUJhPQ==", "license": "Apache-2.0" }, + "node_modules/flatted": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.2.tgz", + "integrity": "sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==", + "dev": true, + "license": "ISC" + }, "node_modules/for-each": { "version": "0.3.5", "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", @@ -1890,6 +2891,19 @@ "node": ">= 6" } }, + "node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/gopd": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", @@ -2041,23 +3055,60 @@ "toidentifier": "~1.0.1" }, "engines": { - "node": ">= 0.8" + 
"node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" }, "funding": { - "type": "opencollective", - "url": "https://opencollective.com/express" + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, "engines": { - "node": ">=0.10.0" + "node": ">=0.8.19" } }, 
"node_modules/inflight": { @@ -2182,6 +3233,13 @@ "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", "license": "MIT" }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, "node_modules/iterall": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/iterall/-/iterall-1.3.0.tgz", @@ -2198,6 +3256,19 @@ "url": "https://github.com/sponsors/panva" } }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, "node_modules/json-bignum": { "version": "0.0.3", "resolved": "https://registry.npmjs.org/json-bignum/-/json-bignum-0.0.3.tgz", @@ -2206,12 +3277,80 @@ "node": ">=0.8" } }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + 
"integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/lodash.camelcase": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==", "license": "MIT" }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, "node_modules/lodash.sortby": { "version": "4.7.0", "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz", @@ 
-2364,6 +3503,13 @@ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", "license": "MIT" }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, "node_modules/negotiator": { "version": "0.6.4", "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz", @@ -2452,6 +3598,69 @@ "wrappy": "1" } }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/parseurl": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", @@ -2461,6 +3670,16 @@ "node": ">= 0.8" } }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", @@ -2471,6 +3690,16 @@ "node": ">=0.10.0" } }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/path-parse": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", @@ -2634,6 +3863,16 @@ "node": ">=0.10.0" } }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/prom-client": { "version": "15.1.3", "resolved": "https://registry.npmjs.org/prom-client/-/prom-client-15.1.3.tgz", @@ 
-2660,6 +3899,16 @@ "node": ">= 0.10" } }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/qs": { "version": "6.14.2", "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.2.tgz", @@ -2750,6 +3999,16 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, "node_modules/retry": { "version": "0.13.1", "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", @@ -2815,6 +4074,19 @@ ], "license": "BSD-3-Clause" }, + "node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/send": { "version": "0.19.2", "resolved": "https://registry.npmjs.org/send/-/send-0.19.2.tgz", @@ -2903,6 +4175,29 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": 
"3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/side-channel": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", @@ -3081,6 +4376,54 @@ "bintrees": "1.0.2" } }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, "node_modules/to-buffer": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.2.2.tgz", @@ -3133,6 +4476,19 @@ "tree-kill": "cli.js" } }, + 
"node_modules/ts-api-utils": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.5.0.tgz", + "integrity": "sha512-OJ/ibxhPlqrMM0UiNHJ/0CKQkoKF243/AEmplt3qpRgkW8VG7IfOS41h7V8TjITqdByHzrjcS/2si+y4lIh8NA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, "node_modules/ts-node": { "version": "10.9.2", "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", @@ -3231,6 +4587,19 @@ "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", "license": "0BSD" }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/type-is": { "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", @@ -3272,6 +4641,30 @@ "node": ">=14.17" } }, + "node_modules/typescript-eslint": { + "version": "8.57.2", + "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.57.2.tgz", + "integrity": "sha512-VEPQ0iPgWO/sBaZOU1xo4nuNdODVOajPnTIbog2GKYr31nIlZ0fWPoCQgGfF3ETyBl1vn63F/p50Um9Z4J8O8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/eslint-plugin": "8.57.2", + "@typescript-eslint/parser": "8.57.2", + "@typescript-eslint/typescript-estree": "8.57.2", + "@typescript-eslint/utils": "8.57.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, 
"node_modules/typical": { "version": "7.3.0", "resolved": "https://registry.npmjs.org/typical/-/typical-7.3.0.tgz", @@ -3305,6 +4698,16 @@ "node": ">= 0.8" } }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, "node_modules/utils-merge": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", @@ -3377,6 +4780,22 @@ "webidl-conversions": "^3.0.0" } }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/which-typed-array": { "version": "1.1.20", "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.20.tgz", @@ -3398,6 +4817,16 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/wordwrapjs": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/wordwrapjs/-/wordwrapjs-5.1.1.tgz", @@ -3460,6 +4889,19 @@ "node": ">=6" } }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": 
"sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/zod": { "version": "4.3.6", "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz", diff --git a/apps/bff/package.json b/apps/bff/package.json index 15211da..8da6607 100644 --- a/apps/bff/package.json +++ b/apps/bff/package.json @@ -4,7 +4,10 @@ "scripts": { "dev": "ts-node-dev src/server.ts", "build": "tsc", - "start": "node dist/server.js" + "start": "node dist/server.js", + "lint": "eslint src/", + "lint:fix": "eslint src/ --fix", + "typecheck": "tsc --noEmit" }, "dependencies": { "@apollo/server": "^4.13.0", @@ -32,6 +35,9 @@ "@types/node": "^20.11.5", "@types/pg": "^8.11.2", "@types/ws": "^8.18.1", + "@eslint/js": "^9.39.1", + "eslint": "^9.39.1", + "typescript-eslint": "^8.48.0", "ts-node-dev": "^2.0.0", "typescript": "^5.3.3" } diff --git a/apps/bff/src/datasources/redis.ts b/apps/bff/src/datasources/redis.ts index dd3a249..3b72716 100644 --- a/apps/bff/src/datasources/redis.ts +++ b/apps/bff/src/datasources/redis.ts @@ -1,11 +1,33 @@ -import { createClient } from "redis"; +import { createClient, createCluster } from "redis"; -const client = createClient({ - socket: { - host: process.env.REDIS_HOST || "localhost", - port: parseInt(process.env.REDIS_PORT || "6379"), +// REDIS_CLUSTER_NODES = "redis-cluster-0:6379,redis-cluster-1:6379,..." +// When set, uses Redis Cluster. Otherwise falls back to single-node (local dev). 
+const REDIS_CLUSTER_NODES = process.env.REDIS_CLUSTER_NODES; + +const client = (() => { + if (REDIS_CLUSTER_NODES) { + const rootNodes = REDIS_CLUSTER_NODES.split(",").map((n) => { + const [host, port] = n.trim().split(":"); + return { + socket: { + host, + port: parseInt(port || "6379"), + }, + }; + }); + console.log(`Redis cluster mode: ${rootNodes.length} nodes`); + return createCluster({ rootNodes }); } -}); + + // Single-node (local dev / docker-compose default) + console.log("Redis single-node mode"); + return createClient({ + socket: { + host: process.env.REDIS_HOST || "localhost", + port: parseInt(process.env.REDIS_PORT || "6379"), + }, + }); +})(); client.connect().catch(console.error); client.on("error", (err) => console.error("Redis error:", err)); @@ -21,10 +43,8 @@ export const cache = { async getMany(keys: string[]): Promise<(T | null)[]> { if (keys.length === 0) return []; - const pipeline = client.multi(); - keys.forEach(key => pipeline.get(key)); - const results = await pipeline.exec(); - return results.map((r: any) => r ? JSON.parse(r) : null); + const results = await Promise.all(keys.map((key) => client.get(key))); + return results.map((value) => (value ? JSON.parse(value) as T : null)); }, async set(key: string, value: any, ttlSeconds: number): Promise { diff --git a/apps/bff/src/lib/circuitBreaker.ts b/apps/bff/src/lib/circuitBreaker.ts index 2e46890..7537ae8 100644 --- a/apps/bff/src/lib/circuitBreaker.ts +++ b/apps/bff/src/lib/circuitBreaker.ts @@ -1,11 +1,15 @@ /** * Circuit Breaker — protects BFF from Postgres failures * + * Hybrid: local state for fast decisions + Redis for cross-pod coordination. + * If Redis is unavailable, falls back to local-only (same as before). 
+ * * States: * CLOSED → normal operation, requests flow through * OPEN → Postgres unhealthy, requests fail fast * HALF_OPEN → testing recovery, one request allowed */ +import { cache } from "../datasources/redis"; type State = "CLOSED" | "OPEN" | "HALF_OPEN"; @@ -33,7 +37,39 @@ export class CircuitBreaker { this.name = opts.name ?? "circuit-breaker"; } + /** Sync shared state from Redis (best-effort) */ + private async syncFromRedis(): Promise { + try { + const shared = await cache.get<{ state: State; failureCount: number; lastFailureTime: number }>( + `cb:${this.name}` + ); + if (shared && shared.failureCount > this.failureCount) { + this.state = shared.state; + this.failureCount = shared.failureCount; + this.lastFailureTime = shared.lastFailureTime; + } + } catch { + // Redis unavailable — use local state only + } + } + + /** Publish local state to Redis (best-effort) */ + private async syncToRedis(): Promise { + try { + await cache.set(`cb:${this.name}`, { + state: this.state, + failureCount: this.failureCount, + lastFailureTime: this.lastFailureTime, + }, 120); + } catch { + // Redis unavailable — local state only + } + } + async execute(fn: () => Promise): Promise { + // Sync shared circuit state from Redis before checking + await this.syncFromRedis(); + // OPEN — check if timeout has elapsed to move to HALF_OPEN if (this.state === "OPEN") { const elapsed = Date.now() - this.lastFailureTime; @@ -43,7 +79,6 @@ export class CircuitBreaker { `[${this.name}] Circuit OPEN — Postgres unavailable. 
Retry in ${remaining}s` ); } - // Timeout elapsed — test with one request console.warn( JSON.stringify({ level: "warn", @@ -84,6 +119,7 @@ export class CircuitBreaker { if (this.successCount >= this.successThreshold) { this.state = "CLOSED"; this.successCount = 0; + this.syncToRedis().catch(() => {}); console.log( JSON.stringify({ level: "info", @@ -102,9 +138,9 @@ export class CircuitBreaker { this.lastFailureTime = Date.now(); if (this.state === "HALF_OPEN") { - // Failed during test — reopen immediately this.state = "OPEN"; this.successCount = 0; + this.syncToRedis().catch(() => {}); console.error( JSON.stringify({ level: "error", @@ -119,6 +155,7 @@ export class CircuitBreaker { if (this.failureCount >= this.failureThreshold) { this.state = "OPEN"; + this.syncToRedis().catch(() => {}); console.error( JSON.stringify({ level: "error", @@ -152,4 +189,3 @@ export const postgresCircuitBreaker = new CircuitBreaker({ successThreshold: 2, timeout: 30_000, }); - diff --git a/apps/gateway/package-lock.json b/apps/gateway/package-lock.json index 5c7d258..c6f8d5a 100644 --- a/apps/gateway/package-lock.json +++ b/apps/gateway/package-lock.json @@ -37,10 +37,10 @@ "devDependencies": { "@types/cors": "^2.8.17", "@types/express": "^4.17.21", - "@types/jest": "^29.5.12", + "@types/jest": "^29.5.14", "@types/node": "^20.11.30", "@types/pg": "^8.10.9", - "@types/supertest": "^6.0.2", + "@types/supertest": "^6.0.3", "@types/uuid": "^10.0.0", "jest": "^29.7.0", "supertest": "^7.0.0", diff --git a/apps/gateway/package.json b/apps/gateway/package.json index dc42cc3..1c5ea31 100644 --- a/apps/gateway/package.json +++ b/apps/gateway/package.json @@ -40,10 +40,10 @@ "devDependencies": { "@types/cors": "^2.8.17", "@types/express": "^4.17.21", - "@types/jest": "^29.5.12", + "@types/jest": "^29.5.14", "@types/node": "^20.11.30", "@types/pg": "^8.10.9", - "@types/supertest": "^6.0.2", + "@types/supertest": "^6.0.3", "@types/uuid": "^10.0.0", "jest": "^29.7.0", "supertest": "^7.0.0", diff 
--git a/apps/gateway/src/lib/audit.ts b/apps/gateway/src/lib/audit.ts index 2f1f08d..cf5d483 100644 --- a/apps/gateway/src/lib/audit.ts +++ b/apps/gateway/src/lib/audit.ts @@ -23,6 +23,10 @@ export type AuditEventType = | "billing.subscription_cancelled" | "billing.subscription_updated" | "billing.portal_accessed" + | "device.created" + | "device.creation_failed" + | "device.registered" + | "webhook_endpoint.created" // API keys | "api_key.created" | "api_key.revoked" diff --git a/apps/gateway/src/middleware/planEnforcement.ts b/apps/gateway/src/middleware/planEnforcement.ts new file mode 100644 index 0000000..1604f08 --- /dev/null +++ b/apps/gateway/src/middleware/planEnforcement.ts @@ -0,0 +1,297 @@ +import { Request, Response, NextFunction } from "express"; +import { pool } from "../database/db"; +import { redis } from "../cache/redis"; +import { PLANS } from "../services/stripe"; + +// ─── Plan tier feature matrix ───────────────────────────────────────────────── + +interface PlanLimits { + devices: number; // -1 = unlimited + alertRules: number; + bulkImport: boolean; + sso: boolean; + auditLogExport: boolean; + apiRateLimit: number; + webhooks: boolean; +} + +const PLAN_LIMITS: Record = { + free: { + devices: 5, + alertRules: 3, + bulkImport: false, + sso: false, + auditLogExport: false, + apiRateLimit: 100, + webhooks: false, + }, + starter: { + devices: PLANS.starter.devices, // 10 + alertRules: 10, + bulkImport: true, + sso: false, + auditLogExport: false, + apiRateLimit: 500, + webhooks: true, + }, + professional: { + devices: PLANS.professional.devices, // 100 + alertRules: 50, + bulkImport: true, + sso: true, + auditLogExport: true, + apiRateLimit: 2000, + webhooks: true, + }, + enterprise: { + devices: PLANS.enterprise.devices, // -1 + alertRules: -1, + bulkImport: true, + sso: true, + auditLogExport: true, + apiRateLimit: 10000, + webhooks: true, + }, +}; + +// ─── Tenant plan cache (Redis, 5 min TTL) ───────────────────────────────────── + +interface 
TenantPlan { + plan: string; + subscriptionStatus: string; + currentPeriodEnd: string | null; +} + +const CACHE_TTL = 300; // 5 minutes + +async function getTenantPlan(tenantId: string): Promise { + const cacheKey = `tenant_plan:${tenantId}`; + + try { + const cached = await redis.get(cacheKey); + if (cached) return JSON.parse(cached) as TenantPlan; + } catch { + // Redis unavailable — fall through to DB + } + + const { rows } = await pool.query( + `SELECT plan, subscription_status, current_period_end + FROM tenants WHERE id = $1`, + [tenantId] + ); + + if (rows.length === 0) { + return { plan: "free", subscriptionStatus: "none", currentPeriodEnd: null }; + } + + const result: TenantPlan = { + plan: rows[0].plan || "free", + subscriptionStatus: rows[0].subscription_status || "none", + currentPeriodEnd: rows[0].current_period_end ?? null, + }; + + try { + await redis.set(cacheKey, JSON.stringify(result), "EX", CACHE_TTL); + } catch { + // Non-critical + } + + return result; +} + +async function getDeviceCount(tenantId: string): Promise { + const cacheKey = `device_count:${tenantId}`; + + try { + const cached = await redis.get(cacheKey); + if (cached) return parseInt(cached, 10); + } catch { /* ignore */ } + + const { rows } = await pool.query( + "SELECT COUNT(*)::int AS count FROM devices WHERE tenant_id = $1", + [tenantId] + ); + + const count: number = rows[0]?.count ?? 0; + + try { + await redis.set(cacheKey, String(count), "EX", 60); + } catch { /* ignore */ } + + return count; +} + +async function getAlertRuleCount(tenantId: string): Promise { + const { rows } = await pool.query( + "SELECT COUNT(*)::int AS count FROM alert_rules WHERE tenant_id = $1", + [tenantId] + ); + return rows[0]?.count ?? 
0; +} + +// ─── Cache invalidation ──────────────────────────────────────────────────────── + +export async function invalidatePlanCache(tenantId: string): Promise { + try { + await redis.del(`tenant_plan:${tenantId}`); + await redis.del(`device_count:${tenantId}`); + } catch { + // Non-critical + } +} + +// ─── requireActiveSubscription ──────────────────────────────────────────────── + +export function requireActiveSubscription() { + return async (req: Request, res: Response, next: NextFunction) => { + if (!req.user?.tenantId) return next(); + + const tenant = await getTenantPlan(req.user.tenantId); + + if (tenant.plan === "free") return next(); + + const blocked = ["canceled", "unpaid", "incomplete_expired"]; + if (blocked.includes(tenant.subscriptionStatus)) { + return res.status(403).json({ + error: "subscription_inactive", + plan: tenant.plan, + status: tenant.subscriptionStatus, + message: "Your subscription is inactive. Please update your billing to continue.", + upgradeUrl: "/billing", + }); + } + + if (tenant.subscriptionStatus === "past_due") { + const periodEnd = tenant.currentPeriodEnd ? new Date(tenant.currentPeriodEnd) : null; + const gracePeriodEnd = periodEnd + ? new Date(periodEnd.getTime() + 7 * 24 * 60 * 60 * 1000) + : null; + + if (gracePeriodEnd && new Date() > gracePeriodEnd) { + return res.status(403).json({ + error: "subscription_past_due", + message: "Payment is past due. Service suspended after 7-day grace period.", + upgradeUrl: "/billing", + }); + } + } + + return next(); + }; +} + +// ─── enforceDeviceQuota ─────────────────────────────────────────────────────── + +export function enforceDeviceQuota() { + return async (req: Request, res: Response, next: NextFunction) => { + const tenantId = req.user!.tenantId; + const tenant = await getTenantPlan(tenantId); + const limits = PLAN_LIMITS[tenant.plan] ?? 
PLAN_LIMITS.free; + + if (limits.devices === -1) return next(); + + const currentCount = await getDeviceCount(tenantId); + + if (currentCount >= limits.devices) { + return res.status(403).json({ + error: "device_quota_exceeded", + plan: tenant.plan, + limit: limits.devices, + current: currentCount, + message: `Your ${tenant.plan} plan allows ${limits.devices} devices. You have ${currentCount}.`, + upgradeUrl: "/billing", + }); + } + + return next(); + }; +} + +// ─── enforceBulkDeviceQuota ─────────────────────────────────────────────────── + +export function enforceBulkDeviceQuota() { + return async (req: Request, res: Response, next: NextFunction) => { + const tenantId = req.user!.tenantId; + const tenant = await getTenantPlan(tenantId); + const limits = PLAN_LIMITS[tenant.plan] ?? PLAN_LIMITS.free; + + if (!limits.bulkImport) { + return res.status(403).json({ + error: "feature_not_available", + feature: "bulk_import", + plan: tenant.plan, + message: "Bulk import is available on Starter plans and above.", + upgradeUrl: "/billing", + }); + } + + if (limits.devices !== -1) { + const currentCount = await getDeviceCount(tenantId); + if (currentCount >= limits.devices) { + return res.status(403).json({ + error: "device_quota_exceeded", + plan: tenant.plan, + limit: limits.devices, + current: currentCount, + upgradeUrl: "/billing", + }); + } + } + + return next(); + }; +} + +// ─── enforceAlertRuleQuota ──────────────────────────────────────────────────── + +export function enforceAlertRuleQuota() { + return async (req: Request, res: Response, next: NextFunction) => { + const tenantId = req.user!.tenantId; + const tenant = await getTenantPlan(tenantId); + const limits = PLAN_LIMITS[tenant.plan] ?? 
PLAN_LIMITS.free; + + if (limits.alertRules === -1) return next(); + + const currentCount = await getAlertRuleCount(tenantId); + + if (currentCount >= limits.alertRules) { + return res.status(403).json({ + error: "alert_rule_quota_exceeded", + plan: tenant.plan, + limit: limits.alertRules, + current: currentCount, + message: `Your ${tenant.plan} plan allows ${limits.alertRules} alert rules.`, + upgradeUrl: "/billing", + }); + } + + return next(); + }; +} + +// ─── requireFeature ─────────────────────────────────────────────────────────── + +export function requireFeature(feature: keyof PlanLimits) { + return async (req: Request, res: Response, next: NextFunction) => { + const tenantId = req.user!.tenantId; + const tenant = await getTenantPlan(tenantId); + const limits = PLAN_LIMITS[tenant.plan] ?? PLAN_LIMITS.free; + + const value = limits[feature]; + + if (value === false) { + return res.status(403).json({ + error: "feature_not_available", + feature, + plan: tenant.plan, + message: `${String(feature)} is not available on your ${tenant.plan} plan.`, + upgradeUrl: "/billing", + }); + } + + return next(); + }; +} + +export { PLAN_LIMITS }; +export type { PlanLimits, TenantPlan }; diff --git a/apps/gateway/src/routes/teamMembers.ts b/apps/gateway/src/routes/teamMembers.ts index c6aee06..82185a6 100644 --- a/apps/gateway/src/routes/teamMembers.ts +++ b/apps/gateway/src/routes/teamMembers.ts @@ -77,15 +77,15 @@ teamRouter.post( ); // Send Auth0 org invite so the user gets an email and lands in the org - pool.query("SELECT auth0_org_id FROM tenants WHERE id = $1", [tenantId]) - .then((result: { rows: Array<{ auth0_org_id?: string }> }) => { + pool.query("SELECT auth0_org_id, name FROM tenants WHERE id = $1", [tenantId]) + .then((result: { rows: Array<{ auth0_org_id?: string; name?: string }> }) => { const rows = result.rows; if (!rows[0]?.auth0_org_id) return; return inviteToOrg({ orgId: rows[0].auth0_org_id, email: email.trim().toLowerCase(), role: memberRole, - 
inviterName: req.user!.sub, + inviterName: rows[0].name || "GrainGuard", }); }) .catch((_e: unknown) => diff --git a/apps/gateway/src/server.ts b/apps/gateway/src/server.ts index fedb233..6550d78 100644 --- a/apps/gateway/src/server.ts +++ b/apps/gateway/src/server.ts @@ -7,6 +7,7 @@ import { createDevice } from "./services/device"; import { getDeviceLatestTelemetry } from "./services/device-query"; import { redis } from "./cache/redis"; import { pool } from "./database/db"; +import { writeAuditLog as logAuditEvent } from "./lib/audit"; import { metricsHandler, requestLatency } from "./observability/metrics"; import { requestIdMiddleware } from "./middleware/requestId"; import { authMiddleware } from "./middleware/auth"; @@ -187,9 +188,26 @@ app.post( authHeader ); + logAuditEvent({ + eventType: "device.created", + actorId: userId, + tenantId, + resourceType: "device", + resourceId: result?.deviceId || serialNumber, + meta: { serialNumber, requestId }, + ipAddress: req.ip, + }); return res.json(result); } catch (err) { console.error(err); + logAuditEvent({ + eventType: "device.creation_failed", + actorId: req.user?.sub || "unknown", + tenantId: req.user?.tenantId || "00000000-0000-0000-0000-000000000000", + resourceType: "device", + meta: { serialNumber: req.body?.serialNumber, error: String(err) }, + ipAddress: req.ip, + }); return res.status(500).json({ error: "Failed to create device" }); } } diff --git a/apps/gateway/src/services/device.ts b/apps/gateway/src/services/device.ts index 32248e5..c595669 100644 --- a/apps/gateway/src/services/device.ts +++ b/apps/gateway/src/services/device.ts @@ -98,6 +98,10 @@ export function createDevice( if (err) { return reject(err); } + // Validate response structure + if (!response || !response.device_id) { + return reject(new Error("Invalid gRPC response: missing device_id")); + } resolve(response); } ); diff --git a/apps/jobs-worker/package-lock.json b/apps/jobs-worker/package-lock.json index 05412e6..b73bd14 100644 --- 
a/apps/jobs-worker/package-lock.json +++ b/apps/jobs-worker/package-lock.json @@ -118,14 +118,14 @@ "license": "MIT" }, "node_modules/axios": { - "version": "1.13.6", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.6.tgz", - "integrity": "sha512-ChTCHMouEe2kn713WHbQGcuYrr6fXTBiu460OTwWrWob16g1bXn4vtz07Ope7ewMozJAnEquLk5lWQWtBig9DQ==", + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.14.0.tgz", + "integrity": "sha512-3Y8yrqLSwjuzpXuZ0oIYZ/XGgLwUIBU3uLvbcpb0pidD9ctpShJd43KSlEEkVQg6DS0G9NKyzOvBfUtDKEyHvQ==", "license": "MIT", "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", - "proxy-from-env": "^1.1.0" + "proxy-from-env": "^2.1.0" } }, "node_modules/buffer-more-ints": { @@ -552,10 +552,13 @@ } }, "node_modules/proxy-from-env": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", - "license": "MIT" + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-2.1.0.tgz", + "integrity": "sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA==", + "license": "MIT", + "engines": { + "node": ">=10" + } }, "node_modules/querystringify": { "version": "2.2.0", diff --git a/apps/jobs-worker/src/connection.ts b/apps/jobs-worker/src/connection.ts index 6c75175..c1514cd 100644 --- a/apps/jobs-worker/src/connection.ts +++ b/apps/jobs-worker/src/connection.ts @@ -15,10 +15,18 @@ export async function connect(): Promise { for (let attempt = 1; attempt <= 10; attempt++) { try { console.log("[rabbitmq] connecting attempt " + attempt + "/10..."); - conn = await amqpConnect(RABBITMQ_URL); + conn = await amqpConnect(RABBITMQ_URL, { heartbeat: 30 }); ch = await conn.createChannel(); await ch.prefetch(1); + // Detect stale connections early + conn.on("error", (err: Error) => { + 
console.error("[rabbitmq] connection error:", err.message); + }); + conn.on("close", () => { + console.warn("[rabbitmq] connection closed unexpectedly"); + }); + for (const [key, queue] of Object.entries(QUEUES)) { const dlq = DLQ[key as keyof typeof DLQ]; await ch.assertQueue(dlq, { durable: true }); diff --git a/apps/read-model-builder/cmd/main.go b/apps/read-model-builder/cmd/main.go index 0bf0f33..d456a26 100644 --- a/apps/read-model-builder/cmd/main.go +++ b/apps/read-model-builder/cmd/main.go @@ -7,6 +7,7 @@ import ( "os/signal" "runtime" "strconv" + "strings" "sync" "syscall" "time" @@ -80,9 +81,26 @@ func main() { ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) defer stop() - // Redis - redisClient := redis.NewClient(&redis.Options{ - Addr: getenv("REDIS_ADDR", "redis:6379"), + // Redis (cluster or single-node) + clusterNodes := getenv("REDIS_CLUSTER_NODES", "") + var addrs []string + if clusterNodes != "" { + for _, a := range strings.Split(clusterNodes, ",") { + addrs = append(addrs, strings.TrimSpace(a)) + } + log.Info().Int("nodes", len(addrs)).Msg("Redis cluster mode") + } else { + addrs = []string{getenv("REDIS_ADDR", "redis:6379")} + log.Info().Str("addr", addrs[0]).Msg("Redis single-node mode") + } + + redisClient := redis.NewUniversalClient(&redis.UniversalOptions{ + Addrs: addrs, + PoolSize: getenvInt("REDIS_POOL_SIZE", 20), + MinIdleConns: 5, + ReadTimeout: 2 * time.Second, + WriteTimeout: 2 * time.Second, + RouteByLatency: len(addrs) > 1, }) defer redisClient.Close() diff --git a/apps/read-model-builder/internal/consumer/message_handler.go b/apps/read-model-builder/internal/consumer/message_handler.go index deee61f..599951d 100644 --- a/apps/read-model-builder/internal/consumer/message_handler.go +++ b/apps/read-model-builder/internal/consumer/message_handler.go @@ -13,7 +13,7 @@ import ( func NewEnvelopeHandler( pool *pgxpool.Pool, - redisClient *redis.Client, + redisClient redis.UniversalClient, ) 
func(context.Context, []byte) error { telemetryHandler := projection.HandleTelemetry(pool, redisClient) deviceHandler := projection.HandleDevice(pool, redisClient) @@ -40,7 +40,7 @@ func NewEnvelopeHandler( func NewBatchEnvelopeHandler( pool *pgxpool.Pool, - redisClient *redis.Client, + redisClient redis.UniversalClient, ) func(context.Context, [][]byte) error { return projection.HandleTelemetryBatch(pool, redisClient) } diff --git a/apps/read-model-builder/internal/projection/device_projection.go b/apps/read-model-builder/internal/projection/device_projection.go index d624c3e..5d1eb7b 100644 --- a/apps/read-model-builder/internal/projection/device_projection.go +++ b/apps/read-model-builder/internal/projection/device_projection.go @@ -17,7 +17,7 @@ import ( "github.com/pahuldeepp/grainguard/libs/observability" ) -func HandleDevice(pool *pgxpool.Pool, redisClient *redis.Client) func([]byte) error { +func HandleDevice(pool *pgxpool.Pool, redisClient redis.UniversalClient) func([]byte) error { return func(payload []byte) error { start := time.Now() diff --git a/apps/read-model-builder/internal/projection/telemetry_projection.go b/apps/read-model-builder/internal/projection/telemetry_projection.go index 369fbb1..3ca5f0e 100644 --- a/apps/read-model-builder/internal/projection/telemetry_projection.go +++ b/apps/read-model-builder/internal/projection/telemetry_projection.go @@ -39,7 +39,7 @@ type parsedEvent struct { recordedAt time.Time } -func HandleTelemetry(pool *pgxpool.Pool, redisClient *redis.Client) func([]byte) error { +func HandleTelemetry(pool *pgxpool.Pool, redisClient redis.UniversalClient) func([]byte) error { return func(payload []byte) error { start := time.Now() @@ -201,7 +201,7 @@ func HandleTelemetry(pool *pgxpool.Pool, redisClient *redis.Client) func([]byte) } } -func HandleTelemetryBatch(pool *pgxpool.Pool, redisClient *redis.Client) func(context.Context, [][]byte) error { +func HandleTelemetryBatch(pool *pgxpool.Pool, redisClient 
redis.UniversalClient) func(context.Context, [][]byte) error { return func(ctx context.Context, payloads [][]byte) error { start := time.Now() diff --git a/apps/saga-orchestrator/internal/domain/saga.go b/apps/saga-orchestrator/internal/domain/saga.go index ac45b92..239ca48 100644 --- a/apps/saga-orchestrator/internal/domain/saga.go +++ b/apps/saga-orchestrator/internal/domain/saga.go @@ -35,3 +35,16 @@ type Saga struct { PayloadJSON []byte // raw JSON payload stored in DB LastError string } + +var validStatuses = map[SagaStatus]bool{ + StatusStarted: true, + StatusInProgress: true, + StatusCompensating: true, + StatusCompleted: true, + StatusFailed: true, +} + +// IsValidStatus checks whether a status string is a known SagaStatus. +func IsValidStatus(s SagaStatus) bool { + return validStatuses[s] +} diff --git a/apps/saga-orchestrator/internal/health/checker.go b/apps/saga-orchestrator/internal/health/checker.go index 03ac813..67bae6b 100644 --- a/apps/saga-orchestrator/internal/health/checker.go +++ b/apps/saga-orchestrator/internal/health/checker.go @@ -20,10 +20,10 @@ func NewPostgresChecker(pool *pgxpool.Pool) Checker { return &postgresChecker{po func (c *postgresChecker) Name() string { return "postgres" } func (c *postgresChecker) Check(ctx context.Context) error { return c.pool.Ping(ctx) } -type redisChecker struct{ client *redis.Client } +type redisChecker struct{ client redis.UniversalClient } -func NewRedisChecker(client *redis.Client) Checker { return &redisChecker{client} } -func (c *redisChecker) Name() string { return "redis" } +func NewRedisChecker(client redis.UniversalClient) Checker { return &redisChecker{client} } +func (c *redisChecker) Name() string { return "redis" } func (c *redisChecker) Check(ctx context.Context) error { return c.client.Ping(ctx).Err() } diff --git a/apps/saga-orchestrator/internal/orchestrator/provision_saga.go b/apps/saga-orchestrator/internal/orchestrator/provision_saga.go index d6880f9..e754c34 100644 --- 
a/apps/saga-orchestrator/internal/orchestrator/provision_saga.go +++ b/apps/saga-orchestrator/internal/orchestrator/provision_saga.go @@ -51,13 +51,16 @@ func (p *ProvisionSaga) HandleEvent(ctx context.Context, raw []byte) error { } sagaID := uuid.New() - initialPayload, _ := json.Marshal(map[string]any{ + initialPayload, initialErr := json.Marshal(map[string]any{ "device_id": payload.GetDeviceId(), "tenant_id": payload.GetTenantId(), "serial": payload.GetSerial(), "created_at": payload.GetCreatedAt(), "event_id": env.GetEventId(), }) + if initialErr != nil { + return fmt.Errorf("marshal initial payload: %w", initialErr) + } saga := &domain.Saga{ ID: sagaID, @@ -79,7 +82,10 @@ func (p *ProvisionSaga) HandleEvent(ctx context.Context, raw []byte) error { "tenant_id": payload.GetTenantId(), "occurred_at_ms": env.GetOccurredAtUnixMs(), } - cmdBytes, _ := json.Marshal(cmd) + cmdBytes, cmdErr := json.Marshal(cmd) + if cmdErr != nil { + return fmt.Errorf("marshal tenant.attach_device command: %w", cmdErr) + } if err := p.cmdProducer.Publish(ctx, []byte(correlationID), cmdBytes); err != nil { _ = p.repo.MarkFailed(ctx, sagaID.String(), "failed to publish tenant.attach_device") @@ -123,7 +129,10 @@ func (p *ProvisionSaga) handleTenantAttached(ctx context.Context, env *eventspb. 
"tenant_id": env.GetTenantId(), "occurred_at_ms": env.GetOccurredAtUnixMs(), } - cmdBytes, _ := json.Marshal(cmd) + cmdBytes, detachErr := json.Marshal(cmd) + if detachErr != nil { + return fmt.Errorf("marshal tenant.detach_device command: %w", detachErr) + } if err := p.cmdProducer.Publish(ctx, []byte(correlationID), cmdBytes); err != nil { _ = p.repo.MarkFailed(ctx, saga.ID.String(), "failed to publish quota.allocate_device") diff --git a/apps/saga-orchestrator/internal/repository/postgres_saga_repository.go b/apps/saga-orchestrator/internal/repository/postgres_saga_repository.go index 7f426c0..35d8825 100644 --- a/apps/saga-orchestrator/internal/repository/postgres_saga_repository.go +++ b/apps/saga-orchestrator/internal/repository/postgres_saga_repository.go @@ -3,6 +3,7 @@ import ( "context" "errors" + "time" "github.com/google/uuid" "github.com/jackc/pgx/v5" @@ -11,6 +12,16 @@ import ( "github.com/pahuldeepp/grainguard/apps/saga-orchestrator/internal/domain" ) +const dbQueryTimeout = 10 * time.Second + +// withTimeout wraps a context with a query timeout if one isn't already set. +func withTimeout(ctx context.Context) (context.Context, context.CancelFunc) { + if _, ok := ctx.Deadline(); ok { + return ctx, func() {} // caller already set a deadline + } + return context.WithTimeout(ctx, dbQueryTimeout) +} + type PostgresSagaRepository struct { pool *pgxpool.Pool } @@ -20,6 +31,8 @@ func NewPostgresSagaRepository(pool *pgxpool.Pool) *PostgresSagaRepository { } func (r *PostgresSagaRepository) Create(ctx context.Context, saga *domain.Saga) error { + ctx, cancel := withTimeout(ctx) + defer cancel() _, err := r.pool.Exec(ctx, ` INSERT INTO sagas (saga_id, saga_type, correlation_id, status, current_step, payload, last_error) VALUES ($1,$2,$3,$4,$5,$6,$7) @@ -27,11 +40,19 @@ func (r *PostgresSagaRepository) Create(ctx context.Context, saga *domain.Saga) return err } +// FindByCorrelationID looks up a saga by its correlation ID (device UUID). 
+// Note: correlation_id is a globally unique UUID, so cross-tenant collision is not possible. +// The payload JSON contains tenant_id for auditing. If tenant_id column is added to the +// sagas table in the future, add a WHERE tenant_id = $2 filter here for defense in depth. func (r *PostgresSagaRepository) FindByCorrelationID(ctx context.Context, correlationID string) (*domain.Saga, error) { + ctx, cancel := withTimeout(ctx) + defer cancel() row := r.pool.QueryRow(ctx, ` SELECT saga_id, saga_type, correlation_id, status, current_step, payload, COALESCE(last_error,'') FROM sagas WHERE correlation_id = $1 + ORDER BY created_at DESC + LIMIT 1 `, correlationID) var s domain.Saga @@ -53,6 +74,8 @@ func (r *PostgresSagaRepository) FindByCorrelationID(ctx context.Context, correl } func (r *PostgresSagaRepository) UpdateStepStatus(ctx context.Context, sagaID string, step string, status string) error { + ctx, cancel := withTimeout(ctx) + defer cancel() _, err := r.pool.Exec(ctx, ` UPDATE sagas SET current_step = $2, @@ -64,6 +87,8 @@ func (r *PostgresSagaRepository) UpdateStepStatus(ctx context.Context, sagaID st } func (r *PostgresSagaRepository) MarkFailed(ctx context.Context, sagaID string, errMsg string) error { + ctx, cancel := withTimeout(ctx) + defer cancel() _, err := r.pool.Exec(ctx, ` UPDATE sagas SET status = $2, diff --git a/apps/search-indexer/main.py b/apps/search-indexer/main.py index 479ec1d..981b999 100644 --- a/apps/search-indexer/main.py +++ b/apps/search-indexer/main.py @@ -71,7 +71,41 @@ def index_telemetry(self, event): tenant_id = event.get("tenant_id") if not device_id or not tenant_id: return - self.es.update(index=DEVICE_INDEX, id=device_id, body={"doc":{"device_id":device_id,"tenant_id":tenant_id,"temperature":payload.get("temperature"),"humidity":payload.get("humidity"),"recorded_at":payload.get("recorded_at"),"status":"active"},"doc_as_upsert":True}) + + # Update current device state in device index + self.es.update( + index=DEVICE_INDEX, + 
id=device_id, + body={ + "doc": { + "device_id": device_id, + "tenant_id": tenant_id, + "temperature": payload.get("temperature"), + "humidity": payload.get("humidity"), + "recorded_at": payload.get("recorded_at"), + "status": "active", + }, + "doc_as_upsert": True, + }, + ) + + # Write time-series entry to telemetry index + # Use composite key so concurrent writes don't create duplicates + doc_id = f"{device_id}:{payload.get('recorded_at', '')}" + self.es.update( + index=TELEMETRY_INDEX, + id=doc_id, + body={ + "doc": { + "device_id": device_id, + "tenant_id": tenant_id, + "temperature": payload.get("temperature"), + "humidity": payload.get("humidity"), + "recorded_at": payload.get("recorded_at"), + }, + "doc_as_upsert": True, + }, + ) except Exception as e: log.error(f"Telemetry index error: {e}") diff --git a/apps/telemetry-service/migrations/000007_saas_columns.down.sql b/apps/telemetry-service/migrations/000007_saas_columns.down.sql index c7563e5..bbb4767 100644 --- a/apps/telemetry-service/migrations/000007_saas_columns.down.sql +++ b/apps/telemetry-service/migrations/000007_saas_columns.down.sql @@ -2,7 +2,9 @@ DROP TABLE IF EXISTS stripe_webhook_events; ALTER TABLE tenants + DROP COLUMN IF EXISTS current_period_end, DROP COLUMN IF EXISTS sso_connection_type, DROP COLUMN IF EXISTS sso_connection_id, + DROP COLUMN IF EXISTS email, DROP COLUMN IF EXISTS auth0_org_id, DROP COLUMN IF EXISTS subscription_status; diff --git a/apps/telemetry-service/migrations/000007_saas_columns.up.sql b/apps/telemetry-service/migrations/000007_saas_columns.up.sql index 484d594..557e5fd 100644 --- a/apps/telemetry-service/migrations/000007_saas_columns.up.sql +++ b/apps/telemetry-service/migrations/000007_saas_columns.up.sql @@ -1,10 +1,18 @@ -- 000007_saas_columns.up.sql -- Adds SaaS operational columns missing from the initial schema. 
+-- Tenant contact email used during signup and account export +ALTER TABLE tenants + ADD COLUMN IF NOT EXISTS email TEXT; + -- subscription_status on tenants (mirrors tenant_billing.status for fast joins) ALTER TABLE tenants ADD COLUMN IF NOT EXISTS subscription_status TEXT NOT NULL DEFAULT 'none'; +-- Grace-period enforcement reads the active billing period end from tenants +ALTER TABLE tenants + ADD COLUMN IF NOT EXISTS current_period_end TIMESTAMPTZ; + -- auth0_org_id for SSO / Auth0 Organizations ALTER TABLE tenants ADD COLUMN IF NOT EXISTS auth0_org_id TEXT; diff --git a/apps/telemetry-service/migrations/000009_sso_alert_rules_bulk.down.sql b/apps/telemetry-service/migrations/000009_sso_alert_rules_bulk.down.sql new file mode 100644 index 0000000..89a7b27 --- /dev/null +++ b/apps/telemetry-service/migrations/000009_sso_alert_rules_bulk.down.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS bulk_import_jobs; +DROP TABLE IF EXISTS alert_rules; + +DROP INDEX IF EXISTS idx_tenants_auth0_org; + +ALTER TABLE tenants + DROP COLUMN IF EXISTS auth0_org_id, + DROP COLUMN IF EXISTS sso_connection_id, + DROP COLUMN IF EXISTS sso_connection_type; diff --git a/apps/telemetry-service/migrations/000009_sso_alert_rules_bulk.up.sql b/apps/telemetry-service/migrations/000009_sso_alert_rules_bulk.up.sql new file mode 100644 index 0000000..1d9d476 --- /dev/null +++ b/apps/telemetry-service/migrations/000009_sso_alert_rules_bulk.up.sql @@ -0,0 +1,52 @@ +-- Migration 009: SSO columns + alert_rules table + bulk_import_jobs table + +-- ── SSO columns on tenants ──────────────────────────────────────────────────── +ALTER TABLE tenants + ADD COLUMN IF NOT EXISTS auth0_org_id TEXT, -- Auth0 Organization ID (org_xxx) + ADD COLUMN IF NOT EXISTS sso_connection_id TEXT, -- Auth0 Connection ID (con_xxx) + ADD COLUMN IF NOT EXISTS sso_connection_type TEXT; -- 'saml' | 'oidc' + +-- Unique index — one org per tenant +CREATE UNIQUE INDEX IF NOT EXISTS idx_tenants_auth0_org + ON tenants (auth0_org_id) + 
WHERE auth0_org_id IS NOT NULL; + +-- ── Alert rules ─────────────────────────────────────────────────────────────── +-- Defines when workflow-alerts should fire for devices in a tenant. +-- The workflow-alerts service reads these rows and evaluates them against +-- incoming risk score events from Kafka. +CREATE TABLE IF NOT EXISTS alert_rules ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + name TEXT NOT NULL, -- human-readable label + metric TEXT NOT NULL, -- 'temperature' | 'humidity' | 'co2' | ... + operator TEXT NOT NULL, -- '>' | '<' | '>=' | '<=' | '==' + threshold FLOAT NOT NULL, -- numeric threshold value + device_type TEXT, -- NULL = apply to all device types + enabled BOOLEAN NOT NULL DEFAULT true, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_alert_rules_tenant + ON alert_rules (tenant_id) + WHERE enabled = true; + +-- ── Bulk import jobs ────────────────────────────────────────────────────────── +-- Tracks the status of CSV bulk device import operations. +-- The import endpoint writes a row here before starting processing, +-- then updates it when done. Admins can see past imports in the UI. 
+CREATE TABLE IF NOT EXISTS bulk_import_jobs ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + created_by TEXT NOT NULL, -- user sub who uploaded + total_rows INT NOT NULL DEFAULT 0, + success_rows INT NOT NULL DEFAULT 0, + error_rows INT NOT NULL DEFAULT 0, + status TEXT NOT NULL DEFAULT 'running', -- 'running' | 'completed' | 'partial' | 'failed' + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + completed_at TIMESTAMPTZ +); + +CREATE INDEX IF NOT EXISTS idx_bulk_jobs_tenant + ON bulk_import_jobs (tenant_id, created_at DESC); diff --git a/apps/workflow-alerts/src/index.ts b/apps/workflow-alerts/src/index.ts index f116487..65f8f21 100644 --- a/apps/workflow-alerts/src/index.ts +++ b/apps/workflow-alerts/src/index.ts @@ -37,7 +37,11 @@ async function connectRabbitMQ(retries = 10, delay = 3000): Promise<{ conn: Chan await ch.assertQueue(ALERT_QUEUE, { durable: true, - arguments: { "x-dead-letter-exchange": "", "x-dead-letter-routing-key": ALERT_DLQ }, + arguments: { + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": ALERT_DLQ, + "x-message-ttl": 86400000, + }, }); await ch.assertQueue(ALERT_DLQ, { durable: true }); @@ -130,4 +134,4 @@ async function main() { main().catch((err) => { console.error("[workflow-alerts] fatal:", err); process.exit(1); -}); \ No newline at end of file +}); diff --git a/go.mod b/go.mod index 72cedcc..fb72902 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/MicahParks/keyfunc v1.9.0 github.com/gocql/gocql v1.7.0 github.com/golang-jwt/jwt/v4 v4.5.2 - github.com/golang-migrate/migrate/v4 v4.19.1 + github.com/golang-migrate/migrate/v4 v4.18.3 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 github.com/jackc/pgx/v5 v5.8.0 @@ -15,43 +15,45 @@ require ( github.com/rs/zerolog v1.34.0 github.com/segmentio/kafka-go v0.4.50 github.com/stretchr/testify v1.11.1 - github.com/testcontainers/testcontainers-go v0.41.0 - 
github.com/testcontainers/testcontainers-go/modules/postgres v0.41.0 + github.com/testcontainers/testcontainers-go v0.37.0 + github.com/testcontainers/testcontainers-go/modules/postgres v0.37.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.65.0 - go.opentelemetry.io/otel v1.41.0 + go.opentelemetry.io/otel v1.42.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0 - go.opentelemetry.io/otel/sdk v1.41.0 - google.golang.org/grpc v1.79.1 + go.opentelemetry.io/otel/sdk v1.42.0 + google.golang.org/grpc v1.79.2 google.golang.org/protobuf v1.36.11 ) require ( dario.cat/mergo v1.0.2 // indirect + github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/errdefs v1.0.0 // indirect - github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect github.com/cpuguy83/dockercfg v0.3.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v28.5.2+incompatible // indirect + github.com/docker/docker v28.0.1+incompatible // indirect github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/ebitengine/purego v0.10.0 // indirect + github.com/ebitengine/purego v0.8.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // 
indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect @@ -62,7 +64,6 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/go-archive v0.2.0 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/sys/user v0.4.0 // indirect @@ -78,19 +79,21 @@ require ( github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/shirou/gopsutil/v4 v4.26.2 // indirect + github.com/shirou/gopsutil/v4 v4.25.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/tklauser/go-sysconf v0.3.16 // indirect github.com/tklauser/numcpus v0.11.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 // indirect - go.opentelemetry.io/otel/metric v1.41.0 // indirect - go.opentelemetry.io/otel/trace v1.41.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0 // indirect + go.opentelemetry.io/otel/metric v1.42.0 // indirect + go.opentelemetry.io/otel/trace v1.42.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect + 
go.uber.org/atomic v1.7.0 // indirect golang.org/x/crypto v0.48.0 // indirect - golang.org/x/net v0.50.0 // indirect + golang.org/x/net v0.51.0 // indirect golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.41.0 // indirect golang.org/x/text v0.34.0 // indirect @@ -98,4 +101,5 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.5.2 // indirect ) diff --git a/go.sum b/go.sum index 545d6fb..6d96290 100644 --- a/go.sum +++ b/go.sum @@ -24,10 +24,6 @@ github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1x github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= -github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= -github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= -github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= @@ -43,18 +39,18 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod 
h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dhui/dktest v0.4.6 h1:+DPKyScKSEp3VLtbMDHcUq6V5Lm5zfZZVb0Sk7Ahom4= -github.com/dhui/dktest v0.4.6/go.mod h1:JHTSYDtKkvFNFHJKqCzVzqXecyv+tKt8EzceOmQOgbU= +github.com/dhui/dktest v0.4.5 h1:uUfYBIVREmj/Rw6MvgmqNAYzTiKOHJak+enB5Di73MM= +github.com/dhui/dktest v0.4.5/go.mod h1:tmcyeHDKagvlDrz7gDKq4UAJOLIfVZYkfD5OnHDwcCo= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= -github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0= +github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU= -github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I= +github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -67,11 +63,13 @@ github.com/go-ole/go-ole v1.2.6/go.mod 
h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU github.com/gocql/gocql v1.7.0 h1:O+7U7/1gSN7QTEAaMEsJc1Oq2QHXvCWoF3DFK9HDHus= github.com/gocql/gocql v1.7.0/go.mod h1:vnlvXyFZeLBF0Wy+RS8hrOdbn0UWsWtdg07XJnFxZ+4= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-migrate/migrate/v4 v4.19.1 h1:OCyb44lFuQfYXYLx1SCxPZQGU7mcaZ7gH9yH4jSFbBA= -github.com/golang-migrate/migrate/v4 v4.19.1/go.mod h1:CTcgfjxhaUtsLipnLoQRWCrjYXycRz/g5+RWDuYgPrE= +github.com/golang-migrate/migrate/v4 v4.18.3 h1:EYGkoOsvgHHfm5U/naS1RP/6PL/Xv3S4B/swMiAmDLs= +github.com/golang-migrate/migrate/v4 v4.18.3/go.mod h1:99BKpIi6ruaaXRM1A77eqZ+FWPQ3cfRa+ZVy5bmWMaY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -88,6 +86,11 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= @@ -96,6 +99,8 @@ github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -120,12 +125,8 @@ github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5L github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8= -github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU= github.com/moby/patternmatcher v0.6.0 
h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= -github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= -github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= @@ -166,8 +167,8 @@ github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/segmentio/kafka-go v0.4.50 h1:mcyC3tT5WeyWzrFbd6O374t+hmcu1NKt2Pu1L3QaXmc= github.com/segmentio/kafka-go v0.4.50/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E= -github.com/shirou/gopsutil/v4 v4.26.2 h1:X8i6sicvUFih4BmYIGT1m2wwgw2VG9YgrDTi7cIRGUI= -github.com/shirou/gopsutil/v4 v4.26.2/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ= +github.com/shirou/gopsutil/v4 v4.25.1 h1:QSWkTc+fu9LTAWfkZwZ6j8MSUk4A2LV7rbH0ZqmLjXs= +github.com/shirou/gopsutil/v4 v4.25.1/go.mod h1:RoUCUpndaJFtT+2zsZzzmhvbfGoDCJ7nFXKJf8GqJbI= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -177,10 +178,10 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/testcontainers/testcontainers-go v0.41.0 
h1:mfpsD0D36YgkxGj2LrIyxuwQ9i2wCKAD+ESsYM1wais= -github.com/testcontainers/testcontainers-go v0.41.0/go.mod h1:pdFrEIfaPl24zmBjerWTTYaY0M6UHsqA1YSvsoU40MI= -github.com/testcontainers/testcontainers-go/modules/postgres v0.41.0 h1:AOtFXssrDlLm84A2sTTR/AhvJiYbrIuCO59d+Ro9Tb0= -github.com/testcontainers/testcontainers-go/modules/postgres v0.41.0/go.mod h1:k2a09UKhgSp6vNpliIY0QSgm4Hi7GXVTzWvWgUemu/8= +github.com/testcontainers/testcontainers-go v0.37.0 h1:L2Qc0vkTw2EHWQ08djon0D2uw7Z/PtHS/QzZZ5Ra/hg= +github.com/testcontainers/testcontainers-go v0.37.0/go.mod h1:QPzbxZhQ6Bclip9igjLFj6z0hs01bU8lrl2dHQmgFGM= +github.com/testcontainers/testcontainers-go/modules/postgres v0.37.0 h1:hsVwFkS6s+79MbKEO+W7A1wNIw1fmkMtF4fg83m6kbc= +github.com/testcontainers/testcontainers-go/modules/postgres v0.37.0/go.mod h1:Qj/eGbRbO/rEYdcRLmN+bEojzatP/+NS1y8ojl2PQsc= github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA= github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI= github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw= @@ -191,6 +192,8 @@ github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= @@ -199,33 +202,50 @@ 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.6 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.65.0/go.mod h1:KDgtbWKTQs4bM+VPUr6WlL9m/WXcmkCcBlIzqxPGzmI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.41.0 h1:YlEwVsGAlCvczDILpUXpIpPSL/VPugt7zHThEMLce1c= -go.opentelemetry.io/otel v1.41.0/go.mod h1:Yt4UwgEKeT05QbLwbyHXEwhnjxNO6D8L5PQP51/46dE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 h1:ao6Oe+wSebTlQ1OEht7jlYTzQKE+pnx/iNywFvTbuuI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0/go.mod h1:u3T6vz0gh/NVzgDgiwkgLxpsSF6PaPmo2il0apGJbls= +go.opentelemetry.io/otel v1.42.0 h1:lSQGzTgVR3+sgJDAU/7/ZMjN9Z+vUip7leaqBKy4sho= +go.opentelemetry.io/otel v1.42.0/go.mod h1:lJNsdRMxCUIWuMlVJWzecSMuNjE7dOYyWlqOXWkdqCc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 h1:THuZiwpQZuHPul65w4WcwEnkX2QIuMT+UFoOrygtoJw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0/go.mod h1:J2pvYM5NGHofZ2/Ru6zw/TNWnEQp5crgyDeSrYpXkAw= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0 h1:mq/Qcf28TWz719lE3/hMB4KkyDuLJIvgJnFGcd0kEUI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0/go.mod h1:yk5LXEYhsL2htyDNJbEq7fWzNEigeEdV5xBF/Y+kAv0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 h1:inYW9ZhgqiDqh6BioM7DVHHzEGVq76Db5897WLGZ5Go= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0/go.mod h1:Izur+Wt8gClgMJqO/cZ8wdeeMryJ/xxiOVgFSSfpDTY= -go.opentelemetry.io/otel/metric v1.41.0 h1:rFnDcs4gRzBcsO9tS8LCpgR0dxg4aaxWlJxCno7JlTQ= -go.opentelemetry.io/otel/metric v1.41.0/go.mod h1:xPvCwd9pU0VN8tPZYzDZV/BMj9CM9vs00GuBjeKhJps= -go.opentelemetry.io/otel/sdk v1.41.0 
h1:YPIEXKmiAwkGl3Gu1huk1aYWwtpRLeskpV+wPisxBp8= -go.opentelemetry.io/otel/sdk v1.41.0/go.mod h1:ahFdU0G5y8IxglBf0QBJXgSe7agzjE4GiTJ6HT9ud90= -go.opentelemetry.io/otel/sdk/metric v1.41.0 h1:siZQIYBAUd1rlIWQT2uCxWJxcCO7q3TriaMlf08rXw8= -go.opentelemetry.io/otel/sdk/metric v1.41.0/go.mod h1:HNBuSvT7ROaGtGI50ArdRLUnvRTRGniSUZbxiWxSO8Y= -go.opentelemetry.io/otel/trace v1.41.0 h1:Vbk2co6bhj8L59ZJ6/xFTskY+tGAbOnCtQGVVa9TIN0= -go.opentelemetry.io/otel/trace v1.41.0/go.mod h1:U1NU4ULCoxeDKc09yCWdWe+3QoyweJcISEVa1RBzOis= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0 h1:uLXP+3mghfMf7XmV4PkGfFhFKuNWoCvvx5wP/wOXo0o= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0/go.mod h1:v0Tj04armyT59mnURNUJf7RCKcKzq+lgJs6QSjHjaTc= +go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= +go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= +go.opentelemetry.io/otel/sdk v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo= +go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts= +go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA= +go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc= +go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= +go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= -golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= -golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= +golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -236,19 +256,28 @@ golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= -golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0= google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY= google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac= google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= -google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= -google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU= +google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/infra/kafka/mirrormaker2-connector.json b/infra/kafka/mirrormaker2-connector.json new file mode 100644 index 0000000..db1c327 --- /dev/null +++ b/infra/kafka/mirrormaker2-connector.json @@ -0,0 +1,39 @@ 
+{ + "name": "grainguard-mm2-primary-to-dr", + "config": { + "connector.class": "org.apache.kafka.connect.mirror.MirrorSourceConnector", + + "source.cluster.alias": "primary", + "target.cluster.alias": "dr", + + "source.cluster.bootstrap.servers": "${PRIMARY_KAFKA_BOOTSTRAP}", + "target.cluster.bootstrap.servers": "${DR_KAFKA_BOOTSTRAP}", + + "topics": "telemetry.raw,device.events,risk.scores,saga.commands,saga.events", + + "replication.factor": 3, + + "sync.topic.acls.enabled": "false", + "sync.group.offsets.enabled": "true", + "sync.group.offsets.interval.seconds": "30", + + "refresh.topics.interval.seconds": "30", + + "source.cluster.sasl.mechanism": "SCRAM-SHA-512", + "source.cluster.security.protocol": "SASL_SSL", + "source.cluster.sasl.jaas.config": "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"${KAFKA_USERNAME}\" password=\"${KAFKA_PASSWORD}\";", + + "target.cluster.sasl.mechanism": "SCRAM-SHA-512", + "target.cluster.security.protocol": "SASL_SSL", + "target.cluster.sasl.jaas.config": "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"${KAFKA_USERNAME}\" password=\"${KAFKA_PASSWORD}\";", + + "tasks.max": "4", + + "checkpoints.topic.replication.factor": "3", + "heartbeats.topic.replication.factor": "3", + "offset-syncs.topic.replication.factor": "3", + + "emit.heartbeats.interval.seconds": "5", + "emit.checkpoints.interval.seconds": "30" + } +} diff --git a/infra/terraform/backend.tf b/infra/terraform/backend.tf new file mode 100644 index 0000000..db83ae3 --- /dev/null +++ b/infra/terraform/backend.tf @@ -0,0 +1,34 @@ +# Remote state backend — S3 + DynamoDB locking +# Before first use, create these resources manually (or via a bootstrap script): +# +# aws s3api create-bucket \ +# --bucket grainguard-terraform-state \ +# --region us-east-1 +# +# aws s3api put-bucket-versioning \ +# --bucket grainguard-terraform-state \ +# --versioning-configuration Status=Enabled +# +# aws s3api put-bucket-encryption \ 
+# --bucket grainguard-terraform-state \ +# --server-side-encryption-configuration \ +# '{"Rules":[{"ApplyServerSideEncryptionByDefault":{"SSEAlgorithm":"aws:kms"}}]}' +# +# aws dynamodb create-table \ +# --table-name grainguard-terraform-locks \ +# --attribute-definitions AttributeName=LockID,AttributeType=S \ +# --key-schema AttributeName=LockID,KeyType=HASH \ +# --billing-mode PAY_PER_REQUEST \ +# --region us-east-1 +# +# Then run: terraform init -reconfigure + +terraform { + backend "s3" { + bucket = "grainguard-terraform-state" # must already exist + key = "grainguard/terraform.tfstate" # path within bucket + region = "us-east-1" + dynamodb_table = "grainguard-terraform-locks" # prevents concurrent apply + encrypt = true # KMS server-side encryption + } +} diff --git a/infra/terraform/environments/dr/main.tf b/infra/terraform/environments/dr/main.tf new file mode 100644 index 0000000..14cdba0 --- /dev/null +++ b/infra/terraform/environments/dr/main.tf @@ -0,0 +1,69 @@ +# DR environment — us-west-2 secondary region +# Depends on prod environment — run prod/apply first, then pass outputs here. 
+# Failover procedure: see docs/runbooks/multi-region-failover.md + +variable "aurora_global_cluster_id" { type = string } # from prod output +variable "redis_global_datastore_id" { type = string } # from prod output +variable "db_password" { type = string; sensitive = true } +variable "project" { type = string; default = "grainguard" } +variable "aws_region" { type = string; default = "us-west-2" } + +module "vpc_dr" { + source = "../../modules/vpc" + project = var.project + environment = "dr" + vpc_cidr = "10.2.0.0/16" + availability_zones = ["us-west-2a", "us-west-2b"] +} + +module "eks_dr" { + source = "../../modules/eks" + project = var.project + environment = "dr" + private_subnet_ids = module.vpc_dr.private_subnet_ids + instance_type = "m6i.large" + desired_nodes = 2 # scaled down in standby to save cost +} + +# Aurora secondary — joins the global cluster, read-only until promotion +module "aurora_dr" { + source = "../../modules/aurora-global" + project = var.project + environment = "dr" + vpc_id = module.vpc_dr.vpc_id + vpc_cidr = "10.2.0.0/16" + private_subnet_ids = module.vpc_dr.private_subnet_ids + db_password = var.db_password + instance_class = "db.r6g.large" + is_secondary = true + global_cluster_id = var.aurora_global_cluster_id +} + +# Redis secondary — replicates from primary Global Datastore +module "redis_dr" { + source = "../../modules/elasticache-global" + project = var.project + environment = "dr" + vpc_id = module.vpc_dr.vpc_id + vpc_cidr = "10.2.0.0/16" + private_subnet_ids = module.vpc_dr.private_subnet_ids + node_type = "cache.r6g.large" + is_secondary = true + global_datastore_id = var.redis_global_datastore_id +} + +# DR Kafka cluster (standalone — MirrorMaker 2 replicates from primary) +# On failover: producers and consumers point here; MirrorMaker stops. 
+module "msk_dr" { + source = "../../modules/msk" + project = var.project + environment = "dr" + vpc_id = module.vpc_dr.vpc_id + vpc_cidr = "10.2.0.0/16" + private_subnet_ids = module.vpc_dr.private_subnet_ids + instance_type = "kafka.m5.large" +} + +output "dr_aurora_reader_endpoint" { value = module.aurora_dr.reader_endpoint } +output "dr_redis_primary_endpoint" { value = module.redis_dr.primary_endpoint } +output "dr_kafka_bootstrap_brokers" { value = module.msk_dr.bootstrap_brokers } diff --git a/infra/terraform/environments/dr/providers.tf b/infra/terraform/environments/dr/providers.tf new file mode 100644 index 0000000..bf0f8b0 --- /dev/null +++ b/infra/terraform/environments/dr/providers.tf @@ -0,0 +1,30 @@ +terraform { + required_version = ">= 1.5.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + } + + backend "s3" { + bucket = "grainguard-terraform-state" + key = "grainguard/dr/terraform.tfstate" + region = "us-east-1" # state always in primary region + dynamodb_table = "grainguard-terraform-locks" + encrypt = true + } +} + +provider "aws" { + region = var.aws_region # us-west-2 + + default_tags { + tags = { + Project = var.project + Environment = "dr" + ManagedBy = "terraform" + } + } +} diff --git a/infra/terraform/environments/prod/main.tf b/infra/terraform/environments/prod/main.tf new file mode 100644 index 0000000..b4ff20f --- /dev/null +++ b/infra/terraform/environments/prod/main.tf @@ -0,0 +1,60 @@ +# Production environment — us-east-1 primary region +# Paired with environments/dr/ which deploys the DR secondary in us-west-2. 
+ +module "vpc" { + source = "../../modules/vpc" + project = var.project + environment = "prod" + vpc_cidr = "10.1.0.0/16" + availability_zones = ["us-east-1a", "us-east-1b", "us-east-1c"] +} + +module "eks" { + source = "../../modules/eks" + project = var.project + environment = "prod" + private_subnet_ids = module.vpc.private_subnet_ids + instance_type = "m6i.xlarge" + desired_nodes = 6 +} + +# Aurora Global Database — primary cluster created here +# DR region references the global_cluster_id output +module "aurora" { + source = "../../modules/aurora-global" + project = var.project + environment = "prod" + vpc_id = module.vpc.vpc_id + vpc_cidr = "10.1.0.0/16" + private_subnet_ids = module.vpc.private_subnet_ids + db_password = var.db_password + instance_class = "db.r6g.large" + is_secondary = false +} + +# ElastiCache Global Datastore — primary cluster created here +module "redis" { + source = "../../modules/elasticache-global" + project = var.project + environment = "prod" + vpc_id = module.vpc.vpc_id + vpc_cidr = "10.1.0.0/16" + private_subnet_ids = module.vpc.private_subnet_ids + node_type = "cache.r6g.large" + is_secondary = false +} + +module "msk" { + source = "../../modules/msk" + project = var.project + environment = "prod" + vpc_id = module.vpc.vpc_id + vpc_cidr = "10.1.0.0/16" + private_subnet_ids = module.vpc.private_subnet_ids + instance_type = "kafka.m5.large" +} + +# ── Outputs consumed by the DR region ──────────────────────────────────────── +output "aurora_global_cluster_id" { value = module.aurora.global_cluster_id } +output "redis_global_datastore_id" { value = module.redis.global_datastore_id } +output "msk_bootstrap_brokers" { value = module.msk.bootstrap_brokers } diff --git a/infra/terraform/environments/prod/providers.tf b/infra/terraform/environments/prod/providers.tf new file mode 100644 index 0000000..abf1600 --- /dev/null +++ b/infra/terraform/environments/prod/providers.tf @@ -0,0 +1,30 @@ +terraform { + required_version = ">= 1.5.0" 
+ + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + } + + backend "s3" { + bucket = "grainguard-terraform-state" + key = "grainguard/prod/terraform.tfstate" + region = "us-east-1" + dynamodb_table = "grainguard-terraform-locks" + encrypt = true + } +} + +provider "aws" { + region = var.aws_region + + default_tags { + tags = { + Project = var.project + Environment = "prod" + ManagedBy = "terraform" + } + } +} diff --git a/infra/terraform/environments/prod/variables.tf b/infra/terraform/environments/prod/variables.tf new file mode 100644 index 0000000..47e4055 --- /dev/null +++ b/infra/terraform/environments/prod/variables.tf @@ -0,0 +1,3 @@ +variable "project" { type = string; default = "grainguard" } +variable "aws_region" { type = string; default = "us-east-1" } +variable "db_password" { type = string; sensitive = true } diff --git a/infra/terraform/modules/aurora-global/main.tf b/infra/terraform/modules/aurora-global/main.tf new file mode 100644 index 0000000..971bd28 --- /dev/null +++ b/infra/terraform/modules/aurora-global/main.tf @@ -0,0 +1,117 @@ +# Aurora Global Database module +# Creates: +# - An Aurora Global Cluster (the logical wrapper) +# - A primary regional cluster (read+write) in the caller's region +# - A DB subnet group and security group scoped to the VPC +# +# Usage: instantiate once for primary, then instantiate again with +# is_secondary = true + global_cluster_id for the DR region. 
+ +variable "project" { type = string } +variable "environment" { type = string } +variable "vpc_id" { type = string } +variable "vpc_cidr" { type = string } +variable "private_subnet_ids" { type = list(string) } +variable "db_password" { type = string; sensitive = true } +variable "instance_class" { type = string; default = "db.r6g.large" } +variable "engine_version" { type = string; default = "15.4" } +variable "is_secondary" { type = bool; default = false } +variable "global_cluster_id"{ type = string; default = "" } # required when is_secondary=true + +locals { + name = "${var.project}-${var.environment}" +} + +# ── Security group ──────────────────────────────────────────────────────────── +resource "aws_security_group" "aurora" { + name = "${local.name}-aurora" + vpc_id = var.vpc_id + + ingress { + from_port = 5432 + to_port = 5432 + protocol = "tcp" + cidr_blocks = [var.vpc_cidr] # only allow traffic from within the VPC + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { Name = "${local.name}-aurora-sg" } +} + +# ── Subnet group ────────────────────────────────────────────────────────────── +resource "aws_db_subnet_group" "aurora" { + name = "${local.name}-aurora" + subnet_ids = var.private_subnet_ids + tags = { Name = "${local.name}-aurora-subnet-group" } +} + +# ── Global cluster (primary region creates it; secondary just references it) ── +resource "aws_rds_global_cluster" "this" { + count = var.is_secondary ? 0 : 1 # only primary creates the global cluster + + global_cluster_identifier = "${local.name}-global" + engine = "aurora-postgresql" + engine_version = var.engine_version + database_name = "grainguard" + storage_encrypted = true +} + +# ── Regional cluster ────────────────────────────────────────────────────────── +resource "aws_rds_cluster" "this" { + cluster_identifier = "${local.name}-aurora" + + # Link to the global cluster + global_cluster_identifier = var.is_secondary ? 
var.global_cluster_id : aws_rds_global_cluster.this[0].id + + engine = "aurora-postgresql" + engine_version = var.engine_version + engine_mode = "provisioned" # required for Global Database + + # Primary only — secondary inherits from global replication + master_username = var.is_secondary ? null : "grainguard" + master_password = var.is_secondary ? null : var.db_password + + db_subnet_group_name = aws_db_subnet_group.aurora.name + vpc_security_group_ids = [aws_security_group.aurora.id] + + skip_final_snapshot = var.environment != "prod" # keep snapshot in prod + deletion_protection = var.environment == "prod" + + # Performance Insights — helps diagnose slow queries + enabled_cloudwatch_logs_exports = ["postgresql"] + + tags = { Name = "${local.name}-aurora" } + + lifecycle { + # password is managed outside Terraform after initial creation + ignore_changes = [master_password] + } +} + +# ── DB instances (one writer, one reader in primary; one reader in secondary) ─ +resource "aws_rds_cluster_instance" "this" { + count = var.is_secondary ? 1 : 2 # primary: 1 writer + 1 reader; DR: 1 reader + + identifier = "${local.name}-aurora-${count.index}" + cluster_identifier = aws_rds_cluster.this.id + instance_class = var.instance_class + engine = "aurora-postgresql" + engine_version = var.engine_version + + performance_insights_enabled = true + monitoring_interval = 60 # Enhanced Monitoring: 1-min granularity + + tags = { Name = "${local.name}-aurora-${count.index}" } +} + +# ── Outputs ─────────────────────────────────────────────────────────────────── +output "writer_endpoint" { value = aws_rds_cluster.this.endpoint } +output "reader_endpoint" { value = aws_rds_cluster.this.reader_endpoint } +output "global_cluster_id" { value = var.is_secondary ? 
var.global_cluster_id : aws_rds_global_cluster.this[0].id } +output "cluster_identifier" { value = aws_rds_cluster.this.cluster_identifier } diff --git a/infra/terraform/modules/elasticache-global/main.tf b/infra/terraform/modules/elasticache-global/main.tf new file mode 100644 index 0000000..34450e9 --- /dev/null +++ b/infra/terraform/modules/elasticache-global/main.tf @@ -0,0 +1,90 @@ +# ElastiCache Global Datastore module (Redis cross-region replication) +# Creates: +# - A primary ElastiCache cluster (Multi-AZ) in the caller's region +# - A Global Datastore that enables async replication to a secondary region +# +# The secondary region just calls this module with is_secondary=true +# and provides the global_datastore_id from the primary. + +variable "project" { type = string } +variable "environment" { type = string } +variable "vpc_id" { type = string } +variable "vpc_cidr" { type = string } +variable "private_subnet_ids" { type = list(string) } +variable "node_type" { type = string; default = "cache.r6g.large" } +variable "is_secondary" { type = bool; default = false } +variable "global_datastore_id" { type = string; default = "" } # primary output + +locals { + name = "${var.project}-${var.environment}" +} + +resource "aws_security_group" "redis" { + name = "${local.name}-redis" + vpc_id = var.vpc_id + + ingress { + from_port = 6379 + to_port = 6379 + protocol = "tcp" + cidr_blocks = [var.vpc_cidr] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { Name = "${local.name}-redis-sg" } +} + +resource "aws_elasticache_subnet_group" "redis" { + name = "${local.name}-redis" + subnet_ids = var.private_subnet_ids +} + +# Replication group (Multi-AZ, cluster mode disabled for simplicity) +resource "aws_elasticache_replication_group" "redis" { + replication_group_id = "${local.name}-redis" + description = "GrainGuard Redis ${var.environment}" + + node_type = var.node_type + num_cache_clusters = 2 # 1 primary + 
1 replica within region + engine_version = "7.1" + port = 6379 + + subnet_group_name = aws_elasticache_subnet_group.redis.name + security_group_ids = [aws_security_group.redis.id] + + at_rest_encryption_enabled = true + transit_encryption_enabled = true + + automatic_failover_enabled = true # promote replica if primary fails + multi_az_enabled = true + + # Global Datastore membership + global_replication_group_id = var.is_secondary ? var.global_datastore_id : null + + lifecycle { + ignore_changes = [num_cache_clusters] + } + + tags = { Name = "${local.name}-redis" } +} + +# Global Datastore — created only by the primary region +resource "aws_elasticache_global_replication_group" "this" { + count = var.is_secondary ? 0 : 1 + + global_replication_group_id_suffix = "${local.name}" + primary_replication_group_id = aws_elasticache_replication_group.redis.id + + # These are inherited by the primary cluster + engine_version = "7.1" +} + +output "primary_endpoint" { value = aws_elasticache_replication_group.redis.primary_endpoint_address } +output "reader_endpoint" { value = aws_elasticache_replication_group.redis.reader_endpoint_address } +output "global_datastore_id" { value = var.is_secondary ? var.global_datastore_id : (length(aws_elasticache_global_replication_group.this) > 0 ? 
aws_elasticache_global_replication_group.this[0].id : "") } diff --git a/libs/health/health.go b/libs/health/health.go index 08471f2..fc509ad 100644 --- a/libs/health/health.go +++ b/libs/health/health.go @@ -22,10 +22,10 @@ type postgresChecker struct{ pool *pgxpool.Pool } func NewPostgresChecker(pool *pgxpool.Pool) Checker { return &postgresChecker{pool} } func (c *postgresChecker) Name() string { return "postgres" } func (c *postgresChecker) Check(ctx context.Context) error { return c.pool.Ping(ctx) } -type redisChecker struct{ client *redis.Client } +type redisChecker struct{ client redis.UniversalClient } -func NewRedisChecker(client *redis.Client) Checker { return &redisChecker{client} } -func (c *redisChecker) Name() string { return "redis" } +func NewRedisChecker(client redis.UniversalClient) Checker { return &redisChecker{client} } +func (c *redisChecker) Name() string { return "redis" } func (c *redisChecker) Check(ctx context.Context) error { return c.client.Ping(ctx).Err() } diff --git a/scripts/load-tests/performance-budget.js b/scripts/load-tests/performance-budget.js new file mode 100644 index 0000000..ead0aa9 --- /dev/null +++ b/scripts/load-tests/performance-budget.js @@ -0,0 +1,138 @@ +// k6 performance budget test +// Runs spike + soak scenarios against the gateway and BFF. +// CI fails if any threshold is breached — prevents latency regressions from merging. 
+// +// Run locally: +// k6 run --env GATEWAY_URL=http://localhost:3000 \ +// --env BFF_URL=http://localhost:8086 \ +// --env JWT= \ +// scripts/load-tests/performance-budget.js + +import http from "k6/http"; +import { check, sleep } from "k6"; +import { Trend, Counter, Rate } from "k6/metrics"; + +// ── Custom metrics ──────────────────────────────────────────────────────────── +const gatewayP95 = new Trend("gateway_p95_ms", true); +const bffP95 = new Trend("bff_p95_ms", true); +const errorRate = new Rate("error_rate"); +const totalErrors = new Counter("total_errors"); + +const GATEWAY_URL = __ENV.GATEWAY_URL ?? "http://localhost:3000"; +const BFF_URL = __ENV.BFF_URL ?? "http://localhost:8086"; +const JWT = __ENV.JWT ?? ""; + +// ── Thresholds (performance budget) ────────────────────────────────────────── +// If any threshold fails, k6 exits with code 99 and CI marks the step as failed. +export const options = { + thresholds: { + // Gateway REST: 95th percentile < 500ms + "http_req_duration{endpoint:gateway}": ["p(95)<500"], + // BFF GraphQL: 95th percentile < 800ms (GraphQL is heavier) + "http_req_duration{endpoint:bff}": ["p(95)<800"], + // Error rate must stay below 1% + "error_rate": ["rate<0.01"], + // Custom trends for reporting + "gateway_p95_ms": ["p(95)<500"], + "bff_p95_ms": ["p(95)<800"], + }, + + scenarios: { + // Baseline: steady 50 RPS for 2 minutes + baseline: { + executor: "constant-arrival-rate", + rate: 50, + timeUnit: "1s", + duration: "2m", + preAllocatedVUs:60, + maxVUs: 100, + }, + + // Spike: ramp from 0 to 200 VU in 30s, hold 30s, ramp down + spike: { + executor: "ramping-vus", + startVUs: 0, + stages: [ + { duration: "30s", target: 200 }, + { duration: "30s", target: 200 }, + { duration: "30s", target: 0 }, + ], + startTime: "2m", // starts after baseline finishes + }, + }, +}; + +const COMMON_HEADERS = { + Authorization: `Bearer ${JWT}`, + "Content-Type": "application/json", +}; + +// ── Virtual user script 
─────────────────────────────────────────────────────── +export default function () { + // 1. Gateway: GET /health (cheapest — warms up) + const healthRes = http.get(`${GATEWAY_URL}/health`, { tags: { endpoint: "gateway" } }); + check(healthRes, { "gateway /health 200": (r) => r.status === 200 }); + gatewayP95.add(healthRes.timings.duration); + + // 2. Gateway: GET /devices/:id/latest (requires JWT) + if (JWT) { + const devRes = http.get( + `${GATEWAY_URL}/devices/00000000-0000-0000-0000-000000000001/latest`, + { headers: COMMON_HEADERS, tags: { endpoint: "gateway" } } + ); + // 404 is acceptable — device may not exist in test env + const ok = devRes.status === 200 || devRes.status === 404; + if (!ok) { + errorRate.add(1); + totalErrors.add(1); + } else { + errorRate.add(0); + } + gatewayP95.add(devRes.timings.duration); + } + + // 3. BFF: GraphQL query for devices + if (JWT) { + const gqlRes = http.post( + `${BFF_URL}/graphql`, + JSON.stringify({ + query: `{ devices(first: 10) { edges { node { id serialNumber temperature } } } }`, + }), + { headers: COMMON_HEADERS, tags: { endpoint: "bff" } } + ); + + let gqlErrors = false; + try { + const body = gqlRes.json(); + gqlErrors = Array.isArray(body?.errors) && body.errors.length > 0; + } catch { + gqlErrors = true; + } + + const bffOk = gqlRes.status === 200 && !gqlErrors; + check(gqlRes, { "bff graphql 200": () => bffOk }); + if (!bffOk) { + errorRate.add(1); + totalErrors.add(1); + } else { + errorRate.add(0); + } + bffP95.add(gqlRes.timings.duration); + } + + sleep(0.1); // 100ms think time between requests +} + +// ── Summary output ──────────────────────────────────────────────────────────── +export function handleSummary(data) { + return { + // Write JSON summary for CI artifact upload + "scripts/load-tests/results/performance-budget-summary.json": JSON.stringify(data, null, 2), + stdout: ` +=== Performance Budget Summary === +Gateway p95: ${data.metrics["gateway_p95_ms"]?.values?.["p(95)"]?.toFixed(0) ?? 
"N/A"} ms (budget: 500ms) +BFF p95: ${data.metrics["bff_p95_ms"]?.values?.["p(95)"]?.toFixed(0) ?? "N/A"} ms (budget: 800ms) +Error rate: ${((data.metrics["error_rate"]?.values?.rate ?? 0) * 100).toFixed(2)}% (budget: <1%) +`, + }; +} diff --git a/tests/chaos/README.md b/tests/chaos/README.md new file mode 100644 index 0000000..c299ec5 --- /dev/null +++ b/tests/chaos/README.md @@ -0,0 +1,37 @@ +# GrainGuard Chaos Tests + +Chaos experiments using [Chaos Toolkit](https://chaostoolkit.org/) and raw `kubectl` / `kafka-topics` commands. + +## Prerequisites + +```bash +pip install chaostoolkit chaostoolkit-kubernetes chaostoolkit-verification +kubectl config use-context +``` + +## Experiments + +| File | Target | What it verifies | +|------|--------|-----------------| +| `pod-kill.yaml` | gateway, bff, telemetry-service | HPA respawns within 30s; readiness probe gates traffic | +| `kafka-consumer-pause.sh` | read-model-builder, cdc-transformer | Consumer lag ≤ 10 000 after resume; no messages lost | +| `redis-outage.sh` | bff (cache), saga-orchestrator (lock) | BFF falls back to DB; saga retries with backoff | +| `projection-lag.sh` | read-model-builder | Lag alert fires within 2 min; catches up within 5 min | +| `network-partition.yaml` | telemetry-service → Kafka | Messages buffered in producer; delivered after heal | + +## Running + +```bash +# Single experiment +chaos run tests/chaos/pod-kill.yaml + +# Full suite (sequential) +bash tests/chaos/run-all.sh + +# CI pipeline — see .github/workflows/chaos.yml +``` + +## Pass / Fail Criteria + +Each experiment defines steady-state hypotheses that are verified before and after. +The experiment **fails** (non-zero exit) if any hypothesis deviates. 
diff --git a/tests/chaos/kafka-consumer-pause.sh b/tests/chaos/kafka-consumer-pause.sh
new file mode 100644
index 0000000..52654fd
--- /dev/null
+++ b/tests/chaos/kafka-consumer-pause.sh
@@ -0,0 +1,124 @@
+#!/usr/bin/env bash
+# chaos/kafka-consumer-pause.sh
+# Pauses Kafka consumer groups for read-model-builder and cdc-transformer,
+# waits 60 s, resumes, then asserts consumer lag ≤ 10 000.
+#
+# Requires:
+#   - kubectl with context set to target cluster
+#   - kafka-consumer-groups.sh available (or kcat / kafkactl)
+#   - NAMESPACE env var (default: grainguard-dev)
+#   - KAFKA_BOOTSTRAP env var (default: kafka:9092 as seen inside cluster)
+
+set -euo pipefail
+
+NAMESPACE="${NAMESPACE:-grainguard-dev}"
+KAFKA_BOOTSTRAP="${KAFKA_BOOTSTRAP:-kafka:9092}"
+PAUSE_SECONDS="${PAUSE_SECONDS:-60}"
+MAX_LAG="${MAX_LAG:-10000}"
+CONSUMERS=("read-model-builder" "cdc-transformer")
+declare -A ORIGINAL_REPLICAS=()
+
+RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m'
+
+log() { echo -e "${GREEN}[chaos]${NC} $*"; }
+warn() { echo -e "${YELLOW}[chaos]${NC} $*"; }
+fail() { echo -e "${RED}[chaos FAIL]${NC} $*" >&2; exit 1; }
+
+# ── helpers ──────────────────────────────────────────────────────────────────
+
+current_lag() {
+  local group="$1"
+  kubectl exec -n "$NAMESPACE" deploy/kafka -- \
+    kafka-consumer-groups.sh \
+      --bootstrap-server "$KAFKA_BOOTSTRAP" \
+      --describe --group "$group" 2>/dev/null \
+    | awk 'NR>1 && $6 ~ /^[0-9]+$/ { sum += $6 } END { print sum+0 }'  # LAG is column 6; $NF is CLIENT-ID (non-numeric), which silently summed to 0
+}
+
+scale_consumer() {
+  local deploy="$1" replicas="$2"
+  kubectl scale deployment "$deploy" -n "$NAMESPACE" --replicas="$replicas"
+}
+
+current_replicas() {
+  local deploy="$1"
+  kubectl get deployment "$deploy" -n "$NAMESPACE" -o jsonpath='{.spec.replicas}'
+}
+
+restore_consumers() {
+  for consumer in "${CONSUMERS[@]}"; do
+    local replicas="${ORIGINAL_REPLICAS[$consumer]:-1}"
+    kubectl scale deployment "$consumer" -n "$NAMESPACE" --replicas="$replicas" >/dev/null 2>&1 || true
+  done
+}
+
+cleanup() { + restore_consumers +} + +trap cleanup EXIT INT TERM + +# ── steady-state: before ────────────────────────────────────────────────────── + +log "=== Steady-state check BEFORE chaos ===" +for consumer in "${CONSUMERS[@]}"; do + ORIGINAL_REPLICAS["$consumer"]="$(current_replicas "$consumer")" + kubectl rollout status "deployment/$consumer" -n "$NAMESPACE" --timeout=30s \ + || fail "Consumer $consumer not healthy before chaos" + log " $consumer — healthy" +done + +# ── action: pause consumers ─────────────────────────────────────────────────── + +log "=== Pausing consumers (scale to 0) ===" +for consumer in "${CONSUMERS[@]}"; do + scale_consumer "$consumer" 0 + log " Scaled $consumer → 0" +done + +log "Sleeping ${PAUSE_SECONDS}s to allow lag to build..." +sleep "$PAUSE_SECONDS" + +# Record lag while paused (informational) +for consumer in "${CONSUMERS[@]}"; do + lag=$(current_lag "$consumer") + warn " Lag while paused — $consumer: $lag messages" +done + +# ── action: resume consumers ────────────────────────────────────────────────── + +log "=== Resuming consumers ===" +for consumer in "${CONSUMERS[@]}"; do + replicas="${ORIGINAL_REPLICAS[$consumer]:-1}" + scale_consumer "$consumer" "$replicas" + log " Scaled $consumer → $replicas" +done + +log "Waiting for deployments to be ready..." 
+for consumer in "${CONSUMERS[@]}"; do + kubectl rollout status "deployment/$consumer" -n "$NAMESPACE" --timeout=60s +done + +# ── steady-state: after ─────────────────────────────────────────────────────── + +log "=== Steady-state check AFTER chaos (polling every 15s, up to 5 min) ===" +deadline=$(( $(date +%s) + 300 )) + +for consumer in "${CONSUMERS[@]}"; do + while true; do + lag=$(current_lag "$consumer") + log " $consumer lag: $lag" + if (( lag <= MAX_LAG )); then + log " ✓ $consumer caught up (lag=$lag ≤ $MAX_LAG)" + break + fi + if (( $(date +%s) >= deadline )); then + fail "$consumer lag $lag still > $MAX_LAG after 5 minutes — experiment FAILED" + fi + sleep 15 + done +done + +trap - EXIT INT TERM + +log "=== Kafka consumer pause experiment PASSED ===" diff --git a/tests/chaos/network-partition.yaml b/tests/chaos/network-partition.yaml new file mode 100644 index 0000000..1544410 --- /dev/null +++ b/tests/chaos/network-partition.yaml @@ -0,0 +1,141 @@ +version: "1.0.0" +title: Network Partition — telemetry-service → Kafka +description: > + Apply a NetworkPolicy that drops all egress from telemetry-service to Kafka. + Messages should be buffered in the producer. + After the policy is removed, all buffered messages must be delivered + (consumer lag returns to pre-chaos level within 2 minutes). 
+ +tags: + - kubernetes + - network + - kafka + - producer-buffering + +configuration: + namespace: + type: env + key: NAMESPACE + default: grainguard-dev + +steady-state-hypothesis: + title: telemetry-service is healthy and Kafka consumer lag is low + probes: + - name: telemetry-service-healthy + type: probe + tolerance: true + provider: + type: process + path: kubectl + arguments: + - rollout + - status + - deployment/telemetry-service + - -n + - "${namespace}" + - --timeout=30s + + - name: kafka-healthy + type: probe + tolerance: true + provider: + type: process + path: kubectl + arguments: + - rollout + - status + - deployment/kafka + - -n + - "${namespace}" + - --timeout=30s + +method: + # ── Apply deny-egress NetworkPolicy ───────────────────────────────────────── + - name: apply-network-partition + type: action + provider: + type: process + path: kubectl + arguments: + - apply + - -f + - - + - --stdin + stdin: | + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: chaos-deny-telemetry-to-kafka + namespace: "${namespace}" + spec: + podSelector: + matchLabels: + app: telemetry-service + policyTypes: + - Egress + egress: + - ports: + - port: 53 # allow DNS only + protocol: UDP + + # ── Let the partition run for 60 s ─────────────────────────────────────────── + - name: wait-during-partition + type: action + provider: + type: process + path: sleep + arguments: + - "60" + + # ── Verify telemetry-service pod is still alive (didn't crash) ─────────────── + - name: telemetry-still-running + type: probe + tolerance: true + provider: + type: process + path: kubectl + arguments: + - rollout + - status + - deployment/telemetry-service + - -n + - "${namespace}" + - --timeout=10s + + # ── Remove network partition ────────────────────────────────────────────────── + - name: remove-network-partition + type: action + provider: + type: process + path: kubectl + arguments: + - delete + - networkpolicy + - chaos-deny-telemetry-to-kafka + - -n + - 
"${namespace}" + - --ignore-not-found + + # ── Wait for Kafka producer flush ───────────────────────────────────────────── + - name: wait-producer-flush + type: action + provider: + type: process + path: sleep + arguments: + - "30" + +rollbacks: + # Safety rollback — remove the policy even if experiment fails mid-way + - name: rollback-remove-network-partition + type: action + provider: + type: process + path: kubectl + arguments: + - delete + - networkpolicy + - chaos-deny-telemetry-to-kafka + - -n + - "${namespace}" + - --ignore-not-found diff --git a/tests/chaos/pod-kill.yaml b/tests/chaos/pod-kill.yaml new file mode 100644 index 0000000..5d4a52f --- /dev/null +++ b/tests/chaos/pod-kill.yaml @@ -0,0 +1,155 @@ +version: "1.0.0" +title: Pod Kill — GrainGuard critical path services +description: > + Kill one pod from gateway, bff, and telemetry-service. + Verify each respawns and passes readiness within 30 seconds. + +tags: + - kubernetes + - pod-failure + - availability + +configuration: + namespace: + type: env + key: NAMESPACE + default: grainguard-dev + +steady-state-hypothesis: + title: All critical-path pods are healthy before and after + probes: + - name: gateway-pods-healthy + type: probe + tolerance: true + provider: + type: process + path: kubectl + arguments: + - rollout + - status + - deployment/gateway + - -n + - "${namespace}" + - --timeout=30s + + - name: bff-pods-healthy + type: probe + tolerance: true + provider: + type: process + path: kubectl + arguments: + - rollout + - status + - deployment/bff + - -n + - "${namespace}" + - --timeout=30s + + - name: telemetry-pods-healthy + type: probe + tolerance: true + provider: + type: process + path: kubectl + arguments: + - rollout + - status + - deployment/telemetry-service + - -n + - "${namespace}" + - --timeout=30s + +method: + # --- gateway --- + - name: kill-gateway-pod + type: action + provider: + type: process + path: kubectl + arguments: + - delete + - pod + - -l + - app=gateway + - -n + - 
"${namespace}" + - --grace-period=0 + - --force + + - name: wait-gateway-recovery + type: action + provider: + type: process + path: kubectl + arguments: + - rollout + - status + - deployment/gateway + - -n + - "${namespace}" + - --timeout=30s + pauses: + after: 5 + + # --- bff --- + - name: kill-bff-pod + type: action + provider: + type: process + path: kubectl + arguments: + - delete + - pod + - -l + - app=bff + - -n + - "${namespace}" + - --grace-period=0 + - --force + + - name: wait-bff-recovery + type: action + provider: + type: process + path: kubectl + arguments: + - rollout + - status + - deployment/bff + - -n + - "${namespace}" + - --timeout=30s + pauses: + after: 5 + + # --- telemetry-service --- + - name: kill-telemetry-pod + type: action + provider: + type: process + path: kubectl + arguments: + - delete + - pod + - -l + - app=telemetry-service + - -n + - "${namespace}" + - --grace-period=0 + - --force + + - name: wait-telemetry-recovery + type: action + provider: + type: process + path: kubectl + arguments: + - rollout + - status + - deployment/telemetry-service + - -n + - "${namespace}" + - --timeout=30s + +rollbacks: [] diff --git a/tests/chaos/projection-lag.sh b/tests/chaos/projection-lag.sh new file mode 100644 index 0000000..20e07f3 --- /dev/null +++ b/tests/chaos/projection-lag.sh @@ -0,0 +1,110 @@ +#!/usr/bin/env bash +# chaos/projection-lag.sh +# Verifies the read-model-builder projection-lag alert fires and recovers. +# +# Strategy: +# 1. Pause read-model-builder (scale 0) to let lag build. +# 2. Assert Prometheus/Alertmanager sees ProjectionLagHigh within 2 min. +# 3. Restore read-model-builder, assert lag drops below threshold in 5 min. +# +# Requires: kubectl, curl, NAMESPACE / PROMETHEUS_URL / KAFKA_BOOTSTRAP env vars. 
+
+set -euo pipefail
+
+NAMESPACE="${NAMESPACE:-grainguard-dev}"
+PROMETHEUS_URL="${PROMETHEUS_URL:-http://localhost:9090}"
+KAFKA_BOOTSTRAP="${KAFKA_BOOTSTRAP:-kafka:9092}"
+CONSUMER_GROUP="${CONSUMER_GROUP:-read-model-builder}"
+LAG_THRESHOLD="${LAG_THRESHOLD:-5000}"
+ALERT_WINDOW="${ALERT_WINDOW:-120}" # seconds to wait for alert to fire
+RECOVERY_WINDOW="${RECOVERY_WINDOW:-300}" # seconds to wait for lag to drop
+STRICT_ALERT_CHECK="${STRICT_ALERT_CHECK:-0}"
+
+RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m'
+
+log() { echo -e "${GREEN}[chaos]${NC} $*"; }
+warn() { echo -e "${YELLOW}[chaos]${NC} $*"; }
+fail() { echo -e "${RED}[chaos FAIL]${NC} $*" >&2; exit 1; }
+
+# ── helpers ───────────────────────────────────────────────────────────────────
+
+current_lag() {
+  kubectl exec -n "$NAMESPACE" deploy/kafka -- \
+    kafka-consumer-groups.sh \
+      --bootstrap-server "$KAFKA_BOOTSTRAP" \
+      --describe --group "$CONSUMER_GROUP" 2>/dev/null \
+    | awk 'NR>1 && $6 ~ /^[0-9]+$/ { sum += $6 } END { print sum+0 }'  # LAG is column 6; $NF is CLIENT-ID (non-numeric), which silently summed to 0
+}
+
+alert_firing() {
+  # Returns 0 (true) if ProjectionLagHigh alert is active in Alertmanager
+  curl -s "${PROMETHEUS_URL}/api/v1/alerts" 2>/dev/null \
+    | grep -q '"alertname":"ProjectionLagHigh"'
+}
+
+# ── steady-state: before ──────────────────────────────────────────────────────
+
+log "=== Steady-state BEFORE projection chaos ==="
+kubectl rollout status deployment/read-model-builder -n "$NAMESPACE" --timeout=30s \
+  || fail "read-model-builder not healthy before chaos"
+
+initial_lag=$(current_lag)
+log "  Initial lag: $initial_lag"
+(( initial_lag < LAG_THRESHOLD )) \
+  || fail "Lag $initial_lag already ≥ $LAG_THRESHOLD before chaos — aborting"
+
+# ── action: pause consumer ────────────────────────────────────────────────────
+
+log "=== Pausing read-model-builder ==="
+kubectl scale deployment/read-model-builder -n "$NAMESPACE" --replicas=0
+log "Scaled to 0 — lag will build on topic telemetry.events"
+
+# ── probe: alert must fire
within ALERT_WINDOW ─────────────────────────────── + +log "=== Waiting up to ${ALERT_WINDOW}s for ProjectionLagHigh alert ===" +deadline=$(( $(date +%s) + ALERT_WINDOW )) +alert_fired=0 + +while (( $(date +%s) < deadline )); do + lag=$(current_lag) + warn " Lag: $lag" + if alert_firing; then + log " ✓ ProjectionLagHigh alert FIRED (lag=$lag)" + alert_fired=1 + break + fi + sleep 10 +done + +(( alert_fired )) \ + || { + if [[ "$STRICT_ALERT_CHECK" == "1" ]]; then + fail "ProjectionLagHigh alert did NOT fire within ${ALERT_WINDOW}s" + fi + warn " ProjectionLagHigh alert did NOT fire within ${ALERT_WINDOW}s (check Prometheus rules)" + } + +# ── action: restore consumer ────────────────────────────────────────────────── + +log "=== Restoring read-model-builder ===" +kubectl scale deployment/read-model-builder -n "$NAMESPACE" --replicas=1 +kubectl rollout status deployment/read-model-builder -n "$NAMESPACE" --timeout=60s + +# ── steady-state: after ─────────────────────────────────────────────────────── + +log "=== Waiting up to ${RECOVERY_WINDOW}s for lag to drop below $LAG_THRESHOLD ===" +deadline=$(( $(date +%s) + RECOVERY_WINDOW )) + +while true; do + lag=$(current_lag) + log " Lag: $lag" + (( lag < LAG_THRESHOLD )) && { + log " ✓ Lag recovered (lag=$lag < $LAG_THRESHOLD)" + break + } + (( $(date +%s) >= deadline )) \ + && fail "Lag $lag still ≥ $LAG_THRESHOLD after ${RECOVERY_WINDOW}s — experiment FAILED" + sleep 15 +done + +log "=== Projection-lag experiment PASSED ===" diff --git a/tests/chaos/redis-outage.sh b/tests/chaos/redis-outage.sh new file mode 100644 index 0000000..a4d58d1 --- /dev/null +++ b/tests/chaos/redis-outage.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash +# chaos/redis-outage.sh +# Simulates a Redis outage by scaling the Redis deployment to 0. +# Verifies: +# 1. BFF falls back to Postgres (GraphQL queries still return 200) +# 2. Saga-orchestrator retries its distributed lock with backoff (no panic) +# 3. 
After Redis is restored, cache warms back up within 30s +# +# Requires: kubectl, curl (or httpie), NAMESPACE / GATEWAY_URL env vars. + +set -euo pipefail + +NAMESPACE="${NAMESPACE:-grainguard-dev}" +GATEWAY_URL="${GATEWAY_URL:-http://localhost:3000}" +OUTAGE_SECONDS="${OUTAGE_SECONDS:-45}" +REDIS_DEPLOY="${REDIS_DEPLOY:-redis}" +ORIGINAL_REPLICAS="" + +RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m' + +log() { echo -e "${GREEN}[chaos]${NC} $*"; } +warn() { echo -e "${YELLOW}[chaos]${NC} $*"; } +fail() { echo -e "${RED}[chaos FAIL]${NC} $*" >&2; exit 1; } + +GRAPHQL_QUERY='{"query":"{ deviceList(tenantId:\"test-tenant\",first:5) { edges { node { deviceId } } } }"}' + +http_check() { + local label="$1" + local status + status=$(curl -s -o /dev/null -w "%{http_code}" \ + -X POST "$GATEWAY_URL/graphql" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer ${TEST_JWT:-dummy-jwt}" \ + -d "$GRAPHQL_QUERY" 2>/dev/null || echo "000") + if [[ "$status" == "200" ]]; then + log " ✓ $label — HTTP $status" + else + warn " ✗ $label — HTTP $status" + return 1 + fi +} + +restore_redis() { + if [[ -n "${ORIGINAL_REPLICAS}" ]]; then + kubectl scale deployment "$REDIS_DEPLOY" -n "$NAMESPACE" --replicas="$ORIGINAL_REPLICAS" >/dev/null 2>&1 || true + fi +} + +cleanup() { + restore_redis +} + +trap cleanup EXIT INT TERM + +# ── steady-state: before ────────────────────────────────────────────────────── + +log "=== Steady-state BEFORE Redis outage ===" +kubectl rollout status "deployment/$REDIS_DEPLOY" -n "$NAMESPACE" --timeout=30s \ + || fail "Redis not healthy before chaos" +ORIGINAL_REPLICAS="$(kubectl get deployment "$REDIS_DEPLOY" -n "$NAMESPACE" -o jsonpath='{.spec.replicas}')" + +http_check "GraphQL deviceList before outage" \ + || fail "BFF not responding before chaos" + +# ── action: kill Redis ──────────────────────────────────────────────────────── + +log "=== Scaling Redis to 0 ===" +kubectl scale deployment "$REDIS_DEPLOY" -n 
"$NAMESPACE" --replicas=0 +log "Redis scaled to 0 — outage begins" + +sleep 5 # let connections time-out / be noticed by BFF + +# ── probe: BFF falls back to DB ─────────────────────────────────────────────── + +log "=== Verifying BFF DB fallback (10 attempts, 3s apart) ===" +fallback_ok=0 +for i in $(seq 1 10); do + if http_check "Attempt $i (Redis down)"; then + fallback_ok=1 + break + fi + sleep 3 +done + +(( fallback_ok )) || fail "BFF did not fall back to DB during Redis outage" + +# ── probe: saga-orchestrator logs — no crash ───────────────────────────────── + +log "=== Checking saga-orchestrator for panics during outage ===" +sleep 5 +panic_count=$(kubectl logs -n "$NAMESPACE" deploy/saga-orchestrator \ + --since="${OUTAGE_SECONDS}s" 2>/dev/null \ + | grep -c "panic\|FATAL\|unhandled" || true) +(( panic_count == 0 )) \ + || fail "saga-orchestrator logged $panic_count panic/fatal lines during outage" +log " ✓ saga-orchestrator — no panics" + +log "Waiting remaining outage window (${OUTAGE_SECONDS}s total)..." 
+remaining_sleep=$(( OUTAGE_SECONDS - 15 )) +if (( remaining_sleep > 0 )); then + sleep "$remaining_sleep" +fi + +# ── action: restore Redis ───────────────────────────────────────────────────── + +log "=== Restoring Redis ===" +kubectl scale deployment "$REDIS_DEPLOY" -n "$NAMESPACE" --replicas="${ORIGINAL_REPLICAS:-1}" +kubectl rollout status "deployment/$REDIS_DEPLOY" -n "$NAMESPACE" --timeout=60s + +# ── steady-state: after ─────────────────────────────────────────────────────── + +log "=== Steady-state AFTER Redis restore ===" +sleep 5 +http_check "GraphQL deviceList after restore" \ + || fail "BFF not responding after Redis restore" + +# Warm-up check: second request should be cache-hit (fast) +t_start=$(date +%s%N) +http_check "Cache warm-up probe" +t_end=$(date +%s%N) +elapsed_ms=$(( (t_end - t_start) / 1000000 )) +log " Response time after restore: ${elapsed_ms}ms" + +trap - EXIT INT TERM + +log "=== Redis outage experiment PASSED ===" diff --git a/tests/chaos/run-all.sh b/tests/chaos/run-all.sh new file mode 100644 index 0000000..86e9c63 --- /dev/null +++ b/tests/chaos/run-all.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# chaos/run-all.sh +# Run the full chaos suite sequentially. +# Exits 0 only if ALL experiments pass. 
+ +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +RESULTS_DIR="${SCRIPT_DIR}/results" +mkdir -p "$RESULTS_DIR" + +RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; BOLD='\033[1m'; NC='\033[0m' + +log() { echo -e "${GREEN}[suite]${NC} $*"; } +fail() { echo -e "${RED}[suite FAIL]${NC} $*" >&2; } + +PASSED=() +FAILED=() + +run_chaos() { + local name="$1" + local cmd=("${@:2}") + local logfile="${RESULTS_DIR}/${name}.log" + + log "━━━ Running: $name ━━━" + if "${cmd[@]}" 2>&1 | tee "$logfile"; then + PASSED+=("$name") + log "${GREEN}✓ PASSED${NC}: $name" + else + FAILED+=("$name") + fail "✗ FAILED: $name (see $logfile)" + fi + echo "" +} + +# ── Experiments ──────────────────────────────────────────────────────────────── + +run_chaos "pod-kill" \ + chaos run "${SCRIPT_DIR}/pod-kill.yaml" + +run_chaos "kafka-consumer-pause" \ + bash "${SCRIPT_DIR}/kafka-consumer-pause.sh" + +run_chaos "redis-outage" \ + bash "${SCRIPT_DIR}/redis-outage.sh" + +run_chaos "projection-lag" \ + bash "${SCRIPT_DIR}/projection-lag.sh" + +run_chaos "network-partition" \ + chaos run "${SCRIPT_DIR}/network-partition.yaml" + +# ── Summary ──────────────────────────────────────────────────────────────────── + +echo "" +echo -e "${BOLD}━━━ Chaos Suite Summary ━━━${NC}" +echo -e " ${GREEN}Passed (${#PASSED[@]}):${NC} ${PASSED[*]:-none}" +echo -e " ${RED}Failed (${#FAILED[@]}):${NC} ${FAILED[*]:-none}" + +if (( ${#FAILED[@]} > 0 )); then + echo -e "${RED}SUITE FAILED${NC}" + exit 1 +fi + +echo -e "${GREEN}SUITE PASSED${NC}" +exit 0 diff --git a/tests/e2e/auth.spec.ts b/tests/e2e/auth.spec.ts new file mode 100644 index 0000000..7eefead --- /dev/null +++ b/tests/e2e/auth.spec.ts @@ -0,0 +1,49 @@ +import { test, expect } from "@playwright/test"; +import { injectMockAuth } from "./fixtures/mockAuth"; + +// ─── Unauthenticated tests ──────────────────────────────────────────────────── +// No credentials needed — these just verify what anonymous users see. 
+ +test.describe("Auth wall", () => { + test("unauthenticated user sees login prompt", async ({ page }) => { + await page.goto("/"); + const loginBtn = page.getByRole("button", { name: /log in|sign in/i }); + await expect(loginBtn).toBeVisible({ timeout: 10_000 }); + }); + + test("protected route redirects to login", async ({ page }) => { + await page.goto("/billing"); + const loginBtn = page.getByRole("button", { name: /log in|sign in/i }); + await expect(loginBtn).toBeVisible({ timeout: 10_000 }); + }); + + test("page title is GrainGuard", async ({ page }) => { + await page.goto("/"); + await expect(page).toHaveTitle(/GrainGuard/i); + }); + + test("nav shows GrainGuard brand", async ({ page }) => { + await page.goto("/"); + await expect(page.getByText("GrainGuard").first()).toBeVisible(); + }); +}); + +// ─── Authenticated tests ────────────────────────────────────────────────────── +// Uses mock auth fixture — no real Auth0 credentials needed. + +test.describe("Authenticated user", () => { + test.beforeEach(async ({ page }) => { + await injectMockAuth(page); + }); + + test("devices page loads after login", async ({ page }) => { + await page.goto("/"); + await expect(page.getByRole("heading", { name: "Devices" })).toBeVisible({ timeout: 15_000 }); + }); + + test("billing page shows plan cards", async ({ page }) => { + await page.goto("/billing"); + await expect(page.getByText("Starter")).toBeVisible({ timeout: 10_000 }); + await expect(page.getByText("Professional")).toBeVisible(); + }); +}); diff --git a/tests/e2e/billing.spec.ts b/tests/e2e/billing.spec.ts new file mode 100644 index 0000000..457dfe9 --- /dev/null +++ b/tests/e2e/billing.spec.ts @@ -0,0 +1,34 @@ +import { test, expect } from "@playwright/test"; +import { injectMockAuth } from "./fixtures/mockAuth"; + +// Uses mock auth — no real Auth0 credentials needed. 
+ +test.describe("Billing page", () => { + test.beforeEach(async ({ page }) => { + await injectMockAuth(page); + await page.goto("/billing"); + }); + + test("shows three plan cards", async ({ page }) => { + await expect(page.getByText("Starter")).toBeVisible({ timeout: 10_000 }); + await expect(page.getByText("Professional")).toBeVisible(); + await expect(page.getByText("Enterprise")).toBeVisible(); + }); + + test("shows plan prices", async ({ page }) => { + await expect(page.getByText("$49/mo")).toBeVisible({ timeout: 10_000 }); + await expect(page.getByText("$199/mo")).toBeVisible(); + }); + + test("Enterprise card shows Contact Sales link", async ({ page }) => { + const contactLink = page.getByRole("link", { name: "Contact Sales" }); + await expect(contactLink).toBeVisible({ timeout: 10_000 }); + await expect(contactLink).toHaveAttribute("href", /mailto:sales@/); + }); + + test("Upgrade button for Starter exists and is clickable", async ({ page }) => { + const upgradeBtn = page.getByRole("button", { name: "Upgrade" }).first(); + await expect(upgradeBtn).toBeVisible({ timeout: 10_000 }); + await expect(upgradeBtn).toBeEnabled(); + }); +}); diff --git a/tests/e2e/devices.spec.ts b/tests/e2e/devices.spec.ts new file mode 100644 index 0000000..8f2908c --- /dev/null +++ b/tests/e2e/devices.spec.ts @@ -0,0 +1,81 @@ +import { test, expect } from "@playwright/test"; +import { injectMockAuth } from "./fixtures/mockAuth"; + +// Uses mock auth — no real Auth0 credentials needed. 
+ +test.describe("Devices page", () => { + test.beforeEach(async ({ page }) => { + await injectMockAuth(page); + await page.goto("/"); + }); + + test("shows + Register Device button", async ({ page }) => { + const btn = page.getByRole("button", { name: "+ Register Device" }); + await expect(btn).toBeVisible({ timeout: 10_000 }); + }); + + test("Register Device modal opens on click", async ({ page }) => { + await page.getByRole("button", { name: "+ Register Device" }).click(); + await expect(page.getByRole("dialog")).toBeVisible(); + await expect(page.getByText("Register a Device")).toBeVisible(); + }); + + test("modal closes on Escape", async ({ page }) => { + await page.getByRole("button", { name: "+ Register Device" }).click(); + await expect(page.getByRole("dialog")).toBeVisible(); + await page.keyboard.press("Escape"); + await expect(page.getByRole("dialog")).not.toBeVisible(); + }); + + test("modal closes on backdrop click", async ({ page }) => { + await page.getByRole("button", { name: "+ Register Device" }).click(); + const dialog = page.getByRole("dialog"); + await expect(dialog).toBeVisible(); + const box = await dialog.boundingBox(); + if (!box) throw new Error("Dialog bounding box unavailable"); + await page.mouse.click(box.x - 10, box.y - 10); + await expect(page.getByRole("dialog")).not.toBeVisible(); + }); + + test("serial number input normalises to uppercase", async ({ page }) => { + await page.getByRole("button", { name: "+ Register Device" }).click(); + const input = page.getByLabel("Serial Number"); + await input.fill("sn12345678"); + await expect(input).toHaveValue("SN12345678"); + }); + + test("submit button disabled when serial is too short", async ({ page }) => { + await page.getByRole("button", { name: "+ Register Device" }).click(); + const submitBtn = page.getByRole("button", { name: "Register Device" }); + await expect(submitBtn).toBeDisabled(); + + await page.getByLabel("Serial Number").fill("SN1"); + await 
expect(submitBtn).toBeDisabled(); + + await page.getByLabel("Serial Number").fill("SN12"); + await expect(submitBtn).toBeEnabled(); + }); + + test("invalid serial shows validation error", async ({ page }) => { + await page.getByRole("button", { name: "+ Register Device" }).click(); + await page.getByLabel("Serial Number").fill("AB!@#"); + await page.getByRole("button", { name: "Register Device" }).click(); + await expect(page.getByRole("alert")).toBeVisible(); + await expect(page.getByRole("alert")).toContainText("4–30 uppercase"); + }); + + test("CSV Export button is present", async ({ page }) => { + const exportBtn = page.getByRole("button", { name: /Export CSV/i }); + await expect(exportBtn).toBeVisible({ timeout: 10_000 }); + }); + + test("Refresh button triggers refetch", async ({ page }) => { + const refreshBtn = page.getByRole("button", { name: "Refresh" }); + await expect(refreshBtn).toBeVisible({ timeout: 10_000 }); + const refreshRequest = page.waitForResponse((response) => + response.url().includes("/graphql") && response.request().method() === "POST" + ); + await refreshBtn.click(); + await refreshRequest; + }); +}); diff --git a/tests/e2e/fixtures/mockAuth.ts b/tests/e2e/fixtures/mockAuth.ts new file mode 100644 index 0000000..0443ff1 --- /dev/null +++ b/tests/e2e/fixtures/mockAuth.ts @@ -0,0 +1,148 @@ +import { Page } from "@playwright/test"; + +// ─── Fake JWT ───────────────────────────────────────────────────────────────── +// A base64url-encoded JWT with GrainGuard claims. +// No real signature needed — the dashboard just reads it from localStorage +// and the API calls are mocked by page.route(), so nothing validates it. 
+ +function b64(obj: object): string { + return Buffer.from(JSON.stringify(obj)).toString("base64url"); +} + +const HEADER = b64({ alg: "RS256", typ: "JWT" }); +const PAYLOAD = b64({ + sub: "auth0|e2e-test-user", + email: "e2e@grainguard.com", + name: "E2E Test User", + iss: "https://dev-dz6bl3nngdeib7ro.us.auth0.com/", + aud: "https://api.grainguard.com", + iat: Math.floor(Date.now() / 1000), + exp: Math.floor(Date.now() / 1000) + 86400, // 24h + "https://grainguard/tenant_id": "00000000-0000-0000-0000-000000000001", + "https://grainguard/roles": ["admin"], +}); + +export const FAKE_TOKEN = `${HEADER}.${PAYLOAD}.fake_signature`; + +// ─── Auth0 localStorage cache key ──────────────────────────────────────────── +// auth0-spa-js reads from this key to decide if the user is authenticated. + +const CLIENT_ID = process.env.VITE_AUTH0_CLIENT_ID || "6DwwDrUpsC4LckBieVQdlGYtguTPnYys"; +const AUDIENCE = process.env.VITE_AUTH0_AUDIENCE || "https://api.grainguard.com"; +const AUTH0_CACHE_KEY = `@@auth0spajs@@::${CLIENT_ID}::${AUDIENCE}::openid profile email`; + +const AUTH0_CACHE_VALUE = JSON.stringify({ + body: { + access_token: FAKE_TOKEN, + id_token: FAKE_TOKEN, + scope: "openid profile email", + expires_in: 86400, + token_type: "Bearer", + decodedToken: { + encoded: { header: HEADER, payload: PAYLOAD, signature: "fake" }, + header: { alg: "RS256", typ: "JWT" }, + user: { + sub: "auth0|e2e-test-user", + email: "e2e@grainguard.com", + name: "E2E Test User", + }, + }, + audience: AUDIENCE, + client_id: CLIENT_ID, + }, + expiresAt: Math.floor(Date.now() / 1000) + 86400, +}); + +// ─── Mock API responses ─────────────────────────────────────────────────────── + +const MOCK_DEVICES = [ + { id: "dev-1", serialNumber: "SN00100001", status: "online", lastSeen: new Date().toISOString() }, + { id: "dev-2", serialNumber: "SN00100002", status: "offline", lastSeen: new Date().toISOString() }, +]; + +const MOCK_SUBSCRIPTION = { + plan: "professional", + subscription_status: "active", 
+  trial_ends_at: null,
+  current_period_end: new Date(Date.now() + 30 * 86400 * 1000).toISOString(),
+};
+
+// ─── injectMockAuth ───────────────────────────────────────────────────────────
+// Call this in beforeEach to set up a fully authenticated test environment.
+
+export async function injectMockAuth(page: Page): Promise<void> {
+  // 1. Intercept Auth0 JWKS — return empty keyset (we never validate sig in tests)
+  await page.route("**/.well-known/jwks.json", (route) =>
+    route.fulfill({ json: { keys: [] } })
+  );
+
+  // 2. Intercept Auth0 token endpoint — return fake token
+  await page.route("**/oauth/token", (route) =>
+    route.fulfill({
+      json: {
+        access_token: FAKE_TOKEN,
+        id_token: FAKE_TOKEN,
+        token_type: "Bearer",
+        expires_in: 86400,
+        scope: "openid profile email",
+      },
+    })
+  );
+
+  // 3. Intercept GraphQL (BFF) — return mock data
+  await page.route("**/graphql", (route) => {
+    const body = route.request().postDataJSON() as { query?: string } | null;
+    const query = body?.query ?? "";
+
+    if (query.includes("devices") || query.includes("Devices")) {
+      return route.fulfill({
+        json: {
+          data: {
+            devices: MOCK_DEVICES,
+            deviceTelemetry: [],
+          },
+        },
+      });
+    }
+
+    if (query.includes("me") || query.includes("tenant")) {
+      return route.fulfill({
+        json: {
+          data: {
+            me: {
+              id: "00000000-0000-0000-0000-000000000001",
+              email: "e2e@grainguard.com",
+              tenantId: "00000000-0000-0000-0000-000000000001",
+              plan: "professional",
+            },
+          },
+        },
+      });
+    }
+
+    // Default — empty success
+    return route.fulfill({ json: { data: {} } });
+  });
+
+  // 4. Intercept REST billing endpoint
+  await page.route("**/billing/subscription", (route) =>
+    route.fulfill({ json: MOCK_SUBSCRIPTION })
+  );
+
+  // 5.
Intercept REST devices endpoint + await page.route("**/devices**", (route) => { + if (route.request().method() === "GET") { + return route.fulfill({ json: MOCK_DEVICES }); + } + return route.fulfill({ json: { deviceId: "dev-new", serialNumber: "SNNEW001" } }); + }); + + // 6. Inject Auth0 cache into localStorage before app loads + await page.addInitScript( + ({ key, value, token }) => { + localStorage.setItem(key, value); + localStorage.setItem("__e2e_access_token", token); + }, + { key: AUTH0_CACHE_KEY, value: AUTH0_CACHE_VALUE, token: FAKE_TOKEN } + ); +} diff --git a/tests/e2e/playwright.config.ts b/tests/e2e/playwright.config.ts new file mode 100644 index 0000000..07fa182 --- /dev/null +++ b/tests/e2e/playwright.config.ts @@ -0,0 +1,37 @@ +import { defineConfig, devices } from "@playwright/test"; + +// Base URL — in CI this points at a staging deploy; locally at localhost +const BASE_URL = process.env.E2E_BASE_URL ?? "http://localhost:5173"; + +export default defineConfig({ + testDir: ".", + timeout: 30_000, // 30s per test + retries: process.env.CI ? 2 : 0, // retry twice in CI to handle flakiness + forbidOnly: !!process.env.CI, + fullyParallel: true, + workers: process.env.CI ? 2 : undefined, + + reporter: [ + ["list"], + ["html", { outputFolder: "playwright-report", open: "never" }], + ["junit", { outputFile: "playwright-results.xml" }], + ], + + use: { + baseURL: BASE_URL, + screenshot: "only-on-failure", + video: "retain-on-failure", + trace: "on-first-retry", + }, + + projects: [ + { + name: "chromium", + use: { ...devices["Desktop Chrome"] }, + }, + { + name: "firefox", + use: { ...devices["Desktop Firefox"] }, + }, + ], +});