.nyc_output
.reuse
.xmake.cfg
+.sandcastle/
# Logs
logs/
*.log
steps:
- uses: actions/checkout@v6
+ - name: Validate configuration
+ run: |
+ if [ -z "$GIST_ID" ]; then
+ echo "::error::GIST_ID variable is not configured"
+ exit 1
+ fi
+ if [ -z "$SECRET_TOKEN" ]; then
+ echo "::error::SECRET_TOKEN secret is not configured"
+ exit 1
+ fi
+ env:
+ GIST_ID: ${{ vars.GIST_ID }}
+ SECRET_TOKEN: ${{ secrets.SECRET_TOKEN }}
+
- name: gh login
run: echo "${{ secrets.SECRET_TOKEN }}" | gh auth login --with-token
https://api.github.com/repos/${{ github.repository }}/traffic/clones \
> clone.json
- - name: create gist and download previous count
- id: set-gist
+ - name: download previous count
run: |
- if gh secret list | grep -q "GIST_ID"
- then
- echo "GIST_ID found"
- echo "GIST=${{ secrets.GIST_ID }}" >> $GITHUB_OUTPUT
- curl https://gist.githubusercontent.com/${{ github.actor }}/${{ secrets.GIST_ID }}/raw/clone.json > clone_before.json
- if cat clone_before.json | grep '404: Not Found'; then
- echo "GIST_ID not valid anymore. Creating another gist..."
- gist_id=$(gh gist create clone.json | awk -F / '{print $NF}')
- echo $gist_id | gh secret set GIST_ID
- echo "GIST=${gist_id}" >> $GITHUB_OUTPUT
- cp clone.json clone_before.json
- git rm --ignore-unmatch CLONE.md
- fi
- else
- echo "GIST_ID not found. Creating a gist..."
- gist_id=$(gh gist create clone.json | awk -F / '{print $NF}')
- echo $gist_id | gh secret set GIST_ID
- echo "GIST=${gist_id}" >> $GITHUB_OUTPUT
- cp clone.json clone_before.json
+          # -s silences the progress meter; gist raw returns a literal
+          # "404: Not Found" body when the gist id is stale, so we must NOT
+          # use -f here (the body is what the fallback check below relies on).
+          curl -s https://gist.githubusercontent.com/${{ github.actor }}/${{ vars.GIST_ID }}/raw/clone.json > clone_before.json
+          # grep -q replaces the useless `cat | grep` and keeps the log clean.
+          if grep -q '404: Not Found' clone_before.json; then
+            cp clone.json clone_before.json
fi
- name: update clone.json
run: |
content=$(sed -e 's/\\/\\\\/g' -e 's/\t/\\t/g' -e 's/\"/\\"/g' -e 's/\r//g' "clone.json" | sed -E ':a;N;$!ba;s/\r{0,1}\n/\\n/g')
echo '{"description": "${{ github.repository }} clone statistics", "files": {"clone.json": {"content": "'"$content"'"}}}' > post_clone.json
- curl -s -X PATCH \
+ curl -sf -X PATCH \
--user "${{ github.actor }}:${{ secrets.SECRET_TOKEN }}" \
-H "Content-Type: application/json" \
- -d @post_clone.json https://api.github.com/gists/${{ steps.set-gist.outputs.GIST }} > /dev/null 2>&1
-
- if [ ! -f CLONE.md ]; then
- shields="https://img.shields.io/badge/dynamic/json?color=success&label=Clone&query=count&url="
- url="https://gist.githubusercontent.com/${{ github.actor }}/${{ steps.set-gist.outputs.GIST }}/raw/clone.json"
- repo="https://github.com/MShawon/github-clone-count-badge"
- echo ''> CLONE.md
- echo '
- **Markdown**
-
- ```markdown' >> CLONE.md
- echo "[]($repo)" >> CLONE.md
- echo '
- ```
-
- **HTML**
- ```html' >> CLONE.md
- echo "<a href='$repo'><img alt='GitHub Clones' src='$shields$url&logo=github'></a>" >> CLONE.md
- echo '```' >> CLONE.md
-
- git config --local user.name "GitHub Action"
- git config --local user.email "action@github.com"
- git add CLONE.md
- git commit -m "docs: create clone count badge"
- fi
+ -d @post_clone.json https://api.github.com/gists/${{ vars.GIST_ID }}
- name: push
uses: CasperWA/push-protected@v2
--- /dev/null
+name: Sandcastle
+
+on:
+ # schedule:
+ # - cron: '0 2 * * 1'
+ workflow_dispatch:
+
+concurrency:
+ group: sandcastle
+ cancel-in-progress: false
+
+permissions:
+ contents: write
+ pull-requests: write
+ issues: read
+
+jobs:
+ sandcastle:
+ runs-on: ubuntu-latest
+ timeout-minutes: 45
+ steps:
+ - uses: actions/checkout@v6
+
+ - uses: pnpm/action-setup@v6
+
+ - uses: actions/setup-node@v6
+ with:
+ node-version: 24
+ cache: 'pnpm'
+
+ - run: pnpm install --ignore-scripts --frozen-lockfile
+
+ - name: Validate secrets
+ run: |
+ if [ -z "$OPENCODE_AUTH_CONTENT" ]; then
+ echo "::error::OPENCODE_AUTH_CONTENT secret is not configured"
+ exit 1
+ fi
+ env:
+ OPENCODE_AUTH_CONTENT: ${{ secrets.OPENCODE_AUTH_CONTENT }}
+
+ - run: docker build -t sandcastle-sandbox .sandcastle/
+
+      - name: Create sandbox env
+        run: |
+          printf 'GH_TOKEN=%s\n' "$GH_TOKEN" > .sandcastle/.env
+          printf 'GITHUB_TOKEN=%s\n' "$GITHUB_TOKEN" >> .sandcastle/.env
+          printf 'OPENCODE_AUTH_CONTENT=%s\n' "$OPENCODE_AUTH_CONTENT" >> .sandcastle/.env
+        env:
+          GH_TOKEN: ${{ github.token }}
+          # Fix: GITHUB_TOKEN was referenced by the script above but never
+          # mapped here, so its .env line was always written empty.
+          GITHUB_TOKEN: ${{ github.token }}
+          OPENCODE_AUTH_CONTENT: ${{ secrets.OPENCODE_AUTH_CONTENT }}
+
+ - run: pnpm run sandcastle
+ env:
+ GH_TOKEN: ${{ github.token }}
--- /dev/null
+GH_TOKEN=
+GITHUB_TOKEN=
+OPENCODE_AUTH_CONTENT=
--- /dev/null
+.env
+logs/
+worktrees/
--- /dev/null
+FROM node:24-trixie
+
+# Base utilities required by the agents and the gh install below.
+RUN apt-get update && apt-get install -y \
+    git \
+    curl \
+    jq \
+    ca-certificates \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install GitHub CLI
+# (official apt repository: keyring written via dd, source list via tee)
+RUN curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg \
+    | dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
+    && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" \
+    | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
+    && apt-get update && apt-get install -y gh \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install coding agents globally
+RUN npm install -g \
+    @anthropic-ai/claude-code \
+    @mariozechner/pi-coding-agent \
+    opencode-ai
+
+# pnpm via corepack; "latest" is resolved at image build time.
+RUN corepack enable && corepack prepare pnpm@latest --activate
+
+# Rename the base image's "node" user to "agent" and align UID/GID.
+# Default 1001 matches GitHub Actions runner UID (ubuntu-latest since May 2025).
+# On macOS, build with --build-arg AGENT_UID=$(id -u) to match host UID.
+ARG AGENT_UID=1001
+RUN usermod -u ${AGENT_UID} -d /home/agent -m -l agent node \
+    && groupmod -g ${AGENT_UID} -n agent node \
+    && chown -R ${AGENT_UID}:${AGENT_UID} /home/agent
+USER agent
+
+WORKDIR /home/agent
+# Keep the container alive indefinitely; presumably commands are exec'd into
+# it by the orchestrator — NOTE(review): confirm against the sandbox runner.
+ENTRYPOINT ["sleep", "infinity"]
--- /dev/null
+/** Internal node for the O(1) FIFO waiting queue. Not exported. */
+interface QueueNode {
+  next: null | QueueNode
+  resolve: () => void
+}
+
+/**
+ * A concurrency limiter that restricts parallel execution to a maximum number of tasks.
+ * Waiters are resumed in FIFO order; queue operations are O(1) amortized
+ * (singly-linked list).
+ */
+export class ConcurrencyPool {
+  private active = 0
+  private back: null | QueueNode = null
+  private front: null | QueueNode = null
+
+  /**
+   * @param max - Maximum number of concurrent tasks. Must be a positive integer >= 1.
+   */
+  constructor (private readonly max: number) {
+    if (!Number.isInteger(max) || max < 1) {
+      throw new RangeError('ConcurrencyPool max must be a positive integer >= 1')
+    }
+  }
+
+  /**
+   * Executes the given async function, waiting if the pool is at capacity.
+   * @param fn - Async function to execute within the pool.
+   * @returns The result of the function.
+   * @remarks Re-entrant calls using the same pool instance may deadlock when all slots are occupied.
+   */
+  async run<T>(fn: () => Promise<T>): Promise<T> {
+    await this.waitForSlot()
+    try {
+      return await fn()
+    } finally {
+      this.freeSlot()
+    }
+  }
+
+  /** Releases a slot, handing it directly to the oldest waiter if one exists. */
+  private freeSlot (): void {
+    this.active -= 1
+    const waiter = this.front
+    if (waiter === null) return
+    this.front = waiter.next
+    if (this.front === null) {
+      this.back = null
+    }
+    // The slot transfers to the waiter without ever becoming observable as free.
+    this.active += 1
+    waiter.resolve()
+  }
+
+  /** Acquires a slot immediately if available, otherwise enqueues a waiter. */
+  private waitForSlot (): Promise<void> {
+    if (this.active < this.max) {
+      this.active += 1
+      return Promise.resolve()
+    }
+    return new Promise<void>(resolve => {
+      const waiter: QueueNode = { next: null, resolve }
+      if (this.back === null) {
+        this.front = waiter
+      } else {
+        this.back.next = waiter
+      }
+      this.back = waiter
+    })
+  }
+}
--- /dev/null
+import { execFileSync } from 'node:child_process'
+import { existsSync } from 'node:fs'
+
+// Seconds an agent subprocess may stay idle before being treated as hung.
+export const AGENT_IDLE_TIMEOUT_S = 300
+
+// Model identifier for the implementer agent.
+export const AGENT_MODEL = 'github-copilot/claude-sonnet-4.6'
+
+// Prefix for per-issue branches (e.g. agent/issue-42-some-slug).
+export const BRANCH_PREFIX = 'agent/issue'
+
+// Sentinel string agents emit to signal completion of their prompt.
+export const COMPLETION_SIGNAL = '<promise>COMPLETE</promise>'
+
+// Lines of context on each side of a finding used when hashing for dedup.
+export const CONTEXT_HASH_RADIUS = 3
+
+export const DOCKER_IMAGE = 'sandcastle-sandbox'
+
+// NOTE(review): evaluated at module load — resolvePnpmStorePath spawns a
+// synchronous `pnpm` subprocess on import of this module.
+export const DOCKER_MOUNTS = resolveDockerMounts()
+
+/**
+ * @returns Mount entries for pnpm store, or empty if store path is unavailable.
+ */
+function resolveDockerMounts (): { hostPath: string; readonly: boolean; sandboxPath: string }[] {
+  const pnpmStore = resolvePnpmStorePath()
+  // Only mount when the store directory actually exists on the host.
+  if (pnpmStore != null && existsSync(pnpmStore)) {
+    return [
+      { hostPath: pnpmStore, readonly: true, sandboxPath: '/home/agent/.local/share/pnpm/store' },
+    ]
+  }
+  return []
+}
+
+/**
+ * @returns The pnpm store directory path, or undefined if pnpm is unavailable.
+ */
+function resolvePnpmStorePath (): string | undefined {
+  try {
+    return execFileSync('pnpm', ['store', 'path'], { encoding: 'utf-8' }).trim()
+  } catch {
+    return undefined
+  }
+}
+
+// Timeouts are in milliseconds unless the name says otherwise.
+export const GIT_TIMEOUT_MS = 30_000
+
+export const GRACE_TIMEOUT_MS = 30_000
+
+// Hex characters kept from SHA-256 digests when building dedup keys.
+export const HASH_PREFIX_LENGTH = 16
+
+export const ITERATION_BUDGET_PER_ROUND = 50
+
+// GitHub issue label marking issues eligible for the agent pipeline.
+export const ISSUE_LABEL = 'sandcastle'
+
+export const MAX_ISSUES_FETCH = 50
+
+export const MAX_PRS_FETCH = 200
+
+// Maximum number of tasks processed concurrently (see ConcurrencyPool).
+export const MAX_PARALLEL = 3
+
+// Truncation limit applied to captured stderr in error logs.
+export const MAX_STDERR_CHARS = 500
+
+export const MAX_CRITIC_ROUNDS = 5
+
+export const MAX_TITLE_LENGTH = 200
+
+// Model identifier for the planner agent.
+export const PLANNER_MODEL = 'github-copilot/claude-opus-4.6'
+
+export const PUSH_TIMEOUT_MS = 60_000
+
+// Per-task wall-clock budget: 15 minutes.
+export const TASK_TIMEOUT_MS = 15 * 60 * 1000
+
+// Composite validation suite; executed via `sh -c` by runValidation.
+export const VALIDATION_COMMAND =
+  'pnpm format && pnpm typecheck && pnpm lint && pnpm build && pnpm test'
+
+export const VALIDATION_TIMEOUT_MS = 300_000
--- /dev/null
+import crypto from 'node:crypto'
+
+import type { LoopResult, TaskSpec } from './types.js'
+
+import {
+ GIT_TIMEOUT_MS,
+ MAX_STDERR_CHARS,
+ PUSH_TIMEOUT_MS,
+ VALIDATION_COMMAND,
+ VALIDATION_TIMEOUT_MS,
+} from './constants.js'
+import { execFileAsync, toErrorMessage } from './utils.js'
+
+/**
+ * Fetches origin/main and rebases the current branch onto it.
+ * On failure, aborts the rebase cleanly.
+ * @param cwd - Working directory (worktree path).
+ * @returns `true` if rebase succeeded, `false` otherwise.
+ */
+export async function attemptRebase (cwd: string): Promise<boolean> {
+  const timedOpts = { cwd, timeout: GIT_TIMEOUT_MS }
+  try {
+    await execFileAsync('git', ['fetch', 'origin', 'main'], timedOpts)
+    await execFileAsync('git', ['rebase', 'origin/main'], timedOpts)
+    return true
+  } catch {
+    // Best-effort cleanup: leave the worktree out of a half-applied rebase.
+    // The abort may itself fail (e.g. no rebase in progress) — ignore that.
+    await execFileAsync('git', ['rebase', '--abort'], { cwd }).catch(() => undefined)
+    return false
+  }
+}
+
+/**
+ * Builds the PR title, body, and `gh pr create` argument list.
+ * @param spec - The task specification.
+ * @param loopResult - The result from the refinement loop.
+ * @param validationPassed - Whether the validation suite passed.
+ * @param rebaseSucceeded - Whether the rebase onto main succeeded.
+ * @returns Object with `isDraft` flag and `prArgs` string array.
+ */
+export function buildPrArgs (
+ spec: TaskSpec,
+ loopResult: LoopResult,
+ validationPassed: boolean,
+ rebaseSucceeded: boolean
+): { isDraft: boolean; prArgs: string[] } {
+ const converged = loopResult.status === 'converged'
+ const isDraft = !converged || !validationPassed
+ const outstandingNote =
+ loopResult.lastFindings.length > 0
+ ? `\n\n${converged ? 'ℹ️ Known findings (not addressed):' : '⚠️ Outstanding findings:'}\n${loopResult.lastFindings.map(f => `- [${f.severity}] ${f.file}: ${f.title}`).join('\n')}`
+ : ''
+ const validationNote = !validationPassed
+ ? '\n\n⚠️ Validation did not pass. Manual review required.'
+ : ''
+ const rebaseNote = !rebaseSucceeded
+ ? '\n\n⚠️ Rebase failed. Branch is not rebased onto main.'
+ : ''
+
+ const validationCheck = validationPassed ? '- [x]' : '- [ ]'
+ const commitPrefix = spec.labels.includes('feature request')
+ ? 'feat'
+ : spec.labels.includes('bug')
+ ? 'fix'
+ : 'chore'
+ const prTitle = `${commitPrefix}: resolve #${spec.id} \u2014 ${spec.title}`
+ const typeOfChange =
+ commitPrefix === 'feat'
+ ? 'New feature (non-breaking change that adds functionality)'
+ : commitPrefix === 'fix'
+ ? 'Bug fix (non-breaking change that fixes an issue)'
+ : 'Refactoring (no functional changes)'
+ const prBody = `## Description\n\nAutomated ${commitPrefix} for #${spec.id}: ${spec.title}\n\n## Type of Change\n\n- [x] ${typeOfChange}\n\n## Checklist\n\n${validationCheck} I have run validation suite\n- [x] My changes follow the existing code style\n\n## Related Issues\n\nFixes #${spec.id}${outstandingNote}${validationNote}${rebaseNote}`
+
+ const prArgs = [
+ 'pr',
+ 'create',
+ ...(isDraft ? ['--draft'] : []),
+ '--head',
+ spec.branch,
+ '--base',
+ 'main',
+ '--title',
+ prTitle,
+ '--body',
+ prBody,
+ ]
+
+ return { isDraft, prArgs }
+}
+
+/**
+ * Extracts stderr from a caught error, truncated to 500 chars.
+ * @param err - The caught error value.
+ * @returns Stderr string or empty string if unavailable.
+ */
+export function extractStderr (err: unknown): string {
+  if (err instanceof Error && 'stderr' in err) {
+    const { stderr } = err as { stderr: unknown }
+    return String(stderr).slice(0, MAX_STDERR_CHARS)
+  }
+  return ''
+}
+
+/**
+ * Pushes the branch to origin. When rebase succeeded, uses force-with-lease
+ * with a rescue-branch fallback. When rebase was aborted, does a plain push.
+ * @param cwd - Working directory (worktree path).
+ * @param spec - The task specification.
+ * @param rebaseSucceeded - Whether the preceding rebase completed successfully.
+ * @returns `true` if the primary push succeeded, `false` otherwise.
+ */
+export async function pushBranch (
+  cwd: string,
+  spec: TaskSpec,
+  rebaseSucceeded: boolean
+): Promise<boolean> {
+  if (rebaseSucceeded) {
+    try {
+      // The rebase rewrote history, so a forced push is required;
+      // --force-with-lease refuses to clobber remote commits we haven't seen.
+      await execFileAsync('git', ['push', '--force-with-lease', 'origin', 'HEAD'], {
+        cwd,
+        timeout: PUSH_TIMEOUT_MS,
+      })
+      return true
+    } catch (pushErr: unknown) {
+      const pushMsg = toErrorMessage(pushErr)
+      try {
+        // Preserve the commits on a uniquely named rescue branch before
+        // giving up — the sandbox (and its worktree) is disposed afterwards.
+        const suffix = crypto.randomBytes(4).toString('hex')
+        await execFileAsync(
+          'git',
+          ['push', 'origin', `HEAD:refs/heads/rescue/${spec.branch}-${suffix}`],
+          {
+            cwd,
+            timeout: PUSH_TIMEOUT_MS,
+          }
+        )
+        console.warn(
+          ` #${spec.id}: Push failed. Commits preserved at rescue/${spec.branch}-${suffix}`
+        )
+      } catch {
+        console.error(
+          ` #${spec.id}: Push failed and rescue failed. Commits will be lost on sandbox disposal: ${pushMsg}`
+        )
+      }
+      // Even when the rescue push worked, the primary push still failed.
+      return false
+    }
+  } else {
+    try {
+      // No history rewrite happened (rebase was aborted), so a plain push
+      // with upstream tracking (-u) is sufficient and safe.
+      await execFileAsync('git', ['push', '-u', 'origin', 'HEAD'], {
+        cwd,
+        timeout: PUSH_TIMEOUT_MS,
+      })
+      return true
+    } catch (pushErr: unknown) {
+      const pushMsg = toErrorMessage(pushErr)
+      console.warn(` #${spec.id}: git push failed after rebase abort: ${pushMsg}`)
+      return false
+    }
+  }
+}
+
+/**
+ * Runs the full validation suite.
+ * @param cwd - Working directory (worktree path).
+ * @param spec - Optional task specification (used for logging).
+ * @returns `true` if validation passed, `false` otherwise.
+ */
+export async function runValidation (cwd: string, spec?: TaskSpec): Promise<boolean> {
+  try {
+    // The composite command needs a shell (&&-chaining); maxBuffer is raised
+    // because build/test output can exceed the Node default.
+    await execFileAsync('sh', ['-c', VALIDATION_COMMAND], {
+      cwd,
+      maxBuffer: 8 * 1024 * 1024,
+      timeout: VALIDATION_TIMEOUT_MS,
+    })
+    return true
+  } catch (err: unknown) {
+    // `killed` is set on the error when the child was terminated
+    // (e.g. by the timeout above) — distinguishes timeouts from failures.
+    if (err && typeof err === 'object' && 'killed' in err && (err as { killed: boolean }).killed) {
+      const label = spec ? `#${spec.id}` : 'mid-loop'
+      console.warn(` ${label}: Validation timed out after ${String(VALIDATION_TIMEOUT_MS)}ms.`)
+    } else if (spec) {
+      // Without a spec (mid-loop probe), non-timeout failures are logged
+      // nowhere — intentional: the caller only needs the boolean.
+      const stderr = extractStderr(err)
+      console.warn(` #${spec.id}: Validation failed.${stderr ? `\n${stderr}` : ''}`)
+    }
+    return false
+  }
+}
--- /dev/null
+import * as sandcastle from '@ai-hero/sandcastle'
+import { docker } from '@ai-hero/sandcastle/sandboxes/docker'
+
+import type { TaskSpec } from './types.js'
+
+import { ConcurrencyPool } from './concurrency-pool.js'
+import {
+ BRANCH_PREFIX,
+ DOCKER_IMAGE,
+ DOCKER_MOUNTS,
+ ISSUE_LABEL,
+ ITERATION_BUDGET_PER_ROUND,
+ MAX_CRITIC_ROUNDS,
+ MAX_PARALLEL,
+ TASK_TIMEOUT_MS,
+} from './constants.js'
+import { runRefinementLoop } from './refinement-loop.js'
+import { implementStrategy } from './strategies/implement/strategy.js'
+import { GithubIssueSource } from './task-source.js'
+
+// Discover tasks from GitHub issues carrying the pipeline label.
+const source = new GithubIssueSource({
+  branchPrefix: BRANCH_PREFIX,
+  dockerImage: DOCKER_IMAGE,
+  label: ISSUE_LABEL,
+})
+
+let tasks: TaskSpec[]
+try {
+  tasks = await source.discover()
+} catch (err) {
+  console.error(err instanceof Error ? err.message : String(err))
+  // Set the exit code before exiting so CI reports the discovery failure.
+  process.exitCode = 1
+  process.exit()
+}
+
+if (tasks.length === 0) {
+  console.log('No tasks to process.')
+} else {
+  // Bound the number of concurrently running sandboxes.
+  const pool = new ConcurrencyPool(MAX_PARALLEL)
+
+  const settled = await Promise.allSettled(
+    tasks.map(spec =>
+      pool.run(async () => {
+        // Per-task watchdog: aborts the refinement loop past the time budget.
+        const ac = new AbortController()
+        const timer = setTimeout(() => {
+          ac.abort(new Error(`Task #${spec.id} timed out after ${String(TASK_TIMEOUT_MS)}ms`))
+        }, TASK_TIMEOUT_MS)
+        // unref() keeps the timer from holding the event loop open.
+        timer.unref()
+
+        try {
+          // `await using`: the sandbox is asynchronously disposed when this
+          // block exits, whether normally or by throw.
+          await using sandbox = await sandcastle.createSandbox({
+            branch: spec.branch,
+            copyToWorktree: ['node_modules'],
+            hooks: {
+              sandbox: { onSandboxReady: [{ command: 'pnpm install && pnpm run build' }] },
+            },
+            sandbox: docker({ imageName: DOCKER_IMAGE, mounts: [...DOCKER_MOUNTS] }),
+          })
+
+          const loopResult = await runRefinementLoop(spec, sandbox, implementStrategy, {
+            iterationBudget: ITERATION_BUDGET_PER_ROUND,
+            maxRounds: MAX_CRITIC_ROUNDS,
+            postLoopValidationRetry: true,
+            signal: ac.signal,
+          })
+
+          // Only finalize (push / PR) when the loop produced commits.
+          let workSuccess = false
+          if (loopResult.totalCommits > 0) {
+            const finalizeResult = await implementStrategy.finalize(spec, loopResult, sandbox)
+            workSuccess = implementStrategy.isWorkComplete(finalizeResult)
+          }
+
+          return { spec, success: workSuccess }
+        } finally {
+          clearTimeout(timer)
+        }
+      })
+    )
+  )
+
+  // The run counts as successful if at least one task completed end-to-end.
+  const workCompleted = settled.some(
+    outcome => outcome.status === 'fulfilled' && outcome.value.success
+  )
+
+  // settled preserves input order, so index i maps back to tasks[i].
+  for (const [i, outcome] of settled.entries()) {
+    if (outcome.status === 'rejected') {
+      const reason: unknown = outcome.reason
+      const msg = reason instanceof Error ? (reason.stack ?? reason.message) : String(reason)
+      console.error(` ✗ #${tasks[i].id} failed: ${msg}`)
+    }
+  }
+
+  console.log('\nAll done.')
+
+  if (!workCompleted) {
+    process.exitCode = 1
+  }
+}
--- /dev/null
+# Plan Agent
+
+Read open GitHub issues and produce a parallelizable execution plan.
+
+## Context
+
+This is a Node.js TypeScript monorepo (pnpm workspace) simulating OCPP charging stations.
+Structure: root simulator (`src/`), `ui/common`, `ui/cli`, `ui/web`, `tests/ocpp-server` (Python).
+Test runner: Node.js native (`node:test`). Build tool: esbuild. Linter: ESLint (neostandard).
+Read `AGENTS.md` for project conventions.
+
+## Open Issues
+
+{{ISSUES_JSON}}
+
+## Steps
+
+1. Analyze the issues above. For each, determine:
+ - Can it be implemented independently (no blocking dependency on another open issue)?
+ - Is the scope clear enough to implement without further clarification?
+
+2. Select all issues that are independent and actionable.
+
+3. For each selected issue, assign a branch name: `{{BRANCH_PREFIX}}-<number>-<slug>` where slug is a short kebab-case summary (e.g., `{{BRANCH_PREFIX}}-42-fix-streaming-id`).
+
+4. Output the plan in this exact format:
+
+ ```text
+ <plan>{ "issues": [{ "id": "<number>", "title": "<title>", "branch": "{{BRANCH_PREFIX}}-<number>-<slug>" }] }</plan>
+ ```
+
+## Rules
+
+- Exclude issues labeled `wontfix`, `duplicate`, or `question`.
+- Exclude issues that depend on another open issue (mention "blocked by #N" or similar).
+- Prefer issues where scope fits a single-file change over cross-cutting refactors.
+- If every issue is blocked, include the single highest-priority candidate (fewest/weakest dependencies).
+- If no actionable issues exist, output:
+
+ ```text
+ <plan>{ "issues": [] }</plan>
+ ```
+
+- Do not implement anything. Only produce the plan.
+
+## Completion
+
+After outputting the plan, output:
+
+```text
+<promise>COMPLETE</promise>
+```
--- /dev/null
+import * as sandcastle from '@ai-hero/sandcastle'
+import crypto from 'node:crypto'
+import { readFile, realpath } from 'node:fs/promises'
+import { join, sep } from 'node:path'
+
+import type {
+ Finding,
+ LoopResult,
+ LoopStatus,
+ LoopStrategy,
+ SandboxInstance,
+ TaskSpec,
+} from './types.js'
+
+import {
+ AGENT_IDLE_TIMEOUT_S,
+ AGENT_MODEL,
+ COMPLETION_SIGNAL,
+ CONTEXT_HASH_RADIUS,
+ HASH_PREFIX_LENGTH,
+ ITERATION_BUDGET_PER_ROUND,
+ MAX_CRITIC_ROUNDS,
+} from './constants.js'
+import { runValidation } from './finalizer.js'
+import { parseFindingsSafe } from './types.js'
+import { execFileAsync } from './utils.js'
+
+/** Options for configuring the refinement loop. */
+export interface RefinementLoopOptions {
+ /** Budget of iterations per round (flat constant applied to every round). */
+ iterationBudget?: number
+ /** Maximum number of implement↔critic rounds. */
+ maxRounds?: number
+ /** Optional callback invoked after each round completes. */
+ onRoundComplete?: (round: number, findings: Finding[]) => void
+ /** When true, run one extra implementer attempt if post-loop validation fails. */
+ postLoopValidationRetry?: boolean
+ /** Abort signal for cooperative cancellation (kills in-flight agent subprocesses). */
+ signal?: AbortSignal
+}
+
+/** Result of a convergence check. */
+interface ConvergenceResult {
+ /** Best SHA to restore (empty string = no update). */
+ bestSha: string
+ /** Updated last findings. */
+ lastFindings: Finding[]
+ /** New loop status. */
+ status: LoopStatus
+}
+
+/**
+ * Input descriptor for hashing a window of source lines around a finding.
+ */
+interface HashInput {
+ /** Working directory (worktree path) for resolving the file. */
+ readonly cwd: string
+ /** Relative file path of the finding. */
+ readonly file: string
+ /** Line number of the finding (1-indexed). */
+ readonly line: number
+}
+
+/**
+ * Context passed to the quality ratchet check.
+ * Groups the per-round identifiers needed for regression detection and rollback.
+ */
+interface RatchetContext {
+ /** SHA of HEAD before the implementer ran (used for rollback). */
+ readonly beforeSha: string
+ /** Working directory for git operations. */
+ readonly cwd: string
+ /** Current round number (1-indexed). */
+ readonly round: number
+ /** The task specification. */
+ readonly spec: TaskSpec
+}
+
+/** Resolved loop options with defaults applied. */
+interface ResolvedLoopOptions {
+ /** Iteration budget per round. */
+ budget: number
+ /** Maximum number of rounds. */
+ maxRounds: number
+ /** Optional round-complete callback (no-op if not provided). */
+ onRoundComplete: (round: number, findings: Finding[]) => void
+}
+
+/** Result of a single implement↔critic round. */
+interface RoundResult {
+ /** SHA of HEAD before the implementer ran. */
+ beforeSha: string
+ /** Number of commits made by the implementer. */
+ commits: number
+ /** Parsed findings from the critic, or null on critic failure. */
+ findings: Finding[] | null
+}
+
+/**
+ * Runs the implement↔critic refinement loop for a given task.
+ *
+ * Per round: the implementer runs with the previous round's findings, the
+ * critic reviews, findings are deduplicated, a quality ratchet guards
+ * against regressions, and convergence is checked.
+ * @param spec - The task specification.
+ * @param sandbox - The sandcastle sandbox instance.
+ * @param strategy - Strategy config for prompt/arg customization.
+ * @param opts - Optional configuration for rounds, budget, and callbacks.
+ * @returns The loop result with status, commits, findings, and rounds completed.
+ */
+export async function runRefinementLoop (
+  spec: TaskSpec,
+  sandbox: SandboxInstance,
+  strategy: LoopStrategy,
+  opts?: RefinementLoopOptions
+): Promise<LoopResult> {
+  const { budget, maxRounds, onRoundComplete } = resolveLoopOptions(opts)
+  const signal = opts?.signal
+
+  // Dedup keys persist across rounds so repeated findings don't count as new.
+  const seenKeys = new Set<string>()
+  let lastFindings: Finding[] = []
+  let status: LoopStatus = 'exhausted'
+  let totalCommits = 0
+  let roundsCompleted = 0
+  let previousFindingsCount = Infinity
+  // Best-known restore point: the SHA from the round with the fewest new findings.
+  let bestSha = ''
+  let bestFindingsCount = Infinity
+
+  for (let round = 1; round <= maxRounds; round++) {
+    signal?.throwIfAborted()
+    roundsCompleted = round
+
+    console.log(
+      ` #${spec.id} round ${String(round)}/${String(maxRounds)} (budget: ${String(budget)})`
+    )
+
+    const result = await executeRound(spec, sandbox, round, budget, lastFindings, strategy, signal)
+
+    const earlyExit = checkEarlyExit(spec, round, result, totalCommits)
+    if (earlyExit !== null) {
+      totalCommits = earlyExit.totalCommits
+      status = earlyExit.status
+      break
+    }
+
+    if (result.findings === null) break
+    const findings: Finding[] = result.findings
+
+    // Fast path: commits were made and full validation passes — converge now,
+    // skipping dedup/ratchet for this round.
+    if (result.commits > 0 && (await runValidation(sandbox.worktreePath, spec))) {
+      totalCommits += result.commits
+      status = 'converged'
+      break
+    }
+
+    const cwd = sandbox.worktreePath
+    const newFindings = await deduplicateFindings(findings, seenKeys, cwd)
+
+    console.log(
+      ` #${spec.id}: ${String(findings.length)} findings, ${String(newFindings.length)} new`
+    )
+
+    // LOW-confidence findings are excluded from the regression ratchet count.
+    const nonLowFindings = findings.filter(f => f.confidence !== 'LOW')
+    if (
+      await checkQualityRatchet(
+        { beforeSha: result.beforeSha, cwd, round, spec },
+        nonLowFindings.length,
+        previousFindingsCount
+      )
+    ) {
+      status = 'exhausted'
+      break
+    }
+
+    // Record the round with the fewest new findings as the best restore point.
+    if (newFindings.length < bestFindingsCount) {
+      bestFindingsCount = newFindings.length
+      bestSha = await captureHeadSha(cwd)
+    }
+
+    totalCommits += result.commits
+    previousFindingsCount = nonLowFindings.length
+    onRoundComplete(round, findings)
+
+    // The strategy may declare convergence on its own criteria.
+    if (strategy.shouldConverge?.(findings, round, totalCommits)) {
+      lastFindings = findings
+      status = 'converged'
+      break
+    }
+
+    const convergenceResult = await checkConvergence(cwd, findings, newFindings, nonLowFindings)
+    if (convergenceResult !== null) {
+      lastFindings = convergenceResult.lastFindings
+      status = convergenceResult.status
+      bestSha = convergenceResult.bestSha
+      break
+    }
+
+    lastFindings = newFindings
+  }
+
+  // Post-loop validation retry (if enabled): one extra implementer attempt
+  // when the loop ended non-converged but produced commits.
+  if (opts?.postLoopValidationRetry && totalCommits > 0 && status !== 'converged') {
+    signal?.throwIfAborted()
+    const validationPassed = await runValidation(sandbox.worktreePath, spec)
+    if (validationPassed) {
+      status = 'converged'
+    } else if (roundsCompleted < maxRounds) {
+      const result = await executeRound(
+        spec,
+        sandbox,
+        roundsCompleted + 1,
+        budget,
+        lastFindings,
+        strategy,
+        signal
+      )
+      if (result.commits > 0) {
+        totalCommits += result.commits
+        if (await runValidation(sandbox.worktreePath, spec)) {
+          status = 'converged'
+        }
+      }
+    }
+  }
+
+  // Restore the best-known SHA when the reset policy applies
+  // (shouldResetToBest is defined elsewhere in this module).
+  if (shouldResetToBest(status, bestSha)) {
+    totalCommits = await resetToBestState(sandbox.worktreePath, bestSha, totalCommits)
+  }
+
+  return { lastFindings, roundsCompleted, status, totalCommits }
+}
+
+/**
+ * Captures the current HEAD SHA, returning empty string on failure.
+ * @param cwd - Working directory for git operations.
+ * @returns The HEAD SHA or empty string.
+ */
+async function captureHeadSha (cwd: string): Promise<string> {
+  try {
+    const result = await execFileAsync('git', ['rev-parse', 'HEAD'], { cwd })
+    return result.stdout.trim()
+  } catch {
+    // Failure is non-fatal; callers treat '' as "no SHA available".
+    return ''
+  }
+}
+
+/**
+ * Checks whether the current round converged (no new findings).
+ * @param cwd - Working directory for git operations.
+ * @param allFindings - All findings from the critic.
+ * @param newFindings - Deduplicated new findings.
+ * @param nonLowFindings - Non-LOW-confidence findings.
+ * @returns A ConvergenceResult if the loop should break, or null to continue.
+ */
+async function checkConvergence (
+  cwd: string,
+  allFindings: Finding[],
+  newFindings: Finding[],
+  nonLowFindings: Finding[]
+): Promise<ConvergenceResult | null> {
+  if (newFindings.length !== 0) return null
+
+  // Severity-weighted convergence (OpenHands pattern):
+  // Don't converge if CRITICAL/HIGH findings persist, even if already seen
+  const isBlocking = (f: Finding): boolean =>
+    (f.severity === 'CRITICAL' || f.severity === 'HIGH') && f.confidence !== 'LOW'
+  const blockingFindings = allFindings.filter(isBlocking)
+
+  if (blockingFindings.length > 0) {
+    // Capture current HEAD so post-loop reset is a no-op (code matches findings)
+    const headSha = await captureHeadSha(cwd)
+    return {
+      bestSha: headSha,
+      lastFindings: blockingFindings,
+      status: 'exhausted',
+    }
+  }
+
+  const carriedFindings = nonLowFindings.length > 0 ? nonLowFindings : []
+  return {
+    bestSha: '',
+    lastFindings: carriedFindings,
+    status: 'converged',
+  }
+}
+
+/**
+ * Checks whether the round result warrants an early exit from the loop.
+ * @param spec - The task specification.
+ * @param round - Current round number.
+ * @param result - The round result.
+ * @param totalCommits - Running total of commits before this round.
+ * @returns An object with updated status and totalCommits if early exit, or null to continue.
+ */
+function checkEarlyExit (
+  spec: TaskSpec,
+  round: number,
+  result: RoundResult,
+  totalCommits: number
+): null | { status: LoopStatus; totalCommits: number } {
+  const noCommits = result.commits === 0
+
+  // No work on the very first round: the task is skipped outright.
+  if (noCommits && round === 1) {
+    console.warn(` #${spec.id}: 0 commits on round 1. Skipping.`)
+    return { status: 'skipped', totalCommits }
+  }
+  // Critic failure: keep any commits made this round, exit non-converged.
+  if (result.findings === null) {
+    console.warn(` #${spec.id}: Critic failed twice. Breaking (non-converged).`)
+    return { status: 'failed', totalCommits: totalCommits + result.commits }
+  }
+  // No work on a later round: the implementer has run out of changes.
+  if (noCommits && round > 1) {
+    return { status: 'exhausted', totalCommits }
+  }
+  return null
+}
+
+/**
+ * Quality ratchet: detects a regression — non-LOW findings increased versus
+ * the previous round, checked from round 3 onward — and rolls the worktree
+ * back to the pre-round SHA via `git reset --hard`.
+ * @param ctx - Ratchet context containing spec, round, beforeSha, and cwd.
+ * @param findingsCount - Number of non-LOW findings this round.
+ * @param previousCount - Number of non-LOW findings from the previous round.
+ * @returns True if a regression was detected and rollback performed.
+ */
+async function checkQualityRatchet (
+  ctx: RatchetContext,
+  findingsCount: number,
+  previousCount: number
+): Promise<boolean> {
+  const { beforeSha, cwd, round, spec } = ctx
+  // Rounds 1–2 are grace rounds: no regression check yet.
+  if (round <= 2 || findingsCount <= previousCount) {
+    return false
+  }
+
+  // Validate SHA format before passing to execFileAsync
+  if (!/^[0-9a-f]{40}$/.test(beforeSha)) {
+    // Still reported as a regression (true) even though no reset happened.
+    console.warn(` #${spec.id}: Invalid SHA for rollback, skipping reset.`)
+    return true
+  }
+
+  try {
+    await execFileAsync('git', ['reset', '--hard', beforeSha], { cwd })
+    console.warn(
+      ` #${spec.id} R${String(round)}: Regression detected (${String(previousCount)} → ${String(findingsCount)}). Rolled back.`
+    )
+  } catch {
+    console.warn(` #${spec.id}: Failed to reset to ${beforeSha} after regression.`)
+  }
+
+  return true
+}
+
+/**
+ * Computes a deduplication key for a finding using a context hash of surrounding lines.
+ * @param f - Finding to compute a key for.
+ * @param cwd - Working directory (worktree path) for reading file context.
+ * @param fileCache - Optional cache of file contents keyed by resolved path.
+ * @returns Composite dedup key.
+ */
+async function computeFindingKey (
+  f: Finding,
+  cwd: string,
+  fileCache?: Map<string, string>
+): Promise<string> {
+  // Without a precise location, fall back to a normalized-title hash.
+  if (!f.file || f.line == null) {
+    const normalizedTitle = f.title
+      .toLowerCase()
+      .replace(/[^\w\s]/g, '')
+      .replace(/\s+/g, ' ')
+      .trim()
+    const digest = crypto.createHash('sha256').update(normalizedTitle).digest('hex')
+    const titleHash = digest.slice(0, HASH_PREFIX_LENGTH)
+    const scope = f.file || 'global'
+    return `${scope}::${f.category}::${titleHash}`
+  }
+
+  // With a location, key on a hash of the surrounding source lines so the
+  // key survives line-number shifts between rounds.
+  const contextHash = await hashContextLines(
+    { cwd, file: f.file, line: f.line },
+    CONTEXT_HASH_RADIUS,
+    fileCache
+  )
+  return `${f.file}::${f.category}::${contextHash}`
+}
+
+/**
+ * Filters findings by confidence and deduplicates against previously seen keys.
+ * @param findings - Raw findings from the critic.
+ * @param seenKeys - Set of previously seen dedup keys (mutated: new keys are added).
+ * @param cwd - Working directory for context hashing.
+ * @returns Array of new, non-LOW-confidence findings.
+ */
+async function deduplicateFindings (
+  findings: Finding[],
+  seenKeys: Set<string>,
+  cwd: string
+): Promise<Finding[]> {
+  const fileCache = new Map<string, string>()
+  const keys = await Promise.all(findings.map(f => computeFindingKey(f, cwd, fileCache)))
+  const newFindings: Finding[] = []
+  for (const [i, f] of findings.entries()) {
+    const key = keys[i]
+    if (f.confidence === 'LOW' || seenKeys.has(key)) continue
+    // Add the key immediately so duplicates WITHIN the same batch are also
+    // collapsed (the previous filter-then-add order admitted same-round
+    // duplicates), and drop the O(n²) indexOf lookup it used to map keys.
+    seenKeys.add(key)
+    newFindings.push(f)
+  }
+  return newFindings
+}
+
+/**
+ * Executes a single implement↔critic round.
+ * @param spec - The task specification.
+ * @param sandbox - The sandcastle sandbox instance.
+ * @param round - Current round number (1-indexed).
+ * @param budget - Iteration budget for the implementer.
+ * @param lastFindings - Findings from the previous round to feed to the implementer.
+ * @param strategy - Strategy config for prompt/arg customization.
+ * @param signal - Abort signal for cooperative cancellation.
+ * @returns The round result containing commits, findings, and the pre-round SHA.
+ */
+async function executeRound (
+  spec: TaskSpec,
+  sandbox: SandboxInstance,
+  round: number,
+  budget: number,
+  lastFindings: Finding[],
+  strategy: LoopStrategy,
+  signal?: AbortSignal
+): Promise<RoundResult> {
+  // Capture SHA before implementer runs (for quality ratchet rollback)
+  let beforeSha = ''
+  try {
+    const { stdout } = await execFileAsync('git', ['rev-parse', 'HEAD'], {
+      cwd: sandbox.worktreePath,
+    })
+    beforeSha = stdout.trim()
+  } catch {
+    // Non-fatal: beforeSha stays '' and the caller's reset guard rejects it.
+    console.warn(` #${spec.id}: Failed to capture HEAD SHA before round ${String(round)}.`)
+  }
+
+  // Implementer
+  let implementerResult: Awaited<ReturnType<typeof sandbox.run>>
+  try {
+    implementerResult = await sandbox.run({
+      agent: sandcastle.opencode(AGENT_MODEL),
+      completionSignal: COMPLETION_SIGNAL,
+      idleTimeoutSeconds: AGENT_IDLE_TIMEOUT_S,
+      maxIterations: budget,
+      name: `Implementer #${spec.id} R${String(round)}`,
+      promptArgs: strategy.buildActorArgs(spec, lastFindings),
+      promptFile: strategy.actorPromptFile,
+      signal,
+    })
+  } catch (err: unknown) {
+    // Abort is propagated to the caller; any other failure ends the round
+    // with zero commits and null findings instead of crashing the loop.
+    if (signal?.aborted === true) {
+      throw err
+    }
+    const msg = err instanceof Error ? (err.stack ?? err.message) : String(err)
+    console.error(` #${spec.id} R${String(round)}: Implementer threw: ${msg}`)
+    return { beforeSha, commits: 0, findings: null }
+  }
+
+  // Critic — the nonce scopes the <findings-…> delimiters to this run.
+  const nonce = crypto.randomBytes(4).toString('hex')
+  let findings: Finding[] | null
+  try {
+    findings = await runCritic(sandbox, spec, round, nonce, strategy, signal)
+  } catch (err: unknown) {
+    if (signal?.aborted === true) {
+      throw err
+    }
+    // A failed critic is recorded as null findings; the implementer's commits
+    // are still reported.
+    const msg = err instanceof Error ? err.message : String(err)
+    console.error(` #${spec.id} R${String(round)}: Critic threw: ${msg}`)
+    findings = null
+  }
+
+  return { beforeSha, commits: implementerResult.commits.length, findings }
+}
+
+/**
+ * Hashes a window of lines around the finding for dedup stability.
+ * @param input - Hash input containing cwd, file, and line.
+ * @param radius - Number of lines above/below to include in the context window.
+ * @param fileCache - Optional cache of file contents keyed by resolved path.
+ * @returns Truncated SHA-256 hex digest.
+ */
+async function hashContextLines (
+  input: HashInput,
+  radius: number,
+  fileCache?: Map<string, string>
+): Promise<string> {
+  const { cwd, file, line } = input
+  try {
+    // Resolve symlinks first, then refuse any path that escapes the worktree
+    // root (critic-supplied file paths are untrusted).
+    const fullPath = await realpath(join(cwd, file))
+    if (!fullPath.startsWith((await realpath(cwd)) + sep)) {
+      throw new Error('Path traversal')
+    }
+    let raw: string
+    const cached = fileCache?.get(fullPath)
+    if (cached !== undefined) {
+      raw = cached
+    } else {
+      raw = await readFile(fullPath, 'utf-8')
+      if (fileCache) fileCache.set(fullPath, raw)
+    }
+    const lines = raw.split('\n')
+    // Clamp the 1-based line number into the file's actual range before
+    // taking a symmetric window of `radius` lines on each side.
+    const idx = Math.min(Math.max(0, line - 1), lines.length - 1)
+    const start = Math.max(0, idx - radius)
+    const end = Math.min(lines.length - 1, idx + radius)
+    const window = lines.slice(start, end + 1).join('\n')
+    // Whitespace-normalize so reformatting alone does not change the key.
+    const normalized = window.replace(/\s+/g, ' ').trim()
+    return crypto
+      .createHash('sha256')
+      .update(`${file}:${String(line)}:${normalized}`)
+      .digest('hex')
+      .slice(0, HASH_PREFIX_LENGTH)
+  } catch {
+    // Any failure (unreadable file, traversal attempt) degrades to a stable
+    // fallback key instead of throwing — dedup still works, just less precisely.
+    return crypto
+      .createHash('sha256')
+      .update(`${file}:${String(line)}:fallback`)
+      .digest('hex')
+      .slice(0, HASH_PREFIX_LENGTH)
+  }
+}
+
+/**
+ * Parses findings from agent stdout using nonce-tagged delimiters.
+ * @param stdout - Agent stdout to parse findings from.
+ * @param nonce - Unique tag identifier for this run.
+ * @returns Parsed findings array or null on parse failure.
+ */
+function parseFindings (stdout: string, nonce: string): Finding[] | null {
+  // Only lowercase-hex nonces are accepted, so the nonce is safe to embed in
+  // the RegExp below without escaping.
+  if (!/^[0-9a-f]+$/.test(nonce)) return null
+  const tagPattern = new RegExp(`<findings-${nonce}>([\\s\\S]*?)<\\/findings-${nonce}>`, 'g')
+  const matches = [...stdout.matchAll(tagPattern)]
+  if (matches.length === 0) return null
+  // Find last non-trivial match
+  for (let i = matches.length - 1; i >= 0; i--) {
+    const raw = matches[i]?.[1]?.trim() ?? ''
+    if (raw.length < 2) continue
+    // Strip an optional markdown code fence (```json … ```) around the JSON.
+    const cleaned = raw.replace(/^```(?:json)?\s*\n?/g, '').replace(/\n?```\s*$/g, '')
+    try {
+      // parseFindingsSafe discards invalid entries rather than rejecting all.
+      return parseFindingsSafe(JSON.parse(cleaned))
+    } catch {
+      continue
+    }
+  }
+  return null
+}
+
+/**
+ * Resets the worktree to the best intermediate state and recounts commits.
+ * @param cwd - Working directory for git operations.
+ * @param bestSha - The SHA to reset to.
+ * @param currentCommits - Current total commits (fallback if recount fails).
+ * @returns Updated total commit count.
+ */
+async function resetToBestState (
+  cwd: string,
+  bestSha: string,
+  currentCommits: number
+): Promise<number> {
+  // Only full 40-char hex SHAs are accepted; rejects '' and anything that
+  // could be interpreted as a git option or ref expression.
+  if (!/^[0-9a-f]{40}$/.test(bestSha)) return currentCommits
+  try {
+    await execFileAsync('git', ['reset', '--hard', bestSha], { cwd })
+    // NOTE(review): assumes the base branch is literally named 'main' —
+    // confirm, or derive the base ref from configuration.
+    const { stdout } = await execFileAsync('git', ['rev-list', '--count', 'main..HEAD'], { cwd })
+    return parseInt(stdout.trim(), 10) || 0
+  } catch {
+    return currentCommits
+  }
+}
+
+/**
+ * Resolves loop options, applying defaults for missing values.
+ * @param opts - Optional loop options.
+ * @returns Resolved options with all fields populated.
+ */
+function resolveLoopOptions (opts: RefinementLoopOptions | undefined): ResolvedLoopOptions {
+  return {
+    budget: opts?.iterationBudget ?? ITERATION_BUDGET_PER_ROUND,
+    maxRounds: opts?.maxRounds ?? MAX_CRITIC_ROUNDS,
+    // Default to a no-op callback so callers never need to null-check it.
+    onRoundComplete: opts?.onRoundComplete ?? (() => undefined),
+  }
+}
+
+/**
+ * Runs the critic agent, retrying once on parse failure.
+ * @param sandbox - The sandcastle sandbox instance.
+ * @param spec - The task specification.
+ * @param round - Current round number.
+ * @param nonce - Unique nonce for parsing.
+ * @param strategy - Strategy config for prompt/arg customization.
+ * @param signal - Abort signal for cooperative cancellation.
+ * @returns Parsed findings or null if both attempts failed.
+ */
+async function runCritic (
+  sandbox: SandboxInstance,
+  spec: TaskSpec,
+  round: number,
+  nonce: string,
+  strategy: LoopStrategy,
+  signal?: AbortSignal
+): Promise<Finding[] | null> {
+  let critic = await sandbox.run({
+    agent: sandcastle.opencode(AGENT_MODEL),
+    completionSignal: COMPLETION_SIGNAL,
+    idleTimeoutSeconds: AGENT_IDLE_TIMEOUT_S,
+    maxIterations: 1,
+    name: `Critic #${spec.id} R${String(round)}`,
+    promptArgs: strategy.buildCriticArgs(spec, nonce),
+    promptFile: strategy.criticPromptFile,
+    signal,
+  })
+
+  let findings = parseFindings(critic.stdout, nonce)
+
+  // Exactly one retry with identical arguments (same nonce): parse failures
+  // are usually output-formatting flakes, not prompt problems.
+  if (findings === null) {
+    console.warn(` #${spec.id}: Critic parse failed. Retrying.`)
+    critic = await sandbox.run({
+      agent: sandcastle.opencode(AGENT_MODEL),
+      completionSignal: COMPLETION_SIGNAL,
+      idleTimeoutSeconds: AGENT_IDLE_TIMEOUT_S,
+      maxIterations: 1,
+      name: `Critic #${spec.id} R${String(round)} retry`,
+      promptArgs: strategy.buildCriticArgs(spec, nonce),
+      promptFile: strategy.criticPromptFile,
+      signal,
+    })
+    findings = parseFindings(critic.stdout, nonce)
+  }
+
+  return findings
+}
+
+/**
+ * Returns true if the best-state reset should be applied after the loop.
+ * A converged loop keeps its final state; otherwise reset only when a full
+ * 40-char hex SHA was actually captured.
+ * @param status - Final loop status.
+ * @param bestSha - Best intermediate SHA (empty string if none captured).
+ * @returns True if reset should be applied.
+ */
+function shouldResetToBest (status: LoopStatus, bestSha: string): boolean {
+  return status !== 'converged' && /^[0-9a-f]{40}$/.test(bestSha)
+}
--- /dev/null
+# Critic Agent
+
+Analyze the implementation on branch `{{BRANCH}}` and produce structured findings.
+
+## Task
+
+Run `git diff main...{{BRANCH}}` to see all changes. Examine the diff carefully. For each issue found, produce a structured finding.
+
+Read `AGENTS.md` and `CONTRIBUTING.md` for the project's coding standards.
+
+## Output Format
+
+Output your findings as JSON wrapped in nonce-tagged delimiters. Use EXACTLY this tag format:
+
+```text
+<findings-{{NONCE}}>[...]</findings-{{NONCE}}>
+```
+
+Each finding must have this structure:
+
+```json
+{
+ "file": "path/to/file.ts",
+ "line": 42,
+ "title": "short description of the issue",
+ "severity": "CRITICAL|HIGH|MEDIUM|LOW",
+ "category": "security|logic|performance|architecture|style",
+ "confidence": "HIGH|MEDIUM|LOW",
+ "description": "detailed explanation of why this is a problem",
+ "suggestion": "how to fix it"
+}
+```
+
+If no issues are found, output:
+
+```text
+<findings-{{NONCE}}>[]</findings-{{NONCE}}>
+```
+
+## Rules
+
+- Report at most 5 findings, preferring HIGH and CRITICAL severity; include MEDIUM/LOW findings only when no higher-severity issues exist.
+- If more than 5 HIGH/CRITICAL issues exist, report the top 5 and add a summary note in the last finding's description.
+- Do NOT modify any files. Do NOT commit. Do NOT push.
+- Only report issues in the CHANGED code (not pre-existing issues).
+- Use HIGH confidence only when you've verified the issue by reading the relevant code.
+- Use MEDIUM confidence for pattern-based detection.
+- Use LOW confidence for style preferences or uncertain issues.
+- Focus on: logic errors, missing edge cases, security issues, type safety violations, test gaps.
+- Do NOT report formatting issues (prettier handles those).
+
+## Known Design Decisions (do not flag)
+
+- Mid-loop validation convergence bypasses critic (ARCS pattern — deterministic tests > subjective review).
+- Agent runs use `idleTimeoutSeconds` and `completionSignal` for cooperative cancellation.
+- Content-addressed dedup hash includes line number (collision reduction tradeoff, bounded by hard cap).
+
+## Completion
+
+After outputting the findings, output:
+
+```text
+<promise>COMPLETE</promise>
+```
--- /dev/null
+# Implement Agent
+
+Implement issue **#{{TASK_ID}}** ("{{ISSUE_TITLE}}") on branch `{{BRANCH}}`.
+
+## Issue Details
+
+{{ISSUE_BODY}}
+
+## Review Findings
+
+{{FINDINGS}}
+
+## Exploration
+
+Explore the repo to understand the architecture before coding. Pay attention to:
+
+- Files related to the issue
+- Test files touching relevant modules
+- Existing patterns in similar code
+
+Read `AGENTS.md` and `CONTRIBUTING.md` for project conventions.
+
+## Implementation
+
+1. If review findings are provided above, cross-validate each one against the code. Fix findings you agree with. Ignore findings that are incorrect or not applicable.
+
+2. If no findings are provided, implement the issue from scratch following existing patterns:
+ - Strict TypeScript, no `any`/`@ts-ignore`
+ - Use existing error classes (BaseError, OCPPError)
+ - Follow existing naming conventions (camelCase functions, PascalCase classes/types)
+ - Tests use Node.js native test runner (`node:test` + `node:assert`)
+
+3. Before every commit, run the quality gates for the affected sub-project(s):
+
+ ```bash
+ pnpm format && pnpm typecheck && pnpm lint && pnpm build && pnpm test
+ ```
+
+ If changes affect `ui/web`:
+
+ ```bash
+ cd ui/web && pnpm format && pnpm typecheck && pnpm lint && pnpm build && pnpm test:coverage
+ ```
+
+ If changes affect `ui/cli`:
+
+ ```bash
+ cd ui/cli && pnpm format && pnpm typecheck && pnpm lint && pnpm build && pnpm test
+ ```
+
+4. Commit with conventional commits:
+ - `fix: <description>` — bug fix
+ - `feat: <description>` — new feature
+ - `refactor: <description>` — restructuring
+ - `chore: <description>` — tooling/config
+
+5. Push the branch:
+
+ ```bash
+ git push -u origin {{BRANCH}}
+ ```
+
+## Rules
+
+- One logical change per commit.
+- Tests must pass before pushing. Zero type errors, zero test failures.
+- Do not modify unrelated files.
+- Do not bump version numbers.
+- Push BEFORE signaling completion.
+
+## Completion
+
+When validation passes and the branch is pushed, output:
+
+```text
+<promise>COMPLETE</promise>
+```
--- /dev/null
+import type { StrategyConfig } from '../../types.js'
+
+import { GIT_TIMEOUT_MS } from '../../constants.js'
+import { attemptRebase, buildPrArgs, pushBranch, runValidation } from '../../finalizer.js'
+import { execFileAsync, toErrorMessage } from '../../utils.js'
+
+/** Strategy for implementing GitHub issues: prompts, arg builders, and PR finalization. */
+export const implementStrategy: StrategyConfig = {
+  actorPromptFile: './.sandcastle/strategies/implement/implement-prompt.md',
+
+  buildActorArgs: (spec, findings) => ({
+    BRANCH: spec.branch,
+    // Empty string (not '[]') when there are no findings, so the prompt's
+    // findings section reads as absent.
+    FINDINGS: findings.length > 0 ? JSON.stringify(findings, null, 2) : '',
+    ISSUE_BODY: spec.body,
+    ISSUE_TITLE: spec.title,
+    TASK_ID: spec.id,
+  }),
+
+  buildCriticArgs: (spec, nonce) => ({
+    BRANCH: spec.branch,
+    NONCE: nonce,
+  }),
+
+  criticPromptFile: './.sandcastle/strategies/implement/critic-prompt.md',
+
+  finalize: async (spec, loopResult, sandbox) => {
+    const cwd = sandbox.worktreePath
+    let validationPassed = await runValidation(cwd, spec)
+
+    // Rebase can change code, so a previously green validation is re-run.
+    // A previously failed validation is not retried — it already gates the PR.
+    const rebaseSucceeded = await attemptRebase(cwd)
+    if (rebaseSucceeded && validationPassed) {
+      if (!(await runValidation(cwd, spec))) {
+        validationPassed = false
+      }
+    }
+
+    const pushSucceeded = await pushBranch(cwd, spec, rebaseSucceeded)
+    if (!pushSucceeded) {
+      console.error(` #${spec.id}: Push failed; cannot create PR without remote branch.`)
+      return { success: false }
+    }
+
+    // NOTE(review): buildPrArgs presumably derives draft status from the
+    // validation/rebase outcomes — confirm in finalizer.ts.
+    const { isDraft, prArgs } = buildPrArgs(spec, loopResult, validationPassed, rebaseSucceeded)
+
+    let prCreated = false
+    try {
+      await execFileAsync('gh', prArgs, {
+        cwd,
+        maxBuffer: 8 * 1024 * 1024,
+        timeout: GIT_TIMEOUT_MS,
+      })
+      console.log(` #${spec.id}: PR created${isDraft ? ' (draft)' : ''}.`)
+      prCreated = true
+    } catch (err: unknown) {
+      console.error(` #${spec.id}: PR creation failed: ${toErrorMessage(err)}`)
+    }
+
+    return { success: prCreated }
+  },
+
+  isWorkComplete: result => result.success,
+}
--- /dev/null
+import * as sandcastle from '@ai-hero/sandcastle'
+import { docker } from '@ai-hero/sandcastle/sandboxes/docker'
+import { z } from 'zod'
+
+import type { TaskSpec } from './types.js'
+
+import {
+ AGENT_IDLE_TIMEOUT_S,
+ COMPLETION_SIGNAL,
+ DOCKER_MOUNTS,
+ GIT_TIMEOUT_MS,
+ MAX_ISSUES_FETCH,
+ MAX_PRS_FETCH,
+ MAX_TITLE_LENGTH,
+ PLANNER_MODEL,
+ TASK_TIMEOUT_MS,
+} from './constants.js'
+import { execFileAsync, toErrorMessage } from './utils.js'
+
+/** Zod schema for one raw GitHub issue as returned by `gh issue list --json`. */
+const RawIssueSchema = z.object({
+  // gh emits null for empty issue bodies; normalize to the empty string.
+  body: z
+    .string()
+    .nullable()
+    .transform(b => b ?? ''),
+  labels: z.array(z.object({ name: z.string() })),
+  number: z.number(),
+  title: z.string(),
+})
+/** Zod schema for the full `gh issue list` JSON array. */
+const RawIssuesSchema = z.array(RawIssueSchema)
+
+/** Configuration for the GitHub issue task source. */
+export interface GithubIssueSourceConfig {
+  /** Git branch prefix for issue branches. */
+  branchPrefix: string
+  /** Docker image name for the sandbox. */
+  dockerImage: string
+  /** GitHub issue label to filter by. */
+  label: string
+  /** Maximum planner retries. Defaults to 5 when omitted. */
+  maxRetries?: number
+}
+
+/** Interface for task discovery sources. */
+export interface TaskSource {
+  /** Discovers tasks to work on. Resolves to an empty array when there is no work. */
+  discover(): Promise<TaskSpec[]>
+}
+
+/**
+ * Task source that discovers work from GitHub issues via planner agent.
+ */
+export class GithubIssueSource implements TaskSource {
+  // Validates planner-proposed branch names: `<prefix>-<digits>-<slug>`.
+  private readonly branchPattern: RegExp
+  private readonly branchPrefix: string
+  private readonly dockerImage: string
+  // branchPrefix with regex metacharacters escaped, safe for interpolation.
+  private readonly escapedPrefix: string
+  private readonly label: string
+  private readonly maxRetries: number
+
+  /**
+   * @param config - Configuration for the GitHub issue source.
+   */
+  constructor (config: GithubIssueSourceConfig) {
+    this.branchPrefix = config.branchPrefix
+    this.dockerImage = config.dockerImage
+    this.label = config.label
+    this.maxRetries = config.maxRetries ?? 5
+
+    // Escape regex metacharacters so the prefix can be embedded in patterns.
+    this.escapedPrefix = this.branchPrefix.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
+    this.branchPattern = new RegExp(`^${this.escapedPrefix}-\\d+-[\\w-]+$`)
+  }
+
+  /**
+   * Discovers tasks by fetching GitHub issues, running the planner, and validating the plan.
+   * @returns Array of task specifications to implement.
+   */
+  async discover (): Promise<TaskSpec[]> {
+    const issuesJson = await this.fetchAndSanitizeIssues()
+
+    if (issuesJson.length === 0) {
+      console.log("No issues with label '%s'. Exiting.", this.label)
+      return []
+    }
+
+    // Skip issues that already have an open PR on a matching branch.
+    const coveredIssues = await this.fetchIssuesWithOpenPRs()
+    const actionableIssues = issuesJson.filter(issue => !coveredIssues.has(issue.number))
+
+    if (actionableIssues.length === 0) {
+      console.log('All sandcastle issues already have open PRs. Exiting.')
+      return []
+    }
+
+    // Retry loop: the planner's output is untrusted and may be malformed.
+    for (let attempt = 1; attempt <= this.maxRetries; attempt++) {
+      console.log(`\n=== Planner attempt ${String(attempt)}/${String(this.maxRetries)} ===\n`)
+
+      let plan: Awaited<ReturnType<typeof sandcastle.run>>
+      try {
+        plan = await sandcastle.run({
+          agent: sandcastle.opencode(PLANNER_MODEL),
+          completionSignal: COMPLETION_SIGNAL,
+          idleTimeoutSeconds: AGENT_IDLE_TIMEOUT_S,
+          maxIterations: 1,
+          name: 'Planner',
+          promptArgs: {
+            BRANCH_PREFIX: this.branchPrefix,
+            ISSUES_JSON: JSON.stringify(actionableIssues, null, 2),
+          },
+          promptFile: './.sandcastle/plan-prompt.md',
+          sandbox: docker({ imageName: this.dockerImage, mounts: [...DOCKER_MOUNTS] }),
+          signal: AbortSignal.timeout(TASK_TIMEOUT_MS),
+        })
+      } catch {
+        console.error('Planner timed out or failed. Retrying.')
+        continue
+      }
+
+      // Use the LAST <plan> tag: earlier ones may be drafts the agent revised.
+      const planMatches = [...plan.stdout.matchAll(/<plan>([\s\S]*?)<\/plan>/g)]
+      const planMatch = planMatches.at(-1)
+      if (!planMatch) {
+        console.error('Planner did not produce a <plan> tag. Retrying.')
+        continue
+      }
+
+      // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition -- regex group always captures but TS types array access as possibly undefined
+      const planContent = planMatch[1] ?? ''
+      const tasks = this.validatePlan(planContent, actionableIssues)
+      if (tasks === null) {
+        continue
+      }
+
+      if (tasks.length === 0) {
+        console.log('No actionable issues. Exiting.')
+        return []
+      }
+
+      console.log(`Plan: ${String(tasks.length)} issue(s) to work on:`)
+      for (const task of tasks) {
+        console.log(` #${task.id}: ${task.title} → ${task.branch}`)
+      }
+
+      return tasks
+    }
+
+    // Exhausted retries: signal failure via exit code but do not throw, so the
+    // caller can still finish cleanly with zero tasks.
+    console.warn('Planner failed to produce a valid plan after all retries.')
+    process.exitCode = 1
+    return []
+  }
+
+  /**
+   * Fetches open issues with the configured label via the gh CLI and sanitizes
+   * title/body text before it is embedded in prompts.
+   * @returns Sanitized issue records.
+   * @throws Error when the gh CLI fails or returns unparseable JSON.
+   */
+  private async fetchAndSanitizeIssues (): Promise<
+    {
+      body: string
+      labels: string[]
+      number: number
+      title: string
+    }[]
+  > {
+    let rawIssuesJson: string
+    try {
+      const { stdout } = await execFileAsync(
+        'gh',
+        [
+          'issue',
+          'list',
+          '--state',
+          'open',
+          '--json',
+          'number,title,labels,body',
+          '--limit',
+          String(MAX_ISSUES_FETCH),
+          '--label',
+          this.label,
+        ],
+        { encoding: 'utf-8', maxBuffer: 8 * 1024 * 1024, timeout: GIT_TIMEOUT_MS }
+      )
+      rawIssuesJson = stdout
+    } catch (err: unknown) {
+      throw new Error(
+        `Failed to fetch issues: ${toErrorMessage(err)}. Ensure gh is installed and authenticated.`,
+        { cause: err }
+      )
+    }
+
+    let rawIssues: z.infer<typeof RawIssuesSchema>
+    try {
+      rawIssues = RawIssuesSchema.parse(JSON.parse(rawIssuesJson))
+    } catch (err: unknown) {
+      throw new Error(
+        `Failed to parse issues JSON: ${toErrorMessage(err)}. Unexpected format from gh CLI.`,
+        { cause: err }
+      )
+    }
+
+    // Strip agent-control tags from user-authored text (prompt-injection guard).
+    return rawIssues.map(issue => ({
+      body: sanitizeForPrompt(issue.body),
+      labels: issue.labels.map(label => label.name),
+      number: issue.number,
+      title: sanitizeForPrompt(issue.title),
+    }))
+  }
+
+  /**
+   * Finds issue numbers already covered by an open PR on a prefix-matching branch.
+   * Fails open: on any error, returns an empty set so all issues are processed.
+   * @returns Set of covered issue numbers.
+   */
+  private async fetchIssuesWithOpenPRs (): Promise<Set<number>> {
+    try {
+      const { stdout } = await execFileAsync(
+        'gh',
+        [
+          'pr',
+          'list',
+          '--state',
+          'open',
+          '--json',
+          'headRefName',
+          '--limit',
+          String(MAX_PRS_FETCH),
+        ],
+        { encoding: 'utf-8', maxBuffer: 8 * 1024 * 1024, timeout: GIT_TIMEOUT_MS }
+      )
+      const prs = z.array(z.object({ headRefName: z.string() })).parse(JSON.parse(stdout))
+      const issueNumbers = new Set<number>()
+      // Extract the issue number embedded in `<prefix>-<number>-…` branch names.
+      const pattern = new RegExp(`^${this.escapedPrefix}-(\\d+)-`)
+      for (const pr of prs) {
+        const match = pattern.exec(pr.headRefName)
+        if (match) {
+          issueNumbers.add(Number(match[1]))
+        }
+      }
+      return issueNumbers
+    } catch (err: unknown) {
+      console.warn(`Failed to check open PRs: ${toErrorMessage(err)}. Processing all issues.`)
+      return new Set()
+    }
+  }
+
+  /**
+   * Validates the planner's JSON plan against structural rules and the known
+   * issue list. Entries with bad ids, branches, or titles are dropped.
+   * @param planContent - Raw JSON text extracted from the <plan> tag.
+   * @param issuesJson - Sanitized issues the plan entries must refer to.
+   * @returns Validated task specs, or null when the plan is unusable (retry).
+   */
+  private validatePlan (
+    planContent: string,
+    issuesJson: { body: string; labels: string[]; number: number; title: string }[]
+  ): null | TaskSpec[] {
+    try {
+      const PlanSchema = z.object({ issues: z.array(z.unknown()) })
+      const parseResult = PlanSchema.safeParse(JSON.parse(planContent))
+      if (!parseResult.success) {
+        console.error('Planner output missing valid issues array. Retrying.')
+        return null
+      }
+      const parsed = parseResult.data
+      const validated = parsed.issues.filter(
+        (entry): entry is { branch: string; id: string; title: string } => {
+          if (typeof entry !== 'object' || entry === null) return false
+          const item = entry as Record<string, unknown>
+          if (typeof item.id !== 'string' || !/^\d+$/.test(item.id)) return false
+          if (typeof item.branch !== 'string' || !this.branchPattern.test(item.branch)) return false
+          if (typeof item.title !== 'string') return false
+          if (item.title.length > MAX_TITLE_LENGTH) return false
+          // eslint-disable-next-line no-control-regex
+          if (/[\x00-\x1f]/.test(item.title)) return false
+          return true
+        }
+      )
+
+      // Join validated plan entries back to the trusted issue records; entries
+      // referencing unknown issue numbers are dropped.
+      const issueMap = new Map(issuesJson.map(issue => [String(issue.number), issue]))
+      return validated
+        .map(entry => {
+          const source = issueMap.get(entry.id)
+          if (!source) return null
+          // NOTE(review): the spread carries any EXTRA keys the planner emitted
+          // into the task object (body/labels are overridden from the trusted
+          // issue, but unexpected keys survive) — consider picking
+          // id/branch/title explicitly.
+          return {
+            ...entry,
+            body: source.body,
+            labels: source.labels,
+          }
+        })
+        .filter((entry): entry is NonNullable<typeof entry> => entry !== null)
+    } catch (err: unknown) {
+      console.error(`Planner produced invalid JSON: ${toErrorMessage(err)}. Retrying.`)
+      return null
+    }
+  }
+}
+
+/**
+ * Strips agent-control tags from text to reduce prompt-injection risk.
+ * Only the listed tag names are removed; other markup passes through.
+ * @param text - Raw text to sanitize.
+ * @returns Text with plan/findings/promise tags removed.
+ */
+function sanitizeForPrompt (text: string): string {
+  // NFKC folds compatibility forms (e.g. full-width '＜') to ASCII first, so
+  // look-alike characters cannot slip a tag past the regex below.
+  const normalized = text.normalize('NFKC')
+  return normalized.replace(
+    /<\/?(?:plan|findings|promise|system|code|instructions|implement|review|tool_call)[^>]*>/gi,
+    ''
+  )
+}
--- /dev/null
+import type * as sandcastle from '@ai-hero/sandcastle'
+
+import { z } from 'zod'
+
+/** Zod schema for a single critic finding. */
+export const FindingSchema = z.object({
+  category: z.enum(['security', 'logic', 'performance', 'architecture', 'style']),
+  confidence: z.enum(['HIGH', 'MEDIUM', 'LOW']),
+  description: z.string(),
+  file: z.string(),
+  // Optional: findings without a concrete location omit the line number.
+  line: z.number().optional(),
+  severity: z.enum(['CRITICAL', 'HIGH', 'MEDIUM', 'LOW']),
+  suggestion: z.string().optional(),
+  title: z.string(),
+})
+
+/**
+ * Configuration for post-loop finalization (PR creation, push, etc.).
+ */
+// eslint-disable-next-line @typescript-eslint/consistent-type-definitions
+export type FinalizationConfig = {
+  /** Finalizes the task after the loop completes. Returns success indicator. */
+  finalize: (
+    spec: TaskSpec,
+    loopResult: LoopResult,
+    sandbox: SandboxInstance
+  ) => Promise<{ success: boolean }>
+  /** Determines if the finalization result counts as completed work. */
+  isWorkComplete: (finalizeResult: { success: boolean }) => boolean
+}
+
+/** A single critic finding parsed from agent output. */
+export type Finding = z.infer<typeof FindingSchema>
+
+/** Result returned by the refinement loop. */
+export interface LoopResult {
+  /** Outstanding findings from the last round. */
+  lastFindings: Finding[]
+  /** Number of rounds completed. */
+  roundsCompleted: number
+  /** Termination status. */
+  status: LoopStatus
+  /** Total commits produced across all rounds. */
+  totalCommits: number
+}
+
+/**
+ * Outcome status of the refinement loop:
+ * 'converged' (no remaining findings), 'exhausted' (round budget spent),
+ * 'failed' (unrecoverable error), 'skipped' (loop never ran).
+ */
+export type LoopStatus = 'converged' | 'exhausted' | 'failed' | 'skipped'
+
+/**
+ * Configuration for the refinement loop strategy.
+ * Defines prompts, argument builders, and optional convergence logic.
+ */
+// eslint-disable-next-line @typescript-eslint/consistent-type-definitions
+export type LoopStrategy = {
+  /** Path to the actor (implementer) prompt file. */
+  actorPromptFile: string
+  /** Builds promptArgs for the actor run from task spec and previous findings. */
+  buildActorArgs: (spec: TaskSpec, findings: Finding[]) => Record<string, string>
+  /** Builds promptArgs for the critic run from task spec and nonce. */
+  buildCriticArgs: (spec: TaskSpec, nonce: string) => Record<string, string>
+  /** Path to the critic prompt file. */
+  criticPromptFile: string
+  /** Optional custom convergence check. When omitted, default loop logic applies. */
+  shouldConverge?: (findings: Finding[], round: number, totalCommits: number) => boolean
+}
+
+/** Type alias for a sandcastle sandbox instance. */
+export type SandboxInstance = Awaited<ReturnType<typeof sandcastle.createSandbox>>
+
+/** Combined strategy (backward compat alias). */
+export type StrategyConfig = FinalizationConfig & LoopStrategy
+
+/** Specification for a task to be implemented. */
+export interface TaskSpec {
+  /** Sanitized issue body text. */
+  body: string
+  /** Git branch name for this task. */
+  branch: string
+  /** Task identifier (e.g. GitHub issue number as string). */
+  id: string
+  /** Label names associated with the task. */
+  labels: string[]
+  /** Task title. */
+  title: string
+}
+
+/**
+ * Parses a findings array with partial recovery — invalid entries are discarded.
+ * @param data - Raw parsed JSON value to validate as a findings array.
+ * @returns Array of valid findings (may be empty).
+ */
+export function parseFindingsSafe (data: unknown): Finding[] {
+  // Non-array input (including null/objects) yields no findings, not an error.
+  if (!Array.isArray(data)) return []
+  // safeParse per entry so one malformed finding does not discard the rest.
+  return data
+    .map(entry => FindingSchema.safeParse(entry))
+    .filter((r): r is z.ZodSafeParseSuccess<Finding> => r.success)
+    .map(r => r.data)
+}
--- /dev/null
+import { execFile } from 'node:child_process'
+import util from 'node:util'
+
+/** Async execFile — does not block the event loop. Same error shape as execFileSync. */
+export const execFileAsync = util.promisify(execFile)
+
+/**
+ * Converts an unknown thrown value to a human-readable error message.
+ * @param err - The caught value (may be an `Error` or any other type).
+ * @returns The `message` property if `err` is an `Error`, otherwise `String(err)`.
+ */
+export function toErrorMessage (err: unknown): string {
+ return err instanceof Error ? err.message : String(err)
+}
'varh',
'rfid',
'workerset',
+ 'worktree',
+ 'dedup',
+ 'unpushed',
'logform',
'mnemonist',
'poolifier',
'idtoken',
'issuerkeyhash',
'issuernamehash',
- // OCPP SRPC (Simple Remote Procedure Call) message types
+ // OCPP SRPC
'SRPC',
'CALLRESULT',
'CALLERROR',
"clean:node_modules": "pnpm exec rimraf node_modules",
"typecheck": "tsc --noEmit --skipLibCheck",
"circular-deps": "skott --no-trackTypeOnlyDependencies --showCircularDependencies --exitCodeOnCircularDependencies 0 --displayMode=raw --fileExtensions=.ts --cwd=src",
- "lint": "cross-env TIMING=1 eslint --cache src tests scripts ./*.js ./*.ts",
- "lint:fix": "cross-env TIMING=1 eslint --cache --fix src tests scripts ./*.js ./*.ts",
- "format": "prettier --cache --write .; eslint --cache --fix src tests scripts ./*.js ./*.ts",
+ "lint": "cross-env TIMING=1 eslint --cache src .sandcastle tests scripts ./*.js ./*.ts",
+ "lint:fix": "cross-env TIMING=1 eslint --cache --fix src .sandcastle tests scripts ./*.js ./*.ts",
+ "format": "prettier --cache --write .; eslint --cache --fix src .sandcastle tests scripts ./*.js ./*.ts",
"test": "cross-env NODE_ENV=test node --import tsx --test --test-force-exit 'tests/**/*.test.ts'",
"test:debug": "cross-env NODE_ENV=test node --import tsx --test --inspect 'tests/**/*.test.ts'",
"test:coverage": "mkdir -p coverage && cross-env NODE_ENV=test node --import tsx --test --test-force-exit --experimental-test-coverage --test-coverage-include='src/**/*.ts' --test-reporter=lcov --test-reporter-destination=coverage/lcov.info 'tests/**/*.test.ts'",
"clinic:clean": "clinic clean",
+ "sandcastle": "tsx .sandcastle/main.ts",
"sea": "pnpm exec rimraf ./dist/evse-simulator ./dist/evse-simulator.blob && node --experimental-sea-config sea-config.json && pnpm dlx ncp $(volta which node || n which lts || nvm which node || command -v node) ./dist/evse-simulator && pnpm dlx postject ./dist/evse-simulator NODE_SEA_BLOB ./dist/evse-simulator.blob --sentinel-fuse NODE_SEA_FUSE_fce680ab2cc467b6e072b8b5df1996b2 && pnpm exec rimraf ./dist/evse-simulator.blob"
},
"dependencies": {
"utf-8-validate": "^6.0.6"
},
"devDependencies": {
+ "@ai-hero/sandcastle": "^0.5.8",
"@commitlint/cli": "^20.5.3",
"@commitlint/config-conventional": "^20.5.3",
"@cspell/eslint-plugin": "^10.0.0",
specifier: ^4.4.3
version: 4.4.3
devDependencies:
+ '@ai-hero/sandcastle':
+ specifier: ^0.5.8
+ version: 0.5.8(@effect/cluster@0.57.0(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/workflow@0.17.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/typeclass@0.39.0(effect@3.21.0))(bufferutil@4.1.0)(utf-8-validate@6.0.6)
'@commitlint/cli':
specifier: ^20.5.3
version: 20.5.3(@types/node@24.12.2)(conventional-commits-parser@6.4.0)(typescript@6.0.3)
engines: {node: '>=8.5.0'}
hasBin: true
+ '@ai-hero/sandcastle@0.5.8':
+ resolution: {integrity: sha512-dOBQ5MzzO8zMJ4R3tI9SKjlezq+3JdKZDi3fYyIPoaqbdQJWpw4w429BKRry9m2+uO94Ur7W48J0wEPQKd6ybg==}
+ hasBin: true
+ peerDependencies:
+ '@daytona/sdk': ^0.164.0
+ '@vercel/sandbox': '>=1.0.0'
+ peerDependenciesMeta:
+ '@daytona/sdk':
+ optional: true
+ '@vercel/sandbox':
+ optional: true
+
'@arr/every@1.0.1':
resolution: {integrity: sha512-UQFQ6SgyJ6LX42W8rHCs8KVc0JS0tzVL9ct4XYedJukskYVWTo49tNiMEK9C2HTyarbNiT/RVIRSY82vH+6sTg==}
engines: {node: '>=4'}
resolution: {integrity: sha512-ctxtJ/eA+t+6q2++vj5j7FYX3nRu311q1wfYH3xjlLOsczhlhxAg2FWNUXhpGvAw3BWo1xBcvOV6/YLc2r5FJw==}
hasBin: true
+ '@clack/core@1.3.0':
+ resolution: {integrity: sha512-xJPHpAmEQUBrXSLx0gF+q5K/IyihXpsHZcha+jB+tyahsKRK3Dxo4D0coZDewHo12NhiuzC3dTtMPbm53GEAAA==}
+ engines: {node: '>= 20.12.0'}
+
+ '@clack/prompts@1.3.0':
+ resolution: {integrity: sha512-GgcWwRCs/xPtaqlMy8qRhPnZf9vlWcWZNHAitnVQ3yk7JmSralSiq5q07yaffYE8SogtDm7zFeKccx1QNVARpw==}
+ engines: {node: '>= 20.12.0'}
+
'@clinic/bubbleprof@10.0.0':
resolution: {integrity: sha512-7Y0uYO4cz7+Y1advV891uMJLXbZMIriLsV1IHSSVJxmf8tEFm8vogKi/GdYyi4CY0D5heuqOFze/WNrv+U3LRw==}
'@dabh/diagnostics@2.0.8':
resolution: {integrity: sha512-R4MSXTVnuMzGD7bzHdW2ZhhdPC/igELENcq5IjEverBvq5hn1SXCWcsi6eSsdWP0/Ur+SItRRjAktmdoX/8R/Q==}
+ '@effect/cli@0.74.0':
+ resolution: {integrity: sha512-vjMJWJWQ2zMRVcZJj2ZGr7vFgVoX6lsCuqAsNiN2ndWZAidkEJ6g1Euuib2V2nTXeWvRyd3FY2Fw2UvX48Uenw==}
+ peerDependencies:
+ '@effect/platform': ^0.95.0
+ '@effect/printer': ^0.48.0
+ '@effect/printer-ansi': ^0.48.0
+ effect: ^3.20.0
+
+ '@effect/cluster@0.57.0':
+ resolution: {integrity: sha512-VjZoZ4hmgDb0GtGjktypTk/nArA3ntsXU2O9vOBzDjJLRKVBt7IS0/cllHrHwK5Jxkfz86B2k+Prw4/+nrLFlw==}
+ peerDependencies:
+ '@effect/platform': ^0.95.0
+ '@effect/rpc': ^0.74.0
+ '@effect/sql': ^0.50.0
+ '@effect/workflow': ^0.17.0
+ effect: ^3.20.0
+
+ '@effect/experimental@0.59.0':
+ resolution: {integrity: sha512-XqdBpIH5VLlkRxKlyPYp8TAYUeBPjoWYgtrxDebDab14K4kkrpkHk0ZsmmOiQUZ+LY5veRn/PBSogXor9gtPqg==}
+ peerDependencies:
+ '@effect/platform': ^0.95.0
+ effect: ^3.20.0
+ ioredis: ^5
+ lmdb: ^3
+ peerDependenciesMeta:
+ ioredis:
+ optional: true
+ lmdb:
+ optional: true
+
+ '@effect/platform-node-shared@0.58.0':
+ resolution: {integrity: sha512-kl8ejYM1xvjRlk+4/R1YzB6A3E3hVWY4jIfEl21uu4S43V0S15gHvcur7iMIEXfJTX1a25EKF+Buef+Yv5wZZQ==}
+ peerDependencies:
+ '@effect/cluster': ^0.57.0
+ '@effect/platform': ^0.95.0
+ '@effect/rpc': ^0.74.0
+ '@effect/sql': ^0.50.0
+ effect: ^3.20.0
+
+ '@effect/platform-node@0.105.0':
+ resolution: {integrity: sha512-6JxOLqLJMm+m1ZQavIb75S7YJ4fRvrDaYUZ4rqv2IMq5ZK9HVaU/LeejE9tip9zAG9yNM/6mn183iiIV/xge5w==}
+ peerDependencies:
+ '@effect/cluster': ^0.57.0
+ '@effect/platform': ^0.95.0
+ '@effect/rpc': ^0.74.0
+ '@effect/sql': ^0.50.0
+ effect: ^3.20.0
+
+ '@effect/platform@0.95.0':
+ resolution: {integrity: sha512-WDlRiWRSWlmhCPq09bvAofK0qr5vM4yNklXjoJdZHmugKRRTpN/Okn3ODnjgM/Kb/4hjMrRyrsUeH/Brieq7KA==}
+ peerDependencies:
+ effect: ^3.20.0
+
+ '@effect/printer-ansi@0.48.0':
+ resolution: {integrity: sha512-CzQ5kiomjR9DZ6LPfKAaWmys6JU65c2Q/VQcTKRK4RfaDWeTAehpAVmgOIyKSPkcr9XBhjo2cJx4xyZ4E5nN7g==}
+ peerDependencies:
+ '@effect/typeclass': ^0.39.0
+ effect: ^3.20.0
+
+ '@effect/printer@0.48.0':
+ resolution: {integrity: sha512-f/+QVyqACuLkoB+HDDX2XxloslmgMDL+C6ecHBV0cB0zJzJmLCOybwOkRcCI2xJ/DWHEIpoRyvq+Bfdza0AIrA==}
+ peerDependencies:
+ '@effect/typeclass': ^0.39.0
+ effect: ^3.20.0
+
+ '@effect/rpc@0.74.0':
+ resolution: {integrity: sha512-EV/cHQqJxLtY+RTlPlVQU1KyTzml1wFne+Sh91RacGRRVh6uTm4UdhRh9TNtbYHD4rM9yD3T6zqUgKr0AH8MvQ==}
+ peerDependencies:
+ '@effect/platform': ^0.95.0
+ effect: ^3.20.0
+
+ '@effect/sql@0.50.0':
+ resolution: {integrity: sha512-sOTzsC+ICASgSmX1RITYo6ut7ZbkX+hMG6YagJEyhtptxco9MgSflpF/ix/L92haJ+YTS5Zur/Dm2bDNfVes4w==}
+ peerDependencies:
+ '@effect/experimental': ^0.59.0
+ '@effect/platform': ^0.95.0
+ effect: ^3.20.0
+
+ '@effect/typeclass@0.39.0':
+ resolution: {integrity: sha512-V8qGpm4BTMS4pW9e7aCdxC0sy/TYsdxmnpWtokkNWnggZ6kvh1Psp3AfUuuZLyNmUk4T+lYB/ItEsga/+hryig==}
+ peerDependencies:
+ effect: ^3.20.0
+
+ '@effect/workflow@0.17.0':
+ resolution: {integrity: sha512-JiayvFTTMrp36P0cVFcgu6Nb7ZJxQv+FRqs3DPORkVAcCZlWOKa3KyuYebN3qZbRsmLzS7cxuC8BAeMuqb+WaQ==}
+ peerDependencies:
+ '@effect/experimental': ^0.59.0
+ '@effect/platform': ^0.95.0
+ '@effect/rpc': ^0.74.0
+ effect: ^3.20.0
+
'@emnapi/core@1.10.0':
resolution: {integrity: sha512-yq6OkJ4p82CAfPl0u9mQebQHKPJkY7WrIuk205cTYnYe+k2Z8YBh11FrbRG/H6ihirqcacOgl2BIO8oyMQLeXw==}
'@mongodb-js/saslprep@1.4.8':
resolution: {integrity: sha512-kpjr2jy2w71w0oqAMI8oibBmiF9lXxWkEQs5gMkW4hVE48bsqINGLxnCSYW62ck/NHXJQpQEfA9WlJ1sY0eqBg==}
+ '@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3':
+ resolution: {integrity: sha512-QZHtlVgbAdy2zAqNA9Gu1UpIuI8Xvsd1v8ic6B2pZmeFnFcMWiPLfWXh7TVw4eGEZ/C9TH281KwhVoeQUKbyjw==}
+ cpu: [arm64]
+ os: [darwin]
+
+ '@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3':
+ resolution: {integrity: sha512-mdzd3AVzYKuUmiWOQ8GNhl64/IoFGol569zNRdkLReh6LRLHOXxU4U8eq0JwaD8iFHdVGqSy4IjFL4reoWCDFw==}
+ cpu: [x64]
+ os: [darwin]
+
+ '@msgpackr-extract/msgpackr-extract-linux-arm64@3.0.3':
+ resolution: {integrity: sha512-YxQL+ax0XqBJDZiKimS2XQaf+2wDGVa1enVRGzEvLLVFeqa5kx2bWbtcSXgsxjQB7nRqqIGFIcLteF/sHeVtQg==}
+ cpu: [arm64]
+ os: [linux]
+
+ '@msgpackr-extract/msgpackr-extract-linux-arm@3.0.3':
+ resolution: {integrity: sha512-fg0uy/dG/nZEXfYilKoRe7yALaNmHoYeIoJuJ7KJ+YyU2bvY8vPv27f7UKhGRpY6euFYqEVhxCFZgAUNQBM3nw==}
+ cpu: [arm]
+ os: [linux]
+
+ '@msgpackr-extract/msgpackr-extract-linux-x64@3.0.3':
+ resolution: {integrity: sha512-cvwNfbP07pKUfq1uH+S6KJ7dT9K8WOE4ZiAcsrSes+UY55E/0jLYc+vq+DO7jlmqRb5zAggExKm0H7O/CBaesg==}
+ cpu: [x64]
+ os: [linux]
+
+ '@msgpackr-extract/msgpackr-extract-win32-x64@3.0.3':
+ resolution: {integrity: sha512-x0fWaQtYp4E6sktbsdAqnehxDgEc/VwM7uLsRCYWaiGu0ykYdZPiS8zCWdnjHwyiumousxfBm4SO31eXqwEZhQ==}
+ cpu: [x64]
+ os: [win32]
+
'@napi-rs/wasm-runtime@1.1.4':
resolution: {integrity: sha512-3NQNNgA1YSlJb/kMH1ildASP9HW7/7kYnRI2szWJaofaS1hWmbGI4H+d3+22aGzXXN9IJ+n+GiFVcGipJP18ow==}
peerDependencies:
fast-safe-stringify@2.1.1:
resolution: {integrity: sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==}
+ fast-string-truncated-width@3.0.3:
+ resolution: {integrity: sha512-0jjjIEL6+0jag3l2XWWizO64/aZVtpiGE3t0Zgqxv0DPuxiMjvB3M24fCyhZUO4KomJQPj3LTSUnDP3GpdwC0g==}
+
+ fast-string-width@3.0.2:
+ resolution: {integrity: sha512-gX8LrtNEI5hq8DVUfRQMbr5lpaS4nMIWV+7XEbXk2b8kiQIizgnlr12B4dA3ZEx3308ze0O4Q1R+cHts8kyUJg==}
+
fast-uri@3.1.0:
resolution: {integrity: sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==}
+ fast-wrap-ansi@0.2.0:
+ resolution: {integrity: sha512-rLV8JHxTyhVmFYhBJuMujcrHqOT2cnO5Zxj37qROj23CP39GXubJRBUFF0z8KFK77Uc0SukZUf7JZhsVEQ6n8w==}
+
fastq@1.20.1:
resolution: {integrity: sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==}
resolution: {integrity: sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==}
engines: {node: '>= 18.0.0'}
+ find-my-way-ts@0.1.6:
+ resolution: {integrity: sha512-a85L9ZoXtNAey3Y6Z+eBWW658kO/MwR7zIafkIUPUMf3isZG0NCs2pjW2wtjxAKuJPxMAsHUIP4ZPGv0o5gyTA==}
+
find-up@3.0.0:
resolution: {integrity: sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==}
engines: {node: '>=6'}
resolution: {integrity: sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==}
engines: {node: '>=10'}
+ ini@4.1.3:
+ resolution: {integrity: sha512-X7rqawQBvfdjS10YU1y1YVreA3SsLrW9dX2CewP2EbBJM4ypVNLDkO5y04gejPwKIY9lR+7r9gn3rFPt/kmWFg==}
+ engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0}
+
ini@6.0.0:
resolution: {integrity: sha512-IBTdIkzZNOpqm7q3dRqJvMaldXjDHWkEDfrwGEQTs5eaQMWV+djAhR+wahyNNMAa+qpbDUhBMVt4ZKNwpPm7xQ==}
engines: {node: ^20.17.0 || >=22.9.0}
tedious:
optional: true
+ kubernetes-types@1.30.0:
+ resolution: {integrity: sha512-Dew1okvhM/SQcIa2rcgujNndZwU8VnSapDgdxlYoB84ZlpAD43U6KLAFqYo17ykSFGHNPrg0qry0bP+GJd9v7Q==}
+
kuler@2.0.0:
resolution: {integrity: sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==}
resolution: {integrity: sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==}
engines: {node: '>=18'}
+ mime@3.0.0:
+ resolution: {integrity: sha512-jSCU7/VB1loIWBZe14aEYHU/+1UMEHoaO7qxCOVJOw9GgH72VAWppxNcjU+x9a2k3GSIBXNKxXQFqRvvZ7vr3A==}
+ engines: {node: '>=10.0.0'}
+ hasBin: true
+
mimic-fn@1.2.0:
resolution: {integrity: sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==}
engines: {node: '>=4'}
ms@2.1.3:
resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==}
+ msgpackr-extract@3.0.3:
+ resolution: {integrity: sha512-P0efT1C9jIdVRefqjzOQ9Xml57zpOXnIuS+csaB4MdZbTdmGDLo8XhzBG1N7aO11gKDDkJvBLULeFTo46wwreA==}
+ hasBin: true
+
+ msgpackr@1.11.12:
+ resolution: {integrity: sha512-RBdJ1Un7yGlXWajrkxcSa93nvQ0w4zBf60c0yYv7YtBelP8H2FA7XsfBbMHtXKXUMUxH7zV3Zuozh+kUQWhHvg==}
+
muggle-string@0.4.1:
resolution: {integrity: sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==}
resolution: {integrity: sha512-ypMKuglUrZUD99Tk2bUQ+xNQj43lPEfAeX2o9cTteAmShXy2VHDJpuwu1o0xqoKCt9jLVAvwyFKdLTPXKAfJyA==}
engines: {node: '>=10'}
+ multipasta@0.2.7:
+ resolution: {integrity: sha512-KPA58d68KgGil15oDqXjkUBEBYc00XvbPj5/X+dyzeo/lWm9Nc25pQRlf1D+gv4OpK7NM0J1odrbu9JNNGvynA==}
+
multistream@2.1.1:
resolution: {integrity: sha512-xasv76hl6nr1dEy3lPvy7Ej7K/Lx3O/FCvwge8PeVJpciPPoNCbaANcNiBug3IpdvTveZUcAV0DJzdnUDMesNQ==}
encoding:
optional: true
+ node-gyp-build-optional-packages@5.2.2:
+ resolution: {integrity: sha512-s+w+rBWnpTMwSFbaE0UXsRlg7hU4FjekKU4eyAih5T8nJuNZT1nNsskXpxmeqSK9UzkBl6UgRlnKc8hz8IEqOw==}
+ hasBin: true
+
node-gyp-build@4.8.4:
resolution: {integrity: sha512-LA4ZjwlnUblHVgq0oBF3Jl/6h/Nvs5fzBLwdEF4nuxnFdsfajde4WfxtJr3CaiH+F6ewcIB/q4jQ4UzPyid+CQ==}
hasBin: true
resolution: {integrity: sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ==}
engines: {node: '>= 10'}
+ sisteransi@1.0.5:
+ resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==}
+
skott-webapp@2.3.0:
resolution: {integrity: sha512-nmt+ilxGOqX5zN2WDKv1Y5gLfxy/lceHgbB8HM/ym/Cm8572ypD1s2S+pcN+jOw13xqoavHJPonX1WT2QvkpDg==}
resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==}
engines: {node: '>=0.6'}
+ toml@3.0.0:
+ resolution: {integrity: sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w==}
+
totalist@3.0.1:
resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==}
engines: {node: '>=6'}
uuid-parse@1.1.0:
resolution: {integrity: sha512-OdmXxA8rDsQ7YpNVbKSJkNzTw2I+S5WsbMDnCtIWSQaosNAcWtFuI/YK1TjzUI6nbkgiqEyh8gWngfcv8Asd9A==}
+ uuid@11.1.1:
+ resolution: {integrity: sha512-vIYxrBCC/N/K+Js3qSN88go7kIfNPssr/hHCesKCQNAjmgvYS2oqr69kIufEG+O4+PfezOH4EbIeHCfFov8ZgQ==}
+ hasBin: true
+
uuid@8.3.2:
resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==}
deprecated: uuid@10 and below is no longer supported. For ESM codebases, update to uuid@latest. For CommonJS codebases, use uuid@11 (but be aware this version will likely be deprecated in 2028).
transitivePeerDependencies:
- supports-color
+ '@ai-hero/sandcastle@0.5.8(@effect/cluster@0.57.0(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/workflow@0.17.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/typeclass@0.39.0(effect@3.21.0))(bufferutil@4.1.0)(utf-8-validate@6.0.6)':
+ dependencies:
+ '@clack/prompts': 1.3.0
+ '@effect/cli': 0.74.0(@effect/platform@0.95.0(effect@3.21.0))(@effect/printer-ansi@0.48.0(@effect/typeclass@0.39.0(effect@3.21.0))(effect@3.21.0))(@effect/printer@0.48.0(@effect/typeclass@0.39.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0)
+ '@effect/platform': 0.95.0(effect@3.21.0)
+ '@effect/platform-node': 0.105.0(@effect/cluster@0.57.0(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/workflow@0.17.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(bufferutil@4.1.0)(effect@3.21.0)(utf-8-validate@6.0.6)
+ '@effect/printer': 0.48.0(@effect/typeclass@0.39.0(effect@3.21.0))(effect@3.21.0)
+ '@effect/printer-ansi': 0.48.0(@effect/typeclass@0.39.0(effect@3.21.0))(effect@3.21.0)
+ effect: 3.21.0
+ transitivePeerDependencies:
+ - '@effect/cluster'
+ - '@effect/rpc'
+ - '@effect/sql'
+ - '@effect/typeclass'
+ - bufferutil
+ - utf-8-validate
+
'@arr/every@1.0.1': {}
'@asamuzakjp/css-color@5.1.11':
dependencies:
css-tree: 3.2.1
+ '@clack/core@1.3.0':
+ dependencies:
+ fast-wrap-ansi: 0.2.0
+ sisteransi: 1.0.5
+
+ '@clack/prompts@1.3.0':
+ dependencies:
+ '@clack/core': 1.3.0
+ fast-string-width: 3.0.2
+ fast-wrap-ansi: 0.2.0
+ sisteransi: 1.0.5
+
'@clinic/bubbleprof@10.0.0':
dependencies:
'@clinic/clinic-common': 7.1.0
enabled: 2.0.0
kuler: 2.0.0
+ '@effect/cli@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(@effect/printer-ansi@0.48.0(@effect/typeclass@0.39.0(effect@3.21.0))(effect@3.21.0))(@effect/printer@0.48.0(@effect/typeclass@0.39.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0)':
+ dependencies:
+ '@effect/platform': 0.95.0(effect@3.21.0)
+ '@effect/printer': 0.48.0(@effect/typeclass@0.39.0(effect@3.21.0))(effect@3.21.0)
+ '@effect/printer-ansi': 0.48.0(@effect/typeclass@0.39.0(effect@3.21.0))(effect@3.21.0)
+ effect: 3.21.0
+ ini: 4.1.3
+ toml: 3.0.0
+ yaml: 2.8.3
+
+ '@effect/cluster@0.57.0(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/workflow@0.17.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(effect@3.21.0)':
+ dependencies:
+ '@effect/platform': 0.95.0(effect@3.21.0)
+ '@effect/rpc': 0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0)
+ '@effect/sql': 0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0)
+ '@effect/workflow': 0.17.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0)
+ effect: 3.21.0
+ kubernetes-types: 1.30.0
+
+ '@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0)':
+ dependencies:
+ '@effect/platform': 0.95.0(effect@3.21.0)
+ effect: 3.21.0
+ uuid: 11.1.1
+
+ '@effect/platform-node-shared@0.58.0(@effect/cluster@0.57.0(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/workflow@0.17.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(bufferutil@4.1.0)(effect@3.21.0)(utf-8-validate@6.0.6)':
+ dependencies:
+ '@effect/cluster': 0.57.0(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/workflow@0.17.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(effect@3.21.0)
+ '@effect/platform': 0.95.0(effect@3.21.0)
+ '@effect/rpc': 0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0)
+ '@effect/sql': 0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0)
+ '@parcel/watcher': 2.5.6
+ effect: 3.21.0
+ multipasta: 0.2.7
+ ws: 8.20.0(bufferutil@4.1.0)(utf-8-validate@6.0.6)
+ transitivePeerDependencies:
+ - bufferutil
+ - utf-8-validate
+
+ '@effect/platform-node@0.105.0(@effect/cluster@0.57.0(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/workflow@0.17.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(bufferutil@4.1.0)(effect@3.21.0)(utf-8-validate@6.0.6)':
+ dependencies:
+ '@effect/cluster': 0.57.0(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/workflow@0.17.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(effect@3.21.0)
+ '@effect/platform': 0.95.0(effect@3.21.0)
+ '@effect/platform-node-shared': 0.58.0(@effect/cluster@0.57.0(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/workflow@0.17.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(bufferutil@4.1.0)(effect@3.21.0)(utf-8-validate@6.0.6)
+ '@effect/rpc': 0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0)
+ '@effect/sql': 0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0)
+ effect: 3.21.0
+ mime: 3.0.0
+ undici: 7.25.0
+ ws: 8.20.0(bufferutil@4.1.0)(utf-8-validate@6.0.6)
+ transitivePeerDependencies:
+ - bufferutil
+ - utf-8-validate
+
+ '@effect/platform@0.95.0(effect@3.21.0)':
+ dependencies:
+ effect: 3.21.0
+ find-my-way-ts: 0.1.6
+ msgpackr: 1.11.12
+ multipasta: 0.2.7
+
+ '@effect/printer-ansi@0.48.0(@effect/typeclass@0.39.0(effect@3.21.0))(effect@3.21.0)':
+ dependencies:
+ '@effect/printer': 0.48.0(@effect/typeclass@0.39.0(effect@3.21.0))(effect@3.21.0)
+ '@effect/typeclass': 0.39.0(effect@3.21.0)
+ effect: 3.21.0
+
+ '@effect/printer@0.48.0(@effect/typeclass@0.39.0(effect@3.21.0))(effect@3.21.0)':
+ dependencies:
+ '@effect/typeclass': 0.39.0(effect@3.21.0)
+ effect: 3.21.0
+
+ '@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0)':
+ dependencies:
+ '@effect/platform': 0.95.0(effect@3.21.0)
+ effect: 3.21.0
+ msgpackr: 1.11.12
+
+ '@effect/sql@0.50.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0)':
+ dependencies:
+ '@effect/experimental': 0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0)
+ '@effect/platform': 0.95.0(effect@3.21.0)
+ effect: 3.21.0
+ uuid: 11.1.1
+
+ '@effect/typeclass@0.39.0(effect@3.21.0)':
+ dependencies:
+ effect: 3.21.0
+
+ '@effect/workflow@0.17.0(@effect/experimental@0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(@effect/platform@0.95.0(effect@3.21.0))(@effect/rpc@0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0))(effect@3.21.0)':
+ dependencies:
+ '@effect/experimental': 0.59.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0)
+ '@effect/platform': 0.95.0(effect@3.21.0)
+ '@effect/rpc': 0.74.0(@effect/platform@0.95.0(effect@3.21.0))(effect@3.21.0)
+ effect: 3.21.0
+
'@emnapi/core@1.10.0':
dependencies:
'@emnapi/wasi-threads': 1.2.1
dependencies:
sparse-bitfield: 3.0.3
+ '@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3':
+ optional: true
+
+ '@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3':
+ optional: true
+
+ '@msgpackr-extract/msgpackr-extract-linux-arm64@3.0.3':
+ optional: true
+
+ '@msgpackr-extract/msgpackr-extract-linux-arm@3.0.3':
+ optional: true
+
+ '@msgpackr-extract/msgpackr-extract-linux-x64@3.0.3':
+ optional: true
+
+ '@msgpackr-extract/msgpackr-extract-win32-x64@3.0.3':
+ optional: true
+
'@napi-rs/wasm-runtime@1.1.4(@emnapi/core@1.10.0)(@emnapi/runtime@1.10.0)':
dependencies:
'@emnapi/core': 1.10.0
fast-safe-stringify@2.1.1: {}
+ fast-string-truncated-width@3.0.3: {}
+
+ fast-string-width@3.0.2:
+ dependencies:
+ fast-string-truncated-width: 3.0.3
+
fast-uri@3.1.0: {}
+ fast-wrap-ansi@0.2.0:
+ dependencies:
+ fast-string-width: 3.0.2
+
fastq@1.20.1:
dependencies:
reusify: 1.1.0
transitivePeerDependencies:
- supports-color
+ find-my-way-ts@0.1.6: {}
+
find-up@3.0.0:
dependencies:
locate-path: 3.0.0
ini@2.0.0: {}
+ ini@4.1.3: {}
+
ini@6.0.0: {}
inline-source-map@0.6.3:
transitivePeerDependencies:
- supports-color
+ kubernetes-types@1.30.0: {}
+
kuler@2.0.0: {}
labeled-stream-splicer@2.0.2:
dependencies:
mime-db: 1.54.0
+ mime@3.0.0: {}
+
mimic-fn@1.2.0: {}
mimic-fn@2.1.0: {}
ms@2.1.3: {}
+ msgpackr-extract@3.0.3:
+ dependencies:
+ node-gyp-build-optional-packages: 5.2.2
+ optionalDependencies:
+ '@msgpackr-extract/msgpackr-extract-darwin-arm64': 3.0.3
+ '@msgpackr-extract/msgpackr-extract-darwin-x64': 3.0.3
+ '@msgpackr-extract/msgpackr-extract-linux-arm': 3.0.3
+ '@msgpackr-extract/msgpackr-extract-linux-arm64': 3.0.3
+ '@msgpackr-extract/msgpackr-extract-linux-x64': 3.0.3
+ '@msgpackr-extract/msgpackr-extract-win32-x64': 3.0.3
+ optional: true
+
+ msgpackr@1.11.12:
+ optionalDependencies:
+ msgpackr-extract: 3.0.3
+
muggle-string@0.4.1: {}
multimatch@5.0.0:
arrify: 2.0.1
minimatch: 3.1.5
+ multipasta@0.2.7: {}
+
multistream@2.1.1:
dependencies:
inherits: 2.0.4
dependencies:
whatwg-url: 5.0.0
+ node-gyp-build-optional-packages@5.2.2:
+ dependencies:
+ detect-libc: 2.1.2
+ optional: true
+
node-gyp-build@4.8.4:
optional: true
mrmime: 2.0.1
totalist: 3.0.1
+ sisteransi@1.0.5: {}
+
skott-webapp@2.3.0:
dependencies:
digraph-js: 2.2.4
toidentifier@1.0.1: {}
+ toml@3.0.0: {}
+
totalist@3.0.1: {}
tough-cookie@6.0.1:
uuid-parse@1.1.0: {}
+ uuid@11.1.1: {}
+
uuid@8.3.2: {}
v8-compile-cache-lib@3.0.1: {}
"forceConsistentCasingInFileNames": true,
"noImplicitOverride": true
},
- "include": ["*.ts", "src/**/*.ts", "tests/**/*.ts"]
+ "include": ["*.ts", "src/**/*.ts", ".sandcastle/**/*.ts", "tests/**/*.ts"]
}