diff --git a/.codeforge/config/rules/spec-workflow.md b/.codeforge/config/rules/spec-workflow.md deleted file mode 100644 index 7dbd799..0000000 --- a/.codeforge/config/rules/spec-workflow.md +++ /dev/null @@ -1,48 +0,0 @@ -# Specification Workflow - -Every project uses `.specs/` as the specification directory. These rules are mandatory. - -## Rules - -1. Every non-trivial feature MUST have a spec before implementation begins. - Use `/spec-new` to create one from the standard template. -2. Every implementation MUST end with an as-built spec update. - Use `/spec-update` to perform the update. -3. Specs should aim for ~200 lines. Split by feature boundary when - significantly longer into separate specs in the domain folder. - Completeness matters more than hitting a number. -4. Specs MUST reference file paths, never reproduce source code, - schemas, or type definitions inline. The code is the source of truth. -5. Each spec file MUST be independently loadable — include domain, - status, last-updated, intent, key files, and acceptance criteria. -6. Before starting a new milestone, MUST run `/spec-check` to audit spec health. -7. To bootstrap `.specs/` for a project that doesn't have one, use `/spec-init`. -8. New specs start with `**Approval:** draft` and all requirements tagged - `[assumed]`. Use `/spec-refine` to validate assumptions with the user - and upgrade to `[user-approved]` before implementation begins. -9. A spec-reminder advisory hook fires at Stop when code was modified but - specs weren't updated. Use `/spec-update` to close the loop. -10. For approved specs, use `/spec-build` to orchestrate the full - implementation lifecycle — plan, build, review, and close the spec - in one pass. Phase 5 handles as-built closure, so a separate - `/spec-update` is not needed afterward. -11. Use `/spec-review` for standalone implementation verification against - a spec — after manual implementation, post-change regression checks, - or pre-release audits. 
It reads code, verifies requirements and - acceptance criteria, and recommends `/spec-update` when done. - -## Acceptance Criteria Markers - -Acceptance criteria use three states during implementation: - -| Marker | Meaning | -|--------|---------| -| `[ ]` | Not started | -| `[~]` | Implemented, not yet verified — code written, tests not confirmed | -| `[x]` | Verified — tests pass, behavior confirmed | - -`/spec-build` Phase 3 flips `[ ]` to `[~]` as criteria are addressed. -Phase 4 upgrades `[~]` to `[x]` after verification. `/spec-update` -treats any remaining `[~]` as `[ ]` if they were never verified. - -See the system prompt's `` section for the full template, directory structure, and as-built workflow. diff --git a/.devcontainer/plugins/devs-marketplace/plugins/agent-system/scripts/inject-cwd.py b/.devcontainer/plugins/devs-marketplace/plugins/agent-system/scripts/inject-cwd.py deleted file mode 100644 index 2b67e5a..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/agent-system/scripts/inject-cwd.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python3 -""" -CWD injector — SubagentStart hook that tells subagents the working directory. - -Reads hook input from stdin (JSON), extracts cwd, and returns it as -additionalContext so every subagent knows where to scope its work. - -Always exits 0 (advisory, never blocking). -""" - -import json -import os -import sys - - -def main(): - cwd = os.getcwd() - try: - input_data = json.load(sys.stdin) - cwd = input_data.get("cwd", cwd) - except (json.JSONDecodeError, ValueError): - pass - - json.dump( - { - "hookSpecificOutput": { - "hookEventName": "SubagentStart", - "additionalContext": ( - f"Working Directory: {cwd} — restrict all file operations to " - f"this directory unless explicitly instructed otherwise." 
- ), - } - }, - sys.stdout, - ) - sys.exit(0) - - -if __name__ == "__main__": - main() diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/.claude-plugin/plugin.json b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/.claude-plugin/plugin.json deleted file mode 100644 index ad3e896..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/.claude-plugin/plugin.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "spec-workflow", - "description": "Specification lifecycle management: creation, refinement, building, reviewing, updating, and auditing", - "author": { - "name": "AnExiledDev" - } -} diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/README.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/README.md deleted file mode 100644 index deadbd1..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/README.md +++ /dev/null @@ -1,192 +0,0 @@ -# spec-workflow - -Claude Code plugin that manages the full specification lifecycle: creating, refining, building, reviewing, updating, and auditing feature specs. Includes an advisory hook that reminds about spec updates when code changes but specs don't. - -## What It Does - -Two capabilities: - -1. **Spec lifecycle skills** — 8 skills that cover the complete journey from bootstrapping a `.specs/` directory to closing out an as-built spec after implementation. - -2. **Spec reminder hook** — A `Stop` hook that fires when source code was modified but no `.specs/` files were updated, advising Claude to run `/spec-update`. 
- -### Skill Catalog - -| Skill | Slash Command | Purpose | -|-------|---------------|---------| -| spec-init | `/spec-init` | Bootstrap `.specs/` directory with BACKLOG.md, MILESTONES.md, ROADMAP.md | -| spec-new | `/spec-new` | Create a new feature spec from EARS template | -| spec-refine | `/spec-refine` | Validate assumptions with user, upgrade requirements to `[user-approved]` | -| spec-build | `/spec-build` | Orchestrate full implementation: plan, build, review, close | -| spec-check | `/spec-check` | Audit all specs for health issues | -| spec-review | `/spec-review` | Verify implementation against a spec | -| spec-update | `/spec-update` | As-built closure: update spec to match implementation | -| specification-writing | `/skill specification-writing` | Domain knowledge for writing high-quality specs | - -### Spec Lifecycle - -``` -/spec-init Bootstrap .specs/ directory - | -/spec-new Create feature spec (draft, [assumed] requirements) - | -/spec-refine Validate with user -> [user-approved] requirements - | -/spec-build 5-phase implementation orchestration: - | Phase 1: Discovery - | Phase 2: Planning - | Phase 3: Building ([ ] -> [~]) - | Phase 4: Review ([~] -> [x]) - | Phase 5: Closure (as-built update) - | -/spec-review Standalone verification (post-change audits) - | -/spec-update Manual as-built closure - | -/spec-check Health audit across all specs -``` - -### Acceptance Criteria Markers - -| Marker | Meaning | -|--------|---------| -| `[ ]` | Not started | -| `[~]` | Implemented, not yet verified | -| `[x]` | Verified — tests pass, behavior confirmed | - -### Approval and Requirement Tags - -- `**Approval:** draft` — Spec is in draft, not ready for implementation -- `**Approval:** user-approved` — Spec reviewed and approved by user -- `[assumed]` — Requirement inferred by Claude, needs validation -- `[user-approved]` — Requirement explicitly approved by user - -## How It Works - -### Hook Lifecycle - -``` -Claude stops responding (Stop event) - | - 
+-> Stop fires - | - +-> spec-reminder.py - | - +-> .specs/ directory exists? - | | - | +-> No -> Silent exit (no output) - | +-> Yes -> Continue - | - +-> Source code modified this session? - | | - | +-> No -> Silent exit - | +-> Yes -> Continue - | - +-> .specs/ files also modified? - | - +-> Yes -> Silent exit (already updated) - +-> No -> Inject advisory: "Run /spec-update" -``` - -### Monitored Source Directories - -The spec reminder watches for changes in these directories: - -`src/`, `lib/`, `app/`, `pkg/`, `internal/`, `cmd/`, `tests/`, `api/`, `frontend/`, `backend/`, `packages/`, `services/`, `components/`, `pages/`, `routes/` - -### Exit Code Behavior - -| Exit Code | Meaning | -|-----------|---------| -| 0 | Advisory injected (or silent — no action needed) | - -The hook never blocks operations. - -### Error Handling - -| Scenario | Behavior | -|----------|----------| -| No `.specs/` directory | Silent exit | -| Not a git repository | Silent exit | -| JSON parse failure | Silent exit | - -### Timeouts - -| Hook | Timeout | -|------|---------| -| Spec reminder (Stop) | 8s | - -## Installation - -### CodeForge DevContainer - -Pre-installed and activated automatically — no setup needed. - -### From GitHub - -Use this plugin in any Claude Code setup: - -1. Clone the [CodeForge](https://github.com/AnExiledDev/CodeForge) repository: - - ```bash - git clone https://github.com/AnExiledDev/CodeForge.git - ``` - -2. Enable the plugin in your `.claude/settings.json`: - - ```json - { - "enabledPlugins": { - "spec-workflow@/.devcontainer/plugins/devs-marketplace": true - } - } - ``` - - Replace `` with the absolute path to your CodeForge clone. 
- -## Plugin Structure - -``` -spec-workflow/ -+-- .claude-plugin/ -| +-- plugin.json # Plugin metadata -+-- hooks/ -| +-- hooks.json # Stop hook registration -+-- scripts/ -| +-- spec-reminder.py # Spec update advisory (Stop) -+-- skills/ -| +-- spec-init/ # Bootstrap .specs/ directory -| | +-- SKILL.md -| | +-- references/ -| | +-- backlog-template.md -| | +-- milestones-template.md -| | +-- roadmap-template.md -| +-- spec-new/ # Create new feature spec -| | +-- SKILL.md -| | +-- references/ -| | +-- template.md -| +-- spec-refine/ # Validate assumptions with user -| | +-- SKILL.md -| +-- spec-build/ # Full implementation orchestration -| | +-- SKILL.md -| | +-- references/ -| | +-- review-checklist.md -| +-- spec-check/ # Spec health audit -| | +-- SKILL.md -| +-- spec-review/ # Implementation verification -| | +-- SKILL.md -| +-- spec-update/ # As-built closure -| | +-- SKILL.md -| +-- specification-writing/ # Domain knowledge skill -| +-- SKILL.md -| +-- references/ -| +-- criteria-patterns.md -| +-- ears-templates.md -+-- README.md # This file -``` - -## Requirements - -- Python 3.11+ -- Git (for detecting file changes) -- Claude Code with plugin hook support (skills) diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-build/SKILL.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-build/SKILL.md deleted file mode 100644 index 5dd809d..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-build/SKILL.md +++ /dev/null @@ -1,356 +0,0 @@ ---- -name: spec-build -description: >- - Orchestrates full implementation of an approved specification through - 5 phases: discovery, planning, building, review, and closure. USE WHEN - the user asks to "implement the spec", "build from spec", "start building - the feature", "implement this feature", "build what the spec describes", - "run spec-build", or works with phased implementation workflows. 
- DO NOT USE for creating, refining, or updating specs — use spec-new, - spec-refine, or spec-update instead. -version: 0.2.0 -argument-hint: "[spec-path]" ---- - -# Spec-Driven Implementation - -## Mental Model - -An approved spec is a contract — it defines exactly what to build, what to skip, and how to verify success. This skill takes a `user-approved` spec and orchestrates the full implementation lifecycle: plan the work, build it, review everything against the spec, and close the loop. No separate `/spec-update` run is needed afterward — Phase 5 performs full as-built closure. - -The workflow is five phases executed in strict order. Each phase has a clear gate before the next can begin. - -``` -/spec-new -> /spec-refine -> /spec-build - | - +-> Phase 1: Discovery & Gate Check - +-> Phase 2: Implementation Planning - +-> Phase 3: Implementation - +-> Phase 4: Comprehensive Review - +-> Phase 5: Spec Closure -``` - -> **Note:** Phase 4's review functionality is also available standalone via `/spec-review` for features implemented outside of `/spec-build`. - ---- - -## Acceptance Criteria Markers - -During implementation, acceptance criteria use three states: - -| Marker | Meaning | -|--------|---------| -| `[ ]` | Not started | -| `[~]` | Implemented, not yet verified — code written, tests not confirmed | -| `[x]` | Verified — tests pass, behavior confirmed | - -Phase 3 flips `[ ]` to `[~]` as criteria are addressed in code. Phase 4 upgrades `[~]` to `[x]` after verification. This convention is the only spec edit during active implementation. - ---- - -## CRITICAL: Planning Before Implementation - -Phase 2 generates an implementation plan. This plan MUST be created and approved before any code changes begin in Phase 3. Use `EnterPlanMode` to create the plan. The plan MUST include Phases 3, 4, and 5 instructions verbatim — these phases run after plan approval, and the instructions must be preserved so they execute correctly even across context boundaries. 
- -Do NOT skip planning. Do NOT begin writing code during Phase 2. The plan is a contract with the user — get approval first. - ---- - -## Complexity Assessment - -Before planning, assess the spec's complexity to determine whether team spawning would benefit the implementation. - -**Complexity indicators** — if two or more apply, the spec is complex: -- 8+ functional requirements (FR-*) -- Cross-layer work (backend + frontend + tests spanning different frameworks) -- 3+ independent workstreams that could run in parallel -- Multiple services or modules affected - -### When Complexity is High: Recommend Team Spawning - -Decompose work into parallel workstreams and recommend team composition using the project's existing custom agents. These agents carry frontloaded skills, safety hooks, and tailored instructions — always prefer them over generalist agents. - -**Recommended compositions by spec type:** - -| Spec Type | Teammates | -|-----------|-----------| -| Full-stack feature | researcher + test-writer + documenter | -| Backend-heavy | researcher + test-writer | -| Security-sensitive | security-auditor + test-writer | -| Refactoring work | refactorer + test-writer | -| Multi-service | researcher per service + test-writer | - -**Available specialist agents:** `architect`, `bash-exec`, `claude-guide`, `debug-logs`, `dependency-analyst`, `documenter`, `explorer`, `generalist`, `git-archaeologist`, `migrator`, `perf-profiler`, `refactorer`, `researcher`, `security-auditor`, `spec-writer`, `statusline-config`, `test-writer` - -Use `generalist` only when no specialist matches the workstream. Hard limit: 3-5 active teammates maximum. - -**When complexity is low** (< 8 requirements, single layer, sequential work): skip team spawning, implement directly in the main thread. Still follow all 5 phases. - -The user can override the team recommendation in either direction. 
- ---- - -## Phase 1: Discovery & Gate Check - -### Step 1: Find the Spec - -``` -Glob: .specs/**/*.md -``` - -Match by `$ARGUMENTS` — the user provides a feature name or path. If ambiguous, list matching specs and ask which one to implement. - -### Step 2: Read the Full Spec - -Read every line. Extract structured data: - -- **All `[user-approved]` requirements** — every FR-* and NFR-* with their EARS-format text -- **All acceptance criteria** — every `[ ]` checkbox item -- **Key Files** — existing files to read for implementation context -- **Dependencies** — prerequisite features, systems, or libraries -- **Out of Scope** — explicit exclusions that define boundaries to respect - -### Step 3: Gate Check - -**Hard gate**: Verify the spec has `**Approval:** user-approved`. - -- If `user-approved` -> proceed to Step 4 -- If `draft` or missing -> **STOP**. Print: "This spec is not approved for implementation. Run `/spec-refine ` first to validate assumptions and get user approval." Do not continue. - -This gate is non-negotiable. Draft specs contain unvalidated assumptions — building against them risks wasted work. - -### Step 4: Build Context - -Read every file listed in the spec's `## Key Files` section. These are the files the spec author identified as most relevant to implementation. Understanding them is prerequisite to planning. - -After reading, note: -- Which key files exist vs. which are new (to be created) -- Patterns, conventions, and interfaces in existing files -- Any dependencies or constraints discovered in the code - -### Step 5: Assess Complexity - -Apply the complexity indicators from the assessment section above. Note the result for Phase 2 — it determines whether to recommend team spawning. - ---- - -## Phase 2: Implementation Planning - -**Do NOT write any code in this phase.** This phase produces a plan only. - -Use `EnterPlanMode` to enter plan mode. Create a structured implementation plan covering: - -### Plan Structure - -1. 
**Spec Reference** — path to the spec file, domain, feature name -2. **Complexity Assessment** — indicators found, team recommendation (if applicable) -3. **Requirement-to-File Mapping** — each FR-*/NFR-* mapped to specific file changes -4. **Implementation Steps** — ordered by dependency, grouped by related requirements: - - For each step: files to create/modify, requirements addressed, acceptance criteria to verify - - Mark which steps depend on others completing first -5. **Out-of-Scope Boundaries** — items from the spec's Out of Scope section, noted as "do not touch" -6. **Verification Checkpoints** — acceptance criteria listed as checkpoints after each logical group of steps - -### Preserving Phase Instructions - -The plan MUST include the following phases verbatim so they survive context across the implementation session. Include them as a "Post-Implementation Phases" section in the plan: - -**Phase 3 instructions**: Execute steps, flip `[ ]` to `[~]` after addressing each criterion in code. - -**Phase 4 instructions**: Run comprehensive review using the Spec Implementation Review Checklist at `skills/spec-build/references/review-checklist.md`. Walk every requirement, verify every criterion, audit code quality, check spec consistency. Produce a summary report. - -**Phase 5 instructions**: Update spec status, add Implementation Notes, update Key Files, add Discrepancies, set Last Updated date. - -### Team Plan (if applicable) - -If complexity assessment recommends team spawning, the plan should additionally include: -- Workstream decomposition with clear boundaries -- Teammate assignments by specialist type -- Task dependencies between workstreams -- Integration points where workstreams converge - -Present the plan via `ExitPlanMode` and wait for explicit user approval before proceeding. - ---- - -## Phase 3: Implementation - -Execute the approved plan step by step. This is where code gets written. - -### Execution Rules - -1. 
**Follow the plan order** — implement steps in the sequence approved by the user -2. **Live spec updates** — after completing work on an acceptance criterion, immediately edit the spec file: - - Flip `[ ]` to `[~]` for criteria addressed in code - - This is the ONLY spec edit during Phase 3 — no structural changes to the spec -3. **Track requirement coverage** — mentally track which FR-*/NFR-* requirements have been addressed as you work through the steps -4. **Note deviations** — if the implementation must deviate from the plan (unexpected constraint, better approach discovered, missing dependency), note the deviation for Phase 4. Do not silently diverge. -5. **Respect boundaries** — do not implement anything listed in the spec's Out of Scope section - -### If Using a Team - -If team spawning was approved in Phase 2: - -1. Create the team using `TeamCreate` -2. Create tasks in the team task list mapped to spec requirements -3. Spawn teammates using the recommended specialist agent types -4. Assign tasks by domain match -5. Coordinate integration points as workstreams converge -6. Collect results and ensure all `[ ]` criteria are flipped to `[~]` - -### Progress Tracking - -The spec file itself is the progress tracker. At any point during Phase 3: -- `[ ]` criteria = not yet addressed -- `[~]` criteria = addressed in code, awaiting verification -- Count of `[~]` vs total criteria shows implementation progress - ---- - -## Phase 4: Comprehensive Review - -The most critical phase. Audit everything built against the spec. Use the Spec Implementation Review Checklist at `skills/spec-build/references/review-checklist.md` as the authoritative guide. - -### 4A: Requirement Coverage Audit - -Walk through every FR-* and NFR-* requirement from the spec: - -1. For each requirement: identify the specific files and functions that address it -2. Verify the implementation matches the EARS-format requirement text -3. Flag requirements that were missed entirely -4. 
Flag requirements only partially addressed -5. Flag code written outside the spec's scope (scope creep) - -### 4B: Acceptance Criteria Verification - -For each `[~]` criterion in the spec: - -1. Find or write the corresponding test -2. Run the test and confirm it passes -3. If the test passes -> upgrade `[~]` to `[x]` in the spec -4. If the test fails -> note the failure, do not upgrade -5. For criteria without tests: write the test, run it, then decide - -Report any criteria that cannot be verified and explain why. - -### 4C: Code Quality Review - -Check the implementation against code quality standards: - -- Error handling at appropriate boundaries -- No hardcoded values that should be configurable -- Function sizes within limits (short, single-purpose) -- Nesting depth within limits -- Test coverage for new code paths -- No regressions in existing tests - -### 4D: Spec Consistency Check - -Compare implemented behavior against each EARS requirement: - -- Does the code actually do what each requirement says? -- Are there behavioral differences between spec intent and actual implementation? -- Are Key Files in the spec still accurate? Any new files missing from the list? -- Are there files created during implementation that should be added? - -### 4E: Summary Report - -Present a structured summary to the user: - -``` -## Implementation Review Summary - -**Requirements:** N/M addressed (list any gaps) -**Acceptance Criteria:** N verified [x], M in progress [~], K not started [ ] -**Deviations from Plan:** (list any, or "None") -**Discrepancies Found:** (spec vs reality gaps, or "None") -**Code Quality Issues:** (list any, or "None") - -**Recommendation:** Proceed to Phase 5 / Fix issues first (with specific list) -``` - -If issues are found, address them before moving to Phase 5. If issues require user input, present them and wait for direction. - ---- - -## Phase 5: Spec Closure - -The final phase. Update the spec to reflect what was actually built. 
This replaces the need for a separate `/spec-update` run. - -### Step 1: Update Status - -Set `**Status:**` to: -- `implemented` — if all acceptance criteria are `[x]` -- `partial` — if any criteria remain `[ ]` or `[~]` - -### Step 2: Update Metadata - -- Set `**Last Updated:**` to today's date (YYYY-MM-DD) -- Preserve `**Approval:** user-approved` — never downgrade - -### Step 3: Add Implementation Notes - -In the `## Implementation Notes` section, document: - -- **Deviations from the original spec** — what changed and why -- **Key design decisions** — choices made during implementation not in the original spec -- **Trade-offs accepted** — what was sacrificed and the reasoning -- **Surprising findings** — edge cases, performance characteristics, limitations discovered - -Reference file paths, not code. Keep notes concise. - -### Step 4: Update Key Files - -In `## Key Files`: -- Add files created during implementation -- Remove files that no longer exist -- Update paths that changed -- Verify every path listed actually exists - -### Step 5: Add Discrepancies - -In `## Discrepancies`, document any gaps between spec intent and actual build: -- Requirements that were met differently than specified -- Behavioral differences from the original EARS requirements -- Scope adjustments that happened during implementation - -If no discrepancies exist, leave the section empty or note "None." - -### Step 6: Final Message - -Print: "Implementation complete. Spec updated to `[status]`. Run `/spec-check` to verify spec health." - ---- - -## Persistence Policy - -Complete all five phases. Stop only when: -- Gate check fails in Phase 1 (spec not approved) — hard stop -- User explicitly requests stop -- A genuine blocker requires user input that cannot be resolved - -If interrupted mid-phase, resume from the last completed step. Phase 3 progress is tracked via acceptance criteria markers in the spec — `[~]` markers show exactly where implementation left off. - -Do not skip phases. 
Do not combine phases. Each phase exists because it surfaces different types of issues. Phase 4 in particular catches problems that are invisible during Phase 3. - ---- - -## Ambiguity Policy - -- If `$ARGUMENTS` matches multiple specs, list them and ask the user which to implement. -- If a spec has no acceptance criteria, warn the user and suggest adding criteria before implementation. Offer to proceed anyway if the user confirms. -- If Key Files reference paths that don't exist, note this in Phase 1 and proceed — they may be files to create. -- If the spec has both `[assumed]` and `[user-approved]` requirements, the gate check still fails — all requirements must be `[user-approved]` before implementation begins. -- If Phase 4 reveals significant gaps, do not silently proceed to Phase 5. Present the gaps and get user direction on whether to fix them first or document them as discrepancies. -- If the spec is already `implemented`, ask: is this a re-implementation, an update, or an error? - ---- - -## Anti-Patterns - -- **Skipping the plan**: Jumping from Phase 1 to Phase 3 without a plan leads to unstructured work and missed requirements. Always plan first. -- **Optimistic verification**: Marking `[~]` as `[x]` without running the actual test. Every `[x]` must be backed by a passing test or confirmed behavior. -- **Scope creep during implementation**: Building features not in the spec because they "seem useful." Respect Out of Scope boundaries. -- **Deferring Phase 4**: "I'll review later" means "I won't review." Phase 4 runs immediately after Phase 3. -- **Silent deviations**: Changing the implementation approach without noting it. Every deviation gets documented in Phase 4/5. -- **Skipping Phase 5**: The spec-reminder hook will catch this, but it's better to close the loop immediately. Phase 5 is not optional. 
diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-build/references/review-checklist.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-build/references/review-checklist.md deleted file mode 100644 index b740383..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-build/references/review-checklist.md +++ /dev/null @@ -1,175 +0,0 @@ -# Spec Implementation Review Checklist - -Comprehensive checklist for spec implementation reviews. Used by `/spec-build` Phase 4 and `/spec-review`. Walk through every section methodically. Do not skip sections — each catches different categories of issues. - ---- - -## 4A: Requirement Coverage Audit - -For each FR-* requirement in the spec: - -- [ ] Identify the file(s) and function(s) that implement this requirement -- [ ] Verify the implementation matches the EARS-format requirement text -- [ ] Confirm the requirement is fully addressed (not partially) -- [ ] Note if the requirement was met through a different approach than planned - -For each NFR-* requirement in the spec: - -- [ ] Identify how the non-functional requirement is enforced (e.g., timeout config, index, validation) -- [ ] Verify measurable NFRs have been tested or measured (response time, throughput, size limits) -- [ ] Confirm the NFR is met under expected conditions, not just ideal conditions - -Cross-checks: - -- [ ] Every FR-* has corresponding code — no requirements were skipped -- [ ] Every NFR-* has corresponding enforcement — no hand-waving -- [ ] No code was written that doesn't map to a requirement (scope creep check) -- [ ] Out of Scope items from the spec were NOT implemented - ---- - -## 4B: Acceptance Criteria Verification - -For each criterion currently marked `[~]` (implemented, not yet verified): - -- [ ] Locate the corresponding test (unit, integration, or manual verification) -- [ ] If no test exists: write one -- [ ] Run the test -- [ ] If test passes: 
upgrade `[~]` to `[x]` in the spec -- [ ] If test fails: note the failure, keep as `[~]`, document the issue - -Summary checks: - -- [ ] Count total criteria vs. verified `[x]` — report the ratio -- [ ] Any criteria still `[ ]` (not started)? Flag as missed -- [ ] Any criteria that cannot be tested? Document why and note as discrepancy -- [ ] Do the tests actually verify the criterion, or just exercise the code path? - ---- - -## 4C: Code Quality Review - -### Error Handling - -- [ ] Errors are caught at appropriate boundaries (not swallowed, not over-caught) -- [ ] Error messages are informative (include context, not just "error occurred") -- [ ] External call failures (I/O, network, subprocess) have explicit handling -- [ ] No bare except/catch-all that hides real errors - -### Code Structure - -- [ ] Functions are short and single-purpose -- [ ] Nesting depth is within limits (2-3 for Python, 3-4 for other languages) -- [ ] No duplicated logic that should be extracted -- [ ] Names are descriptive (functions, variables, parameters) - -### Hardcoded Values - -- [ ] No magic numbers without explanation -- [ ] Configuration values that may change are externalized (not inline) -- [ ] File paths, URLs, and credentials are not hardcoded - -### Test Quality - -- [ ] New code has corresponding tests -- [ ] Tests verify behavior, not implementation details -- [ ] Tests cover happy path, error cases, and key edge cases -- [ ] No over-mocking that makes tests trivially pass -- [ ] Existing tests still pass (no regressions introduced) - -### Dependencies - -- [ ] New imports/dependencies are necessary (no unused imports) -- [ ] No circular dependencies introduced -- [ ] Third-party dependencies are justified (not added for trivial functionality) - ---- - -## 4D: Spec Consistency Check - -### Requirement-to-Implementation Fidelity - -- [ ] Re-read each EARS requirement and compare against the actual implementation -- [ ] For "When [event], the system shall [action]" — does 
the code handle that event and perform that action? -- [ ] For "If [unwanted condition], the system shall [action]" — is the unwanted condition detected and handled? -- [ ] For ubiquitous requirements ("The system shall...") — is the behavior always active? - -### Key Files Accuracy - -- [ ] Every file in the spec's Key Files section still exists at that path -- [ ] New files created during implementation are listed in Key Files -- [ ] Deleted or moved files have been removed/updated in Key Files -- [ ] File descriptions in Key Files are still accurate - -### Schema and API Consistency - -- [ ] If the spec has a Schema/Data Model section, verify referenced files are current -- [ ] If the spec has API Endpoints, verify routes match the implementation -- [ ] Any new endpoints or schema changes are reflected in the spec - -### Behavioral Alignment - -- [ ] Edge cases discovered during implementation are documented -- [ ] Performance characteristics match NFR expectations -- [ ] Integration points work as the spec describes -- [ ] Default values and fallback behaviors match spec intent - ---- - -## 4E: Summary Report Template - -After completing sections 4A through 4D, compile findings into this format: - -``` -## Implementation Review Summary - -**Spec:** [feature name] ([spec file path]) -**Date:** YYYY-MM-DD - -### Requirement Coverage -- Functional: N/M addressed -- Non-Functional: N/M addressed -- Gaps: [list or "None"] - -### Acceptance Criteria -- [x] Verified: N -- [~] Implemented, pending verification: N -- [ ] Not started: N -- Failures: [list or "None"] - -### Code Quality -- Issues found: [list or "None"] -- Regressions: [list or "None"] - -### Spec Consistency -- Key Files updates needed: [list or "None"] -- Discrepancies: [list or "None"] - -### Deviations from Plan -[list or "None"] - -### Recommendation -[ ] Proceed to Phase 5 — all clear -[ ] Fix issues first: [specific list] -[ ] Requires user input: [specific questions] -``` - ---- - -## When to Fail 
the Review - -The review should recommend "fix issues first" when: - -- Any FR-* requirement has no corresponding implementation -- Any acceptance criterion test fails -- Existing tests regress (new code broke something) -- Code was written outside the spec's scope without user approval -- Critical error handling is missing (crashes on expected error conditions) - -The review should recommend "proceed to Phase 5" when: - -- All requirements have corresponding implementations -- All acceptance criteria are `[x]` (or `[~]` with documented reason) -- No test regressions -- Code quality is acceptable (no critical issues) -- Discrepancies are documented, not hidden diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-check/SKILL.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-check/SKILL.md deleted file mode 100644 index def3afc..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-check/SKILL.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -name: spec-check -description: >- - Audits all specifications in a project for health issues including stale - status, missing sections, unapproved drafts, and assumed requirements. - USE WHEN the user asks to "check spec health", "audit specs", "which - specs are stale", "find missing specs", "review spec quality", - "run spec-check", "are my specs up to date", or works with .specs/ - directory maintenance and specification metadata. - DO NOT USE for single-spec code review or implementation verification - — use spec-review for deep code-level audits against one spec. -version: 0.2.0 -argument-hint: "[domain or path]" -context: fork -agent: explorer ---- - -# Spec Health Audit - -Audit all specifications in the current project and report their health status. - -## Workflow - -### Step 1: Discover Specs - -``` -Glob: .specs/**/*.md -``` - -If `.specs/` does not exist, report: "No specification directory found. 
Use `/spec-new` to create your first spec." - -Exclude non-spec files: -- `MILESTONES.md` -- `BACKLOG.md` -- `LESSONS_LEARNED.md` -- Files in `archive/` - -### Step 2: Read Each Spec - -For each spec file, extract: -- **Feature name** from the `# Feature: [Name]` header -- **Domain** from the `**Domain:**` field -- **Status** from the `**Status:**` field -- **Last Updated** from the `**Last Updated:**` field -- **Approval** from the `**Approval:**` field (default `draft` if missing) -- **Line count** (wc -l) -- **Sections present** — check for each required section header -- **Acceptance criteria** — count total, count checked `[x]`, count in-progress `[~]` -- **Requirements** — count total, count `[assumed]`, count `[user-approved]` -- **Discrepancies** — check if section has content - -### Step 3: Flag Issues - -For each spec, check these conditions: - -| Issue | Condition | Severity | -|-------|-----------|----------| -| **Unapproved** | Approval is `draft` or missing | High | -| **Assumed requirements** | Has requirements tagged `[assumed]` | Medium | -| **Stale** | Status is `planned` but Last Updated is >30 days ago | High | -| **Incomplete** | Missing required sections (Intent, Acceptance Criteria, Key Files, Requirements, Out of Scope) | High | -| **Long spec** | Exceeds ~200 lines — consider splitting | Info | -| **No criteria** | Acceptance Criteria section is empty or has no checkboxes | High | -| **Open discrepancies** | Discrepancies section has content | Medium | -| **Missing as-built** | Status is `implemented` but Implementation Notes is empty | Medium | -| **Stale paths** | Key Files references paths that don't exist | Low | -| **Draft + implemented** | Status is `implemented` but Approval is `draft` — approval gate was bypassed | High | -| **Inconsistent approval** | Approval is `user-approved` but spec has `[assumed]` requirements | High | -| **In-progress criteria** | Has acceptance criteria marked `[~]` (implemented, not yet verified) | Info | 
-
-### Step 4: Report
-
-Output a summary table:
-
-```
-## Spec Health Report
-
-| Feature | Domain | Status | Approval | Updated | Lines | Issues |
-|---------|--------|--------|----------|---------|-------|--------|
-| Session History | sessions | implemented | user-approved | 2026-02-08 | 74 | None |
-| Auth Flow | auth | planned | draft | 2026-01-15 | 45 | Unapproved, Stale (34 days) |
-| Settings Page | ui | partial | draft | 2026-02-05 | 210 | Unapproved, Long spec |
-
-## Issues Found
-
-### High Priority
-- **Auth Flow** (`.specs/auth/auth-flow.md`): Status is `planned` but last updated 34 days ago. Either implementation is stalled or the spec needs an as-built update.
-
-### Medium Priority
-- **Settings Page** (`.specs/ui/settings-page.md`): 210 lines — consider splitting into separate specs in the domain folder.
-
-### Suggested Actions
-1. Run `/spec-refine auth-flow` to validate assumptions and get user approval
-2. Run `/spec-review auth-flow` to verify implementation against the spec
-3. Run `/spec-update auth-flow` to update the auth flow spec
-4. Split settings-page.md into sub-specs
-
-### Approval Summary
-- **User-approved:** 1 spec
-- **Draft (needs /spec-refine):** 2 specs
-- **Assumed requirements across all specs:** 8
-```
-
-If no issues are found, report: "All specs healthy. N specs across M domains. All user-approved." diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-init/SKILL.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-init/SKILL.md deleted file mode 100644 index 85e445b..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-init/SKILL.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -name: spec-init -description: >- - Bootstraps the .specs/ directory structure for a project, creating - MILESTONES.md and BACKLOG.md from starter templates so spec-new has - a home.
USE WHEN the user asks to "initialize specs", "set up specs", - "bootstrap specs", "start using specs", "create spec directory", - "init specs for this project", "set up .specs", or works with first- - time specification setup and project onboarding. - DO NOT USE if .specs/ already exists — use spec-check to audit health - or spec-new to add individual specs. -version: 0.2.0 ---- - -# Initialize Specification Directory - -## Mental Model - -Before any spec can be created, the project needs a `.specs/` directory with its supporting files: a MILESTONES tracker (what each milestone delivers) and a BACKLOG (deferred items). This skill bootstraps that structure so `/spec-new` has a home. - ---- - -## Workflow - -### Step 1: Check Existing State - -``` -Glob: .specs/**/*.md -``` - -**If `.specs/` already exists:** -- Report current state: how many specs, domains, whether MILESTONES.md and BACKLOG.md exist -- Suggest `/spec-check` to audit health instead -- Do NOT recreate or overwrite anything -- Stop here - -**If `.specs/` does not exist:** proceed to Step 2. - -### Step 2: Create Directory Structure - -Create the `.specs/` directory at the project root. - -### Step 3: Create MILESTONES.md - -Write `.specs/MILESTONES.md` using the template from `references/milestones-template.md`. - -### Step 4: Create BACKLOG.md - -Write `.specs/BACKLOG.md` using the template from `references/backlog-template.md`. - -### Step 5: Retroactive Documentation - -Ask the user: - -> "Are there existing features in this project that should be documented retroactively? I can help create specs for them using `/spec-new`." - -If yes, guide the user through creating a spec for each feature using `/spec-new`. - -If no, proceed to Step 6. 
-
-### Step 6: Report
-
-Summarize what was created:
-
-```
-## Spec Directory Initialized
-
-Created:
-- `.specs/` directory
-- `.specs/MILESTONES.md` — milestone tracker
-- `.specs/BACKLOG.md` — deferred items list
-
-Next steps:
-- Add features to `BACKLOG.md` with priority grades (P0–P3)
-- Pull features into a milestone in `MILESTONES.md` when ready to scope
-- Use `/spec-new <feature-name>` to create a spec (domain is inferred)
-- Use `/spec-refine <feature-name>` to validate before implementation
-- After implementing, use `/spec-review <feature-name>` to verify against the spec
-- Then use `/spec-update` to close the loop
-- Use `/spec-check` to audit spec health at any time
-```
-
----
-
-## Constraints
-
-- **Never overwrite** an existing `.specs/` directory or its contents.
-- Templates are starting points — the user will extend them as the project grows.
-
----
-
-## Ambiguity Policy
-
-- If the user runs this in a workspace root with multiple projects, ask which project to initialize.
-- If `.specs/` exists but is missing MILESTONES.md or BACKLOG.md, offer to create only the missing files.
-
----
-
-## Reference Files
-
-| File | Contents |
-|------|----------|
-| `references/milestones-template.md` | Starter MILESTONES with milestone table format |
-| `references/backlog-template.md` | Starter BACKLOG with item format | diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-init/references/backlog-template.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-init/references/backlog-template.md deleted file mode 100644 index 9018f4a..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-init/references/backlog-template.md +++ /dev/null @@ -1,23 +0,0 @@ -# Backlog -
-Priority-graded feature and infrastructure backlog. Items are pulled into milestones when ready to scope and spec. See `MILESTONES.md` for the milestone workflow.
- -## P0 — High Priority - -- [ ] [Feature] — [Description] - -## P1 — Important - -- [ ] [Feature] — [Description] - -## P2 — Desired - -- [ ] [Feature] — [Description] - -## P3 — Nice to Have - -- [ ] [Feature] — [Description] - -## Infrastructure & CI - -- [ ] [Item] — [Description] diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-init/references/milestones-template.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-init/references/milestones-template.md deleted file mode 100644 index bb7fee6..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-init/references/milestones-template.md +++ /dev/null @@ -1,32 +0,0 @@ -# Milestones - -> Features are organized by domain in `.specs/`. Milestones group features -> into deliverable increments. See `BACKLOG.md` for the feature backlog. - -## How Milestones Work - -1. **Backlog** — All desired features live in `BACKLOG.md`, graded by priority. -2. **Milestone scoping** — When ready to plan a deliverable, pull features from the backlog. -3. **Spec first** — Each feature gets a spec (via `/spec-new`) before implementation begins. -4. **Ship** — A milestone is done when all its specs are implemented and verified. - -Only the **current milestone** is defined in detail. Everything else is backlog. - -## Released - -_None yet._ - -## Current - -### [Milestone Name] - -- [ ] `domain/feature-name.md` — [Brief description] -- [ ] `domain/feature-name.md` — [Brief description] - -## Next - -> Scoped from `BACKLOG.md` when the current milestone is complete. 
- -## Out of Scope - -- [Items explicitly not planned] diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-init/references/roadmap-template.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-init/references/roadmap-template.md deleted file mode 100644 index fce785f..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-init/references/roadmap-template.md +++ /dev/null @@ -1,33 +0,0 @@ -# Roadmap - -> Features live in the priority-graded backlog until pulled into a version. -> Versions are scoped and spec'd when ready to build — not pre-assigned. -> See `BACKLOG.md` for the feature backlog. - -## How Versioning Works - -1. **Backlog** — All desired features live in `BACKLOG.md`, graded by priority. -2. **Version scoping** — When ready to start a new version, pull features from the backlog. -3. **Spec first** — Each feature in a version gets a spec before implementation begins. -4. **Ship** — Version is done when all its specs are implemented and verified. - -Only the **next version** is defined in detail. Everything else is backlog. - -## Released - -_None yet._ - -## Current - -### v0.1.0 — [Name] 🔧 - -- [ ] [Feature pulled from backlog] -- [ ] [Feature pulled from backlog] - -## Next - -> Scoped from `BACKLOG.md` when current version is complete. - -## Out of Scope - -- [Items explicitly not planned] diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-new/SKILL.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-new/SKILL.md deleted file mode 100644 index 18bd574..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-new/SKILL.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -name: spec-new -description: >- - Creates a new feature specification from the standard EARS template - with domain inference, acceptance criteria, and requirement tagging. 
- USE WHEN the user asks to "create a spec", "new feature spec", "write - a spec for", "spec this feature", "start a new spec", "plan a feature", - "add a spec", or works with .specs/ directory and feature planning. - DO NOT USE for updating existing specs after implementation — use - spec-update instead. Not for refining draft specs — use spec-refine. -version: 0.2.0 -argument-hint: "[feature-name] [domain]" ---- - -# Create New Feature Specification - -## Mental Model - -A specification is a contract between the person requesting a feature and the person building it. Writing the spec BEFORE implementation forces you to think through edge cases, acceptance criteria, and scope boundaries while changes are cheap — before any code exists. - -Every project uses `.specs/` as the specification directory. Specs are domain-organized, independently loadable, and should aim for ~200 lines. - ---- - -## Workflow - -### Step 1: Parse Arguments - -Extract the feature name from `$ARGUMENTS`: -- **Feature name**: kebab-case identifier (e.g., `session-history`, `auth-flow`) - -If the feature name is missing, ask the user what they want to spec. - -**Note:** Features should be pulled from the project's backlog (`BACKLOG.md`) into a milestone before creating a spec. If the feature isn't in the backlog yet, add it first, then assign it to a milestone. - -### Step 2: Determine Domain and File Path - -Analyze the feature name and description to infer an appropriate domain folder: -- Look at existing domain folders in `.specs/` for a natural fit -- Consider the feature's area: `auth`, `search`, `ui`, `api`, `onboarding`, etc. -- Present the inferred domain to the user for confirmation or override - -The file path is always: `.specs/{domain}/{feature-name}.md` - -If `.specs/` does not exist at the project root, create it. - -If `.specs/{domain}/` does not exist, create it. - -### Step 3: Create the Spec File - -Write the file using the standard template from `references/template.md`. 
- -Pre-fill: -- **Domain**: from the inferred/confirmed domain -- **Status**: `planned` -- **Last Updated**: today's date (YYYY-MM-DD) -- **Approval**: `draft` -- **Feature name**: from arguments - -Leave all other sections as placeholders for the user to fill. - -### Step 4: Guide Content Creation - -After creating the file, guide the user through filling it out: - -1. **Intent** — What problem does this solve? Who has this problem? (2-3 sentences) -2. **Acceptance Criteria** — Use the `specification-writing` skill for EARS format and Given/When/Then patterns -3. **Key Files** — Glob the codebase to identify existing files relevant to this feature -4. **Schema / Data Model** — Reference file paths only, never inline schemas -5. **API Endpoints** — Table format: Method | Path | Description -6. **Requirements** — EARS format, numbered FR-1, FR-2, NFR-1, etc. Tag all requirements `[assumed]` at creation time — they become `[user-approved]` only after explicit user validation via `/spec-refine`. -7. **Dependencies** — What this feature depends on -8. **Out of Scope** — Explicit non-goals to prevent scope creep -9. **Resolved Questions** — Leave empty at creation; populated by `/spec-refine` - -### Step 5: Validate - -Before finishing: -- [ ] If the file exceeds ~200 lines, consider splitting into separate specs in the domain folder -- [ ] No source code, SQL, or type definitions reproduced inline -- [ ] Status is `planned` and Approval is `draft` -- [ ] All required sections present (even if some are "N/A" or "TBD") -- [ ] Acceptance criteria are testable -- [ ] All requirements are tagged `[assumed]` - -After validation, inform the user: **"This spec MUST go through `/spec-refine` before implementation begins.** All requirements are marked `[assumed]` until explicitly validated." - -The `/spec-refine` skill walks through every `[assumed]` requirement with the user, validates tech decisions and scope boundaries, and upgrades approved items to `[user-approved]`. 
The spec's `**Approval:**` becomes `user-approved` only after all requirements pass review. - ---- - -## Sizing Guidelines - -- **Aim for ~200 lines per spec.** If a feature needs more, consider splitting into separate specs in the domain folder. -- **Reference, don't reproduce.** Write `see src/engine/db/migrations/002.sql lines 48-70` — never paste the SQL. -- **Independently loadable.** Each spec file must be useful without loading any other file. -- **EARS format for requirements.** Use the `specification-writing` skill for templates and examples. - ---- - -## Ambiguity Policy - -- If the user doesn't specify a domain, infer one from the feature name and existing `.specs/` structure, then confirm with the user. -- If the feature scope is unclear, write a minimal spec with `## Open Questions` listing what needs clarification. -- If a spec already exists for this feature, inform the user and suggest `/spec-update` instead. - ---- - -## Reference Files - -| File | Contents | -|------|----------| -| `references/template.md` | Full standard template with field descriptions and examples | diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-new/references/template.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-new/references/template.md deleted file mode 100644 index 877761b..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-new/references/template.md +++ /dev/null @@ -1,139 +0,0 @@ -# Specification Template - -Standard template for all feature specifications. Copy this structure when creating a new spec. - ---- - -## Template - -```markdown -# Feature: [Name] - -**Domain:** [domain-name] -**Status:** planned -**Last Updated:** YYYY-MM-DD -**Approval:** draft - -## Intent - -[What problem does this solve? Who has this problem? What's the cost of not solving it? 2-3 sentences.] - -## Acceptance Criteria - -[Testable criteria. 
Use Given/When/Then for complex flows, checklists for simple features, or tables for business rules. Every criterion must be verifiable.] - -[Markers: `[ ]` = not started, `[~]` = implemented but not yet verified, `[x]` = verified (tests pass). During `/spec-build`, criteria progress from `[ ]` to `[~]` during implementation, then to `[x]` after review.] - -- [ ] [Criterion 1] -- [ ] [Criterion 2] -- [ ] [Criterion 3] - -## Key Files - -[File paths most relevant to implementation — paths an implementer should read first.] - -**Backend:** -- `src/path/to/file.py` — [brief description] - -**Frontend:** -- `src/web/path/to/component.svelte` — [brief description] - -**Tests:** -- `tests/path/to/test_file.py` — [brief description] - -## Schema / Data Model - -[Reference migration files and model files by path. Describe what changes — do NOT paste DDL, Pydantic models, or TypeScript interfaces.] - -- New table: `table_name` — see `src/db/migrations/NNN.sql` -- Modified: `existing_table` — added `column_name` column - -## API Endpoints - -| Method | Path | Description | -|--------|------|-------------| -| GET | `/api/resource` | List resources with pagination | -| POST | `/api/resource` | Create a new resource | - -## Requirements - -### Functional Requirements - -- FR-1 [assumed]: [EARS format requirement — see specification-writing skill for templates] -- FR-2 [assumed]: When [event], the system shall [action]. -- FR-3 [assumed]: If [unwanted condition], then the system shall [action]. - -### Non-Functional Requirements - -- NFR-1 [assumed]: The system shall respond to [endpoint] within [N]ms at the [percentile] percentile. 
-- NFR-2 [assumed]: [Security, accessibility, scalability requirement] - -## Dependencies - -- [External system, library, or feature this depends on] -- [Blocked by: feature X must ship first] - -## Out of Scope - -- [Explicit non-goal 1 — prevents scope creep] -- [Explicit non-goal 2] - -## Resolved Questions - -[Decisions explicitly approved by the user via `/spec-refine`. Each entry: decision topic, chosen option, date, brief rationale.] - -## Implementation Notes - -[Post-implementation only. Leave empty in planned specs. After building, document what actually shipped vs. what was planned.] - -## Discrepancies - -[Post-implementation only. Document gaps between spec intent and actual build. Prevents next session from re-planning decided work.] -``` - ---- - -## Field Descriptions - -| Section | Required | When to Fill | -|---------|----------|-------------| -| Intent | Always | At creation | -| Acceptance Criteria | Always | At creation | -| Key Files | Always | At creation (update post-implementation) | -| Schema / Data Model | If applicable | At creation | -| API Endpoints | If applicable | At creation | -| Requirements | Always | At creation | -| Dependencies | If applicable | At creation | -| Out of Scope | Always | At creation | -| Implementation Notes | Post-implementation | After building | -| Discrepancies | Post-implementation | After building | - -## Status Values - -| Status | Meaning | -|--------|---------| -| `planned` | Spec written, implementation not started | -| `partial` | Some acceptance criteria implemented, work ongoing | -| `implemented` | All acceptance criteria met, as-built notes complete | - -## Approval Workflow - -| Tag | Meaning | -|-----|---------| -| `[assumed]` | Requirement was drafted by AI or inferred — treated as a hypothesis | -| `[user-approved]` | Requirement was explicitly reviewed and approved by the user via `/spec-refine` | - -| Approval Status | Meaning | -|-----------------|---------| -| `draft` | Spec has 
unvalidated assumptions — NOT approved for implementation | -| `user-approved` | All requirements are `[user-approved]` — ready for implementation | - -## Acceptance Criteria Markers - -| Marker | Meaning | -|--------|---------| -| `[ ]` | Not started | -| `[~]` | Implemented, not yet verified — code written, tests not confirmed | -| `[x]` | Verified — tests pass, behavior confirmed | - -**Workflow:** `/spec-new` creates → `/spec-refine` validates → `/spec-build` implements + closes the loop (or implement manually → `/spec-review` verifies → `/spec-update` closes the loop). diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-refine/SKILL.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-refine/SKILL.md deleted file mode 100644 index 7c7caa1..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-refine/SKILL.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -name: spec-refine -description: >- - Guides iterative user-driven spec refinement through structured - questioning rounds that validate assumptions, tech decisions, and scope - boundaries. USE WHEN the user asks to "refine the spec", "review spec - assumptions", "validate spec decisions", "approve the spec", "walk me - through the spec", "check spec for assumptions", "iterate on the spec", - or works with [assumed] requirements needing user-approved upgrade. - DO NOT USE for creating new specs (use spec-new) or for post- - implementation updates (use spec-update). -version: 0.2.0 -argument-hint: "[spec-path]" ---- - -# Iterative Spec Refinement - -## Mental Model - -A draft spec is a hypothesis, not a commitment. Every requirement, tech decision, and scope boundary in a draft is an assumption until the user explicitly validates it. This skill systematically mines a spec for unvalidated assumptions, presents each to the user for review via structured questions, and iterates until every decision has explicit user approval. 
- -No implementation begins on a spec with `**Approval:** draft`. This skill is the gate. - ---- - -## Workflow - -### Step 1: Load & Inventory - -Find the target spec: -- If `$ARGUMENTS` contains a path or feature name, use it directly -- Otherwise, glob `.specs/**/*.md` and ask the user which spec to refine - -Read the full spec. Catalog: -- Every section and whether it has content -- The `**Approval:**` status (should be `draft`) -- All requirements and their current markers (`[assumed]` vs `[user-approved]`) -- The `## Open Questions` section (if any) -- The `## Resolved Questions` section (if any) - -If the spec is already `**Approval:** user-approved` and all requirements are `[user-approved]`, report this and ask if the user wants to re-review anyway. - -### Step 2: Assumption Mining - -Scan each section systematically for unvalidated decisions. Look for: - -| Category | What to look for | -|----------|-----------------| -| **Tech decisions** | Database choices, auth mechanisms, API formats, libraries, protocols | -| **Scope boundaries** | What's included/excluded without stated rationale | -| **Performance targets** | Numbers (response times, limits, thresholds) that were assumed | -| **Architecture choices** | Where logic lives, service boundaries, data flow patterns | -| **Behavioral defaults** | Error handling, retry logic, fallback behavior, timeout values | -| **Unstated dependencies** | Systems, services, or libraries the spec assumes exist | -| **Security assumptions** | Auth requirements, data sensitivity, access control patterns | - -For each assumption found, prepare a question with 2-4 alternatives including the current assumption. - -Present findings via `AskUserQuestion` in rounds of 1-4 questions. Group related assumptions together. Example: - -``` -Question: "Which authentication mechanism should this feature use?" 
-Options: -- JWT with refresh tokens (current assumption) -- Session cookies with httpOnly flag -- OAuth2 with external provider -``` - -Record each answer. After the user responds, check: did any answer reveal new assumptions or contradictions? If yes, add follow-up questions to the queue. - -### Step 3: Requirement Validation - -Walk through every requirement tagged `[assumed]`: - -1. **Read the requirement** aloud to the user (via the question text) -2. **Assess** — is it specific? testable? complete? -3. **Present via AskUserQuestion** with options: - - Approve as-is - - Needs revision (user provides direction via "Other") - - Remove (not needed) - - Defer to Open Questions (not decidable yet) - -Process requirements in batches of 1-4 per question round. Prioritize: -- Requirements with the most ambiguity first -- Requirements that other requirements depend on -- Requirements involving tech decisions or external systems - -For approved requirements, update the marker from `[assumed]` to `[user-approved]`. -For revised requirements, rewrite per user direction and mark `[user-approved]`. -For removed requirements, delete them. -For deferred requirements, move to `## Open Questions`. - -### Step 4: Acceptance Criteria Review - -For each acceptance criterion: -1. Is it measurable and testable? -2. Does it map to a specific requirement? -3. Are there requirements without corresponding criteria? - -Present gaps to the user: -- Missing criteria for existing requirements -- Criteria that don't map to any requirement -- Criteria with vague or unmeasurable outcomes - -Get approval on each criterion or batch of related criteria. - -### Step 5: Scope & Dependency Audit - -Review the spec from four perspectives: - -**User perspective:** -- Does the feature solve the stated problem? -- Are there user needs not addressed? -- Is the scope too broad or too narrow? - -**Developer perspective:** -- Is this implementable with the current architecture? 
-- Are the key files accurate? -- Are there missing technical constraints? - -**Security perspective:** -- Are there data sensitivity issues? -- Is authentication/authorization addressed? -- Are there input validation gaps? - -**Operations perspective:** -- Deployment considerations? -- Monitoring and observability needs? -- Rollback strategy needed? - -Surface any missing items via `AskUserQuestion`. Get explicit decisions on scope boundaries and dependency completeness. - -### Step 6: Final Approval - -1. Present a summary of all changes made during refinement: - - Assumptions resolved (count) - - Requirements approved/revised/removed - - New criteria added - - Scope changes - -2. Ask for final approval via `AskUserQuestion`: - - "Approve spec — all decisions validated, ready for implementation" - - "More refinement needed — specific concerns remain" - -3. On approval: - - Set `**Approval:** user-approved` - - Update `**Last Updated:**` to today - - Verify all requirements are tagged `[user-approved]` - - Populate `## Resolved Questions` with the decision trail from this session - -4. On "more refinement needed": - - Ask what concerns remain - - Loop back to the relevant phase - ---- - -## Convergence Rules - -- After each phase, check: did answers from this phase raise new questions? If yes, run another questioning round before advancing. -- The skill does NOT terminate until ALL of: - - Every `[assumed]` requirement is resolved (approved, revised, removed, or deferred) - - All acceptance criteria are reviewed - - The user gives explicit final approval -- If the user wants to stop early, leave `**Approval:** draft` and note remaining items in `## Open Questions`. - ---- - -## Resolved Questions Format - -Each resolved question follows this format: - -```markdown -1. 
**[Decision topic]** — [Chosen option] (user-approved, YYYY-MM-DD) - - Options considered: [list] - - Rationale: [brief user reasoning or context] -``` - -Keep entries concise — decision + options + rationale in 2-3 lines each. - ---- - -## Ambiguity Policy - -- If the spec has no `**Approval:**` field, treat it as `draft` and add the field. -- If requirements lack `[assumed]`/`[user-approved]` tags, treat all as `[assumed]`. -- If the user says "approve everything" without reviewing individual items, warn that blanket approval defeats the purpose — offer to fast-track by presenting summaries of each batch. -- If the spec is very short (< 30 lines), the full 6-phase process may be unnecessary. Adapt: merge phases 2-4 into a single review pass. Still require explicit final approval. -- If the user provides a feature name that matches multiple specs, list them and ask which to refine. - ---- - -## Anti-Patterns - -- **Rubber-stamping**: Presenting assumptions and immediately suggesting "approve all." Every assumption gets its own question with real alternatives. -- **Leading questions**: "Should we use JWT as planned?" is leading. Present alternatives neutrally: "Which auth mechanism should this feature use? Options: JWT, sessions, OAuth2." -- **Skipping phases**: Every phase surfaces different types of assumptions. Don't skip phases even if earlier phases had few findings. -- **Silent upgrades**: Never change `[assumed]` to `[user-approved]` without presenting the item to the user first. 
diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-review/SKILL.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-review/SKILL.md deleted file mode 100644 index 1ddbf89..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-review/SKILL.md +++ /dev/null @@ -1,233 +0,0 @@ ---- -name: spec-review -description: >- - Performs a standalone deep implementation review by reading code and - verifying full adherence to a specification's requirements and acceptance - criteria. USE WHEN the user asks to "review the spec", "verify - implementation", "does code match spec", "audit implementation", - "check spec adherence", "run spec-review", "regression check", or - works with post-implementation verification and pre-release audits. - DO NOT USE for batch metadata audits across all specs (use spec-check) - or for updating spec status after review (use spec-update). -version: 0.2.0 -argument-hint: "[spec-path]" ---- - -# Spec Implementation Review - -## Mental Model - -A spec is a contract — but contracts only matter if someone verifies them. `/spec-review` is the verification step: given a spec and the code that claims to implement it, does the code actually do what the spec says? - -This is a standalone, single-spec, deep implementation review. Unlike `/spec-check` (which audits metadata health across all specs without reading code) and unlike `/spec-build` Phase 4 (which is locked inside the build workflow), `/spec-review` can be invoked independently at any time after implementation exists. 
- -Use cases: - -- **Manual implementation** — you built a feature without `/spec-build` and want to verify the work before running `/spec-update` -- **Post-change regression check** — re-verify after modifying an already-implemented feature -- **Pre-release audit** — confirm a feature still matches its spec before shipping -- **Onboarding verification** — check if what's in the code matches what the spec says - -``` -Lifecycle positioning: - -/spec-new → /spec-refine → implement (manually or via /spec-build) → /spec-review → /spec-update - -Or with /spec-build (which has its own Phase 4): -/spec-new → /spec-refine → /spec-build (includes review) → done - -/spec-review is independent — usable at any time after implementation exists. -``` - ---- - -## Relationship to Other Skills - -| Skill | What it does | How `/spec-review` differs | -|-------|-------------|---------------------------| -| `/spec-check` | Batch metadata audit (all specs, no code reading) | Single-spec deep code audit | -| `/spec-build` Phase 4 | Same depth, but embedded in the build workflow | Standalone, invokable independently | -| `/spec-update` | Updates spec metadata after implementation | `/spec-review` audits first, then recommends `/spec-update` | - ---- - -## Spec Edits During Review - -`/spec-review` makes limited spec edits — just enough to record what it verified: - -- Upgrade `[ ]` or `[~]` → `[x]` for criteria verified by passing tests -- Downgrade `[x]` → `[ ]` if a previously-verified criterion now fails (regression) -- Add entries to `## Discrepancies` for gaps found -- Update `## Key Files` if paths are stale (files moved/deleted/added) -- Update `**Last Updated:**` date - -It does NOT change `**Status:**` or add `## Implementation Notes` — that's `/spec-update`'s job. Clear boundary: `/spec-review` verifies and records findings; `/spec-update` closes the loop. 
- ---- - -## Workflow - -### Step 1: Discovery - -**Find the spec.** Match `$ARGUMENTS` (feature name or path) against: - -``` -Glob: .specs/**/*.md -``` - -If ambiguous, list matching specs and ask which one to review. - -**Read the full spec.** Extract: - -- All FR-* and NFR-* requirements with their EARS-format text -- All acceptance criteria with current markers (`[ ]`, `[~]`, `[x]`) -- Key Files — every file path listed -- Out of Scope — boundaries to respect -- Discrepancies — any existing entries - -**Gate check.** `/spec-review` works on any spec with implementation to review: - -| Approval | Status | Action | -|----------|--------|--------| -| `user-approved` | `planned` | Proceed (reviewing work done against approved spec) | -| `user-approved` | `partial` or `implemented` | Proceed (re-reviewing) | -| `draft` | any | **Warn**: "This spec is `draft`. Requirements may not be validated. Consider running `/spec-refine` first. Proceed anyway?" | - -Unlike `/spec-build` which hard-blocks on `draft` (because it's about to write code), `/spec-review` is read-heavy — reviewing existing code against a draft spec is still useful, even if the spec itself isn't finalized. - -**Read every Key File.** Read all files listed in the spec's `## Key Files` section. These are the files the spec author identified as implementing the feature. Understanding them is prerequisite to the audit. - ---- - -### Step 2: Requirement Coverage Audit - -Walk every FR-* and NFR-* requirement from the spec. Use the Spec Implementation Review Checklist at `spec-build/references/review-checklist.md` sections 4A and 4D as the authoritative guide. - -**For each FR-* requirement:** - -1. Identify the file(s) and function(s) that implement it -2. Verify the implementation matches the EARS-format requirement text -3. Confirm the requirement is fully addressed (not partially) -4. Note if the requirement was met through a different approach than planned - -**For each NFR-* requirement:** - -1. 
Identify how the non-functional requirement is enforced -2. Verify measurable NFRs have been tested or measured -3. Confirm the NFR is met under expected conditions, not just ideal conditions - -**Cross-checks:** - -- Every FR-* has corresponding code — no requirements were skipped -- Every NFR-* has corresponding enforcement — no hand-waving -- No code was written that doesn't map to a requirement (scope creep check) -- Out of Scope items from the spec were NOT implemented - ---- - -### Step 3: Acceptance Criteria Verification - -For each acceptance criterion, locate or write the corresponding test. Use the Spec Implementation Review Checklist at `spec-build/references/review-checklist.md` sections 4B and 4C as the authoritative guide. - -**For each criterion:** - -1. Locate the corresponding test (unit, integration, or manual verification) -2. If no test exists: **write one** — verification requires evidence, "no test exists" is not a valid review outcome -3. Run the test -4. If test passes → upgrade marker to `[x]` in the spec -5. If test fails → note the failure, set marker to `[ ]`, document the issue - -**Summary checks:** - -- Count total criteria vs. 
verified `[x]` — report the ratio -- Flag any criteria still `[ ]` (not started or regressed) -- Flag any criteria that cannot be tested — document why and note as discrepancy -- Verify tests actually test the criterion, not just exercise the code path - -**Code quality spot-check** per checklist section 4C: - -- Error handling at appropriate boundaries -- No hardcoded values that should be configurable -- Functions short and single-purpose -- Nesting depth within limits -- No regressions in existing tests - ---- - -### Step 4: Report & Spec Updates - -#### Summary Report - -Present a structured report: - -``` -## Spec Implementation Review - -**Spec:** [feature name] ([spec file path]) -**Date:** YYYY-MM-DD -**Reviewer:** /spec-review - -### Requirement Coverage -- Functional: N/M addressed -- Non-Functional: N/M addressed -- Gaps: [list or "None"] - -### Acceptance Criteria -- [x] Verified: N -- [~] Implemented, pending: N -- [ ] Not started / regressed: N -- Failures: [list or "None"] - -### Code Quality -- Issues found: [list or "None"] -- Regressions: [list or "None"] - -### Spec Consistency -- Key Files updates needed: [list or "None"] -- Discrepancies: [list or "None"] - -### Recommendation -[ ] All clear — run `/spec-update` to close the loop -[ ] Fix issues first: [specific list] -[ ] Requires user input: [specific questions] -``` - -#### Spec Edits - -Apply limited edits to the spec file: - -1. **Acceptance criteria markers** — update based on test results: - - Passed tests: upgrade `[ ]` or `[~]` → `[x]` - - Failed tests: downgrade `[x]` → `[ ]` (regression), keep `[ ]` or `[~]` as-is -2. **Discrepancies** — add entries for any gaps found between spec and implementation -3. **Key Files** — update paths if files moved, were deleted, or new files were created -4. **Last Updated** — set to today's date - -Do NOT modify `**Status:**` or `## Implementation Notes` — those are `/spec-update`'s responsibility. 
- -#### Next Action - -Based on the review outcome, recommend: - -- **All clear**: "Run `/spec-update` to close the loop and mark the spec as implemented." -- **Issues found**: "Fix the issues listed above, then re-run `/spec-review` to verify." -- **User input needed**: Present specific questions and wait for direction. - ---- - -## Ambiguity Policy - -- If `$ARGUMENTS` matches multiple specs, list them and ask which to review. -- If a spec has no acceptance criteria, warn and offer to review requirements only. -- If Key Files reference paths that don't exist, flag them as stale in the report and update the spec's Key Files section. -- If the spec has no requirements section, warn that there's nothing to audit against and suggest running `/spec-new` or `/spec-update` to add requirements. -- If all criteria are already `[x]`, still run the full review — regressions happen. - ---- - -## Anti-Patterns - -- **Skipping test verification**: Marking criteria as `[x]` without running actual tests. Every `[x]` must be backed by a passing test or confirmed behavior. -- **Reviewing without reading code**: The review must read the implementation files, not just check metadata. That's what `/spec-check` is for. -- **Modifying implementation**: `/spec-review` is a review, not a fix. Report issues; don't fix them. The user decides what to do next. -- **Changing spec status**: `/spec-review` records findings. `/spec-update` changes status. Respect the boundary. 
diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-update/SKILL.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-update/SKILL.md deleted file mode 100644 index 884a254..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/spec-update/SKILL.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -name: spec-update -description: >- - Performs the as-built spec update after implementation, closing the loop - between what was planned and what was built by setting status, checking - off acceptance criteria, and adding implementation notes. USE WHEN the - user asks to "update the spec", "mark spec as implemented", "as-built - update", "finish the spec", "close the spec", "update spec status", - "sync spec with code", or works with post-implementation documentation. - DO NOT USE for verifying code against a spec (use spec-review first) - or for creating new specs (use spec-new). -version: 0.2.0 -argument-hint: "[spec-path]" ---- - -# As-Built Spec Update - -## Mental Model - -Specs that say "planned" after code ships cause the next AI session to re-plan already-done work. The as-built update is the final step of every implementation — it closes the loop between what was planned and what was built. - -This is not optional. Every implementation ends with a spec update. - ---- - -## Approval Gate - -Before performing an as-built update, check the spec's `**Approval:**` status: -- If `user-approved` → proceed with the update -- If `draft` → warn the user: "This spec is still `draft`. It should have gone through `/spec-refine` before implementation. Run `/spec-refine` now to validate, or proceed with the as-built update if the user confirms." - -This is a warning, not a blocker — the user decides whether to refine first or update as-is. - -For manually-implemented features (not using `/spec-build`), consider running `/spec-review` first to verify implementation adherence before updating the spec. 
- ---- - -## The 6-Step Workflow - -### Step 1: Find the Spec - -``` -Glob: .specs/**/*.md -``` - -Search for the feature name in spec file names and content. If the user provides a spec path or feature name as `$ARGUMENTS`, use that directly. - -If no spec exists: -- For substantial changes: create one using `/spec-new` -- For trivial changes (bug fixes, config): note "spec not needed" and stop - -### Step 2: Set Status - -Update the `**Status:**` field: -- `implemented` — all acceptance criteria are met -- `partial` — some criteria met, work ongoing or deferred - -Never leave status as `planned` after implementation work has been done. - -### Step 3: Check Off Acceptance Criteria - -Review each acceptance criterion in the spec: -- Mark as `[x]` if the criterion is met and verified (tests pass, behavior confirmed) -- Leave as `[ ]` if not yet implemented -- Add a note next to deferred criteria explaining why -- If a criterion is marked `[~]` (implemented but not yet verified from a `/spec-build` run), treat it as `[ ]` — verify it now and upgrade to `[x]` if confirmed, or leave as `[ ]` if unverifiable - -If criteria were met through different means than originally planned, note the deviation. - -### Step 4: Add Implementation Notes - -In the `## Implementation Notes` section, document: -- **Deviations from the original spec** — what changed and why -- **Key design decisions made during implementation** — choices that weren't in the spec -- **Surprising findings** — edge cases discovered, performance characteristics, limitations -- **Trade-offs accepted** — what was sacrificed and why - -Keep notes concise. Reference file paths, not code. - -### Step 5: Update File Paths - -In the `## Key Files` section: -- Add files that were created during implementation -- Remove files that no longer exist -- Update paths that moved - -Verify paths exist before listing them. Use absolute project-relative paths. 
- -### Step 6: Update Metadata - -- Set `**Last Updated:**` to today's date (YYYY-MM-DD) -- Verify `**Domain:**` is correct -- Preserve the `**Approval:**` status — do NOT downgrade `user-approved` to `draft` -- If the as-built update introduces new decisions not in the original spec, add them to `## Resolved Questions` if the user confirmed them, or `## Open Questions` if they were assumed during implementation - ---- - -## Handling Edge Cases - -### Spec Already "Implemented" - -If the spec is already marked `implemented` and new changes affect the feature: -1. Check if acceptance criteria still hold -2. Update Implementation Notes with the new changes -3. Add any new Discrepancies between spec and current code -4. Update Last Updated date - -### No Spec Exists - -If there is no spec for the feature: -1. Ask: is this a substantial feature or a minor fix? -2. For substantial features: create one with `/spec-new`, then update it -3. For minor fixes: no spec needed — report this and stop - -### Spec Has Unresolved Discrepancies - -If the `## Discrepancies` section has open items: -1. Check if the current implementation resolves any of them -2. Remove resolved discrepancies -3. 
Add any new discrepancies discovered - ---- - -## Validation Checklist - -Before finishing the update: -- [ ] Status reflects the actual implementation state -- [ ] All implemented acceptance criteria are checked off -- [ ] Implementation Notes document deviations from original spec -- [ ] File paths in Key Files are accurate and verified -- [ ] Last Updated date is today -- [ ] `**Domain:**` is correct for the spec's location -- [ ] `**Approval:**` status is preserved (not downgraded) -- [ ] New implementation decisions are tracked in Resolved Questions or Open Questions -- [ ] If the spec has grown past ~200 lines, note it and suggest splitting into separate specs in the domain folder -- [ ] If `**Approval:**` is still `draft`, user was warned and confirmed proceeding -- [ ] No source code was pasted inline (references only) - ---- - -## Ambiguity Policy - -- If unclear which spec to update, list all candidates and ask the user. -- If the implementation deviated significantly from the spec, document it - honestly in Implementation Notes — do not retroactively change the original - requirements to match what was built. -- If acceptance criteria are ambiguous about whether they're met, note the - ambiguity in Discrepancies rather than checking them off optimistically. -- A spec-reminder advisory hook fires at Stop when code was modified but - specs weren't updated. If you see "[Spec Reminder]" in context, that's - the trigger — use this skill to resolve it. 
diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/specification-writing/SKILL.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/specification-writing/SKILL.md deleted file mode 100644 index 39625e4..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/specification-writing/SKILL.md +++ /dev/null @@ -1,327 +0,0 @@ ---- -name: specification-writing -description: >- - Teaches EARS requirement formats, Given/When/Then acceptance criteria, - and structured specification patterns for feature definitions. USE WHEN - the user asks to "write requirements", "use EARS format", "define - acceptance criteria", "write Given/When/Then scenarios", "create a - feature spec", "structure requirements", "write user stories", or works - with Gherkin syntax, FR/NFR numbering, and completeness checklists. - DO NOT USE for managing the spec lifecycle (create, refine, build, - review, update) — use the dedicated spec-* skills instead. -version: 0.2.0 ---- - -# Specification Writing - -## Mental Model - -Specifications are **contracts between humans** -- between the person requesting a feature and the person building it. The goal is to eliminate ambiguity so that both parties agree on what "done" means before work begins. - -A specification is not prose. It's a structured document with testable claims. Every requirement should be verifiable: can you write a test (automated or manual) that proves the requirement is met? If you can't test it, it's not a requirement -- it's a wish. - -The most common source of project failure is not bad code but bad specifications. Specifically: -- **Missing edge cases** -- "What happens when the list is empty?" -- **Ambiguous language** -- "The system should respond quickly" (how quickly?) -- **Implicit assumptions** -- "Users will authenticate" (how? OAuth? Password? SSO?) -- **Missing error cases** -- "The system saves the file" (what if the disk is full?) 
- -Write specifications with a hostile reader in mind -- someone who will interpret every ambiguity in the worst possible way. If a requirement can be misunderstood, it will be. - ---- - -## Spec Sizing Guidelines - -Specifications are loaded into AI context windows with limited capacity. Design for consumption. - -**Recommended target:** ~200 lines per spec file. When a spec grows beyond that, consider splitting into sub-specs (one per sub-feature) with a concise overview linking them. Complex features may justify longer specs — completeness matters more than hitting a number. - -**Reference, don't reproduce:** Never inline source code, SQL DDL, Pydantic models, or TypeScript interfaces. Reference the file path and line range instead. The code is the source of truth — duplicated snippets go stale silently. - -**Structure for independent loading:** Each spec file must be useful on its own. Include: domain, status, last-updated date, intent, key file paths, and acceptance criteria in every spec. - ---- - -## EARS Requirement Formats - -EARS (Easy Approach to Requirements Syntax) provides five templates that eliminate the most common ambiguities in natural-language requirements. Each template has a specific trigger pattern. - -### Ubiquitous - -Requirements that are always active, with no trigger condition: - -``` -The <system name> shall <system response>. -``` - -**Example:** The API shall return responses in JSON format. - -### Event-Driven - -Requirements triggered by a specific event: - -``` -When <trigger event>, the <system name> shall <system response>. -``` - -**Example:** When a user submits a login form with invalid credentials, the system shall display an error message and increment the failed login counter. - -### State-Driven - -Requirements that apply while the system is in a specific state: - -``` -While <system state>, the <system name> shall <system response>. -``` - -**Example:** While the system is in maintenance mode, the API shall return HTTP 503 for all non-health-check endpoints. 
- -### Unwanted Behavior - -Requirements for handling error conditions and edge cases: - -``` -If <unwanted condition>, then the <system name> shall <system response>. -``` - -**Example:** If the database connection pool is exhausted, then the system shall queue incoming requests for up to 30 seconds before returning HTTP 503. - -### Optional Feature - -Requirements that depend on a configurable feature: - -``` -Where <feature is enabled>, the <system name> shall <system response>. -``` - -**Example:** Where two-factor authentication is enabled, the system shall require a TOTP code after successful password verification. - -> **Deep dive:** See `references/ears-templates.md` for EARS format templates with filled examples for each pattern type. - ---- - -## Acceptance Criteria Patterns - -Acceptance criteria define when a requirement is satisfied. Use these patterns to write criteria that are directly testable. - -### Given/When/Then (Gherkin) - -The most structured pattern. Each scenario is a test case: - -```gherkin -Feature: User Login - - Scenario: Successful login with valid credentials - Given a registered user with email "alice@example.com" - And the user has a verified account - When the user submits the login form with correct credentials - Then the system returns a 200 response with an auth token - And the auth token expires in 24 hours - - Scenario: Failed login with invalid password - Given a registered user with email "alice@example.com" - When the user submits the login form with an incorrect password - Then the system returns a 401 response - And the failed login attempt is logged - And the response does not reveal whether the email exists -``` - -**When to use:** Complex workflows with multiple actors, preconditions, or state transitions. Best for user-facing features. - -### Checklist - -A flat list of verifiable statements. 
Simpler than Gherkin but less precise: - -```markdown -## Acceptance Criteria: Password Reset - -- [ ] User receives reset email within 60 seconds of request -- [ ] Reset link expires after 1 hour -- [ ] Reset link is single-use (invalidated after first use) -- [ ] Password must meet strength requirements (min 12 chars, 1 uppercase, 1 number) -- [ ] All existing sessions are invalidated after password change -- [ ] User receives confirmation email after successful reset -``` - -**When to use:** Simpler features where the preconditions are obvious and each criterion is independent. - -### Table-Driven - -For requirements with multiple input/output combinations: - -```markdown -## Discount Rules - -| Customer Type | Order Total | Discount | Notes | -|---------------|-------------|----------|-------| -| Standard | < $50 | 0% | | -| Standard | >= $50 | 5% | | -| Premium | < $50 | 5% | Minimum premium discount | -| Premium | >= $50 | 10% | | -| Premium | >= $200 | 15% | Max discount cap | -| Employee | any | 25% | Requires valid employee ID | -``` - -**When to use:** Business rules with multiple conditions and outcomes. The table format makes gaps and overlaps visible. - -> **Deep dive:** See `references/criteria-patterns.md` for acceptance criteria examples across different domains. - ---- - -## Specification Structure - -A complete specification follows this structure. Not every section is needed for every feature -- scale the document to the complexity. - -Every spec file starts with metadata: - -``` -# Feature: [Name] -**Domain:** [domain-name] -**Status:** implemented | partial | planned -**Last Updated:** YYYY-MM-DD -**Approval:** draft | user-approved -``` - -Status tells you whether to trust it, domain tells you where it belongs, last-updated tells you when it was last verified. Approval tells you whether decisions in the spec have been explicitly validated by the user (`user-approved`) or are AI-generated hypotheses (`draft`). - -### 1. 
Problem Statement -What problem does this feature solve? Who has this problem? What's the cost of not solving it? (2-3 sentences) - -### 2. Scope -What's in scope and what's explicitly out of scope? Out-of-scope items prevent scope creep. - -```markdown -## Scope - -**In scope:** -- User-initiated password reset via email -- Password strength validation -- Session invalidation on reset - -**Out of scope:** -- Admin-initiated password reset (separate spec) -- Password expiration policies -- Account recovery without email access -``` - -### 3. User Stories -Who are the actors and what do they want to achieve? - -```markdown -As a [registered user], I want to [reset my password via email] -so that [I can regain access to my account when I forget my password]. - -As a [security admin], I want to [see password reset audit logs] -so that [I can detect suspicious reset patterns]. -``` - -### 4. Functional Requirements -Use EARS format. Number each requirement for traceability: - -```markdown -- FR-1 [assumed]: When a user requests a password reset, the system shall send a reset email - to the registered email address within 60 seconds. -- FR-2 [assumed]: The reset link shall contain a cryptographically random token (min 32 bytes). -- FR-3 [assumed]: If the reset token is expired or already used, then the system shall display - an error message and offer to send a new reset email. -``` - -Tag each requirement `[assumed]` when first written. Requirements become `[user-approved]` only after explicit user validation via `/spec-refine`. - -### 5. Non-Functional Requirements -Performance, security, scalability, accessibility: - -```markdown -- NFR-1 [assumed]: The password reset endpoint shall respond within 200ms (p95). -- NFR-2 [assumed]: Reset tokens shall be stored as bcrypt hashes, not plaintext. -- NFR-3 [assumed]: The reset flow shall be accessible with screen readers (WCAG 2.1 AA). -``` - -### 6. 
Edge Cases -The cases nobody thinks about until they happen: - -```markdown -- What if the user requests multiple resets before using any link? - → Only the most recent token is valid; previous tokens are invalidated. -- What if the email is associated with multiple accounts? - → Send separate reset links for each account. -- What if the user's email provider is down? - → The system logs the failure and retries up to 3 times over 5 minutes. -``` - -### 7. Out of Scope -Explicit non-goals to prevent scope creep (can reference the Scope section or expand here). - -### 8. Resolved Questions -Decisions explicitly approved by the user via `/spec-refine`. Each entry: decision topic, chosen option, options considered, date, brief rationale. This section starts empty and is populated during the refinement process. - -### 9. Key Files -Source files most relevant to this feature — paths an implementer should read. - -### 10. Implementation Notes -Post-implementation only. Capture deviations from the original spec — what changed and why. - -### 11. Discrepancies -Gaps between spec intent and actual build. Prevents the next session from re-planning decided work. 
- ---- - -## Completeness Checklist - -Before marking a specification as ready for implementation, verify: - -**Happy path:** -- [ ] Primary use case described with acceptance criteria -- [ ] All actors identified (user, admin, system, external service) -- [ ] Success response/outcome defined - -**Error cases:** -- [ ] Invalid input handled (empty, too long, wrong type, malicious) -- [ ] External service failures handled (timeout, 500, unavailable) -- [ ] Concurrent access conflicts addressed -- [ ] Rate limiting defined for public-facing endpoints - -**Boundary conditions:** -- [ ] Empty collections (zero items) -- [ ] Maximum limits defined (max file size, max items, max length) -- [ ] Pagination for unbounded lists -- [ ] Time zones and date boundaries - -**Performance:** -- [ ] Response time targets (p50, p95, p99) -- [ ] Throughput requirements (requests per second) -- [ ] Data volume expectations (rows, storage size) - -**Security:** -- [ ] Authentication required? Which methods? -- [ ] Authorization rules per role -- [ ] Data sensitivity classification -- [ ] Audit logging requirements - -**Accessibility:** -- [ ] WCAG compliance level specified -- [ ] Keyboard navigation requirements -- [ ] Screen reader compatibility - ---- - -## Ambiguity Policy - -These defaults apply when the user does not specify a preference. State the assumption when making a choice: - -- **Format:** Default to EARS format for requirements and Given/When/Then for acceptance criteria. Use checklists for simple features with obvious preconditions. -- **Detail level:** Default to enough detail that a developer unfamiliar with the codebase could implement the feature without asking clarifying questions. -- **Non-functional requirements:** Always include response time targets (default: 200ms p95 for API endpoints, 3s for page loads) and note when these are assumptions. -- **Edge cases:** Always include at least: empty input, maximum input, concurrent access, and external service failure. 
-- **Out of scope:** Always include an out-of-scope section, even if brief, to establish boundaries. -- **Numbering:** Number all requirements (FR-1, NFR-1) for traceability in code reviews and tests. -- **Approval markers:** All requirements start as `[assumed]`. Only `/spec-refine` with explicit user validation upgrades them to `[user-approved]`. Spec-level `**Approval:**` starts as `draft` and becomes `user-approved` only when all requirements are `[user-approved]`. - ---- - -## Reference Files - -| File | Contents | -|------|----------| -| `references/ears-templates.md` | EARS format templates with filled examples for each pattern type, including compound requirements and requirement hierarchies | -| `references/criteria-patterns.md` | Acceptance criteria examples organized by domain: authentication, payments, file upload, search, notifications, and data import | diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/specification-writing/references/criteria-patterns.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/specification-writing/references/criteria-patterns.md deleted file mode 100644 index 1be3344..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/specification-writing/references/criteria-patterns.md +++ /dev/null @@ -1,245 +0,0 @@ -# Acceptance Criteria Patterns by Domain - -Examples of acceptance criteria organized by common feature domains. 
- -## Contents - -- [Authentication](#authentication) -- [Payments](#payments) -- [File Upload](#file-upload) -- [Search](#search) -- [Notifications](#notifications) -- [Data Import](#data-import) -- [Cross-Domain Edge Cases](#cross-domain-edge-cases) - ---- - -## Authentication - -### Login Flow - -```gherkin -Feature: User Login - - Scenario: Successful login with email and password - Given a verified user with email "alice@example.com" - When the user submits the login form with correct credentials - Then the system returns a 200 response with an auth token - And the token expires in 24 hours - And a "login_success" event is logged with the user ID - - Scenario: Login with unverified email - Given a user with email "bob@example.com" who has not verified their email - When the user submits the login form with correct credentials - Then the system returns a 403 response - And the response body contains "Please verify your email address" - And a new verification email is sent - - Scenario: Login with invalid credentials - Given no user exists with email "unknown@example.com" - When the user submits the login form with email "unknown@example.com" - Then the system returns a 401 response - And the response body contains "Invalid email or password" - And the response time is similar to a valid-email failure (timing-safe) - - Scenario: Account lockout after repeated failures - Given a user with email "alice@example.com" - When the user submits 5 incorrect passwords within 10 minutes - Then the account is locked for 15 minutes - And subsequent login attempts return "Account temporarily locked" - And a security alert email is sent to the user -``` - -### Password Reset - -**Checklist format:** - -- [ ] Reset email sent within 60 seconds of request -- [ ] Reset token is a cryptographically random 32-byte value -- [ ] Token expires after 1 hour -- [ ] Token is single-use (invalidated after first use) -- [ ] Using an expired or used token shows "This link has expired" with 
option to request a new one -- [ ] New password must meet strength requirements -- [ ] All existing sessions invalidated after successful reset -- [ ] Reset request for non-existent email returns success (no enumeration) -- [ ] Rate limited to 3 reset requests per email per hour - ---- - -## Payments - -### Checkout Flow - -```gherkin -Feature: Order Checkout - - Scenario: Successful payment - Given a cart with items totaling $49.99 - And the user has a valid payment method on file - When the user confirms the checkout - Then the payment is authorized for $49.99 - And the order status changes to "confirmed" - And a confirmation email is sent with the order details - And inventory is decremented for each item - - Scenario: Payment declined - Given a cart with items totaling $49.99 - When the payment gateway returns "card_declined" - Then the order status remains "pending" - And the user sees "Your card was declined. Please try another payment method." - And inventory is NOT decremented - And no confirmation email is sent - - Scenario: Payment gateway timeout - Given a cart with items totaling $49.99 - When the payment gateway does not respond within 10 seconds - Then the system retries once after 3 seconds - And if the retry also fails, shows "Payment processing is delayed" - And the order enters "payment_pending" status - And a background job checks payment status every 60 seconds for 30 minutes -``` - -### Discount Rules - -**Table-driven format:** - -| Customer Type | Order Total | Coupon | Expected Discount | Final Price | -|---------------|-------------|--------|-------------------|-------------| -| Standard | $30.00 | None | 0% | $30.00 | -| Standard | $30.00 | SAVE10 | 10% | $27.00 | -| Premium | $30.00 | None | 5% | $28.50 | -| Premium | $30.00 | SAVE10 | 10% (higher wins) | $27.00 | -| Premium | $100.00 | SAVE10 | 15% (premium tier) | $85.00 | -| Any | $0.00 | SAVE10 | 0% | $0.00 | -| Standard | $30.00 | EXPIRED| 0% + error shown | $30.00 | - ---- - -## 
File Upload - -### Image Upload - -```gherkin -Feature: Profile Image Upload - - Scenario: Successful image upload - Given the user is on the profile settings page - When the user uploads a valid JPEG image under 5MB - Then the image is resized to 256x256 pixels - And the image is stored in the CDN - And the user's profile displays the new image within 5 seconds - - Scenario: File too large - When the user uploads an image larger than 5MB - Then the upload is rejected before the file is fully transferred - And the error message reads "Image must be under 5MB. Your file is [X]MB." - - Scenario: Invalid file type - When the user uploads a .exe file renamed to .jpg - Then the system validates the file's MIME type (not just extension) - And rejects the upload with "Supported formats: JPEG, PNG, WebP" - - Scenario: Concurrent uploads - When the user uploads two images simultaneously - Then only the last uploaded image is saved as the profile picture - And both uploads complete without errors -``` - ---- - -## Search - -### Full-Text Search - -**Checklist format:** - -- [ ] Empty search query returns validation error, not all results -- [ ] Search results appear within 500ms for queries across 1M documents -- [ ] Results are ranked by relevance (BM25 or equivalent) -- [ ] Search highlights matching terms in results with `` tags -- [ ] Queries with no results show "No results found" with spelling suggestions -- [ ] Special characters in queries are escaped (no injection) -- [ ] Results are paginated with 20 items per page -- [ ] Search query is preserved in the URL for shareability -- [ ] Minimum query length: 2 characters -- [ ] Maximum query length: 200 characters - ---- - -## Notifications - -### Email Notifications - -```gherkin -Feature: Notification Preferences - - Scenario: User opts out of marketing emails - Given a user subscribed to all notification types - When the user unchecks "Marketing updates" in notification preferences - Then marketing emails stop within 
24 hours - And transactional emails (receipts, password resets) continue normally - And the preference change is logged for compliance - - Scenario: Notification delivery failure - Given a notification is queued for delivery - When the email provider returns a 5xx error - Then the system retries after 1 minute, 5 minutes, and 30 minutes - And after 3 failures, marks the notification as "failed" - And does NOT send further retries for this notification - And the failure is recorded in the admin dashboard -``` - ---- - -## Data Import - -### CSV Import - -```gherkin -Feature: User Data Import - - Scenario: Valid CSV import - Given an admin uploads a CSV with 500 valid user records - When the import is processed - Then all 500 users are created with correct field mapping - And the admin sees a summary: "500 created, 0 skipped, 0 errors" - And each user receives a welcome email - - Scenario: CSV with validation errors - Given a CSV where row 3 has an invalid email and row 7 has a duplicate email - When the import is processed - Then valid rows (498) are imported successfully - And invalid rows are skipped with error details: - | Row | Field | Error | - | 3 | email | "not.valid" is not a valid email format | - | 7 | email | "alice@example.com" already exists | - And the admin can download an error report CSV - - Scenario: Large file import - Given a CSV with 100,000 records - When the import is initiated - Then the import runs asynchronously (not blocking the UI) - And the admin sees a progress indicator - And the import completes within 5 minutes - And the system sends an email when import finishes -``` - ---- - -## Cross-Domain Edge Cases - -These edge cases apply to most features and should be checked: - -```markdown -## Universal Edge Cases - -- [ ] Empty input: What happens when required fields are blank? -- [ ] Maximum length: What happens at the field's max length? At max + 1? -- [ ] Unicode: Does the feature handle emoji, CJK characters, RTL text? 
-- [ ] Concurrent access: What if two users edit the same resource simultaneously? -- [ ] Network interruption: What if connectivity drops mid-operation? -- [ ] Timezone: Do date-dependent features work correctly across timezones? -- [ ] Pagination boundary: What happens when viewing the last page as items are deleted? -- [ ] Authorization: Can the feature be accessed without authentication? With wrong role? -- [ ] Idempotency: What happens if the same request is sent twice? -``` diff --git a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/specification-writing/references/ears-templates.md b/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/specification-writing/references/ears-templates.md deleted file mode 100644 index af049be..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/spec-workflow/skills/specification-writing/references/ears-templates.md +++ /dev/null @@ -1,239 +0,0 @@ -# EARS Requirement Templates - -Templates and filled examples for each EARS (Easy Approach to Requirements Syntax) pattern type. - -## Contents - -- [Ubiquitous Requirements](#ubiquitous-requirements) -- [Event-Driven Requirements](#event-driven-requirements) -- [State-Driven Requirements](#state-driven-requirements) -- [Unwanted Behavior Requirements](#unwanted-behavior-requirements) -- [Optional Feature Requirements](#optional-feature-requirements) -- [Compound Requirements](#compound-requirements) -- [Writing Tips](#writing-tips) - ---- - -## Ubiquitous Requirements - -**Template:** -``` -The shall . -``` - -Requirements that are always active, with no trigger condition. These define invariant behaviors. - -### Examples - -``` -The API shall return responses in JSON format with UTF-8 encoding. - -The system shall log all authentication events with timestamp, user ID, and outcome. - -The application shall enforce HTTPS for all client-server communication. - -The database shall store timestamps in UTC. 
- -The API shall include a request-id header in every response. -``` - -### Anti-patterns - -``` -❌ The system should be fast. - → Not testable. How fast? Measured how? - -❌ The system shall be user-friendly. - → Not testable. Define specific interaction requirements. - -✅ The API shall respond to health check requests within 50ms (p99). -✅ The login form shall support keyboard navigation (tab order: email → password → submit). -``` - ---- - -## Event-Driven Requirements - -**Template:** -``` -When , the shall . -``` - -Requirements triggered by a specific, detectable event. The event is the precondition. - -### Examples - -``` -When a user submits a registration form, the system shall validate all fields -and return validation errors within 200ms. - -When a payment transaction fails, the system shall: - 1. Log the failure with transaction ID, error code, and timestamp. - 2. Send a failure notification to the user within 60 seconds. - 3. Release the reserved inventory. - -When a file upload exceeds 50MB, the system shall reject the upload with -HTTP 413 and a message indicating the maximum file size. - -When a user's session has been inactive for 30 minutes, the system shall -invalidate the session and redirect to the login page. - -When the system receives a webhook event with an unrecognized event type, -the system shall log the event payload and return HTTP 200 (acknowledge but ignore). -``` - -### Anti-patterns - -``` -❌ When the user does something wrong, show an error. - → What action? What error? How displayed? - -✅ When the user submits a form with an invalid email format, - the system shall display an inline error message below the email field - stating "Please enter a valid email address". -``` - ---- - -## State-Driven Requirements - -**Template:** -``` -While , the shall . -``` - -Requirements that apply continuously while the system is in a specific state. 
- -### Examples - -``` -While the system is in maintenance mode, the API shall return HTTP 503 -with a "Retry-After" header for all endpoints except /health. - -While a user account is locked, the system shall reject all login attempts -and display a message with the unlock time. - -While the message queue depth exceeds 10,000 messages, the system shall -activate the secondary consumer group. - -While the database is performing a backup, the system shall serve read -requests from the read replica and queue write requests. - -While the system is operating in degraded mode, the dashboard shall display -a banner indicating limited functionality and estimated recovery time. -``` - ---- - -## Unwanted Behavior Requirements - -**Template:** -``` -If , then the shall . -``` - -Requirements for handling errors, failures, and edge cases. These cover what happens when things go wrong. - -### Examples - -``` -If the external payment gateway does not respond within 5 seconds, -then the system shall retry once after 2 seconds, and if the retry -also fails, return a "payment processing delayed" message to the user. - -If the database connection pool is exhausted, then the system shall -queue incoming requests for up to 30 seconds before returning HTTP 503. - -If a user attempts to access a resource they do not own, then the system -shall return HTTP 403, log the access attempt with the user ID and resource ID, -and increment the security audit counter. - -If the uploaded file contains an unsupported MIME type, then the system shall -reject the file with a message listing the supported types. - -If the disk usage exceeds 90%, then the system shall send an alert to the -operations team and begin purging temporary files older than 24 hours. -``` - ---- - -## Optional Feature Requirements - -**Template:** -``` -Where , the shall . -``` - -Requirements that depend on a configurable feature flag or setting. 
- -### Examples - -``` -Where two-factor authentication is enabled, the system shall require -a TOTP code after successful password verification. - -Where the audit log feature is enabled, the system shall record all -CRUD operations with the actor, action, resource, and timestamp. - -Where dark mode is enabled, the system shall render all pages using -the dark color palette defined in the theme configuration. - -Where rate limiting is configured, the system shall enforce the configured -request limit per API key per minute and return HTTP 429 when exceeded. - -Where email notifications are enabled for a user, the system shall send -a daily digest of unread notifications at the user's configured time. -``` - ---- - -## Compound Requirements - -Complex requirements often combine multiple EARS patterns: - -### Event + Unwanted Behavior - -``` -When a user submits a password reset request: - - The system shall send a reset email within 60 seconds. - - If the email address is not associated with an account, then the system - shall still return a success message (to prevent email enumeration). - - If the email service is unavailable, then the system shall queue the - email for retry and inform the user that the email may be delayed. -``` - -### State + Event - -``` -While the system is in read-only mode: - - When a user attempts a write operation, the system shall return HTTP 503 - with a message indicating when write access will be restored. - - When an admin issues a "restore write access" command, the system shall - exit read-only mode and process any queued write operations in order. -``` - -### Requirement Hierarchies - -For complex features, use parent-child numbering: - -``` -FR-1: User Registration - FR-1.1: When a user submits the registration form, the system shall - create an account and send a verification email. - FR-1.2: If the email is already registered, then the system shall - display "An account with this email already exists". 
- FR-1.3: The system shall require passwords of at least 12 characters - with at least one uppercase letter and one digit. - FR-1.4: Where CAPTCHA is enabled, the registration form shall include - a CAPTCHA challenge before submission. -``` - ---- - -## Writing Tips - -1. **One requirement per statement.** Don't combine multiple behaviors in one sentence. -2. **Use "shall" for requirements, "should" for recommendations, "may" for optional.** This is standard requirement language (RFC 2119). -3. **Be specific about quantities.** Not "quickly" but "within 200ms". Not "many" but "up to 1000". -4. **Name the actor.** "The system shall..." or "The user shall..." -- never the passive "It should be done". -5. **State the observable behavior.** Requirements describe what the system does, not how it does it internally. diff --git a/.devcontainer/plugins/devs-marketplace/plugins/workspace-scope-guard/scripts/inject-workspace-cwd.py b/.devcontainer/plugins/devs-marketplace/plugins/workspace-scope-guard/scripts/inject-workspace-cwd.py deleted file mode 100644 index 64a3efb..0000000 --- a/.devcontainer/plugins/devs-marketplace/plugins/workspace-scope-guard/scripts/inject-workspace-cwd.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python3 -""" -CWD context injector — injects working directory into Claude's context -on every session start, user prompt, tool call, and subagent spawn. - -Fires on: SessionStart, UserPromptSubmit, PreToolUse, SubagentStart -Always exits 0 (advisory, never blocking). -""" - -import json -import os -import sys - - -def main(): - cwd = os.getcwd() - try: - input_data = json.load(sys.stdin) - # Some hook events provide cwd override - cwd = input_data.get("cwd", cwd) - hook_event = input_data.get("hook_event_name", "PreToolUse") - except (json.JSONDecodeError, ValueError): - hook_event = "PreToolUse" - - context = ( - f"Working Directory: {cwd}\n" - f"All file operations and commands MUST target paths within {cwd}. 
" - f"Do not read, write, or execute commands against paths outside this directory." - ) - - json.dump( - { - "hookSpecificOutput": { - "hookEventName": hook_event, - "additionalContext": context, - } - }, - sys.stdout, - ) - sys.exit(0) - - -if __name__ == "__main__": - main() diff --git a/.github/dependabot.yml b/.github/dependabot.yml index cef2c79..c9d1903 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,7 +1,12 @@ version: 2 updates: - package-ecosystem: "npm" - directory: "/" + directory: "/container" + schedule: + interval: "weekly" + + - package-ecosystem: "npm" + directory: "/cli" schedule: interval: "weekly" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 60d01a8..d682e8d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,8 +3,10 @@ name: CI on: push: branches: [main, staging] + paths: ['container/**', 'cli/**'] pull_request: branches: [main, staging] + paths: ['container/**', 'cli/**'] jobs: test: @@ -15,6 +17,7 @@ jobs: with: node-version: 18 - run: npm test + working-directory: container lint: runs-on: ubuntu-latest @@ -24,6 +27,7 @@ jobs: with: node-version: 18 - run: npx @biomejs/biome check setup.js test.js + working-directory: container test-plugins: runs-on: ubuntu-latest @@ -34,3 +38,17 @@ jobs: python-version: "3.x" - run: pip install pytest - run: pytest tests/ -v + working-directory: container + + test-cli: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + steps: + - uses: actions/checkout@v6 + - uses: oven-sh/setup-bun@v2 + - run: bun install + working-directory: cli + - run: bun test + working-directory: cli diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml index da53372..26ed88d 100644 --- a/.github/workflows/deploy-docs.yml +++ b/.github/workflows/deploy-docs.yml @@ -3,7 +3,7 @@ name: Deploy Docs on: push: branches: [main] - paths: ['docs/**', '.devcontainer/CHANGELOG.md'] + paths: 
['docs/**', 'container/.devcontainer/CHANGELOG.md'] workflow_dispatch: permissions: diff --git a/.github/workflows/docs-ci.yml b/.github/workflows/docs-ci.yml index eedb48c..ed50f71 100644 --- a/.github/workflows/docs-ci.yml +++ b/.github/workflows/docs-ci.yml @@ -2,7 +2,7 @@ name: Docs CI on: pull_request: - paths: ['docs/**', '.devcontainer/CHANGELOG.md'] + paths: ['docs/**', 'container/.devcontainer/CHANGELOG.md'] jobs: build: diff --git a/.github/workflows/publish-features.yml b/.github/workflows/publish-features.yml index b63eec5..694db62 100644 --- a/.github/workflows/publish-features.yml +++ b/.github/workflows/publish-features.yml @@ -4,7 +4,7 @@ on: workflow_dispatch: push: branches: [main] - paths: ['.devcontainer/features/**'] + paths: ['container/.devcontainer/features/**'] jobs: publish: @@ -19,7 +19,7 @@ jobs: uses: devcontainers/action@v1 with: publish-features: "true" - base-path-to-features: "./.devcontainer/features" + base-path-to-features: "./container/.devcontainer/features" generate-docs: "false" env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-cli.yml b/.github/workflows/release-cli.yml new file mode 100644 index 0000000..0d36c09 --- /dev/null +++ b/.github/workflows/release-cli.yml @@ -0,0 +1,71 @@ +name: Release CLI + +on: + push: + tags: ['cli-v*'] + +jobs: + validate: + runs-on: ubuntu-latest + outputs: + version: ${{ steps.extract.outputs.version }} + steps: + - uses: actions/checkout@v6 + - id: extract + name: Extract and validate version + run: | + TAG="${GITHUB_REF#refs/tags/cli-v}" + PKG=$(node -p "require('./cli/package.json').version") + echo "version=$TAG" >> "$GITHUB_OUTPUT" + if [ "$TAG" != "$PKG" ]; then + echo "::error::Tag cli-v${TAG} does not match cli/package.json version ${PKG}" + exit 1 + fi + + publish-and-release: + needs: validate + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - uses: actions/checkout@v6 + + - uses: oven-sh/setup-bun@v2 + + - name: Install 
dependencies + run: bun install + working-directory: cli + + - name: Run tests + run: bun test + working-directory: cli + + - name: Build + run: bun run build + working-directory: cli + + - uses: actions/setup-node@v6 + with: + node-version: 18 + registry-url: https://registry.npmjs.org + + - name: Publish to npm + run: npm publish + working-directory: cli + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + + - name: Extract changelog section + id: changelog + run: | + VERSION="${{ needs.validate.outputs.version }}" + NOTES=$(sed -n "/^## v${VERSION}/,/^## v/{ /^## v${VERSION}/d; /^## v/d; p; }" cli/CHANGELOG.md) + [ -z "$NOTES" ] && NOTES="CLI Release v${VERSION}" + echo "$NOTES" > /tmp/release-notes.md + + - name: Create GitHub Release + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + VERSION="cli-v${{ needs.validate.outputs.version }}" + gh release create "$VERSION" --title "$VERSION" --notes-file /tmp/release-notes.md diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2b376f3..d6e07cb 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,7 +16,7 @@ jobs: name: Extract and validate version run: | TAG="${GITHUB_REF#refs/tags/v}" - PKG=$(node -p "require('./package.json').version") + PKG=$(node -p "require('./container/package.json').version") echo "version=$TAG" >> "$GITHUB_OUTPUT" if [ "$TAG" != "$PKG" ]; then echo "::error::Tag v${TAG} does not match package.json version ${PKG}" @@ -38,9 +38,11 @@ jobs: - name: Run tests run: npm test + working-directory: container - name: Publish to npm run: npm publish + working-directory: container env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} @@ -48,7 +50,7 @@ jobs: id: changelog run: | VERSION="${{ needs.validate.outputs.version }}" - NOTES=$(sed -n "/^## \[v${VERSION}\]/,/^## \[v/{ /^## \[v${VERSION}\]/d; /^## \[v/d; p; }" .devcontainer/CHANGELOG.md) + NOTES=$(sed -n "/^## \[v${VERSION}\]/,/^## \[v/{ /^## \[v${VERSION}\]/d; /^## \[v/d; p; }" 
container/.devcontainer/CHANGELOG.md) if [ -z "$NOTES" ]; then NOTES="Release v${VERSION}" fi diff --git a/.gitignore b/.gitignore index 14980e1..dd19880 100644 --- a/.gitignore +++ b/.gitignore @@ -1,84 +1,65 @@ -# CodeForge Git Ignore - -# Environment files with API keys +# Shared +node_modules/ +dist/ .env -.env.local -.env.development.local -.env.test.local -.env.production.local - -# DevContainer build cache -.devcontainer/.dockercache/ - -# IDE files -.vscode/settings.json -*.swp -*.swo -*~ - -# Temporary files -*.tmp -*.temp +.env.* +!.env.example +!.env.*.example +*.log .DS_Store Thumbs.db - -# Logs -*.log -npm-debug.log* -yarn-debug.log* -yarn-error.log* - -# Runtime data -pids -*.pid -*.seed -*.pid.lock - -# Coverage directory used by tools like istanbul +__pycache__/ +*.pyc coverage/ *.lcov - -# nyc test coverage .nyc_output -logs -dev-debug.log -# Python bytecode -__pycache__/ -*.pyc +# Temporary files +*.tmp +*.temp +*.swp +*.swo +*~ -# Dependency directories -node_modules/ -# Environment variables -# Editor directories and files -.idea -.vscode +# IDE +.vscode/ +.idea/ *.suo *.ntvs* *.njsproj *.sln -*.sw? 
-# OS specific -# Claude Code directory (user-specific) -.claude/ +# Runtime +pids/ +*.pid +*.seed +*.pid.lock -# All hidden directories except devcontainer and codeforge -.* -!.devcontainer/ -!.devcontainer/**/.claude-plugin/ -!.codeforge/ -!.git/ -!.github/ -!.gitignore -!.gitattributes -!.npmignore +# Container-specific +container/.claude/ +container/.tmp/ +container/.codeforge/.checksums/ +container/.codeforge/.markers/ +container/.devcontainer/.env +container/.devcontainer/.secrets +container/.devcontainer/state/ +container/.devcontainer/config/backups/ +container/.devcontainer/.dockercache/ +container/.devcontainer/**/*.codeforge-new +container/.devcontainer/**/*.bak +container/.devcontainer/.codeforge-preserve -# .codeforge per-installation state (not tracked) -.codeforge/.checksums/ -.codeforge/.markers/ +# CLI-specific +cli/.pytest_cache/ +cli/.ruff_cache/ +.codeforge/data/ -# Docs -docs/node_modules/ -docs/dist/ +# Docs-specific docs/.astro/ + +# OS-generated +logs/ +dev-debug.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* diff --git a/CLAUDE.md b/CLAUDE.md index 8e7a882..02a2f69 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,8 +1,14 @@ -# CodeForge +# CodeForge Monorepo -DevContainer configuration project for AI-assisted development with Claude Code. +This repository contains three packages. Each package manages its own dependencies independently. -See `.devcontainer/CLAUDE.md` for full devcontainer documentation. +## Packages + +| Package | Runtime | Package Manager | +|---------|---------|-----------------| +| `container/` | Node.js | npm | +| `cli/` | Bun | bun | +| `docs/` | Node.js | npm | ## Development Rules @@ -13,25 +19,23 @@ See `.devcontainer/CLAUDE.md` for full devcontainer documentation. - Feature and fix branches should be created from `staging` and PRed back to `staging`. - PRs from `staging` to `main` are used for releases. 
-### Changelog +### Package-Specific Rules -Every change MUST have a corresponding entry in `.devcontainer/CHANGELOG.md`. +Each package has its own `CLAUDE.md` with package-specific development rules: -- New features, enhancements, fixes, and removals each get their own bullet -- Group related changes under domain headings (`###`) by area (e.g., `### Security`, `### Agent System`, `### Documentation`, `### Configuration`) -- If an unreleased version section doesn't exist, add changes to the current version's section -- Write entries from the user's perspective — what changed, not how it was implemented +- [`container/CLAUDE.md`](container/CLAUDE.md) — changelog, documentation, and configuration rules for the devcontainer package +- `cli/` — Bun/TypeScript CLI; run `bun test` for tests +- `docs/` — Astro/Starlight site; run `npm run build` to verify -### Documentation +### Cross-Package Changes -All user-facing changes MUST be reflected in documentation: +When a change spans multiple packages, make the changes in a single branch and PR. +Group related changes in the commit message by package. -- **Plugin changes** → update the plugin's `README.md` -- **Feature changes** → update `features/README.md` and the feature's `devcontainer-feature.json` if applicable -- **Config system changes** → update `.devcontainer/CLAUDE.md` -- **New config files in `.codeforge/config/`** → add entry to `.codeforge/file-manifest.json` -- **Docs site** → update relevant pages in `docs/` if the docs site exists +### Testing -### User Configuration +Run tests for each affected package before committing: -All user-customizable configuration files belong in `.codeforge/`. New config files go in `.codeforge/config/`, with a corresponding entry in `.codeforge/file-manifest.json`. 
+- **Container**: `cd container && npm test` +- **CLI**: `cd cli && bun test` +- **Docs**: `cd docs && npm run build` diff --git a/README.md b/README.md index 11bfeb1..5b7f244 100644 --- a/README.md +++ b/README.md @@ -1,146 +1,47 @@ -# CodeForge DevContainer +# CodeForge -[![License: GPL-3.0](https://img.shields.io/badge/License-GPL%203.0-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) -[![npm version](https://img.shields.io/npm/v/codeforge-dev.svg)](https://www.npmjs.com/package/codeforge-dev) -[![Changelog](https://img.shields.io/badge/changelog-view-blue)](.devcontainer/CHANGELOG.md) -[![GitHub last commit](https://img.shields.io/github/last-commit/AnExiledDev/CodeForge)](https://github.com/AnExiledDev/CodeForge/commits) -[![npm downloads](https://img.shields.io/npm/dm/codeforge-dev)](https://www.npmjs.com/package/codeforge-dev) -[![Node.js](https://img.shields.io/badge/node-%3E%3D14.0.0-brightgreen)](https://nodejs.org/) -[![GitHub issues](https://img.shields.io/github/issues/AnExiledDev/CodeForge)](https://github.com/AnExiledDev/CodeForge/issues) -[![CI](https://github.com/AnExiledDev/CodeForge/actions/workflows/ci.yml/badge.svg)](https://github.com/AnExiledDev/CodeForge/actions/workflows/ci.yml) +Monorepo for CodeForge — an AI-powered development environment for Claude Code. -A curated development environment optimized for AI-powered coding with Claude Code. CodeForge comes pre-configured with language servers, code intelligence tools, and official Anthropic plugins to streamline your development workflow. +## Packages -## Why CodeForge? 
+| Package | Description | Version | +|---------|-------------|---------| +| [`container/`](container/) | CodeForge DevContainer (`codeforge-dev` on npm) | 2.0.0 | +| [`cli/`](cli/) | CodeForge CLI (`codeforge-cli`) | 0.1.0 | +| [`docs/`](docs/) | Documentation site ([codeforge.core-directive.com](https://codeforge.core-directive.com)) | — | -Claude Code is powerful out of the box, but getting the most from it takes significant configuration — custom agents, safety plugins, code quality hooks, system prompts, and development tools that aren't obvious from the docs. CodeForge is a Claude Code power user's personal development environment, packaged so anyone can use it. - -Instead of spending hours discovering and configuring advanced features like built-in agent replacement, automated code quality pipelines, or spec-driven workflows, you get a production-tested setup in one command. It's opinionated by design — every default reflects real daily use, not theoretical best practices. - -## Installation - -Add CodeForge to any project: +## Quick Start ```bash +# Install the devcontainer into any project npx codeforge-dev ``` -This copies the `.devcontainer/` directory to your project. Then open in VS Code and select "Reopen in Container". +See [`container/README.md`](container/README.md) for full setup instructions, prerequisites, and usage. 
-### Options - -```bash -npx codeforge-dev --force # Smart update (preserves your customizations) -npx codeforge-dev -f # Short form -npx codeforge-dev --reset # Fresh install (wipes .devcontainer, keeps .codeforge) -``` +## Development -### Alternative Install Methods +Each package manages its own dependencies independently: ```bash -# Install globally -npm install -g codeforge-dev -codeforge-dev - -# Run specific version -npx codeforge-dev@1.2.3 -``` - -## Prerequisites - -- **Docker Desktop** (or compatible container runtime like Podman) -- **A DevContainer client** — any of: - - **VS Code** with the [Dev Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) - - **DevContainer CLI** — `npm install -g @devcontainers/cli` ([docs](https://containers.dev/supporting#devcontainer-cli)) - - **GitHub Codespaces** — zero local setup - - **JetBrains Gateway** with [Dev Containers plugin](https://plugins.jetbrains.com/plugin/21962-dev-containers) - - **DevPod** — open-source, editor-agnostic ([devpod.sh](https://devpod.sh/)) -- **Claude Code authentication** — run `claude` on first start to authenticate - -## What's Included - -### Languages & Runtimes - -Python 3.14, Node.js LTS, TypeScript, Rust, Bun, Go (optional) - -### Package Managers - -`uv`, `npm`, `bun`, `pip` / `pipx` - -### Development Tools - -`gh` (GitHub CLI), `docker`, `git`, `jq`, `curl`, `tmux`, `biome`, `ruff`, `ccms`, `agent-browser` - -### Code Intelligence - -tree-sitter (JS/TS/Python), ast-grep, Pyright, TypeScript LSP - -### Claude Code Tools - -`claude`, `cc` (wrapper), `ccw` (writing mode wrapper), `ccusage`, `ccburn`, `ccstatusline`, `claude-monitor` - -### Custom Features (21) - -tmux, agent-browser, claude-monitor, ccusage, ccburn, ccstatusline, ast-grep, tree-sitter, lsp-servers, biome, ruff, shfmt, shellcheck, hadolint, dprint, ccms, notify-hook, mcp-qdrant, chromaterm, kitty-terminfo, claude-session-dashboard - -### Agents (17) & Skills 
(35) - -The `agent-system` plugin includes 17 specialized agents (architect, explorer, test-writer, security-auditor, etc.). The `skill-engine` plugin provides 22 general coding skills, `spec-workflow` adds 8 spec lifecycle skills, and `ticket-workflow` provides 4 ticket management skills. +# Container (npm) +cd container && npm test -## Architecture +# CLI (Bun) +cd cli && bun test -CodeForge operates in three layers, each building on the one below: - -``` -┌──────────────────────────────────────────────┐ -│ Claude Code │ -│ AI assistant, tool execution, Agent Teams │ -├──────────────────────────────────────────────┤ -│ CodeForge Layer │ -│ Plugins · Agents · Skills · Hooks · Rules │ -├──────────────────────────────────────────────┤ -│ DevContainer │ -│ Runtimes · CLI Tools · LSP Servers │ -└──────────────────────────────────────────────┘ +# Docs (npm) +cd docs && npm run build ``` -**DevContainer** — The foundation. A Python 3.14 container with Node.js, Rust, and Bun runtimes, plus 22 custom features that install development tools (ast-grep, tree-sitter, biome, ruff, and others). - -**CodeForge Layer** — The intelligence. 17 plugins register hooks that validate commands, inject context, and enforce safety. 21 agents provide specialized personas. 38 skills offer on-demand reference material. System prompts and rules shape behavior. - -**Claude Code** — The AI assistant, executing tools and coordinating work. CodeForge enhances it through configuration — replacing built-in subagents, adding safety guardrails, and wiring up quality checks that run automatically. - -For the full architecture breakdown — hook pipeline, agent routing, skill loading, and design principles — see the [Architecture Reference](https://codeforge.core-directive.com/reference/architecture/). - -## Configuration - -All configuration lives in `.devcontainer/` and deploys automatically on container start. Key files: +See [`CLAUDE.md`](CLAUDE.md) for branching strategy and development rules. 
-| File | What It Configures | User-Modifiable? | -|------|--------------------|------------------| -| `config/defaults/settings.json` | Model, plugins, permissions, environment variables | Yes | -| `config/defaults/main-system-prompt.md` | Claude's behavioral guidelines and directives | Yes | -| `config/defaults/keybindings.json` | Keyboard shortcuts | Yes | -| `config/defaults/ccstatusline-settings.json` | Terminal status bar widgets and layout | Yes | -| `config/file-manifest.json` | Which config files deploy and how they update | Yes | -| `devcontainer.json` | Container image, features, runtimes, ports | Yes | -| `.env` | Setup phase toggles (auth, plugins, aliases, etc.) | Yes | +## Links -Config files use SHA-256 change detection — your edits persist across container rebuilds unless the source changes. Set a file's overwrite mode to `"never"` in `file-manifest.json` to permanently preserve your customizations. - -For the complete configuration guide, see the [documentation site](https://codeforge.core-directive.com/customization/configuration/). - -## Quick Start - -1. **Install**: `npx codeforge-dev` -2. **Open in Container**: - - **VS Code**: "Reopen in Container" from the Command Palette - - **CLI**: `devcontainer up --workspace-folder .` then `docker exec -it zsh` - - **Codespaces**: Create a Codespace from the repo -3. **Authenticate**: Run `claude` and follow prompts -4. **Start coding**: Run `cc` - -CodeForge uses the open [Dev Containers specification](https://containers.dev/) — any compatible client works. For full usage documentation — authentication, configuration, tools, agents, and keybindings — see [`.devcontainer/README.md`](.devcontainer/README.md). +- [Documentation](https://codeforge.core-directive.com) +- [npm package](https://www.npmjs.com/package/codeforge-dev) +- [GitHub](https://github.com/AnExiledDev/CodeForge) +- [Changelog](container/.devcontainer/CHANGELOG.md) ## Contributing @@ -157,32 +58,3 @@ without GPL-3.0 obligations. 
Contact [696222+AnExiledDev@users.noreply.github.com](mailto:696222+AnExiledDev@users.noreply.github.com) or [open a GitHub issue](https://github.com/AnExiledDev/CodeForge/issues/new) for terms. - -## Development - -### Testing Locally - -```bash -git clone https://github.com/AnExiledDev/CodeForge.git -cd CodeForge -npm test -``` - -### Publishing - -```bash -# Bump version in package.json, then: -npm publish -``` - -## Changelog - -See [CHANGELOG.md](.devcontainer/CHANGELOG.md) for release history. Current version: **2.0.0**. - -## Further Reading - -- [Full Usage Guide](.devcontainer/README.md) -- [Changelog](.devcontainer/CHANGELOG.md) -- [Claude Code Documentation](https://docs.anthropic.com/en/docs/claude-code) -- [Dev Containers Specification](https://containers.dev/) -- [GitHub CLI Manual](https://cli.github.com/manual/) diff --git a/cli/CHANGELOG.md b/cli/CHANGELOG.md new file mode 100644 index 0000000..0638d91 --- /dev/null +++ b/cli/CHANGELOG.md @@ -0,0 +1,20 @@ +# CodeForge CLI Changelog + +## v0.1.0 — 2026-03-14 (Experimental) + +Initial release. Ships with CodeForge v2.1.1. 
+ +### Command Groups + +- **`codeforge session`** — search, list, and show Claude Code session history +- **`codeforge task`** — search tasks +- **`codeforge plan`** — search plans +- **`codeforge plugin`** — manage plugins (list, show, enable, disable, hooks, agents, skills) +- **`codeforge config`** — show and apply configuration (`apply` deploys config to `~/.claude/`) +- **`codeforge index`** — build and search a codebase symbol index (build, search, show, stats, tree, clean) +- **`codeforge container`** — manage CodeForge devcontainers (up, down, rebuild, exec, ls, shell) + +### Features + +- Container proxy — commands auto-proxy into the running devcontainer when run from the host; use `--local` to bypass +- `--container ` flag to target a specific container diff --git a/cli/bun.lock b/cli/bun.lock new file mode 100644 index 0000000..7884482 --- /dev/null +++ b/cli/bun.lock @@ -0,0 +1,33 @@ +{ + "lockfileVersion": 1, + "configVersion": 1, + "workspaces": { + "": { + "name": "codeforge-cli", + "dependencies": { + "chalk": "^5.4.0", + "commander": "^13.0.0", + }, + "devDependencies": { + "@types/bun": "^1.3.10", + "@types/node": "^22.0.0", + "typescript": "^5.7.0", + }, + }, + }, + "packages": { + "@types/bun": ["@types/bun@1.3.10", "", { "dependencies": { "bun-types": "1.3.10" } }, "sha512-0+rlrUrOrTSskibryHbvQkDOWRJwJZqZlxrUs1u4oOoTln8+WIXBPmAuCF35SWB2z4Zl3E84Nl/D0P7803nigQ=="], + + "@types/node": ["@types/node@22.19.13", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-akNQMv0wW5uyRpD2v2IEyRSZiR+BeGuoB6L310EgGObO44HSMNT8z1xzio28V8qOrgYaopIDNA18YgdXd+qTiw=="], + + "bun-types": ["bun-types@1.3.10", "", { "dependencies": { "@types/node": "*" } }, "sha512-tcpfCCl6XWo6nCVnpcVrxQ+9AYN1iqMIzgrSKYMB/fjLtV2eyAVEg7AxQJuCq/26R6HpKWykQXuSOq/21RYcbg=="], + + "chalk": ["chalk@5.6.2", "", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="], + + "commander": ["commander@13.1.0", "", {}, 
"sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw=="], + + "typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], + + "undici-types": ["undici-types@6.21.0", "", {}, "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="], + } +} diff --git a/cli/package.json b/cli/package.json new file mode 100644 index 0000000..b2628ca --- /dev/null +++ b/cli/package.json @@ -0,0 +1,56 @@ +{ + "name": "codeforge-dev-cli", + "version": "0.1.0", + "description": "CLI for CodeForge development workflows", + "keywords": [ + "codeforge", + "cli", + "code-review", + "developer-tools", + "devcontainer", + "claude" + ], + "type": "module", + "bin": { + "codeforge": "./dist/codeforge.js" + }, + "scripts": { + "build": "bun build src/index.ts --outfile dist/codeforge.js --target bun", + "dev": "bun run src/index.ts", + "test": "bun test", + "build:binary": "bun build --compile src/index.ts --outfile dist/codeforge", + "build:binary:linux": "bun build --compile src/index.ts --outfile dist/codeforge-linux-x64 --target bun-linux-x64", + "build:binary:darwin": "bun build --compile src/index.ts --outfile dist/codeforge-darwin-arm64 --target bun-darwin-arm64", + "build:binary:darwin-x64": "bun build --compile src/index.ts --outfile dist/codeforge-darwin-x64 --target bun-darwin-x64", + "prepublishOnly": "bun run build && bun test" + }, + "dependencies": { + "@devcontainers/cli": "^0.71.0", + "commander": "^13.0.0", + "chalk": "^5.4.0" + }, + "devDependencies": { + "@types/bun": "^1.3.10", + "@types/node": "^22.0.0", + "typescript": "^5.7.0" + }, + "engines": { + "node": ">=18.0.0" + }, + "license": "GPL-3.0", + "author": "AnExiledDev", + "repository": { + "type": "git", + "url": "git+https://github.com/AnExiledDev/CodeForge.git", + "directory": "cli" + }, + 
"homepage": "https://github.com/AnExiledDev/CodeForge/tree/main/cli#readme", + "files": [ + "dist/", + "prompts/", + "README.md" + ], + "bugs": { + "url": "https://github.com/AnExiledDev/CodeForge/issues" + } +} diff --git a/cli/src/commands/config/apply.ts b/cli/src/commands/config/apply.ts new file mode 100644 index 0000000..a85959c --- /dev/null +++ b/cli/src/commands/config/apply.ts @@ -0,0 +1,147 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { copyFileSync, existsSync, mkdirSync, readFileSync } from "fs"; +import { homedir } from "os"; +import { basename, dirname, resolve } from "path"; +import { loadFileManifest } from "../../loaders/config-loader.js"; + +interface ConfigApplyOptions { + dryRun?: boolean; + force?: boolean; + color?: boolean; +} + +function findWorkspaceRoot(): string | null { + let dir = process.cwd(); + + while (true) { + if (existsSync(resolve(dir, ".codeforge"))) { + return dir; + } + const parent = resolve(dir, ".."); + if (parent === dir) return null; + dir = parent; + } +} + +function expandVariables(path: string): string { + return path + .replace(/\$\{CLAUDE_CONFIG_DIR\}/g, resolve(homedir(), ".claude")) + .replace(/\$\{HOME\}/g, homedir()); +} + +function filesAreIdentical(a: string, b: string): boolean { + try { + const contentA = readFileSync(a); + const contentB = readFileSync(b); + return contentA.equals(contentB); + } catch { + return false; + } +} + +export function registerConfigApplyCommand(parent: Command): void { + parent + .command("apply") + .description("Deploy configuration files from workspace to system") + .option("--dry-run", "Show what would happen without writing files") + .option("--force", "Override overwrite strategy and deploy all files") + .option("--no-color", "Disable colored output") + .action(async (options: ConfigApplyOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + const workspaceRoot = findWorkspaceRoot(); + if (!workspaceRoot) { + 
console.error( + "Error: Could not find .codeforge/ directory in any parent", + ); + process.exit(1); + } + + const manifest = await loadFileManifest(workspaceRoot); + if (manifest.length === 0) { + console.log("No files in manifest."); + return; + } + + console.log( + options.dryRun + ? "Dry run — no files will be written:\n" + : "Deploying configuration files...\n", + ); + + let updated = 0; + let unchanged = 0; + let skipped = 0; + + for (const entry of manifest) { + if (entry.enabled === false) { + skipped++; + continue; + } + + const src = resolve(workspaceRoot, ".codeforge", entry.src); + const destDir = expandVariables(entry.dest); + const destFilename = entry.destFilename ?? basename(entry.src); + const dest = resolve(destDir, destFilename); + + const displayDest = dest + .replace(homedir(), "~") + .replace(/\/\//g, "/"); + + const destExists = existsSync(dest); + + if (!options.force) { + if (entry.overwrite === "never" && destExists) { + skipped++; + console.log( + ` ${chalk.yellow("\u2717")} ${entry.src} \u2192 ${displayDest} (skipped, never overwrite)`, + ); + continue; + } + + if ( + entry.overwrite === "if-changed" && + destExists && + filesAreIdentical(src, dest) + ) { + unchanged++; + console.log( + ` ${chalk.dim("\u25CB")} ${entry.src} \u2192 ${displayDest} (unchanged)`, + ); + continue; + } + } + + if (options.dryRun) { + updated++; + console.log( + ` ${chalk.green("\u2713")} ${entry.src} \u2192 ${displayDest} (would update)`, + ); + } else { + mkdirSync(dirname(dest), { recursive: true }); + copyFileSync(src, dest); + updated++; + console.log( + ` ${chalk.green("\u2713")} ${entry.src} \u2192 ${displayDest} (updated)`, + ); + } + } + + const total = updated + unchanged + skipped; + const parts: string[] = []; + if (updated > 0) parts.push(`${updated} updated`); + if (unchanged > 0) parts.push(`${unchanged} unchanged`); + if (skipped > 0) parts.push(`${skipped} skipped`); + + console.log(`\n${total} files processed (${parts.join(", ")})`); + } 
catch (err) { + const message = err instanceof Error ? err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/config/show.ts b/cli/src/commands/config/show.ts new file mode 100644 index 0000000..36eea6f --- /dev/null +++ b/cli/src/commands/config/show.ts @@ -0,0 +1,60 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadSettings } from "../../loaders/config-loader.js"; +import { findSettingsPaths } from "../../loaders/plugin-loader.js"; +import { + formatConfigShowJson, + formatConfigShowText, +} from "../../output/config-show.js"; + +interface ConfigShowOptions { + format: string; + color?: boolean; + source?: boolean; +} + +export function registerConfigShowCommand(parent: Command): void { + parent + .command("show") + .description("Show current Claude Code configuration") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("--source", "Show workspace source copy instead of deployed") + .action(async (options: ConfigShowOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + let settingsPath: string; + + if (options.source) { + const paths = findSettingsPaths(); + if (!paths.source) { + console.error("Error: Source settings.json not found"); + process.exit(1); + } + settingsPath = paths.source; + } else { + const paths = findSettingsPaths(); + settingsPath = paths.deployed; + } + + const settings = await loadSettings(settingsPath); + + if (options.format === "json") { + console.log(formatConfigShowJson(settings)); + } else { + console.log( + formatConfigShowText(settings, { + noColor: !options.color, + }), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/container/down.ts b/cli/src/commands/container/down.ts new file mode 100644 index 0000000..64da092 --- /dev/null +++ b/cli/src/commands/container/down.ts @@ -0,0 +1,31 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { isInsideContainer } from "../../utils/context.js"; +import { dockerStop, resolveContainer } from "../../utils/docker.js"; + +export function registerContainerDownCommand(parent: Command): void { + parent + .command("down [name]") + .description("Stop a running CodeForge devcontainer") + .action(async (name?: string) => { + if (isInsideContainer()) { + console.error( + "Already inside a container. This command runs on the host.", + ); + process.exit(1); + } + + try { + const container = await resolveContainer(name); + console.log( + `${chalk.blue("▶")} Stopping container ${container.name}...`, + ); + await dockerStop(container.id); + console.log(`${chalk.green("✓")} Stopped ${container.name}`); + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`${chalk.red("✗")} ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/container/exec.ts b/cli/src/commands/container/exec.ts new file mode 100644 index 0000000..ae8ab66 --- /dev/null +++ b/cli/src/commands/container/exec.ts @@ -0,0 +1,42 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { isInsideContainer } from "../../utils/context.js"; +import { dockerExec, resolveContainer } from "../../utils/docker.js"; + +export function registerContainerExecCommand(parent: Command): void { + parent + .command("exec [name]") + .description("Execute a command inside a running devcontainer") + .allowUnknownOption(true) + .allowExcessArguments(true) + .action( + async ( + name: string | undefined, + _options: unknown, + _command: Command, + ) => { + if (isInsideContainer()) { + console.error("Already inside a container."); + process.exit(1); + } + + const dashDashIndex = process.argv.indexOf("--"); + if (dashDashIndex === -1 || dashDashIndex === process.argv.length - 1) { + console.error( + "Usage: codeforge container exec [name] -- ", + ); + process.exit(1); + } + const cmd = process.argv.slice(dashDashIndex + 1); + + try { + const container = await resolveContainer(name); + await dockerExec(container.id, cmd); + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`${chalk.red("✗")} ${message}`); + process.exit(1); + } + }, + ); +} diff --git a/cli/src/commands/container/ls.ts b/cli/src/commands/container/ls.ts new file mode 100644 index 0000000..3cf4a09 --- /dev/null +++ b/cli/src/commands/container/ls.ts @@ -0,0 +1,51 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { basename } from "path"; +import { isDockerAvailable, listDevcontainers } from "../../utils/docker.js"; + +export function registerContainerLsCommand(parent: Command): void { + parent + .command("ls") + .description("List running CodeForge devcontainers") + .action(async () => { + if (!isDockerAvailable()) { + console.error( + "Docker is not available. Install Docker Desktop to manage containers.", + ); + process.exit(1); + } + + try { + const containers = await listDevcontainers(); + if (containers.length === 0) { + console.log("No running CodeForge devcontainers found."); + return; + } + + console.log( + chalk.bold("NAME".padEnd(25)) + + chalk.bold("STATUS".padEnd(15)) + + chalk.bold("WORKSPACE".padEnd(40)) + + chalk.bold("PORTS"), + ); + console.log("─".repeat(90)); + + for (const c of containers) { + const name = basename(c.workspacePath); + const statusColor = c.status.includes("Up") + ? chalk.green + : chalk.yellow; + console.log( + name.padEnd(25) + + statusColor(c.status.padEnd(15)) + + c.workspacePath.padEnd(40) + + (c.ports || "—"), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`${chalk.red("✗")} ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/container/rebuild.ts b/cli/src/commands/container/rebuild.ts new file mode 100644 index 0000000..fa24ccb --- /dev/null +++ b/cli/src/commands/container/rebuild.ts @@ -0,0 +1,44 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { isInsideContainer } from "../../utils/context.js"; +import { + devcontainerRebuild, + findWorkspacePath, +} from "../../utils/devcontainer.js"; + +export function registerContainerRebuildCommand(parent: Command): void { + parent + .command("rebuild [workspace-path]") + .description("Rebuild a CodeForge devcontainer") + .action(async (workspacePath?: string) => { + if (isInsideContainer()) { + console.error( + "Already inside a container. This command runs on the host.", + ); + process.exit(1); + } + + const resolved = workspacePath || findWorkspacePath(); + if (!resolved) { + console.error( + "Could not find a .devcontainer/devcontainer.json in the current directory tree.", + ); + console.error( + "Provide a workspace path: codeforge container rebuild ", + ); + process.exit(1); + } + + try { + console.log( + `${chalk.blue("▶")} Rebuilding devcontainer at ${resolved}...`, + ); + await devcontainerRebuild(resolved); + console.log(`${chalk.green("✓")} Devcontainer rebuilt`); + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`${chalk.red("✗")} Failed to rebuild: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/container/shell.ts b/cli/src/commands/container/shell.ts new file mode 100644 index 0000000..3111ac7 --- /dev/null +++ b/cli/src/commands/container/shell.ts @@ -0,0 +1,29 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { isInsideContainer } from "../../utils/context.js"; +import { dockerExec, resolveContainer } from "../../utils/docker.js"; + +export function registerContainerShellCommand(parent: Command): void { + parent + .command("shell [name]") + .description("Open an interactive shell in a running devcontainer") + .action(async (name?: string) => { + if (isInsideContainer()) { + console.error("Already inside a container."); + process.exit(1); + } + + try { + const container = await resolveContainer(name); + try { + await dockerExec(container.id, ["/bin/zsh"], { interactive: true }); + } catch { + await dockerExec(container.id, ["/bin/bash"], { interactive: true }); + } + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + console.error(`${chalk.red("✗")} ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/container/up.ts b/cli/src/commands/container/up.ts new file mode 100644 index 0000000..762825e --- /dev/null +++ b/cli/src/commands/container/up.ts @@ -0,0 +1,41 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { isInsideContainer } from "../../utils/context.js"; +import { devcontainerUp, findWorkspacePath } from "../../utils/devcontainer.js"; + +export function registerContainerUpCommand(parent: Command): void { + parent + .command("up [workspace-path]") + .description("Start a CodeForge devcontainer") + .action(async (workspacePath?: string) => { + if (isInsideContainer()) { + console.error( + "Already inside a container. 
This command runs on the host.", + ); + process.exit(1); + } + + const resolved = workspacePath || findWorkspacePath(); + if (!resolved) { + console.error( + "Could not find a .devcontainer/devcontainer.json in the current directory tree.", + ); + console.error( + "Provide a workspace path: codeforge container up ", + ); + process.exit(1); + } + + try { + console.log( + `${chalk.blue("▶")} Starting devcontainer at ${resolved}...`, + ); + await devcontainerUp(resolved); + console.log(`${chalk.green("✓")} Devcontainer started`); + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + console.error(`${chalk.red("✗")} Failed to start: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/index/build.ts b/cli/src/commands/index/build.ts new file mode 100644 index 0000000..161d7f8 --- /dev/null +++ b/cli/src/commands/index/build.ts @@ -0,0 +1,171 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { existsSync, mkdirSync } from "fs"; +import { relative, resolve } from "path"; +import { + closeDatabase, + deleteFileAndSymbols, + insertFiles, + insertSymbols, + openDatabase, + rebuildFts, + upsertFolders, +} from "../../indexer/db.js"; +import { checkSgInstalled, extractSymbols } from "../../indexer/extractor.js"; +import { extractFolderDocs } from "../../indexer/folders.js"; +import { + collectDirectories, + getLanguageForExtension, + hashFileContent, + scanDirectory, +} from "../../indexer/scanner.js"; +import { formatBuildJson } from "../../output/index-json.js"; +import { formatBuildSummary } from "../../output/index-text.js"; +import type { IndexedFile } from "../../schemas/index.js"; + +interface BuildCommandOptions { + format: string; + color?: boolean; +} + +function findWorkspaceRoot(): string | null { + let dir = process.cwd(); + while (true) { + if (existsSync(resolve(dir, ".codeforge"))) return dir; + const parent = resolve(dir, ".."); + if (parent === dir) return null; + dir = 
parent; + } +} + +export function registerIndexBuildCommand(parent: Command): void { + parent + .command("build") + .description("Build or incrementally update the codebase symbol index") + .argument("[path]", "Target directory (defaults to workspace root)") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .action(async (path: string | undefined, options: BuildCommandOptions) => { + try { + if (!options.color) chalk.level = 0; + + const start = Date.now(); + const workspaceRoot = findWorkspaceRoot(); + if (!workspaceRoot) { + console.error( + "Error: No .codeforge directory found. Are you in a CodeForge workspace?", + ); + process.exit(1); + } + + const targetPath = path ? resolve(process.cwd(), path) : workspaceRoot; + const dataDir = resolve(workspaceRoot, ".codeforge", "data"); + mkdirSync(dataDir, { recursive: true }); + + const dbPath = resolve(dataDir, "code-index.db"); + + console.error(chalk.dim("Checking ast-grep installation...")); + const sgInstalled = await checkSgInstalled(); + if (!sgInstalled) { + console.error( + "Error: ast-grep (sg) is not installed. Install it with: npm i -g @ast-grep/cli", + ); + process.exit(1); + } + + console.error(chalk.dim("Scanning files...")); + const db = openDatabase(dbPath); + const scanned = await scanDirectory(targetPath, db, workspaceRoot); + + const filesToProcess = [...scanned.newFiles, ...scanned.changedFiles]; + let totalSymbols = 0; + + if (filesToProcess.length > 0) { + // Group files by language + const byLang = new Map(); + for (const relPath of filesToProcess) { + const ext = "." + (relPath.split(".").pop() ?? ""); + const lang = getLanguageForExtension(ext); + if (lang) { + const group = byLang.get(lang) ?? 
[]; + group.push(relPath); + byLang.set(lang, group); + } + } + + // Delete old data for changed + deleted files + for (const file of [ + ...scanned.changedFiles, + ...scanned.deletedFiles, + ]) { + deleteFileAndSymbols(db, file); + } + + // Insert file records first (symbols have FK to files) + const fileRecords: IndexedFile[] = []; + for (const relPath of filesToProcess) { + const absPath = resolve(workspaceRoot, relPath); + const ext = "." + (relPath.split(".").pop() ?? ""); + const lang = getLanguageForExtension(ext) ?? "unknown"; + const hash = await hashFileContent(absPath); + const content = await Bun.file(absPath).text(); + const lineCount = content.split("\n").length; + const size = Buffer.byteLength(content, "utf-8"); + fileRecords.push({ + path: relPath, + hash, + size, + language: lang, + lineCount, + lastIndexed: new Date() + .toISOString() + .replace("T", " ") + .substring(0, 19), + }); + } + insertFiles(db, fileRecords); + + console.error(chalk.dim("Extracting symbols...")); + for (const [lang, relPaths] of byLang) { + const absPaths = relPaths.map((r) => resolve(workspaceRoot, r)); + const symbols = await extractSymbols(absPaths, lang); + if (symbols.length > 0) { + const remapped = symbols.map((s: (typeof symbols)[number]) => ({ + ...s, + filePath: relative(workspaceRoot, s.filePath), + })); + insertSymbols(db, remapped); + totalSymbols += symbols.length; + } + } + } else { + // Still handle deletions + for (const file of scanned.deletedFiles) { + deleteFileAndSymbols(db, file); + } + } + + console.error(chalk.dim("Updating folder index...")); + const directories = await collectDirectories(targetPath, workspaceRoot); + const folderDocs = await extractFolderDocs(directories, workspaceRoot); + upsertFolders(db, folderDocs); + + console.error(chalk.dim("Rebuilding search index...")); + rebuildFts(db); + closeDatabase(db); + + const durationMs = Date.now() - start; + const buildResult = { scanned, symbolCount: totalSymbols, durationMs }; + + if 
(options.format === "json") { + console.log(formatBuildJson(buildResult)); + } else { + console.log(formatBuildSummary(buildResult)); + } + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/index/clean.ts b/cli/src/commands/index/clean.ts new file mode 100644 index 0000000..b270f20 --- /dev/null +++ b/cli/src/commands/index/clean.ts @@ -0,0 +1,58 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { existsSync, unlinkSync } from "fs"; +import { resolve } from "path"; + +function findWorkspaceRoot(): string | null { + let dir = process.cwd(); + while (true) { + if (existsSync(resolve(dir, ".codeforge"))) return dir; + const parent = resolve(dir, ".."); + if (parent === dir) return null; + dir = parent; + } +} + +export function registerIndexCleanCommand(parent: Command): void { + parent + .command("clean") + .description("Remove the codebase index database") + .option("--no-color", "Disable colored output") + .action(async (options: { color?: boolean }) => { + try { + if (!options.color) chalk.level = 0; + + const workspaceRoot = findWorkspaceRoot(); + if (!workspaceRoot) { + console.error( + "Error: No .codeforge directory found. Are you in a CodeForge workspace?", + ); + process.exit(1); + } + + const dbPath = resolve( + workspaceRoot, + ".codeforge", + "data", + "code-index.db", + ); + if (!existsSync(dbPath)) { + console.log("No index database found."); + return; + } + + // Remove main DB and WAL/SHM files + unlinkSync(dbPath); + const walPath = dbPath + "-wal"; + const shmPath = dbPath + "-shm"; + if (existsSync(walPath)) unlinkSync(walPath); + if (existsSync(shmPath)) unlinkSync(shmPath); + + console.log("Index database removed."); + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/index/search.ts b/cli/src/commands/index/search.ts new file mode 100644 index 0000000..58436af --- /dev/null +++ b/cli/src/commands/index/search.ts @@ -0,0 +1,204 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { existsSync, mkdirSync } from "fs"; +import { relative, resolve } from "path"; +import { createInterface } from "readline"; +import { + closeDatabase, + deleteFileAndSymbols, + insertFiles, + insertSymbols, + openDatabase, + rebuildFts, + searchSymbols, + upsertFolders, +} from "../../indexer/db.js"; +import { checkSgInstalled, extractSymbols } from "../../indexer/extractor.js"; +import { extractFolderDocs } from "../../indexer/folders.js"; +import { + collectDirectories, + getLanguageForExtension, + hashFileContent, + scanDirectory, +} from "../../indexer/scanner.js"; +import { formatSearchJson } from "../../output/index-json.js"; +import { + formatBuildSummary, + formatSearchText, +} from "../../output/index-text.js"; +import type { + IndexedFile, + SearchHit, + SymbolKind, +} from "../../schemas/index.js"; + +interface SearchCommandOptions { + format: string; + color?: boolean; + limit: string; + kind?: string; +} + +function findWorkspaceRoot(): string | null { + let dir = process.cwd(); + while (true) { + if (existsSync(resolve(dir, ".codeforge"))) return dir; + const parent = resolve(dir, ".."); + if (parent === dir) return null; + dir = parent; + } +} + +async function autoBuild(workspaceRoot: string, dbPath: string): Promise { + const dataDir = resolve(workspaceRoot, ".codeforge", "data"); + mkdirSync(dataDir, { recursive: true }); + + const sgInstalled = await checkSgInstalled(); + if (!sgInstalled) { + console.error( + "Error: ast-grep (sg) is not installed. 
Install it with: npm i -g @ast-grep/cli", + ); + process.exit(1); + } + + console.error(chalk.dim("Building index...")); + const start = Date.now(); + const db = openDatabase(dbPath); + const scanned = await scanDirectory(workspaceRoot, db); + + const filesToProcess = [...scanned.newFiles, ...scanned.changedFiles]; + let totalSymbols = 0; + + for (const file of [...scanned.changedFiles, ...scanned.deletedFiles]) { + deleteFileAndSymbols(db, file); + } + + // Insert file records first (symbols have FK to files) + const fileRecords: IndexedFile[] = []; + for (const relPath of filesToProcess) { + const absPath = resolve(workspaceRoot, relPath); + const ext = "." + (relPath.split(".").pop() ?? ""); + const lang = getLanguageForExtension(ext) ?? "unknown"; + const hash = await hashFileContent(absPath); + const content = await Bun.file(absPath).text(); + const lineCount = content.split("\n").length; + const size = Buffer.byteLength(content, "utf-8"); + fileRecords.push({ + path: relPath, + hash, + size, + language: lang, + lineCount, + lastIndexed: new Date().toISOString().replace("T", " ").substring(0, 19), + }); + } + insertFiles(db, fileRecords); + + // Group by language and extract symbols + const byLang = new Map(); + for (const relPath of filesToProcess) { + const ext = "." + (relPath.split(".").pop() ?? ""); + const lang = getLanguageForExtension(ext); + if (lang) { + const group = byLang.get(lang) ?? 
[]; + group.push(relPath); + byLang.set(lang, group); + } + } + + for (const [lang, relPaths] of byLang) { + const absPaths = relPaths.map((r) => resolve(workspaceRoot, r)); + const symbols = await extractSymbols(absPaths, lang); + if (symbols.length > 0) { + const remapped = symbols.map((s: (typeof symbols)[number]) => ({ + ...s, + filePath: relative(workspaceRoot, s.filePath), + })); + insertSymbols(db, remapped); + totalSymbols += symbols.length; + } + } + + const directories = await collectDirectories(workspaceRoot); + const folderDocs = await extractFolderDocs(directories, workspaceRoot); + upsertFolders(db, folderDocs); + rebuildFts(db); + closeDatabase(db); + + const durationMs = Date.now() - start; + console.error( + formatBuildSummary({ scanned, symbolCount: totalSymbols, durationMs }), + ); +} + +export function registerIndexSearchCommand(parent: Command): void { + parent + .command("search") + .description("Search for symbols in the codebase index") + .argument("", "Search query (FTS5 syntax)") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("-n, --limit ", "Maximum number of results", "50") + .option("-k, --kind ", "Filter by symbol kind") + .action(async (query: string, options: SearchCommandOptions) => { + try { + if (!options.color) chalk.level = 0; + + const workspaceRoot = findWorkspaceRoot(); + if (!workspaceRoot) { + console.error( + "Error: No .codeforge directory found. Are you in a CodeForge workspace?", + ); + process.exit(1); + } + + const dbPath = resolve( + workspaceRoot, + ".codeforge", + "data", + "code-index.db", + ); + + if (!existsSync(dbPath)) { + const rl = createInterface({ + input: process.stdin, + output: process.stderr, + }); + const answer = await new Promise((resolve) => + rl.question("No index found. Build one now? 
(y/n) ", resolve), + ); + rl.close(); + + if (answer.toLowerCase() === "y") { + await autoBuild(workspaceRoot, dbPath); + } else { + console.error( + "Run `codeforge index build` to create an index first.", + ); + process.exit(0); + } + } + + const db = openDatabase(dbPath); + const limit = parseInt(options.limit, 10); + let hits: SearchHit[] = searchSymbols(db, query, limit); + + if (options.kind) { + const kind = options.kind as SymbolKind; + hits = hits.filter((h) => h.symbol.kind === kind); + } + + if (options.format === "json") { + console.log(formatSearchJson(hits)); + } else { + console.log(formatSearchText(hits, { noColor: !options.color })); + } + + closeDatabase(db); + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/index/show.ts b/cli/src/commands/index/show.ts new file mode 100644 index 0000000..4dcffd9 --- /dev/null +++ b/cli/src/commands/index/show.ts @@ -0,0 +1,82 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { existsSync } from "fs"; +import { relative, resolve } from "path"; +import { + closeDatabase, + getFileSymbols, + openDatabase, +} from "../../indexer/db.js"; +import { formatShowJson } from "../../output/index-json.js"; +import { formatShowText } from "../../output/index-text.js"; + +interface ShowCommandOptions { + format: string; + color?: boolean; +} + +function findWorkspaceRoot(): string | null { + let dir = process.cwd(); + while (true) { + if (existsSync(resolve(dir, ".codeforge"))) return dir; + const parent = resolve(dir, ".."); + if (parent === dir) return null; + dir = parent; + } +} + +export function registerIndexShowCommand(parent: Command): void { + parent + .command("show") + .description("Show all symbols in a specific file") + .argument("", "File path to inspect") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", 
"Disable colored output") + .action(async (file: string, options: ShowCommandOptions) => { + try { + if (!options.color) chalk.level = 0; + + const workspaceRoot = findWorkspaceRoot(); + if (!workspaceRoot) { + console.error( + "Error: No .codeforge directory found. Are you in a CodeForge workspace?", + ); + process.exit(1); + } + + const dbPath = resolve( + workspaceRoot, + ".codeforge", + "data", + "code-index.db", + ); + if (!existsSync(dbPath)) { + console.error("No index found. Run `codeforge index build` first."); + process.exit(1); + } + + // Resolve file path relative to workspace root + const absoluteFile = resolve(process.cwd(), file); + const relativePath = relative(workspaceRoot, absoluteFile); + + const db = openDatabase(dbPath); + const symbols = getFileSymbols(db, relativePath); + + if (symbols.length === 0) { + console.log(`No symbols found for ${relativePath}`); + } else if (options.format === "json") { + console.log(formatShowJson(relativePath, symbols)); + } else { + console.log( + formatShowText(relativePath, symbols, { noColor: !options.color }), + ); + } + + closeDatabase(db); + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/index/stats.ts b/cli/src/commands/index/stats.ts new file mode 100644 index 0000000..9fb338e --- /dev/null +++ b/cli/src/commands/index/stats.ts @@ -0,0 +1,69 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { existsSync } from "fs"; +import { resolve } from "path"; +import { closeDatabase, getStats, openDatabase } from "../../indexer/db.js"; +import { formatStatsJson } from "../../output/index-json.js"; +import { formatStatsText } from "../../output/index-text.js"; + +interface StatsCommandOptions { + format: string; + color?: boolean; +} + +function findWorkspaceRoot(): string | null { + let dir = process.cwd(); + while (true) { + if (existsSync(resolve(dir, ".codeforge"))) return dir; + const parent = resolve(dir, ".."); + if (parent === dir) return null; + dir = parent; + } +} + +export function registerIndexStatsCommand(parent: Command): void { + parent + .command("stats") + .description("Show codebase index statistics") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .action(async (options: StatsCommandOptions) => { + try { + if (!options.color) chalk.level = 0; + + const workspaceRoot = findWorkspaceRoot(); + if (!workspaceRoot) { + console.error( + "Error: No .codeforge directory found. Are you in a CodeForge workspace?", + ); + process.exit(1); + } + + const dbPath = resolve( + workspaceRoot, + ".codeforge", + "data", + "code-index.db", + ); + if (!existsSync(dbPath)) { + console.error("No index found. 
Run `codeforge index build` first."); + process.exit(1); + } + + const db = openDatabase(dbPath); + const stats = getStats(db, dbPath); + + if (options.format === "json") { + console.log(formatStatsJson(stats)); + } else { + console.log(formatStatsText(stats, { noColor: !options.color })); + } + + closeDatabase(db); + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/index/tree.ts b/cli/src/commands/index/tree.ts new file mode 100644 index 0000000..3c090fa --- /dev/null +++ b/cli/src/commands/index/tree.ts @@ -0,0 +1,160 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { existsSync } from "fs"; +import { resolve } from "path"; +import { + closeDatabase, + getAllFolders, + openDatabase, +} from "../../indexer/db.js"; +import { formatTreeJson } from "../../output/index-json.js"; +import { formatTreeText } from "../../output/index-text.js"; +import type { IndexedFolder, TreeEntry } from "../../schemas/index.js"; + +interface TreeCommandOptions { + format: string; + color?: boolean; + depth?: string; +} + +function findWorkspaceRoot(): string | null { + let dir = process.cwd(); + while (true) { + if (existsSync(resolve(dir, ".codeforge"))) return dir; + const parent = resolve(dir, ".."); + if (parent === dir) return null; + dir = parent; + } +} + +function buildTree( + folders: IndexedFolder[], + symbolCounts: Map, + pathFilter?: string, + maxDepth?: number, +): TreeEntry[] { + // Filter folders by path prefix if specified + let filtered = folders; + if (pathFilter) { + filtered = folders.filter( + (f) => f.path === pathFilter || f.path.startsWith(pathFilter + "/"), + ); + } + + // Build nested tree from flat folder list + const root: TreeEntry[] = []; + const nodeMap = new Map(); + + // Sort folders so parents come before children + const sorted = [...filtered].sort((a, b) => 
a.path.localeCompare(b.path)); + + for (const folder of sorted) { + const entry: TreeEntry = { + path: folder.path.split("/").pop() ?? folder.path, + type: "folder", + description: folder.description ?? undefined, + symbolCount: symbolCounts.get(folder.path) ?? 0, + children: [], + }; + + nodeMap.set(folder.path, entry); + + // Find parent + const parts = folder.path.split("/"); + if (parts.length > 1) { + const parentPath = parts.slice(0, -1).join("/"); + const parent = nodeMap.get(parentPath); + if (parent) { + parent.children!.push(entry); + continue; + } + } + + root.push(entry); + } + + // Apply depth limit + if (maxDepth !== undefined) { + pruneDepth(root, 0, maxDepth); + } + + return root; +} + +function pruneDepth(entries: TreeEntry[], current: number, max: number): void { + for (const entry of entries) { + if (current >= max) { + entry.children = undefined; + } else if (entry.children) { + pruneDepth(entry.children, current + 1, max); + } + } +} + +export function registerIndexTreeCommand(parent: Command): void { + parent + .command("tree") + .description("Show directory tree with symbol counts") + .argument("[path]", "Subtree path to display") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("-d, --depth ", "Maximum tree depth") + .action(async (path: string | undefined, options: TreeCommandOptions) => { + try { + if (!options.color) chalk.level = 0; + + const workspaceRoot = findWorkspaceRoot(); + if (!workspaceRoot) { + console.error( + "Error: No .codeforge directory found. Are you in a CodeForge workspace?", + ); + process.exit(1); + } + + const dbPath = resolve( + workspaceRoot, + ".codeforge", + "data", + "code-index.db", + ); + if (!existsSync(dbPath)) { + console.error("No index found. 
Run `codeforge index build` first."); + process.exit(1); + } + + const db = openDatabase(dbPath); + const folders = getAllFolders(db); + + // Count symbols per folder by prefix-matching file paths + const symbolCounts = new Map(); + for (const folder of folders) { + const prefix = folder.path.endsWith("/") + ? folder.path + : folder.path + "/"; + const rows = db + .prepare( + "SELECT COUNT(*) as cnt FROM symbols WHERE file_path LIKE ? || '%'", + ) + .get(prefix) as { cnt: number }; + symbolCounts.set(folder.path, rows.cnt); + } + + const maxDepth = options.depth + ? parseInt(options.depth, 10) + : undefined; + const tree = buildTree(folders, symbolCounts, path, maxDepth); + + if (options.format === "json") { + console.log(formatTreeJson(tree)); + } else { + console.log(formatTreeText(tree, { noColor: !options.color })); + } + + closeDatabase(db); + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plan/search.ts b/cli/src/commands/plan/search.ts new file mode 100644 index 0000000..d4df661 --- /dev/null +++ b/cli/src/commands/plan/search.ts @@ -0,0 +1,107 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadPlans } from "../../loaders/plan-loader.js"; +import { + formatPlanJson, + formatPlanText, + type PlanSearchResult, +} from "../../output/plan-text.js"; +import { evaluate, parse } from "../../search/query-parser.js"; + +interface PlanSearchOptions { + limit: string; + format: string; + color?: boolean; + fullText?: boolean; +} + +function extractContextLines(content: string, query: string): string[] { + const contentLines = content.split("\n"); + // Extract individual terms from the query (simple word extraction) + const terms = query + .replace(/\b(AND|OR|NOT)\b/gi, "") + .replace(/[()]/g, "") + .split(/\s+/) + .filter((t) => t.length > 0) + .map((t) => t.replace(/^["']|["']$/g, 
"").toLowerCase()); + + if (terms.length === 0) return []; + + const matchingIndices = new Set(); + + for (let i = 0; i < contentLines.length; i++) { + const lower = contentLines[i].toLowerCase(); + for (const term of terms) { + if (lower.includes(term)) { + matchingIndices.add(i); + break; + } + } + } + + // Add context lines (+/- 1 line) + const contextIndices = new Set(); + for (const idx of matchingIndices) { + if (idx > 0) contextIndices.add(idx - 1); + contextIndices.add(idx); + if (idx < contentLines.length - 1) contextIndices.add(idx + 1); + } + + // Sort and deduplicate, cap at 5 + const sorted = [...contextIndices].sort((a, b) => a - b).slice(0, 5); + return sorted.map((i) => contentLines[i]); +} + +export function registerPlanSearchCommand(parent: Command): void { + parent + .command("search") + .description("Search across plan files") + .argument("[query]", "Search query (supports AND, OR, NOT, quotes)") + .option("-n, --limit ", "Maximum number of results", "20") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("--full-text", "Disable content truncation") + .action(async (query: string | undefined, options: PlanSearchOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + const plans = await loadPlans(); + + let results: PlanSearchResult[]; + + if (query) { + const queryNode = parse(query); + results = []; + for (const plan of plans) { + if (evaluate(queryNode, plan.content)) { + const matchingLines = extractContextLines(plan.content, query); + results.push({ plan, matchingLines }); + } + } + } else { + results = plans.map((plan) => ({ plan })); + } + + // Apply limit + const limit = parseInt(options.limit, 10); + results = results.slice(0, limit); + + if (options.format === "json") { + console.log(formatPlanJson(results)); + } else { + console.log( + formatPlanText(results, { + noColor: !options.color, + fullText: options.fullText, + }), + ); + } + } catch (err) 
{ + const message = err instanceof Error ? err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/agents.ts b/cli/src/commands/plugin/agents.ts new file mode 100644 index 0000000..a25e02d --- /dev/null +++ b/cli/src/commands/plugin/agents.ts @@ -0,0 +1,52 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadInstalledPlugins } from "../../loaders/plugin-loader.js"; +import { + formatAgentsJson, + formatAgentsText, +} from "../../output/plugin-components.js"; + +interface PluginAgentsOptions { + format: string; + color?: boolean; + plugin?: string; +} + +export function registerPluginAgentsCommand(parent: Command): void { + parent + .command("agents") + .description("List agents from installed plugins") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("--plugin ", "Filter to a specific plugin") + .action(async (options: PluginAgentsOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + let plugins = await loadInstalledPlugins(); + + if (options.plugin) { + plugins = plugins.filter( + (p) => + p.name === options.plugin || p.qualifiedName === options.plugin, + ); + } + + if (options.format === "json") { + console.log(formatAgentsJson(plugins)); + } else { + console.log( + formatAgentsText(plugins, { + noColor: !options.color, + }), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/disable.ts b/cli/src/commands/plugin/disable.ts new file mode 100644 index 0000000..0b086ae --- /dev/null +++ b/cli/src/commands/plugin/disable.ts @@ -0,0 +1,52 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { + findSettingsPaths, + loadInstalledPlugins, +} from "../../loaders/plugin-loader.js"; +import { setPluginEnabled } from "../../loaders/settings-writer.js"; + +interface PluginDisableOptions { + color?: boolean; +} + +export function registerPluginDisableCommand(parent: Command): void { + parent + .command("disable ") + .description("Disable a plugin") + .option("--no-color", "Disable colored output") + .action(async (name: string, options: PluginDisableOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + const plugins = await loadInstalledPlugins(); + const plugin = plugins.find( + (p) => p.name === name || p.qualifiedName === name, + ); + + if (!plugin) { + console.error(`Plugin not found: ${name}`); + process.exit(1); + } + + const paths = findSettingsPaths(); + const result = await setPluginEnabled(plugin.qualifiedName, false); + + console.log(`${chalk.red("✓")} Disabled ${plugin.qualifiedName}`); + if (result.deployed) { + console.log(" Updated: ~/.claude/settings.json"); + } + if (result.source) { + console.log(` Updated: ${paths.source}`); + } else { + console.log(" Source settings.json not found — deployed copy only"); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/enable.ts b/cli/src/commands/plugin/enable.ts new file mode 100644 index 0000000..e8e41d9 --- /dev/null +++ b/cli/src/commands/plugin/enable.ts @@ -0,0 +1,52 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { + findSettingsPaths, + loadInstalledPlugins, +} from "../../loaders/plugin-loader.js"; +import { setPluginEnabled } from "../../loaders/settings-writer.js"; + +interface PluginEnableOptions { + color?: boolean; +} + +export function registerPluginEnableCommand(parent: Command): void { + parent + .command("enable ") + .description("Enable a plugin") + .option("--no-color", "Disable colored output") + .action(async (name: string, options: PluginEnableOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + const plugins = await loadInstalledPlugins(); + const plugin = plugins.find( + (p) => p.name === name || p.qualifiedName === name, + ); + + if (!plugin) { + console.error(`Plugin not found: ${name}`); + process.exit(1); + } + + const paths = findSettingsPaths(); + const result = await setPluginEnabled(plugin.qualifiedName, true); + + console.log(`${chalk.green("✓")} Enabled ${plugin.qualifiedName}`); + if (result.deployed) { + console.log(" Updated: ~/.claude/settings.json"); + } + if (result.source) { + console.log(` Updated: ${paths.source}`); + } else { + console.log(" Source settings.json not found — deployed copy only"); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/hooks.ts b/cli/src/commands/plugin/hooks.ts new file mode 100644 index 0000000..b1ec0d7 --- /dev/null +++ b/cli/src/commands/plugin/hooks.ts @@ -0,0 +1,52 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadInstalledPlugins } from "../../loaders/plugin-loader.js"; +import { + formatHooksJson, + formatHooksText, +} from "../../output/plugin-components.js"; + +interface PluginHooksOptions { + format: string; + color?: boolean; + plugin?: string; +} + +export function registerPluginHooksCommand(parent: Command): void { + parent + .command("hooks") + .description("List hooks from installed plugins") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("--plugin ", "Filter to a specific plugin") + .action(async (options: PluginHooksOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + let plugins = await loadInstalledPlugins(); + + if (options.plugin) { + plugins = plugins.filter( + (p) => + p.name === options.plugin || p.qualifiedName === options.plugin, + ); + } + + if (options.format === "json") { + console.log(formatHooksJson(plugins)); + } else { + console.log( + formatHooksText(plugins, { + noColor: !options.color, + }), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/list.ts b/cli/src/commands/plugin/list.ts new file mode 100644 index 0000000..1508968 --- /dev/null +++ b/cli/src/commands/plugin/list.ts @@ -0,0 +1,53 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadInstalledPlugins } from "../../loaders/plugin-loader.js"; +import { + formatPluginListJson, + formatPluginListText, +} from "../../output/plugin-list.js"; + +interface PluginListOptions { + format: string; + color?: boolean; + enabledOnly?: boolean; + disabledOnly?: boolean; +} + +export function registerPluginListCommand(parent: Command): void { + parent + .command("list") + .description("List installed plugins") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("--enabled-only", "Show only enabled plugins") + .option("--disabled-only", "Show only disabled plugins") + .action(async (options: PluginListOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + let plugins = await loadInstalledPlugins(); + + if (options.enabledOnly) { + plugins = plugins.filter((p) => p.enabled); + } else if (options.disabledOnly) { + plugins = plugins.filter((p) => !p.enabled); + } + + if (options.format === "json") { + console.log(formatPluginListJson(plugins)); + } else { + console.log( + formatPluginListText(plugins, { + noColor: !options.color, + }), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/show.ts b/cli/src/commands/plugin/show.ts new file mode 100644 index 0000000..e5a7556 --- /dev/null +++ b/cli/src/commands/plugin/show.ts @@ -0,0 +1,51 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadInstalledPlugins } from "../../loaders/plugin-loader.js"; +import { + formatPluginShowJson, + formatPluginShowText, +} from "../../output/plugin-show.js"; + +interface PluginShowOptions { + format: string; + color?: boolean; +} + +export function registerPluginShowCommand(parent: Command): void { + parent + .command("show ") + .description("Show detailed information about a plugin") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .action(async (name: string, options: PluginShowOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + const plugins = await loadInstalledPlugins(); + const plugin = plugins.find( + (p) => p.name === name || p.qualifiedName === name, + ); + + if (!plugin) { + console.error(`Plugin not found: ${name}`); + process.exit(1); + } + + if (options.format === "json") { + console.log(formatPluginShowJson(plugin)); + } else { + console.log( + formatPluginShowText(plugin, { + noColor: !options.color, + }), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/skills.ts b/cli/src/commands/plugin/skills.ts new file mode 100644 index 0000000..803a6d4 --- /dev/null +++ b/cli/src/commands/plugin/skills.ts @@ -0,0 +1,52 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadInstalledPlugins } from "../../loaders/plugin-loader.js"; +import { + formatSkillsJson, + formatSkillsText, +} from "../../output/plugin-components.js"; + +interface PluginSkillsOptions { + format: string; + color?: boolean; + plugin?: string; +} + +export function registerPluginSkillsCommand(parent: Command): void { + parent + .command("skills") + .description("List skills from installed plugins") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("--plugin ", "Filter to a specific plugin") + .action(async (options: PluginSkillsOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + let plugins = await loadInstalledPlugins(); + + if (options.plugin) { + plugins = plugins.filter( + (p) => + p.name === options.plugin || p.qualifiedName === options.plugin, + ); + } + + if (options.format === "json") { + console.log(formatSkillsJson(plugins)); + } else { + console.log( + formatSkillsText(plugins, { + noColor: !options.color, + }), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/session/list.ts b/cli/src/commands/session/list.ts new file mode 100644 index 0000000..1bdd8b1 --- /dev/null +++ b/cli/src/commands/session/list.ts @@ -0,0 +1,96 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { basename } from "path"; +import { loadHistory } from "../../loaders/history-loader.js"; +import { extractSessionMeta } from "../../loaders/session-meta.js"; +import { + formatSessionListJson, + formatSessionListText, + type SessionListEntry, +} from "../../output/session-list.js"; +import { discoverSessionFiles } from "../../utils/glob.js"; +import { parseRelativeTime, parseTime } from "../../utils/time.js"; + +interface ListCommandOptions { + project?: string; + since?: string; + after?: string; + before?: string; + limit: string; + format: string; + color?: boolean; +} + +export function registerListCommand(parent: Command): void { + parent + .command("list") + .description("List previous Claude Code sessions") + .option("--project ", "Project directory filter") + .option("--since