Claude (review-harness setup) f3ee4722a8 Phase A + B (MVP) — local review harness
Implements the MVP cutline from the planning artifact:
- Phase A: skeleton + CLI dispatch + provider interface + stub model doctor
- Phase B: scanner + git probe + 12 static analyzers + reporters + pipeline
- Phase B fixtures: clean-repo, insecure-repo, degraded-repo

12 static analyzers per PROMPT.md "Suggested Static Checks For MVP":
hardcoded_paths, shell_execution, raw_sql_interpolation, broad_cors,
secret_patterns, large_files, todo_comments, missing_tests,
env_file_committed, unsafe_file_io, exposed_mutation_endpoint,
hardcoded_local_ip.

Acceptance gates passing:
- B1 (intake produces accurate counts) ✓
- B2 (insecure fixture fires ≥8 distinct check_ids — actually 11/12) ✓
- B3 (clean fixture produces 0 confirmed findings — no false positives) ✓
- B4 (scrum mode produces all 6 required markdown + JSON reports) ✓
- B5 (receipts.json marks degraded phases honestly) ✓
- F  (self-review on this repo runs without crashing) ✓ — exit 66 (degraded
  because the Phase C LLM review is hard-coded to be skipped)

Phases C (LLM review), D (validation cross-check), E (memory + diff +
rules subcommands) deferred per the cutline. The MVP delivers the
evidence-first path; LLM is purely additive.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-30 00:56:02 -05:00

84 lines
2.1 KiB
Go

// runRepo + runScrum are Phase B entry points. Phase A leaves them
// as compilable stubs that produce the JSON shapes the gates expect
// but with zero analyzer findings — letting the pipeline structure
// be exercised end-to-end before analyzers land.
package cli
import (
"context"
"fmt"
"os"
"path/filepath"
"local-review-harness/internal/config"
"local-review-harness/internal/pipeline"
)
// runRepo runs the review pipeline against repoPath without the scrum
// report set and prints the emitted report paths to stdout. It returns
// a process exit code: 65 for any setup or pipeline failure, otherwise
// the pipeline's own exit code (which may signal a degraded run).
func runRepo(ctx context.Context, repoPath string, cf commonFlags) int {
	return runPipeline(ctx, repoPath, cf, "repo", false)
}

// runScrum is identical to runRepo except that it asks the pipeline to
// also emit the scrum-mode reports.
func runScrum(ctx context.Context, repoPath string, cf commonFlags) int {
	return runPipeline(ctx, repoPath, cf, "scrum", true)
}

// runPipeline is the shared Phase B entry point behind runRepo and
// runScrum. label prefixes the target-path error message ("repo" or
// "scrum"); emitScrum is copied into pipeline.Inputs.EmitScrum. All
// failure paths print to stderr and return exit code 65, matching the
// original per-command behavior byte-for-byte.
func runPipeline(ctx context.Context, repoPath string, cf commonFlags, label string, emitScrum bool) int {
	// Fail fast if the target path does not exist or is unreadable.
	if _, err := os.Stat(repoPath); err != nil {
		fmt.Fprintln(os.Stderr, label+": target path:", err)
		return 65
	}
	rp, err := config.LoadReviewProfile(cf.reviewProfilePath)
	if err != nil {
		fmt.Fprintln(os.Stderr, "config:", err)
		return 65
	}
	mp, err := config.LoadModelProfile(cf.modelProfilePath)
	if err != nil {
		fmt.Fprintln(os.Stderr, "config:", err)
		return 65
	}
	outDir := resolveOutputDir(&cf, rp, repoPath)
	res, err := pipeline.RunRepo(ctx, pipeline.Inputs{
		RepoPath:      repoPath,
		ReviewProfile: rp,
		ModelProfile:  mp,
		OutputDir:     outDir,
		EmitScrum:     emitScrum,
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "pipeline:", err)
		return 65
	}
	// Echo each emitted report's full path so shell callers can collect
	// them from stdout.
	for _, f := range res.OutputFiles {
		fmt.Println(filepath.Join(outDir, f))
	}
	return res.ExitCode
}