Implements PROMPT.md / docs/REVIEW_PIPELINE.md Phase 2:
- internal/llm/ollama.go — real Ollama provider:
- HealthCheck probes /api/tags + a 1-token completion + a JSON-mode
probe ({"ok": true} round-trip), populating the model-doctor.json
schema documented in docs/LOCAL_MODEL_SETUP.md
- Complete + CompleteJSON via /api/chat with stream=false
- think=false set for ALL completions (qwen3.5:latest is reasoning-
capable but the inner-loop hot path wants direct answers, not
reasoning traces consuming the token budget — same finding as
the Lakehouse-Go chatd 2026-04-30 wave)
- internal/llm/review.go — Reviewer wrapper:
- 2-attempt flow: prompt → parse → repair-prompt → parse
- Strict JSON shape enforced; markdown fences stripped before parse
- Severity normalized to enum; out-of-range confidence clamped
- Per-file chunking (file-level for v0; function-level Phase D+)
- Bounded by review-profile max_file_bytes + max_llm_chunk_chars
- pipeline.go — Phase 2 wired between static scan + report gen:
- --enable-llm flag opts in (off by default — static-only is
cheaper and faster)
- Raw output ALWAYS saved to llm-findings.raw.json (forensics)
- Normalized findings → llm-findings.normalized.json
- LLM findings merged into the report findings list (sourced
"llm" so consumers can filter)
- Receipts honestly mark phase status: "ok" | "degraded" | "skipped"
- cli model doctor — real probes replace the Phase A stub.
Verified:
- model doctor: status="ok" with qwen3.5:latest + qwen3:latest both
loaded, basic_prompt_ok=true, json_mode_ok=true
- insecure-repo with --enable-llm: 9 LLM findings; qwen3.5 correctly
flagged SQLi, RCE, hardcoded credentials as critical with verbatim
evidence; 27s wall for 3 chunks
- clean-repo with --enable-llm: 0 LLM findings, 4 parsed chunks, 2.8s
- self-review with --enable-llm: 77 LLM findings + 83 static; 3 of
~30 chunks needed retry (PROMPT.md, REPORT_SCHEMA.md,
SCRUM_TEST_TEMPLATE.md — all eventually parsed); 5min wall
go vet + go test -short clean. Fixture stray.go now `package fixture`
so go-tooling doesn't choke on the orphan.
Phase D (validator cross-check) + Phase E (memory + diff/rules
subcommands) remain.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
86 lines
2.1 KiB
Go
// runRepo + runScrum are Phase B entry points. Phase A leaves them
|
|
// as compilable stubs that produce the JSON shapes the gates expect
|
|
// but with zero analyzer findings — letting the pipeline structure
|
|
// be exercised end-to-end before analyzers land.
|
|
package cli
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"os"
|
|
"path/filepath"
|
|
|
|
"local-review-harness/internal/config"
|
|
"local-review-harness/internal/pipeline"
|
|
)
|
|
|
|
func runRepo(ctx context.Context, repoPath string, cf commonFlags) int {
|
|
if _, err := os.Stat(repoPath); err != nil {
|
|
fmt.Fprintln(os.Stderr, "repo: target path:", err)
|
|
return 65
|
|
}
|
|
rp, err := config.LoadReviewProfile(cf.reviewProfilePath)
|
|
if err != nil {
|
|
fmt.Fprintln(os.Stderr, "config:", err)
|
|
return 65
|
|
}
|
|
mp, err := config.LoadModelProfile(cf.modelProfilePath)
|
|
if err != nil {
|
|
fmt.Fprintln(os.Stderr, "config:", err)
|
|
return 65
|
|
}
|
|
|
|
outDir := resolveOutputDir(&cf, rp, repoPath)
|
|
res, err := pipeline.RunRepo(ctx, pipeline.Inputs{
|
|
RepoPath: repoPath,
|
|
ReviewProfile: rp,
|
|
ModelProfile: mp,
|
|
OutputDir: outDir,
|
|
EmitScrum: false,
|
|
EnableLLM: cf.enableLLM,
|
|
})
|
|
if err != nil {
|
|
fmt.Fprintln(os.Stderr, "pipeline:", err)
|
|
return 65
|
|
}
|
|
for _, f := range res.OutputFiles {
|
|
fmt.Println(filepath.Join(outDir, f))
|
|
}
|
|
return res.ExitCode
|
|
}
|
|
|
|
func runScrum(ctx context.Context, repoPath string, cf commonFlags) int {
|
|
if _, err := os.Stat(repoPath); err != nil {
|
|
fmt.Fprintln(os.Stderr, "scrum: target path:", err)
|
|
return 65
|
|
}
|
|
rp, err := config.LoadReviewProfile(cf.reviewProfilePath)
|
|
if err != nil {
|
|
fmt.Fprintln(os.Stderr, "config:", err)
|
|
return 65
|
|
}
|
|
mp, err := config.LoadModelProfile(cf.modelProfilePath)
|
|
if err != nil {
|
|
fmt.Fprintln(os.Stderr, "config:", err)
|
|
return 65
|
|
}
|
|
|
|
outDir := resolveOutputDir(&cf, rp, repoPath)
|
|
res, err := pipeline.RunRepo(ctx, pipeline.Inputs{
|
|
RepoPath: repoPath,
|
|
ReviewProfile: rp,
|
|
ModelProfile: mp,
|
|
OutputDir: outDir,
|
|
EmitScrum: true,
|
|
EnableLLM: cf.enableLLM,
|
|
})
|
|
if err != nil {
|
|
fmt.Fprintln(os.Stderr, "pipeline:", err)
|
|
return 65
|
|
}
|
|
for _, f := range res.OutputFiles {
|
|
fmt.Println(filepath.Join(outDir, f))
|
|
}
|
|
return res.ExitCode
|
|
}
|