Implements PROMPT.md / docs/REVIEW_PIPELINE.md Phase 2:
- internal/llm/ollama.go — real Ollama provider:
- HealthCheck probes /api/tags + a 1-token completion + a JSON-mode
probe ({"ok": true} round-trip), populating the model-doctor.json
schema documented in docs/LOCAL_MODEL_SETUP.md
- Complete + CompleteJSON via /api/chat with stream=false
- think=false set for ALL completions (qwen3.5:latest is reasoning-
capable but the inner-loop hot path wants direct answers, not
reasoning traces consuming the token budget — same finding as
the Lakehouse-Go chatd 2026-04-30 wave); request shape sketched
below this list
- internal/llm/review.go — Reviewer wrapper:
- 2-attempt flow: prompt → parse → repair-prompt → parse (sketched
below the verification notes)
- Strict JSON shape enforced; markdown fences stripped before parse
- Severity normalized to enum; out-of-range confidence clamped
- Per-file chunking (file-level for v0; function-level Phase D+)
- Bounded by review-profile max_file_bytes + max_llm_chunk_chars
(sketched at the end of this message)
- pipeline.go — Phase 2 wired between static scan + report gen:
- --enable-llm flag opts in (off by default — static-only is
cheaper and faster)
- Raw output ALWAYS saved to llm-findings.raw.json (forensics)
- Normalized findings → llm-findings.normalized.json
- LLM findings merged into the report findings list (sourced
"llm" so consumers can filter)
- Receipts honestly mark phase status: "ok" | "degraded" | "skipped"
- cli model doctor — real probes replace the Phase A stub.
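
For reference, a sketch of the /api/chat request shape (field names
follow Ollama's documented API; the harness's actual struct in
internal/llm/ollama.go may differ):

    // Sketch, not the shipped type. stream=false returns a single JSON
    // body; think=false suppresses reasoning traces; format="json" is
    // how CompleteJSON (and the {"ok": true} health probe) requests
    // strict JSON output.
    type chatRequest struct {
        Model    string        `json:"model"`
        Messages []chatMessage `json:"messages"`
        Stream   bool          `json:"stream"`
        Think    bool          `json:"think"`
        Format   string        `json:"format,omitempty"`
    }

    type chatMessage struct {
        Role    string `json:"role"` // "system" | "user" | "assistant"
        Content string `json:"content"`
    }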
Verified:
- model doctor: status="ok" with qwen3.5:latest + qwen3:latest both
loaded, basic_prompt_ok=true, json_mode_ok=true
- insecure-repo with --enable-llm: 9 LLM findings; qwen3.5 correctly
flagged SQLi, RCE, hardcoded credentials as critical with verbatim
evidence; 27s wall for 3 chunks
- clean-repo with --enable-llm: 0 LLM findings, 4 parsed chunks, 2.8s
- self-review with --enable-llm: 77 LLM findings + 83 static; 3 of
~30 chunks needed retry (PROMPT.md, REPORT_SCHEMA.md,
SCRUM_TEST_TEMPLATE.md — all eventually parsed); 5min wall
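
The 2-attempt repair flow, sketched (helper names here are
hypothetical; the real ones live in internal/llm/review.go):

    // Sketch of the Reviewer retry loop, not the shipped code.
    func (r *Reviewer) reviewChunk(ctx context.Context, chunk string) ([]Finding, error) {
        raw, err := r.llm.CompleteJSON(ctx, reviewPrompt(chunk))
        if err != nil {
            return nil, err
        }
        findings, perr := parseFindings(stripFences(raw)) // fences stripped before parse
        if perr != nil {
            // Attempt 2: feed the bad output + parse error back as a repair prompt.
            if raw, err = r.llm.CompleteJSON(ctx, repairPrompt(raw, perr)); err != nil {
                return nil, err
            }
            if findings, perr = parseFindings(stripFences(raw)); perr != nil {
                return nil, perr // both attempts failed; chunk marked degraded
            }
        }
        for i := range findings {
            findings[i].Severity = normalizeSeverity(findings[i].Severity)  // snap to enum
            findings[i].Confidence = min(max(findings[i].Confidence, 0), 1) // clamp to [0,1]
        }
        return findings, nil
    }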
go vet + go test -short both clean. Fixture stray.go is now
`package fixture` so Go tooling doesn't choke on the orphan.
Phase D (validator cross-check) + Phase E (memory + diff/rules
subcommands) remain.
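
The chunking bound, sketched (assumption: the real splitter may cut
on line boundaries rather than raw byte offsets):

    // Sketch: oversized files are skipped outright; the rest are sliced
    // into chunks of at most max_llm_chunk_chars.
    func chunkFile(content []byte, maxFileBytes, maxChunkChars int) [][]byte {
        if len(content) > maxFileBytes {
            return nil // file skipped entirely
        }
        var chunks [][]byte
        for len(content) > maxChunkChars {
            chunks = append(chunks, content[:maxChunkChars])
            content = content[maxChunkChars:]
        }
        return append(chunks, content)
    }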
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
181 lines · 5.5 KiB · Go
// Package cli holds per-subcommand handlers. Each returns the process
// exit code (0=ok, 64=usage, 65=runtime error, 66=degraded — a
// degraded-mode run is NOT a hard failure but operators may want to
// gate CI on it).
package cli

import (
	"context"
	"encoding/json"
	"flag"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"local-review-harness/internal/config"
	"local-review-harness/internal/llm"
)

// commonFlags wires the flags every subcommand accepts.
type commonFlags struct {
	reviewProfilePath string
	modelProfilePath  string
	outputDir         string
	enableLLM         bool
}

func bindCommonFlags(fs *flag.FlagSet, cf *commonFlags) {
	fs.StringVar(&cf.reviewProfilePath, "review-profile", "", "review profile YAML (defaults applied if empty)")
	fs.StringVar(&cf.modelProfilePath, "model-profile", "", "model profile YAML (defaults applied if empty)")
	fs.StringVar(&cf.outputDir, "output-dir", "", "override review profile output dir")
	fs.BoolVar(&cf.enableLLM, "enable-llm", false, "Phase C: also run local-Ollama LLM review (default off — static-only)")
}

// resolveOutputDir picks the output dir from flag > review profile >
// hardcoded fallback. Always relative to the target repo, NOT the
// harness's own cwd — operators pointing at a remote checkout want
// reports landing inside that checkout.
func resolveOutputDir(cf *commonFlags, rp config.ReviewProfile, repoPath string) string {
	dir := cf.outputDir
	if dir == "" {
		dir = rp.Reports.OutputDir
	}
	if dir == "" {
		dir = "reports/latest"
	}
	if filepath.IsAbs(dir) {
		return dir
	}
	return filepath.Join(repoPath, dir)
}

// writeJSON marshals v to path with indent, creating the dir.
func writeJSON(path string, v any) error {
	if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
		return err
	}
	bs, err := json.MarshalIndent(v, "", "  ")
	if err != nil {
		return err
	}
	bs = append(bs, '\n')
	return os.WriteFile(path, bs, 0o644)
}

// nowUTC returns ISO-8601 UTC for receipt timestamps.
func nowUTC() string { return time.Now().UTC().Format(time.RFC3339Nano) }

// Repo runs Phase 0 (intake) + Phase 1 (static scan) + Phase 4
// (report gen). Phase B implemented the analyzers + scanner,
// replacing the Phase A stub.
func Repo(args []string) int {
	fs := flag.NewFlagSet("repo", flag.ContinueOnError)
	var cf commonFlags
	bindCommonFlags(fs, &cf)
	if err := fs.Parse(args); err != nil {
		return 64
	}
	if fs.NArg() < 1 {
		fmt.Fprintln(os.Stderr, "repo: missing target path")
		return 64
	}
	repoPath := fs.Arg(0)
	return runRepo(context.Background(), repoPath, cf)
}

// Scrum runs the same pipeline as Repo but emits the full Scrum
// report bundle. In Phase B both subcommands share the pipeline;
// scrum just toggles the markdown report set on.
func Scrum(args []string) int {
	fs := flag.NewFlagSet("scrum", flag.ContinueOnError)
	var cf commonFlags
	bindCommonFlags(fs, &cf)
	if err := fs.Parse(args); err != nil {
		return 64
	}
	if fs.NArg() < 1 {
		fmt.Fprintln(os.Stderr, "scrum: missing target path")
		return 64
	}
	repoPath := fs.Arg(0)
	return runScrum(context.Background(), repoPath, cf)
}

// ModelDoctor probes the configured model provider and writes
// reports/latest/model-doctor.json (path overridable via flag or
// profile). Phase C wired the real Ollama HealthCheck call,
// replacing the Phase A stub that always reported degraded status.
func ModelDoctor(args []string) int {
	fs := flag.NewFlagSet("model doctor", flag.ContinueOnError)
	var cf commonFlags
	bindCommonFlags(fs, &cf)
	if err := fs.Parse(args); err != nil {
		return 64
	}

	rp, err := config.LoadReviewProfile(cf.reviewProfilePath)
	if err != nil {
		fmt.Fprintln(os.Stderr, "config:", err)
		return 65
	}
	mp, err := config.LoadModelProfile(cf.modelProfilePath)
	if err != nil {
		fmt.Fprintln(os.Stderr, "config:", err)
		return 65
	}

	// Output dir is local cwd for `model doctor` since it's not
	// repo-bound (no positional path argument).
	outDir := cf.outputDir
	if outDir == "" {
		outDir = rp.Reports.OutputDir
	}
	if outDir == "" {
		outDir = "reports/latest" // same fallback as resolveOutputDir
	}

	// Phase C: real Ollama probe. Provider's HealthCheck does the
	// actual work; we package the result into the shape REPORT_SCHEMA.md
	// documents. status="ok" iff server up + at least one named
	// model loaded + basic prompt + json mode all green.
	prov := llm.NewOllama(mp.BaseURL, time.Duration(mp.TimeoutSeconds)*time.Second)
	hctx, cancel := context.WithTimeout(context.Background(), time.Duration(mp.TimeoutSeconds)*time.Second)
	defer cancel()
	hs := prov.HealthCheck(hctx, mp.Model, mp.FallbackModel)

	status := "ok"
	if !hs.ServerAvailable {
		status = "failed"
	} else if !hs.BasicPromptOK || !hs.JSONModeOK || (!hs.PrimaryModelAvailable && !hs.FallbackModelAvailable) {
		status = "degraded"
	}

	doc := map[string]any{
		"provider":                 mp.Provider,
		"base_url":                 mp.BaseURL,
		"primary_model":            mp.Model,
		"fallback_model":           mp.FallbackModel,
		"server_available":         hs.ServerAvailable,
		"primary_model_available":  hs.PrimaryModelAvailable,
		"fallback_model_available": hs.FallbackModelAvailable,
		"basic_prompt_ok":          hs.BasicPromptOK,
		"json_mode_ok":             hs.JSONModeOK,
		"timeout_seconds":          mp.TimeoutSeconds,
		"status":                   status,
		"errors":                   hs.Errors,
		"generated_at":             nowUTC(),
	}
	out := filepath.Join(outDir, "model-doctor.json")
	if err := writeJSON(out, doc); err != nil {
		fmt.Fprintln(os.Stderr, "write:", err)
		return 65
	}
	fmt.Println(out)
	switch status {
	case "ok":
		return 0
	case "degraded":
		return 66
	default:
		return 65
	}
}