root 9ee7fc5550 G2: embedd — text → vector via Ollama · 2 scrum fixes
Bridges the missing piece for the staffing co-pilot: text inputs to
vectord-shaped vectors. Standalone cmd/embedd on :3216 fronted by
gateway at /v1/embed. Pluggable embed.Provider interface (G2 ships
Ollama; OpenAI/Voyage swap in via the same interface in G3+).
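
A minimal sketch of that seam (the shipped interface lives in
internal/embed and isn't reproduced here; Embed's signature and the
Result name are inferred from the handler code below):

  // Provider is the pluggable embedding seam. model == "" means
  // "use the server default". Sketch only — not the shipped source.
  type Provider interface {
      Embed(ctx context.Context, texts []string, model string) (*Result, error)
  }

  // Result mirrors the wire format below: model, dimension, vectors.
  type Result struct {
      Model     string      `json:"model"`
      Dimension int         `json:"dimension"`
      Vectors   [][]float32 `json:"vectors"`
  }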

Wire format:
  POST /v1/embed {"texts":[...], "model":"..."}  // model optional
  → 200 {"model","dimension","vectors":[[...]]}
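
A caller sketch against that route (illustrative only; EmbedTexts
and embedResponse are names invented here, not shipped code):

  package embedclient

  import (
      "bytes"
      "context"
      "encoding/json"
      "errors"
      "net/http"
  )

  type embedResponse struct {
      Model     string      `json:"model"`
      Dimension int         `json:"dimension"`
      Vectors   [][]float32 `json:"vectors"`
  }

  // EmbedTexts POSTs to the gateway route above and decodes the reply.
  func EmbedTexts(ctx context.Context, base string, texts []string) (*embedResponse, error) {
      body, err := json.Marshal(map[string]any{"texts": texts})
      if err != nil {
          return nil, err
      }
      req, err := http.NewRequestWithContext(ctx, http.MethodPost,
          base+"/v1/embed", bytes.NewReader(body))
      if err != nil {
          return nil, err
      }
      req.Header.Set("Content-Type", "application/json")
      resp, err := http.DefaultClient.Do(req)
      if err != nil {
          return nil, err
      }
      defer resp.Body.Close()
      if resp.StatusCode != http.StatusOK {
          return nil, errors.New("embed: " + resp.Status)
      }
      var out embedResponse
      if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
          return nil, err
      }
      return &out, nil
  }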

Default model: nomic-embed-text (768-d). Ollama returns float64;
provider converts to float32 at the boundary so vectors flow through
vectord/HNSW without re-conversion.
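
The boundary step is a one-pass narrowing copy — roughly (sketch;
the shipped helper in internal/embed may differ):

  // toFloat32 narrows Ollama's float64 output to the float32 that
  // the vectord/HNSW path expects.
  func toFloat32(in []float64) []float32 {
      out := make([]float32, len(in))
      for i, v := range in {
          out[i] = float32(v)
      }
      return out
  }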

Acceptance smoke 5/5 PASS — including the architectural payoff:
end-to-end embed → vectord add → search by re-embedded text returns
recall=1 at distance 5.96e-8 (≈2^-24, float32 precision noise on
identical unit vectors). The staffing co-pilot pipeline (text → vector →
similarity search) is now functional end-to-end.
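
The 5.96e-8 figure matches 2^-24, the float32 half-ULP near 1.0 —
the expected noise floor here. To check the arithmetic:

  package main

  import (
      "fmt"
      "math"
  )

  func main() {
      fmt.Println(math.Pow(2, -24)) // 5.960464477539063e-08
  }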

All 9 smokes (D1-D6 + G1 + G1P + G2) PASS deterministically.

Cross-lineage scrum on shipped code:
  - Opus 4.7 (opencode):                    0 BLOCK + 4 WARN + 3 INFO
  - Kimi K2-0905 (openrouter):              0 BLOCK + 2 WARN + 1 INFO
  - Qwen3-coder (openrouter):               "No BLOCKs" (3 tokens)

Fixed (2 — 1 convergent + 1 single-reviewer):
  C1 (Opus + Kimi convergent WARN): per-text 60s timeout × N-text
    batch was up to N×60s with no batch-level cap. One stuck Ollama
    call would stall the whole handler indefinitely. Fix:
    context.WithTimeout(r.Context(), 60s) wraps the entire batch.
  O-W3 (Opus WARN): empty strings in texts went to Ollama unchecked,
    producing version-dependent garbage. Fix: reject "" with 400 at
    the handler boundary so callers get a deterministic answer
    instead of an upstream-conditional 502.

Deferred (4):
  - drainAndClose 64KiB cap (matches G0 pattern)
  - no concurrency limit on /embed (single-tenant G2)
  - missing Accept header (exotic-proxy concern)
  - MaxBytesError string-match redundancy (paranoia layer kept
    consistent across the codebase)

Zero false positives this round — Qwen returned 3 tokens "No BLOCKs"
and the other two reviewers' findings were all real.

Setup confirmed: Ollama 0.21.0 on :11434 with nomic-embed-text loaded.
The per-text /api/embeddings endpoint is used (forward-compatible
with 0.21+); the newer batch /api/embed endpoint (0.4+) can swap in
via the Provider interface.
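
If that swap happens, a batch provider satisfies the same seam. A
sketch (imports as in the caller sketch above; the "input" and
"embeddings" field names follow upstream Ollama docs and are an
assumption here, as is reusing the Result shape sketched earlier):

  // ollamaBatch is a hypothetical /api/embed-backed Provider.
  type ollamaBatch struct {
      baseURL      string
      defaultModel string
  }

  func (o *ollamaBatch) Embed(ctx context.Context, texts []string, model string) (*Result, error) {
      if model == "" {
          model = o.defaultModel
      }
      body, err := json.Marshal(map[string]any{"model": model, "input": texts})
      if err != nil {
          return nil, err
      }
      req, err := http.NewRequestWithContext(ctx, http.MethodPost,
          o.baseURL+"/api/embed", bytes.NewReader(body))
      if err != nil {
          return nil, err
      }
      req.Header.Set("Content-Type", "application/json")
      resp, err := http.DefaultClient.Do(req)
      if err != nil {
          return nil, err
      }
      defer resp.Body.Close()
      var out struct {
          Model      string      `json:"model"`
          Embeddings [][]float32 `json:"embeddings"`
      }
      if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
          return nil, err
      }
      dim := 0
      if len(out.Embeddings) > 0 {
          dim = len(out.Embeddings[0])
      }
      return &Result{Model: out.Model, Dimension: dim, Vectors: out.Embeddings}, nil
  }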

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-29 01:42:27 -05:00

141 lines · 3.9 KiB · Go

// embedd is the embedding service. Turns text into vectors via a
// pluggable Provider (G2: Ollama at :11434). Vectors flow through
// the rest of the stack as float32 — see internal/embed for the
// boundary conversion. Default model is config-resolved; callers
// can override per request.
package main

import (
	"context"
	"encoding/json"
	"errors"
	"flag"
	"log/slog"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/go-chi/chi/v5"

	"git.agentview.dev/profit/golangLAKEHOUSE/internal/embed"
	"git.agentview.dev/profit/golangLAKEHOUSE/internal/shared"
)

const (
	maxRequestBytes = 4 << 20          // 4 MiB cap on /embed body — texts plural
	batchDeadline   = 60 * time.Second // upper bound on a single /embed batch
)

func main() {
	configPath := flag.String("config", "lakehouse.toml", "path to TOML config")
	flag.Parse()

	cfg, err := shared.LoadConfig(*configPath)
	if err != nil {
		slog.Error("config", "err", err)
		os.Exit(1)
	}
	if cfg.Embedd.ProviderURL == "" {
		slog.Error("config", "err", "embedd.provider_url is required")
		os.Exit(1)
	}

	h := &handlers{
		provider: embed.NewOllama(cfg.Embedd.ProviderURL, cfg.Embedd.DefaultModel),
	}
	if err := shared.Run("embedd", cfg.Embedd.Bind, h.register); err != nil {
		slog.Error("server", "err", err)
		os.Exit(1)
	}
}

type handlers struct {
	provider embed.Provider
}

func (h *handlers) register(r chi.Router) {
	r.Post("/embed", h.handleEmbed)
}

// embedRequest is the POST /embed body. Texts is the list to
// embed; Model is optional (empty → use server default).
type embedRequest struct {
	Texts []string `json:"texts"`
	Model string   `json:"model,omitempty"`
}

func (h *handlers) handleEmbed(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	r.Body = http.MaxBytesReader(w, r.Body, maxRequestBytes)

	var req embedRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		var maxErr *http.MaxBytesError
		if errors.As(err, &maxErr) || strings.Contains(err.Error(), "http: request body too large") {
			http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
			return
		}
		http.Error(w, "decode body: "+err.Error(), http.StatusBadRequest)
		return
	}

	// Per scrum O-W3 (Opus): reject empty strings up front. Ollama's
	// behavior on empty prompt is version-dependent (some return
	// errors, some return zero vectors); rejecting at the boundary
	// gives callers a deterministic 400 instead of 502.
	for j, t := range req.Texts {
		if t == "" {
			http.Error(w, "texts["+itoa(j)+"]: empty string", http.StatusBadRequest)
			return
		}
	}

	// Per scrum C1 (Opus + Kimi convergent): per-text 60s timeout
	// without a batch-level cap means a 100-text batch with one
	// stuck call can pin the handler for ~6000s. Set a hard batch
	// ceiling derived from the request ctx so a wedged Ollama
	// surfaces as 504-ish (mapped to 502 by the upstream-error
	// path below) rather than holding the connection forever.
	ctx, cancel := context.WithTimeout(r.Context(), batchDeadline)
	defer cancel()

	res, err := h.provider.Embed(ctx, req.Texts, req.Model)
	if errors.Is(err, embed.ErrEmptyTexts) {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if errors.Is(err, embed.ErrModelMismatch) {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	if err != nil {
		// Upstream-shape errors (Ollama down, model missing,
		// 5xx body) bubble up as 502 — distinguishes "your input
		// was wrong" (400) from "the embedding backend was wrong" (502).
		slog.Warn("embed", "err", err)
		http.Error(w, "embed: "+err.Error(), http.StatusBadGateway)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(res); err != nil {
		slog.Warn("embed encode", "err", err)
	}
}

// itoa is a tiny helper for error messages without pulling strconv
// in just for one call site.
func itoa(i int) string {
	if i == 0 {
		return "0"
	}
	var buf [20]byte
	pos := len(buf)
	for i > 0 {
		pos--
		buf[pos] = byte('0' + i%10)
		i /= 10
	}
	return string(buf[pos:])
}