Implements the auth posture from ADR-003 (commit 0d18ffa). Two independent layers — Bearer token (constant-time compare via crypto/subtle) and IP allowlist (CIDR set) — composed in shared.Run so every binary inherits the same gate without per-binary wiring. Together with the bind-gate from commit 6af0520, this mechanically closes audit risks R-001 + R-007: - non-loopback bind without auth.token = startup refuse - non-loopback bind WITH auth.token + override env = allowed - loopback bind = all gates open (G0 dev unchanged) internal/shared/auth.go (NEW) RequireAuth(cfg AuthConfig) returns chi-compatible middleware. Empty Token + empty AllowedIPs → pass-through (G0 dev mode). Token-only → 401 Bearer mismatch. AllowedIPs-only → 403 source IP not in CIDR set. Both → both gates apply. /health bypasses both layers (load-balancer / liveness probes shouldn't carry tokens). CIDR parsing pre-runs at boot; bare IP (no /N) treated as /32 (or /128 for IPv6). Invalid entries log warn and drop, fail-loud-but- not-fatal so a typo doesn't kill the binary. Token comparison: subtle.ConstantTimeCompare on the full "Bearer <token>" wire-format string. Length-mismatch returns 0 (per stdlib spec), so wrong-length tokens reject without timing leak. Pre-encoded comparison slice stored in the middleware closure — one allocation per request. Source-IP extraction prefers net.SplitHostPort fallback to RemoteAddr-as-is for httptest compatibility. X-Forwarded-For support is a follow-up when a trusted proxy fronts the gateway (config knob TBD per ADR-003 §"Future"). internal/shared/server.go Run signature: gained AuthConfig parameter (4th arg). /health stays mounted on the outer router (public). Registered routes go inside chi.Group with RequireAuth applied — empty config = transparent group. Added requireAuthOnNonLoopback startup check: non-loopback bind with empty Token = refuse to start (cites R-001 + R-007 by name). internal/shared/config.go AuthConfig type added with TOML tags. 
Fields: Token, AllowedIPs. Composed into Config under [auth]. cmd/<svc>/main.go × 7 (catalogd, embedd, gateway, ingestd, queryd, storaged, vectord, mcpd is unaffected — stdio doesn't bind a port) Each call site adds cfg.Auth as the 4th arg to shared.Run. No other changes — middleware applies via shared.Run uniformly. internal/shared/auth_test.go (12 test funcs) Empty config pass-through, missing-token 401, wrong-token 401, correct-token 200, raw-token-without-Bearer-prefix 401, /health always public, IP allowlist allow + reject, bare IP /32, both layers when both configured, invalid CIDR drop-with-warn, RemoteAddr shape extraction. The constant-time comparison is verified by inspection (comments in auth.go) plus the existence of the passthrough test (length-mismatch case). Verified: go test -count=1 ./internal/shared/ — all green (was 21, now 33 funcs) just verify — vet + test + 9 smokes 33s just proof contract — 53/0/1 unchanged Smokes + proof harness keep working without any token configuration: default Auth is empty struct → middleware is no-op → existing tests pass unchanged. To exercise the gate, operators set [auth].token in lakehouse.toml (or, per the "future" note in the ADR, via env var). Closes audit findings: R-001 HIGH — fully mechanically closed (was: partial via bind gate) R-007 MED — fully mechanically closed (was: design-only ADR-003) Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
178 lines
5.4 KiB
Go
178 lines
5.4 KiB
Go
// embedd is the embedding service. Turns text into vectors via a
|
|
// pluggable Provider (G2: Ollama at :11434). Vectors flow through
|
|
// the rest of the stack as float32 — see internal/embed for the
|
|
// boundary conversion. Default model is config-resolved; callers
|
|
// can override per request.
|
|
package main
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"errors"
|
|
"flag"
|
|
"log/slog"
|
|
"net/http"
|
|
"os"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/go-chi/chi/v5"
|
|
|
|
"git.agentview.dev/profit/golangLAKEHOUSE/internal/embed"
|
|
"git.agentview.dev/profit/golangLAKEHOUSE/internal/shared"
|
|
)
|
|
|
|
// Request-shaping limits for the /embed endpoint.
const (
	// maxRequestBytes caps the POST /embed body (enforced through
	// http.MaxBytesReader in handleEmbed); 4 MiB leaves headroom for
	// a plural texts payload without letting one client exhaust memory.
	maxRequestBytes = 4 << 20 // 4 MiB cap on /embed body — texts plural
	// batchDeadline bounds a whole /embed batch, not each text — see
	// the scrum-C1 note in handleEmbed for why a batch-level cap matters.
	batchDeadline = 60 * time.Second // upper bound on a single /embed batch
)
|
|
|
|
func main() {
|
|
configPath := flag.String("config", "lakehouse.toml", "path to TOML config")
|
|
flag.Parse()
|
|
|
|
cfg, err := shared.LoadConfig(*configPath)
|
|
if err != nil {
|
|
slog.Error("config", "err", err)
|
|
os.Exit(1)
|
|
}
|
|
if cfg.Embedd.ProviderURL == "" {
|
|
slog.Error("config", "err", "embedd.provider_url is required")
|
|
os.Exit(1)
|
|
}
|
|
|
|
// Wrap the upstream provider in an LRU cache so repeat queries
|
|
// (the staffing co-pilot replays many of the same texts) bypass
|
|
// the ~50ms Ollama round-trip. Cache size 0 = pass-through.
|
|
base := embed.NewOllama(cfg.Embedd.ProviderURL, cfg.Embedd.DefaultModel)
|
|
cached, err := embed.NewCachedProvider(base, cfg.Embedd.DefaultModel, cfg.Embedd.CacheSize)
|
|
if err != nil {
|
|
slog.Error("embed cache", "err", err)
|
|
os.Exit(1)
|
|
}
|
|
slog.Info("embed cache",
|
|
"size", cfg.Embedd.CacheSize,
|
|
"default_model", cfg.Embedd.DefaultModel,
|
|
"enabled", cfg.Embedd.CacheSize > 0)
|
|
h := &handlers{provider: cached, cache: cached}
|
|
|
|
if err := shared.Run("embedd", cfg.Embedd.Bind, h.register, cfg.Auth); err != nil {
|
|
slog.Error("server", "err", err)
|
|
os.Exit(1)
|
|
}
|
|
}
|
|
|
|
// handlers bundles the embedd HTTP endpoints with their embedding backend.
type handlers struct {
	// provider serves /embed; it is the cached wrapper instance when
	// caching is enabled (see main, which assigns the same value to
	// both fields).
	provider embed.Provider
	// cache is the same instance as provider when caching is enabled,
	// kept as a typed pointer so /embed/stats can expose hit-rate
	// without type-asserting through the Provider interface. nil when
	// CacheSize=0 (pass-through mode).
	cache *embed.CachedProvider
}
|
|
|
|
func (h *handlers) register(r chi.Router) {
|
|
r.Post("/embed", h.handleEmbed)
|
|
r.Get("/embed/stats", h.handleStats)
|
|
}
|
|
|
|
// handleStats reports cache hits/misses + hit rate + size. Operators
|
|
// use this to confirm the cache is doing its job (high hit rate) or
|
|
// is sized wrong (low hit rate + many misses on a workload that
|
|
// should have repeats).
|
|
func (h *handlers) handleStats(w http.ResponseWriter, _ *http.Request) {
|
|
w.Header().Set("Content-Type", "application/json")
|
|
if h.cache == nil {
|
|
_ = json.NewEncoder(w).Encode(map[string]any{"enabled": false})
|
|
return
|
|
}
|
|
hits, misses := h.cache.Stats()
|
|
_ = json.NewEncoder(w).Encode(map[string]any{
|
|
"enabled": true,
|
|
"hits": hits,
|
|
"misses": misses,
|
|
"hit_rate": h.cache.HitRate(),
|
|
"size": h.cache.Len(),
|
|
})
|
|
}
|
|
|
|
// embedRequest is the POST /embed body. Texts is the list to
// embed; Model is optional (empty → use server default).
type embedRequest struct {
	// Texts must contain non-empty strings: handleEmbed rejects any
	// "" entry with a 400 before contacting the provider.
	Texts []string `json:"texts"`
	// Model overrides the config-resolved default when non-empty.
	Model string `json:"model,omitempty"`
}
|
|
|
|
func (h *handlers) handleEmbed(w http.ResponseWriter, r *http.Request) {
|
|
defer r.Body.Close()
|
|
r.Body = http.MaxBytesReader(w, r.Body, maxRequestBytes)
|
|
var req embedRequest
|
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
|
var maxErr *http.MaxBytesError
|
|
if errors.As(err, &maxErr) || strings.Contains(err.Error(), "http: request body too large") {
|
|
http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
|
|
return
|
|
}
|
|
http.Error(w, "decode body: "+err.Error(), http.StatusBadRequest)
|
|
return
|
|
}
|
|
|
|
// Per scrum O-W3 (Opus): reject empty strings up front. Ollama's
|
|
// behavior on empty prompt is version-dependent (some return
|
|
// errors, some return zero vectors); rejecting at the boundary
|
|
// gives callers a deterministic 400 instead of 502.
|
|
for j, t := range req.Texts {
|
|
if t == "" {
|
|
http.Error(w, "texts["+itoa(j)+"]: empty string", http.StatusBadRequest)
|
|
return
|
|
}
|
|
}
|
|
|
|
// Per scrum C1 (Opus + Kimi convergent): per-text 60s timeout
|
|
// without a batch-level cap means a 100-text batch with one
|
|
// stuck call can pin the handler for ~6000s. Set a hard batch
|
|
// ceiling derived from the request ctx so a wedged Ollama
|
|
// surfaces as 504-ish (mapped to 502 by the upstream-error
|
|
// path below) rather than holding the connection forever.
|
|
ctx, cancel := context.WithTimeout(r.Context(), batchDeadline)
|
|
defer cancel()
|
|
|
|
res, err := h.provider.Embed(ctx, req.Texts, req.Model)
|
|
if errors.Is(err, embed.ErrEmptyTexts) {
|
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
return
|
|
}
|
|
if errors.Is(err, embed.ErrModelMismatch) {
|
|
http.Error(w, err.Error(), http.StatusBadGateway)
|
|
return
|
|
}
|
|
if err != nil {
|
|
// Upstream-shape errors (Ollama down, model missing,
|
|
// 5xx body) bubble up as 502 — distinguishes "your input
|
|
// was wrong" (400) from "the embedding backend was wrong" (502).
|
|
slog.Warn("embed", "err", err)
|
|
http.Error(w, "embed: "+err.Error(), http.StatusBadGateway)
|
|
return
|
|
}
|
|
w.Header().Set("Content-Type", "application/json")
|
|
if err := json.NewEncoder(w).Encode(res); err != nil {
|
|
slog.Warn("embed encode", "err", err)
|
|
}
|
|
}
|
|
|
|
// itoa is a tiny helper for error messages without pulling strconv
// in just for one call site. Unlike the earlier version (which
// silently returned "" for i < 0 because its digit loop only ran
// while i > 0), this handles the full int range, including negative
// values and math.MinInt.
func itoa(i int) string {
	if i == 0 {
		return "0"
	}
	// Accumulate in negative space so math.MinInt — whose magnitude
	// does not fit in an int — never overflows on negation.
	neg := i < 0
	n := i
	if !neg {
		n = -n
	}
	// 21 bytes covers a sign plus the 20 digits of min int64.
	var buf [21]byte
	pos := len(buf)
	for n < 0 {
		pos--
		// Go's % keeps the dividend's sign, so n%10 is in [-9, 0].
		buf[pos] = byte('0' - n%10)
		n /= 10
	}
	if neg {
		pos--
		buf[pos] = '-'
	}
	return string(buf[pos:])
}
|