// embedd is the embedding service. Turns text into vectors via a // pluggable Provider (G2: Ollama at :11434). Vectors flow through // the rest of the stack as float32 — see internal/embed for the // boundary conversion. Default model is config-resolved; callers // can override per request. package main import ( "context" "encoding/json" "errors" "flag" "log/slog" "net/http" "os" "strings" "time" "github.com/go-chi/chi/v5" "git.agentview.dev/profit/golangLAKEHOUSE/internal/embed" "git.agentview.dev/profit/golangLAKEHOUSE/internal/shared" ) const ( maxRequestBytes = 4 << 20 // 4 MiB cap on /embed body — texts plural batchDeadline = 60 * time.Second // upper bound on a single /embed batch ) func main() { configPath := flag.String("config", "lakehouse.toml", "path to TOML config") flag.Parse() cfg, err := shared.LoadConfig(*configPath) if err != nil { slog.Error("config", "err", err) os.Exit(1) } if cfg.Embedd.ProviderURL == "" { slog.Error("config", "err", "embedd.provider_url is required") os.Exit(1) } h := &handlers{ provider: embed.NewOllama(cfg.Embedd.ProviderURL, cfg.Embedd.DefaultModel), } if err := shared.Run("embedd", cfg.Embedd.Bind, h.register); err != nil { slog.Error("server", "err", err) os.Exit(1) } } type handlers struct { provider embed.Provider } func (h *handlers) register(r chi.Router) { r.Post("/embed", h.handleEmbed) } // embedRequest is the POST /embed body. Texts is the list to // embed; Model is optional (empty → use server default). type embedRequest struct { Texts []string `json:"texts"` Model string `json:"model,omitempty"` } func (h *handlers) handleEmbed(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() r.Body = http.MaxBytesReader(w, r.Body, maxRequestBytes) var req embedRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { var maxErr *http.MaxBytesError if errors.As(err, &maxErr) || strings.Contains(err.Error(), "http: request body too large") { http.Error(w, "body too large", http.StatusRequestEntityTooLarge) return } http.Error(w, "decode body: "+err.Error(), http.StatusBadRequest) return } // Per scrum O-W3 (Opus): reject empty strings up front. Ollama's // behavior on empty prompt is version-dependent (some return // errors, some return zero vectors); rejecting at the boundary // gives callers a deterministic 400 instead of 502. for j, t := range req.Texts { if t == "" { http.Error(w, "texts["+itoa(j)+"]: empty string", http.StatusBadRequest) return } } // Per scrum C1 (Opus + Kimi convergent): per-text 60s timeout // without a batch-level cap means a 100-text batch with one // stuck call can pin the handler for ~6000s. Set a hard batch // ceiling derived from the request ctx so a wedged Ollama // surfaces as 504-ish (mapped to 502 by the upstream-error // path below) rather than holding the connection forever. ctx, cancel := context.WithTimeout(r.Context(), batchDeadline) defer cancel() res, err := h.provider.Embed(ctx, req.Texts, req.Model) if errors.Is(err, embed.ErrEmptyTexts) { http.Error(w, err.Error(), http.StatusBadRequest) return } if errors.Is(err, embed.ErrModelMismatch) { http.Error(w, err.Error(), http.StatusBadGateway) return } if err != nil { // Upstream-shape errors (Ollama down, model missing, // 5xx body) bubble up as 502 — distinguishes "your input // was wrong" (400) from "the embedding backend was wrong" (502). slog.Warn("embed", "err", err) http.Error(w, "embed: "+err.Error(), http.StatusBadGateway) return } w.Header().Set("Content-Type", "application/json") if err := json.NewEncoder(w).Encode(res); err != nil { slog.Warn("embed encode", "err", err) } } // itoa is a tiny helper for error messages without pulling strconv // in just for one call site. func itoa(i int) string { if i == 0 { return "0" } var buf [20]byte pos := len(buf) for i > 0 { pos-- buf[pos] = byte('0' + i%10) i /= 10 } return string(buf[pos:]) }