Lands the matrix indexer's first piece per docs/SPEC.md §3.4:
multi-corpus retrieve+merge with corpus attribution per result.
Future components (relevance filter, downgrade gate, learning-loop
integration) layer on top of this surface.
Architecture:
- internal/matrix/retrieve.go — Retriever takes (query, corpora,
k, per_corpus_k), parallel-fans across vectord indexes, merges
by distance ascending, preserves corpus origin per hit
- cmd/matrixd — HTTP service on :3217, fronts /v1/matrix/*
- gateway proxy + [matrixd] config + lakehouse.toml entry
- Either query_text (matrix calls embedd) or query_vector
(caller pre-embedded) — vector takes precedence if both set
Error policy: fail-loud on any corpus error. Silent partial returns
would lie about coverage, defeating the matrix's whole purpose.
Bubbles vectord errors as 502 (upstream), validation as 400.
Smoke (scripts/matrix_smoke.sh, 6 assertions PASS first try):
- /matrix/corpora lists indexes
- Multi-corpus search returns hits from BOTH corpora
- Top hit is the globally-closest across all corpora
(b-near beats a-near at distance 0.05 vs 0.1 — proves merge)
- Metadata round-trips through the merge
- Distances ascending in result list
- Negative paths: empty corpora → 400, missing corpus → 502,
no query → 400
12-smoke regression sweep all green (D1-D6, G1, G1P, G2,
storaged_cap, pathway, matrix).
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
126 lines
3.6 KiB
Go
126 lines
3.6 KiB
Go
// matrixd is the matrix indexer service. Wraps internal/matrix's
|
|
// Retriever with HTTP routes per docs/SPEC.md §3.4.
|
|
//
|
|
// Routes:
|
|
// POST /matrix/search — multi-corpus retrieve+merge
|
|
// GET /matrix/corpora — list known vectord indexes (proxy)
|
|
//
|
|
// matrixd talks to embedd (for query-text embedding) and vectord
|
|
// (for per-corpus search) via HTTP. Both URLs come from
|
|
// [matrixd] config; gateway sets them to its own upstream URLs so
|
|
// matrixd inherits the same provider topology.
|
|
package main
|
|
|
|
import (
|
|
"encoding/json"
|
|
"errors"
|
|
"flag"
|
|
"log/slog"
|
|
"net/http"
|
|
"os"
|
|
"strings"
|
|
|
|
"github.com/go-chi/chi/v5"
|
|
|
|
"git.agentview.dev/profit/golangLAKEHOUSE/internal/matrix"
|
|
"git.agentview.dev/profit/golangLAKEHOUSE/internal/shared"
|
|
)
|
|
|
|
// maxRequestBytes caps request bodies read by decodeJSON; larger
// payloads are rejected with 413 via http.MaxBytesReader.
const maxRequestBytes = 4 << 20 // 4 MiB cap on request bodies
|
|
|
|
func main() {
|
|
configPath := flag.String("config", "lakehouse.toml", "path to TOML config")
|
|
flag.Parse()
|
|
|
|
cfg, err := shared.LoadConfig(*configPath)
|
|
if err != nil {
|
|
slog.Error("config", "err", err)
|
|
os.Exit(1)
|
|
}
|
|
if cfg.Matrixd.EmbeddURL == "" || cfg.Matrixd.VectordURL == "" {
|
|
slog.Error("matrixd: embedd_url and vectord_url required in [matrixd]")
|
|
os.Exit(1)
|
|
}
|
|
|
|
retriever := matrix.New(cfg.Matrixd.EmbeddURL, cfg.Matrixd.VectordURL)
|
|
h := &handlers{r: retriever}
|
|
|
|
if err := shared.Run("matrixd", cfg.Matrixd.Bind, h.register, cfg.Auth); err != nil {
|
|
slog.Error("server", "err", err)
|
|
os.Exit(1)
|
|
}
|
|
}
|
|
|
|
// handlers bundles the matrixd HTTP endpoints around a single
// matrix.Retriever instance.
type handlers struct {
	r *matrix.Retriever // does embed + per-corpus search + merge
}
|
|
|
|
func (h *handlers) register(r chi.Router) {
|
|
r.Post("/matrix/search", h.handleSearch)
|
|
r.Get("/matrix/corpora", h.handleCorpora)
|
|
}
|
|
|
|
func (h *handlers) handleSearch(w http.ResponseWriter, r *http.Request) {
|
|
var req matrix.SearchRequest
|
|
if !decodeJSON(w, r, &req) {
|
|
return
|
|
}
|
|
resp, err := h.r.Search(r.Context(), req)
|
|
if err != nil {
|
|
writeMatrixError(w, err)
|
|
return
|
|
}
|
|
writeJSON(w, http.StatusOK, resp)
|
|
}
|
|
|
|
func (h *handlers) handleCorpora(w http.ResponseWriter, r *http.Request) {
|
|
names, err := h.r.Corpora(r.Context())
|
|
if err != nil {
|
|
slog.Error("matrix corpora", "err", err)
|
|
http.Error(w, "vectord unavailable", http.StatusBadGateway)
|
|
return
|
|
}
|
|
writeJSON(w, http.StatusOK, map[string]any{"corpora": names, "count": len(names)})
|
|
}
|
|
|
|
func decodeJSON(w http.ResponseWriter, r *http.Request, v any) bool {
|
|
defer r.Body.Close()
|
|
r.Body = http.MaxBytesReader(w, r.Body, maxRequestBytes)
|
|
if err := json.NewDecoder(r.Body).Decode(v); err != nil {
|
|
var maxErr *http.MaxBytesError
|
|
if errors.As(err, &maxErr) || strings.Contains(err.Error(), "http: request body too large") {
|
|
http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
|
|
return false
|
|
}
|
|
http.Error(w, "decode body: "+err.Error(), http.StatusBadRequest)
|
|
return false
|
|
}
|
|
return true
|
|
}
|
|
|
|
func writeJSON(w http.ResponseWriter, code int, v any) {
|
|
w.Header().Set("Content-Type", "application/json")
|
|
w.WriteHeader(code)
|
|
if err := json.NewEncoder(w).Encode(v); err != nil {
|
|
slog.Warn("matrix write json", "err", err)
|
|
}
|
|
}
|
|
|
|
// writeMatrixError maps internal/matrix sentinels to HTTP statuses.
|
|
// Corpus / embed failures bubble up as 502 (the upstream service is
|
|
// what's wrong); validation errors are 400.
|
|
func writeMatrixError(w http.ResponseWriter, err error) {
|
|
switch {
|
|
case errors.Is(err, matrix.ErrEmptyCorpora),
|
|
errors.Is(err, matrix.ErrEmptyQuery):
|
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
case errors.Is(err, matrix.ErrCorpus),
|
|
errors.Is(err, matrix.ErrEmbed):
|
|
slog.Warn("matrix upstream", "err", err)
|
|
http.Error(w, err.Error(), http.StatusBadGateway)
|
|
default:
|
|
slog.Error("matrix", "err", err)
|
|
http.Error(w, "internal", http.StatusInternalServerError)
|
|
}
|
|
}
|