First G1+ piece. Standalone vectord service with in-memory HNSW
indexes keyed by string IDs and optional opaque JSON metadata.
Wraps github.com/coder/hnsw v0.6.1 (pure Go, no cgo). New port
:3215 with /v1/vectors/* routed through gateway.
API:
POST /v1/vectors/index create
GET /v1/vectors/index list
GET /v1/vectors/index/{name} get info
DELETE /v1/vectors/index/{name}
POST /v1/vectors/index/{name}/add (batch)
POST /v1/vectors/index/{name}/search
Acceptance smoke 7/7 PASS — including recall=1 on inserted vector
w-042 (cosine distance 5.96e-8, float32 precision noise), 200-
vector batch round-trip, dim mismatch → 400, missing index → 404,
duplicate create → 409.
Two upstream library quirks worked around in the wrapper:
1. coder/hnsw.Add panics with "node not added" on re-adding an
existing key (length-invariant fires because internal
delete+re-add doesn't change Len). Pre-Delete fixes for n>1.
2. Delete of the LAST node leaves layers[0] non-empty but
entryless; next Add SIGSEGVs in Dims(). Workaround: when
re-adding to a 1-node graph, recreate the underlying graph
fresh via resetGraphLocked().
Cross-lineage scrum on shipped code:
- Opus 4.7 (opencode): 0 BLOCK + 4 WARN + 3 INFO
- Kimi K2-0905 (openrouter): 2 BLOCK + 2 WARN + 1 INFO
- Qwen3-coder (openrouter): "No BLOCKs" (4 tokens)
Fixed (4 real + 2 cleanup):
O-W1: Lookup returned the raw []float32 from coder/hnsw — caller
mutation would corrupt index. Now copies before return.
O-W3: NaN/Inf vectors poison HNSW (distance comparisons return
false for both < and >, breaking heap invariants). Zero-norm
under cosine produces NaN. Now validated at Add time.
K-B1: Re-adding with nil metadata silently cleared the existing
entry — JSON-omitted "metadata" field deserializes as nil,
making upsert non-idempotent. Now nil = "leave alone"; explicit
{} or Delete to clear.
O-W4: Batch Add with mid-batch failure left items 0..N-1
committed and item N rejected. Now pre-validates all IDs+dims
before any Add.
O-I1: jsonItoa hand-roll replaced with strconv.Itoa — no
measured allocation win.
O-I2: distanceFn re-resolved per Search → use stored i.g.Distance.
Dismissed (2 false positives):
K-B2 "MaxBytesReader applied after full read" — false, applied
BEFORE Decode in decodeJSON
K-W1 "Search distances under read lock might see invalidated
slices from concurrent Add" — false, RWMutex serializes
write-lock during Add against read-lock during Search
Deferred (3): HTTP server timeouts (consistent G0 punt),
Content-Type validation (internal service behind gateway), Lookup
dim assertion (in-memory state can't drift).
The K-B1 finding is worth pausing on: nil metadata on re-add is
the kind of API ergonomics bug only a code-reading reviewer
catches — smoke would never detect it because the smoke always
sends explicit metadata. Three lines changed in Add; the resulting
API matches what callers actually expect.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
268 lines
7.8 KiB
Go
// vectord is the vector-search service. In-memory HNSW indexes
// keyed by string IDs with optional opaque JSON metadata. Wraps
// github.com/coder/hnsw (pure Go, no cgo).
//
// G1 scope: in-memory only, single-process. Persistence to storaged
// + rehydrate-on-restart is the next piece. The HTTP surface is
// stable enough to build the staffing co-pilot's "find workers
// like X" path on top right now — the indexes just have to be
// rebuilt after a restart.
package main
|
|
|
|
import (
|
|
"encoding/json"
|
|
"errors"
|
|
"flag"
|
|
"log/slog"
|
|
"net/http"
|
|
"os"
|
|
"strconv"
|
|
"strings"
|
|
|
|
"github.com/go-chi/chi/v5"
|
|
|
|
"git.agentview.dev/profit/golangLAKEHOUSE/internal/shared"
|
|
"git.agentview.dev/profit/golangLAKEHOUSE/internal/vectord"
|
|
)
|
|
|
|
const (
	maxRequestBytes = 64 << 20 // 64 MiB cap for batched Add payloads
	defaultK        = 10       // search result count when k is omitted or <= 0
	maxK            = 1000     // hard ceiling on a requested k; larger values are clamped
)
|
|
|
|
func main() {
|
|
configPath := flag.String("config", "lakehouse.toml", "path to TOML config")
|
|
flag.Parse()
|
|
|
|
cfg, err := shared.LoadConfig(*configPath)
|
|
if err != nil {
|
|
slog.Error("config", "err", err)
|
|
os.Exit(1)
|
|
}
|
|
|
|
h := &handlers{reg: vectord.NewRegistry()}
|
|
|
|
if err := shared.Run("vectord", cfg.Vectord.Bind, h.register); err != nil {
|
|
slog.Error("server", "err", err)
|
|
os.Exit(1)
|
|
}
|
|
}
|
|
|
|
// handlers bundles the shared index registry behind the HTTP endpoints.
type handlers struct {
	reg *vectord.Registry // all in-memory HNSW indexes, keyed by index name
}
|
|
|
|
func (h *handlers) register(r chi.Router) {
|
|
r.Post("/vectors/index", h.handleCreate)
|
|
r.Get("/vectors/index", h.handleList)
|
|
r.Get("/vectors/index/{name}", h.handleGetIndex)
|
|
r.Delete("/vectors/index/{name}", h.handleDelete)
|
|
r.Post("/vectors/index/{name}/add", h.handleAdd)
|
|
r.Post("/vectors/index/{name}/search", h.handleSearch)
|
|
}
|
|
|
|
// createRequest mirrors the POST /vectors/index body.
type createRequest struct {
	Name      string `json:"name"`      // index name; must be unique in the registry
	Dimension int    `json:"dimension"` // required vector dimension for all items
	M         int    `json:"m,omitempty"`         // optional HNSW parameter; semantics defined by the index layer
	EfSearch  int    `json:"ef_search,omitempty"` // optional HNSW search-width parameter
	Distance  string `json:"distance,omitempty"`  // optional distance name; unknown values are rejected with 400
}
|
|
|
|
// indexInfo is the GET /vectors/index/{name} response shape, also
// returned by create so callers see the effective parameters.
type indexInfo struct {
	Params vectord.IndexParams `json:"params"` // effective index parameters
	Length int                 `json:"length"` // current number of stored vectors
}
|
|
|
|
// addRequest is the body for POST /vectors/index/{name}/add. Items
// are batched so callers can amortize HTTP overhead — the smoke
// inserts hundreds of vectors per request.
type addRequest struct {
	Items []addItem `json:"items"` // must be non-empty; validated before any mutation
}
|
|
|
|
// addItem is a single vector in an add batch.
type addItem struct {
	// ID is the caller-assigned key; re-adding an existing ID upserts.
	ID string `json:"id"`
	// Vector must match the index dimension exactly (checked per item).
	Vector []float32 `json:"vector"`
	// Metadata is stored opaquely. Per scrum K-B1: nil (field omitted)
	// leaves any existing metadata untouched on re-add; send explicit {}
	// or Delete to clear.
	Metadata json.RawMessage `json:"metadata,omitempty"`
}
|
|
|
|
// searchRequest is the body for POST /vectors/index/{name}/search.
type searchRequest struct {
	Vector []float32 `json:"vector"` // query vector; must match the index dimension
	K      int       `json:"k,omitempty"` // result count; defaults to defaultK, clamped to maxK
}
|
|
|
|
// searchResponse wraps the nearest-neighbor hits for a search call.
type searchResponse struct {
	Results []vectord.Result `json:"results"` // ordered nearest neighbors
}
|
|
|
|
func (h *handlers) handleCreate(w http.ResponseWriter, r *http.Request) {
|
|
var req createRequest
|
|
if !decodeJSON(w, r, &req) {
|
|
return
|
|
}
|
|
idx, err := h.reg.Create(vectord.IndexParams{
|
|
Name: req.Name,
|
|
Dimension: req.Dimension,
|
|
M: req.M,
|
|
EfSearch: req.EfSearch,
|
|
Distance: req.Distance,
|
|
})
|
|
if errors.Is(err, vectord.ErrIndexAlreadyExists) {
|
|
http.Error(w, err.Error(), http.StatusConflict)
|
|
return
|
|
}
|
|
if errors.Is(err, vectord.ErrInvalidParams) || errors.Is(err, vectord.ErrUnknownDistance) {
|
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
return
|
|
}
|
|
if err != nil {
|
|
slog.Error("create index", "name", req.Name, "err", err)
|
|
http.Error(w, "internal", http.StatusInternalServerError)
|
|
return
|
|
}
|
|
writeJSON(w, http.StatusCreated, indexInfo{Params: idx.Params(), Length: idx.Len()})
|
|
}
|
|
|
|
func (h *handlers) handleList(w http.ResponseWriter, _ *http.Request) {
|
|
names := h.reg.Names()
|
|
writeJSON(w, http.StatusOK, map[string]any{"names": names, "count": len(names)})
|
|
}
|
|
|
|
func (h *handlers) handleGetIndex(w http.ResponseWriter, r *http.Request) {
|
|
name := chi.URLParam(r, "name")
|
|
idx, err := h.reg.Get(name)
|
|
if errors.Is(err, vectord.ErrIndexNotFound) {
|
|
http.Error(w, "not found", http.StatusNotFound)
|
|
return
|
|
}
|
|
if err != nil {
|
|
http.Error(w, "internal", http.StatusInternalServerError)
|
|
return
|
|
}
|
|
writeJSON(w, http.StatusOK, indexInfo{Params: idx.Params(), Length: idx.Len()})
|
|
}
|
|
|
|
func (h *handlers) handleDelete(w http.ResponseWriter, r *http.Request) {
|
|
name := chi.URLParam(r, "name")
|
|
if err := h.reg.Delete(name); errors.Is(err, vectord.ErrIndexNotFound) {
|
|
http.Error(w, "not found", http.StatusNotFound)
|
|
return
|
|
} else if err != nil {
|
|
http.Error(w, "internal", http.StatusInternalServerError)
|
|
return
|
|
}
|
|
w.WriteHeader(http.StatusNoContent)
|
|
}
|
|
|
|
func (h *handlers) handleAdd(w http.ResponseWriter, r *http.Request) {
|
|
name := chi.URLParam(r, "name")
|
|
idx, err := h.reg.Get(name)
|
|
if errors.Is(err, vectord.ErrIndexNotFound) {
|
|
http.Error(w, "not found", http.StatusNotFound)
|
|
return
|
|
}
|
|
var req addRequest
|
|
if !decodeJSON(w, r, &req) {
|
|
return
|
|
}
|
|
if len(req.Items) == 0 {
|
|
http.Error(w, "items must be non-empty", http.StatusBadRequest)
|
|
return
|
|
}
|
|
// Per scrum O-W4 (Opus): pre-validate all items before any Add,
|
|
// so a bad item at position N doesn't leave items 0..N-1 already
|
|
// committed and item N rejected. Both checks (empty id, dim
|
|
// mismatch) are local; running them up-front is O(N) extra work
|
|
// that the success path already paid in idx.Add.
|
|
dim := idx.Params().Dimension
|
|
for j, it := range req.Items {
|
|
if it.ID == "" {
|
|
http.Error(w, "items["+strconv.Itoa(j)+"]: empty id", http.StatusBadRequest)
|
|
return
|
|
}
|
|
if len(it.Vector) != dim {
|
|
http.Error(w, "items["+strconv.Itoa(j)+"]: dim mismatch (index="+strconv.Itoa(dim)+", got="+strconv.Itoa(len(it.Vector))+")", http.StatusBadRequest)
|
|
return
|
|
}
|
|
}
|
|
for j, it := range req.Items {
|
|
if err := idx.Add(it.ID, it.Vector, it.Metadata); err != nil {
|
|
// Vector-validation errors (NaN/Inf, zero-norm under
|
|
// cosine) only surface here; pre-validation is intentional
|
|
// minimal scope (id + dim only).
|
|
if errors.Is(err, vectord.ErrDimensionMismatch) ||
|
|
strings.Contains(err.Error(), "non-finite") ||
|
|
strings.Contains(err.Error(), "zero-norm") {
|
|
http.Error(w, "items["+strconv.Itoa(j)+"]: "+err.Error(), http.StatusBadRequest)
|
|
return
|
|
}
|
|
slog.Error("add", "name", name, "id", it.ID, "err", err)
|
|
http.Error(w, "internal", http.StatusInternalServerError)
|
|
return
|
|
}
|
|
}
|
|
writeJSON(w, http.StatusOK, map[string]any{"added": len(req.Items), "length": idx.Len()})
|
|
}
|
|
|
|
func (h *handlers) handleSearch(w http.ResponseWriter, r *http.Request) {
|
|
name := chi.URLParam(r, "name")
|
|
idx, err := h.reg.Get(name)
|
|
if errors.Is(err, vectord.ErrIndexNotFound) {
|
|
http.Error(w, "not found", http.StatusNotFound)
|
|
return
|
|
}
|
|
var req searchRequest
|
|
if !decodeJSON(w, r, &req) {
|
|
return
|
|
}
|
|
k := req.K
|
|
if k <= 0 {
|
|
k = defaultK
|
|
}
|
|
if k > maxK {
|
|
k = maxK
|
|
}
|
|
hits, err := idx.Search(req.Vector, k)
|
|
if errors.Is(err, vectord.ErrDimensionMismatch) {
|
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
return
|
|
}
|
|
if err != nil {
|
|
slog.Error("search", "name", name, "err", err)
|
|
http.Error(w, "internal", http.StatusInternalServerError)
|
|
return
|
|
}
|
|
writeJSON(w, http.StatusOK, searchResponse{Results: hits})
|
|
}
|
|
|
|
// decodeJSON reads + decodes a JSON body with a body-size cap.
|
|
// Returns false (and writes the error response) on failure.
|
|
func decodeJSON(w http.ResponseWriter, r *http.Request, v any) bool {
|
|
defer r.Body.Close()
|
|
r.Body = http.MaxBytesReader(w, r.Body, maxRequestBytes)
|
|
if err := json.NewDecoder(r.Body).Decode(v); err != nil {
|
|
var maxErr *http.MaxBytesError
|
|
if errors.As(err, &maxErr) || strings.Contains(err.Error(), "http: request body too large") {
|
|
http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
|
|
return false
|
|
}
|
|
http.Error(w, "decode body: "+err.Error(), http.StatusBadRequest)
|
|
return false
|
|
}
|
|
return true
|
|
}
|
|
|
|
func writeJSON(w http.ResponseWriter, code int, v any) {
|
|
w.Header().Set("Content-Type", "application/json")
|
|
w.WriteHeader(code)
|
|
if err := json.NewEncoder(w).Encode(v); err != nil {
|
|
slog.Warn("write json", "err", err)
|
|
}
|
|
}
|
|
|