Two threads landing together — the doc edits interleave so they ship in a single commit.

1. **vectord substrate fix verified at original scale** (closes the 2026-05-01 thread). Re-ran multitier 5min @ conc=50: 132,211 scenarios at 438/sec, 6/6 classes at 0% failure (was 4/6 pre-fix). Throughput dropped 1,115 → 438/sec because previously-broken scenarios now do real HNSW Add work — the honest cost of correctness. The fix (i.vectors side-store + safeGraphAdd recover wrappers + smallIndexRebuildThreshold=32 + saveTask coalescing) holds at the footprint that originally surfaced the bug.

2. **Materializer port** — internal/materializer + cmd/materializer + scripts/materializer_smoke.sh. Ports scripts/distillation/transforms.ts (12 transforms) + build_evidence_index.ts (idempotency, day-partition, receipt). On-wire JSON shape matches TS, so Bun and Go runs are interchangeable. 14 tests green.

3. **Replay port** — internal/replay + cmd/replay + scripts/replay_smoke.sh. Ports scripts/distillation/replay.ts (retrieve → bundle → /v1/chat → validate → log). Closes audit-FULL phase 7 live invocation on the Go side. Both runtimes append to the same data/_kb/replay_runs.jsonl (schema=replay_run.v1). 14 tests green.

Side effect on internal/distillation/types.go: EvidenceRecord gained prompt_tokens, completion_tokens, and metadata fields to mirror the TS shape the materializer transforms produce.

STATE_OF_PLAY refreshed to 2026-05-02; the ARCHITECTURE_COMPARISON decisions tracker moves the materializer + replay items from _open_ to DONE and adds the substrate-fix scale-verification row.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
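
A minimal sketch of the EvidenceRecord additions (field names are from this commit; the Go types and exact tag spellings are assumptions, internal/distillation/types.go is authoritative):

    // Hypothetical shape; existing fields elided.
    type EvidenceRecord struct {
        // ...
        PromptTokens     int             `json:"prompt_tokens,omitempty"`
        CompletionTokens int             `json:"completion_tokens,omitempty"`
        Metadata         json.RawMessage `json:"metadata,omitempty"`
    }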
553 lines
17 KiB
Go

// vectord is the vector-search service. HNSW indexes keyed by
// string IDs with optional opaque JSON metadata. Wraps
// github.com/coder/hnsw (pure Go, no cgo).
//
// G1 + persistence: indexes are persisted to storaged at
// _vectors/<name>.{json,hnsw} and rehydrated on startup. Setting
// [vectord].storaged_url empty disables persistence (dev mode).
package main

import (
	"context"
	"encoding/json"
	"errors"
	"flag"
	"log/slog"
	"net/http"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/go-chi/chi/v5"

	"git.agentview.dev/profit/golangLAKEHOUSE/internal/shared"
	"git.agentview.dev/profit/golangLAKEHOUSE/internal/storeclient"
	"git.agentview.dev/profit/golangLAKEHOUSE/internal/vectord"
)

const (
	maxRequestBytes = 64 << 20 // 64 MiB cap for batched Add payloads
	defaultK        = 10
	maxK            = 1000
)

func main() {
	configPath := flag.String("config", "lakehouse.toml", "path to TOML config")
	flag.Parse()

	cfg, err := shared.LoadConfig(*configPath)
	if err != nil {
		slog.Error("config", "err", err)
		os.Exit(1)
	}

	h := &handlers{reg: vectord.NewRegistry()}

	// Persistence is optional — empty StoragedURL = dev/ephemeral mode.
	if cfg.Vectord.StoragedURL != "" {
		h.persist = vectord.NewPersistor(storeclient.New(cfg.Vectord.StoragedURL))

		// Rehydrate any persisted indexes at startup. Failures are
		// logged-not-fatal: storaged might be coming up after vectord,
		// and an index that failed to load is still recoverable by
		// re-ingesting on top of an empty registry.
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		n, err := h.rehydrate(ctx)
		cancel()
		if err != nil {
			slog.Warn("rehydrate", "err", err, "loaded", n)
		} else {
			slog.Info("rehydrated", "indexes", n)
		}
	}

	if err := shared.Run("vectord", cfg.Vectord.Bind, h.register, cfg.Auth); err != nil {
		slog.Error("server", "err", err)
		os.Exit(1)
	}
}
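
// Illustrative [vectord] config section (a sketch: storaged_url is
// confirmed by the package comment above; the bind key name and both
// values are assumptions inferred from the cfg.Vectord fields):
//
//	[vectord]
//	bind = ":8080"                          # hypothetical listen address
//	storaged_url = "http://localhost:8081"  # empty string = dev/ephemeral mode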

type handlers struct {
	reg     *vectord.Registry
	persist *vectord.Persistor // nil when persistence is disabled

	// saversMu guards lazy initialization of per-index save tasks.
	// Each task coalesces synchronous Save calls into single-flight
	// async saves so high-write-rate indexes (playbook_memory under
	// multitier_100k load) don't pay one MinIO PUT per Add. See the
	// saveTask docstring for the coalescing semantics.
	saversMu sync.Mutex
	savers   map[string]*saveTask
}

// saveTask coalesces saves for one index into a single-flight async
// goroutine. While a save is in-flight, additional triggers mark
// "pending" — the in-flight goroutine reruns the save after it
// finishes, collapsing N concurrent triggers into at most 2 saves
// (the current in-flight + one catch-up).
//
// Why: pre-2026-05-01 each successful Add called Persistor.Save
// synchronously inside the request handler. For playbook_memory at
// 1900-entry / 768-d, Encode + MinIO PUT cost 100-300ms. With 50
// concurrent writers, end-to-end Add latency hit 2-2.5s purely from
// save serialization (Save takes the index RLock for Encode, which
// blocks new Adds taking the Lock).
//
// Trade-off: RPO. Add now returns OK before the save completes, so
// a crash can lose up to ~1 save's worth of data. Acceptable for
// the playbook-memory shape (learning loop — lost trace re-recorded
// on next run) and consistent with ADR-005's fail-open posture.
type saveTask struct {
	mu       sync.Mutex
	inflight bool
	pending  bool
}

// trigger schedules a save. If a save is already in-flight, marks
// pending and returns. If none in-flight, starts a goroutine that
// runs save and any queued pending saves.
//
// save is the actual save operation (parameterized for testability).
// Errors are logged via slog and not returned — same fail-open
// posture as the prior synchronous saveAfter.
func (s *saveTask) trigger(save func() error) {
	s.mu.Lock()
	if s.inflight {
		s.pending = true
		s.mu.Unlock()
		return
	}
	s.inflight = true
	s.mu.Unlock()

	go func() {
		for {
			if err := save(); err != nil {
				slog.Warn("persist save", "err", err)
			}
			s.mu.Lock()
			if !s.pending {
				s.inflight = false
				s.mu.Unlock()
				return
			}
			s.pending = false
			s.mu.Unlock()
			// Loop: re-run save to capture changes that arrived
			// while we were saving.
		}
	}()
}
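
// Illustrative usage (hypothetical; not wired into the service). A
// tight burst of triggers costs at most the in-flight save plus one
// catch-up, however large the burst:
//
//	var task saveTask
//	for i := 0; i < 100; i++ {
//		task.trigger(save) // while save #1 runs, the rest only set pending
//	}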

// rehydrate enumerates persisted indexes and loads each into the
// registry. Returns the count of successfully loaded indexes plus
// the first error (if any) — caller decides fatality.
func (h *handlers) rehydrate(ctx context.Context) (int, error) {
	if h.persist == nil {
		return 0, nil
	}
	names, err := h.persist.List(ctx)
	if err != nil {
		return 0, err
	}
	loaded := 0
	for _, name := range names {
		idx, err := h.persist.Load(ctx, name)
		if err != nil {
			slog.Warn("rehydrate skip", "name", name, "err", err)
			continue
		}
		// The registry's Create rebuilds an empty Index from params;
		// we want the LOADED one (with vectors). Bypass via a
		// helper that registers a pre-built Index.
		if err := h.reg.RegisterPrebuilt(idx); err != nil {
			slog.Warn("rehydrate register", "name", name, "err", err)
			continue
		}
		loaded++
	}
	return loaded, nil
}
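
// Persisted layout, per the package comment (the json/hnsw split is
// an assumption; the authoritative encoding lives in vectord.Persistor):
//
//	_vectors/<name>.json — index params + ID/metadata side data
//	_vectors/<name>.hnsw — encoded HNSW graph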

// saveAfter triggers a coalesced async persistence for the index.
// In-memory state is the source of truth in flight; a failed save
// re-runs on the next mutation, and the operator log shows the
// storaged outage.
//
// Coalescing semantics (added 2026-05-01 after multitier_100k
// follow-up): rapid concurrent writes collapse into at most two
// MinIO PUTs per index (current + one catch-up), instead of one
// per Add. See the saveTask docstring.
func (h *handlers) saveAfter(idx *vectord.Index) {
	if h.persist == nil {
		return
	}
	name := idx.Params().Name
	h.saversMu.Lock()
	if h.savers == nil {
		h.savers = make(map[string]*saveTask)
	}
	s, ok := h.savers[name]
	if !ok {
		s = &saveTask{}
		h.savers[name] = s
	}
	h.saversMu.Unlock()
	s.trigger(func() error {
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		return h.persist.Save(ctx, idx)
	})
}

// deleteAfter is the Delete-path counterpart of saveAfter. Unlike
// saves, deletes are not coalesced; the call runs synchronously.
func (h *handlers) deleteAfter(name string) {
	if h.persist == nil {
		return
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err := h.persist.Delete(ctx, name); err != nil {
		slog.Warn("persist delete", "name", name, "err", err)
	}
}

func (h *handlers) register(r chi.Router) {
	r.Post("/vectors/index", h.handleCreate)
	r.Get("/vectors/index", h.handleList)
	r.Get("/vectors/index/{name}", h.handleGetIndex)
	r.Delete("/vectors/index/{name}", h.handleDelete)
	r.Post("/vectors/index/{name}/add", h.handleAdd)
	r.Post("/vectors/index/{name}/search", h.handleSearch)
	r.Post("/vectors/index/{name}/merge", h.handleMerge)
}
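
// Illustrative end-to-end calls (host/port hypothetical; the real
// bind comes from the vectord section of lakehouse.toml):
//
//	curl -X POST localhost:8080/vectors/index \
//	    -d '{"name":"fresh_workers","dimension":768}'
//	curl -X POST localhost:8080/vectors/index/fresh_workers/merge \
//	    -d '{"dest":"workers","clear_source":true}'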

// createRequest mirrors POST /vectors/index body.
type createRequest struct {
	Name      string `json:"name"`
	Dimension int    `json:"dimension"`
	M         int    `json:"m,omitempty"`
	EfSearch  int    `json:"ef_search,omitempty"`
	Distance  string `json:"distance,omitempty"`
}
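
// Illustrative create body (the distance value is an assumption;
// accepted names are defined in the vectord package, see
// ErrUnknownDistance in handleCreate):
//
//	{"name": "playbook_memory", "dimension": 768, "m": 16, "ef_search": 64, "distance": "cosine"}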

// indexInfo is the GET /vectors/index/{name} response shape.
type indexInfo struct {
	Params vectord.IndexParams `json:"params"`
	Length int                 `json:"length"`
}

// addRequest is the body for POST /vectors/index/{name}/add. Items
// are batched so callers can amortize HTTP overhead — the smoke
// inserts hundreds of vectors per request.
type addRequest struct {
	Items []addItem `json:"items"`
}

type addItem struct {
	ID       string          `json:"id"`
	Vector   []float32       `json:"vector"`
	Metadata json.RawMessage `json:"metadata,omitempty"`
}
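
// exampleAddPayload is an illustrative sketch (never called by the
// service) of building a batched add body: two items for a
// hypothetical 4-d index, the second carrying opaque metadata.
func exampleAddPayload() ([]byte, error) {
	return json.Marshal(addRequest{Items: []addItem{
		{ID: "doc-1", Vector: []float32{0.1, 0.2, 0.3, 0.4}},
		{ID: "doc-2", Vector: []float32{0.5, 0.1, 0.0, 0.2},
			Metadata: json.RawMessage(`{"source":"smoke"}`)},
	}})
}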

// searchRequest is the body for POST /vectors/index/{name}/search.
type searchRequest struct {
	Vector []float32 `json:"vector"`
	K      int       `json:"k,omitempty"`
}

type searchResponse struct {
	Results []vectord.Result `json:"results"`
}

func (h *handlers) handleCreate(w http.ResponseWriter, r *http.Request) {
	var req createRequest
	if !decodeJSON(w, r, &req) {
		return
	}
	idx, err := h.reg.Create(vectord.IndexParams{
		Name:      req.Name,
		Dimension: req.Dimension,
		M:         req.M,
		EfSearch:  req.EfSearch,
		Distance:  req.Distance,
	})
	if errors.Is(err, vectord.ErrIndexAlreadyExists) {
		http.Error(w, err.Error(), http.StatusConflict)
		return
	}
	if errors.Is(err, vectord.ErrInvalidParams) || errors.Is(err, vectord.ErrUnknownDistance) {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if err != nil {
		slog.Error("create index", "name", req.Name, "err", err)
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	h.saveAfter(idx)
	writeJSON(w, http.StatusCreated, indexInfo{Params: idx.Params(), Length: idx.Len()})
}

func (h *handlers) handleList(w http.ResponseWriter, _ *http.Request) {
	names := h.reg.Names()
	writeJSON(w, http.StatusOK, map[string]any{"names": names, "count": len(names)})
}

func (h *handlers) handleGetIndex(w http.ResponseWriter, r *http.Request) {
	name := chi.URLParam(r, "name")
	idx, err := h.reg.Get(name)
	if errors.Is(err, vectord.ErrIndexNotFound) {
		http.Error(w, "not found", http.StatusNotFound)
		return
	}
	if err != nil {
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	writeJSON(w, http.StatusOK, indexInfo{Params: idx.Params(), Length: idx.Len()})
}

func (h *handlers) handleDelete(w http.ResponseWriter, r *http.Request) {
	name := chi.URLParam(r, "name")
	if err := h.reg.Delete(name); errors.Is(err, vectord.ErrIndexNotFound) {
		http.Error(w, "not found", http.StatusNotFound)
		return
	} else if err != nil {
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	h.deleteAfter(name)
	w.WriteHeader(http.StatusNoContent)
}

func (h *handlers) handleAdd(w http.ResponseWriter, r *http.Request) {
	name := chi.URLParam(r, "name")
	idx, err := h.reg.Get(name)
	if errors.Is(err, vectord.ErrIndexNotFound) {
		http.Error(w, "not found", http.StatusNotFound)
		return
	}
	if err != nil {
		// Same nil-pointer guard as handleMerge (scrum
		// post_role_gate_v1): any other Get error must not fall
		// through, or idx is nil and Params() below panics.
		slog.Error("add: get index", "name", name, "err", err)
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	var req addRequest
	if !decodeJSON(w, r, &req) {
		return
	}
	if len(req.Items) == 0 {
		http.Error(w, "items must be non-empty", http.StatusBadRequest)
		return
	}
	// Per scrum O-W4 (Opus, D5): pre-validate all items before any
	// Add, so a bad item at position N doesn't leave items 0..N-1
	// committed and item N rejected. Per scrum O-I3 (Opus, G1P):
	// extend pre-validation to cover NaN/Inf and zero-norm — these
	// were caught inside idx.Add but only after partial commits.
	params := idx.Params()
	dim := params.Dimension
	for j, it := range req.Items {
		if it.ID == "" {
			http.Error(w, "items["+strconv.Itoa(j)+"]: empty id", http.StatusBadRequest)
			return
		}
		if len(it.Vector) != dim {
			http.Error(w, "items["+strconv.Itoa(j)+"]: dim mismatch (index="+strconv.Itoa(dim)+", got="+strconv.Itoa(len(it.Vector))+")", http.StatusBadRequest)
			return
		}
		if err := vectord.ValidateVector(it.Vector, params.Distance); err != nil {
			http.Error(w, "items["+strconv.Itoa(j)+"]: "+err.Error(), http.StatusBadRequest)
			return
		}
	}
	// Pre-validation above is exhaustive (id, dim, finite, zero-norm),
	// so BatchAdd takes the write-lock once and pushes the whole batch
	// into coder/hnsw via one variadic Graph.Add. Saves N-1 lock
	// acquisitions per HTTP batch.
	batch := make([]vectord.BatchItem, len(req.Items))
	for j, it := range req.Items {
		batch[j] = vectord.BatchItem{ID: it.ID, Vector: it.Vector, Metadata: it.Metadata}
	}
	if err := idx.BatchAdd(batch); err != nil {
		slog.Error("batch add", "name", name, "err", err)
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	// One save per batch (post-loop), not per item. Per scrum
	// O-W4-style discipline: HTTP-batch boundary is the natural unit.
	h.saveAfter(idx)
	writeJSON(w, http.StatusOK, map[string]any{"added": len(req.Items), "length": idx.Len()})
}

func (h *handlers) handleSearch(w http.ResponseWriter, r *http.Request) {
	name := chi.URLParam(r, "name")
	idx, err := h.reg.Get(name)
	if errors.Is(err, vectord.ErrIndexNotFound) {
		http.Error(w, "not found", http.StatusNotFound)
		return
	}
	if err != nil {
		// Same nil-pointer guard as handleMerge: don't fall
		// through with a nil idx.
		slog.Error("search: get index", "name", name, "err", err)
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	var req searchRequest
	if !decodeJSON(w, r, &req) {
		return
	}
	k := req.K
	if k <= 0 {
		k = defaultK
	}
	if k > maxK {
		k = maxK
	}
	hits, err := idx.Search(req.Vector, k)
	if errors.Is(err, vectord.ErrDimensionMismatch) {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if err != nil {
		slog.Error("search", "name", name, "err", err)
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	writeJSON(w, http.StatusOK, searchResponse{Results: hits})
}
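
// Illustrative search body: k is optional (defaults to defaultK=10,
// capped at maxK=1000), the vector must match the index dimension:
//
//	{"vector": [0.1, 0.2, 0.3, 0.4], "k": 5}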

// mergeRequest body for POST /vectors/index/{name}/merge:
//
//	{"dest": "workers", "clear_source": true}
//
// "name" in the URL is the SOURCE index. Every item from source is
// added to dest with the same id + vector + metadata.
// clear_source=true (default false) deletes each successfully-merged
// item from the source after add — leaves source empty when merge
// succeeds in full. clear_source=false leaves source untouched
// (useful for dry-run or "copy not move" semantics).
//
// Closes OPEN #1: periodic fresh→main index merge. The fresh_workers
// two-tier index pattern grows monotonically; this endpoint is the
// drain that operators (or a cron) call when fresh_workers crosses
// the operational ceiling (~500 items per the original gating
// criterion).
//
// Returns counts: {merged, skipped_already_present, failed,
// length_dest, length_source}, plus first_error when any add failed.
type mergeRequest struct {
	Dest        string `json:"dest"`
	ClearSource bool   `json:"clear_source,omitempty"`
}

type mergeResponse struct {
	Merged                int    `json:"merged"`
	SkippedAlreadyPresent int    `json:"skipped_already_present"`
	Failed                int    `json:"failed"`
	LengthDest            int    `json:"length_dest"`
	LengthSource          int    `json:"length_source"`
	FirstError            string `json:"first_error,omitempty"`
}
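
// Illustrative response for a clean drain of a 120-item source into
// a dest that already held 1,500 items (all values hypothetical):
//
//	{"merged": 120, "skipped_already_present": 0, "failed": 0,
//	 "length_dest": 1620, "length_source": 0}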

func (h *handlers) handleMerge(w http.ResponseWriter, r *http.Request) {
	name := chi.URLParam(r, "name")
	src, err := h.reg.Get(name)
	if errors.Is(err, vectord.ErrIndexNotFound) {
		http.Error(w, "source not found", http.StatusNotFound)
		return
	} else if err != nil {
		// Per scrum post_role_gate_v1 (Opus): non-ErrIndexNotFound
		// errors must NOT fall through — src would be nil and the
		// next deref panics.
		slog.Error("merge: get source", "name", name, "err", err)
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	var req mergeRequest
	if !decodeJSON(w, r, &req) {
		return
	}
	if req.Dest == "" || req.Dest == name {
		http.Error(w, "dest must be set and differ from source", http.StatusBadRequest)
		return
	}
	dest, err := h.reg.Get(req.Dest)
	if errors.Is(err, vectord.ErrIndexNotFound) {
		http.Error(w, "dest not found", http.StatusNotFound)
		return
	} else if err != nil {
		// Same fix as source — non-ErrIndexNotFound dest errors
		// can't reach the merge body with a nil dest pointer.
		slog.Error("merge: get dest", "name", req.Dest, "err", err)
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	// Dimension match is non-negotiable — silently moving a 768-d
	// vector into a 384-d index would corrupt search forever.
	if src.Params().Dimension != dest.Params().Dimension {
		http.Error(w, "source/dest dimension mismatch", http.StatusBadRequest)
		return
	}

	resp := mergeResponse{}
	for _, id := range src.IDs() {
		vec, meta, ok := src.Lookup(id)
		if !ok {
			// Vanished between IDs() snapshot and Lookup — concurrent
			// delete. Treat as skip; not a failure.
			continue
		}
		// Skip if dest already has the id (idempotent re-runs don't
		// double-add). Operators expect "merge again" to be safe.
		if _, _, exists := dest.Lookup(id); exists {
			resp.SkippedAlreadyPresent++
			if req.ClearSource {
				src.Delete(id)
			}
			continue
		}
		if err := dest.Add(id, vec, meta); err != nil {
			resp.Failed++
			if resp.FirstError == "" {
				resp.FirstError = "add " + id + ": " + err.Error()
			}
			continue
		}
		resp.Merged++
		if req.ClearSource {
			src.Delete(id)
		}
	}
	// Persist both. These go through the same coalesced async path
	// as every other mutation; merge is a deliberate one-shot job,
	// so save timing is not a concern here.
	h.saveAfter(dest)
	if req.ClearSource {
		h.saveAfter(src)
	}
	resp.LengthDest = dest.Len()
	resp.LengthSource = src.Len()
	writeJSON(w, http.StatusOK, resp)
}

// decodeJSON reads + decodes a JSON body with a body-size cap.
// Returns false (and writes the error response) on failure.
func decodeJSON(w http.ResponseWriter, r *http.Request, v any) bool {
	r.Body = http.MaxBytesReader(w, r.Body, maxRequestBytes)
	defer r.Body.Close()
	if err := json.NewDecoder(r.Body).Decode(v); err != nil {
		var maxErr *http.MaxBytesError
		if errors.As(err, &maxErr) || strings.Contains(err.Error(), "http: request body too large") {
			http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
			return false
		}
		http.Error(w, "decode body: "+err.Error(), http.StatusBadRequest)
		return false
	}
	return true
}

func writeJSON(w http.ResponseWriter, code int, v any) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	if err := json.NewEncoder(w).Encode(v); err != nil {
		slog.Warn("write json", "err", err)
	}
}