Implements the auth posture from ADR-003 (commit 0d18ffa). Two independent
layers — Bearer token (constant-time compare via crypto/subtle) and IP
allowlist (CIDR set) — composed in shared.Run so every binary inherits the
same gate without per-binary wiring. Together with the bind-gate from commit
6af0520, this mechanically closes audit risks R-001 + R-007:

- non-loopback bind without auth.token = startup refuse
- non-loopback bind WITH auth.token + override env = allowed
- loopback bind = all gates open (G0 dev unchanged)

internal/shared/auth.go (NEW)

RequireAuth(cfg AuthConfig) returns chi-compatible middleware. Empty Token +
empty AllowedIPs → pass-through (G0 dev mode). Token-only → 401 on Bearer
mismatch. AllowedIPs-only → 403 when the source IP is not in the CIDR set.
Both → both gates apply. /health bypasses both layers (load-balancer /
liveness probes shouldn't carry tokens).

CIDR parsing runs once at boot; a bare IP (no /N) is treated as /32 (or /128
for IPv6). Invalid entries log a warning and are dropped —
fail-loud-but-not-fatal, so a typo doesn't kill the binary.

Token comparison: subtle.ConstantTimeCompare on the full "Bearer <token>"
wire-format string. Length mismatch returns 0 (per stdlib spec), so
wrong-length tokens reject without a timing leak. The pre-encoded comparison
slice is stored in the middleware closure — one allocation per request.

Source-IP extraction prefers net.SplitHostPort, falling back to RemoteAddr
as-is for httptest compatibility. X-Forwarded-For support is a follow-up for
when a trusted proxy fronts the gateway (config knob TBD per ADR-003
§"Future").

internal/shared/server.go

Run signature gained an AuthConfig parameter (4th arg). /health stays mounted
on the outer router (public). Registered routes go inside a chi.Group with
RequireAuth applied — empty config = transparent group. Added a
requireAuthOnNonLoopback startup check: non-loopback bind with an empty
Token = refuse to start (cites R-001 + R-007 by name).

internal/shared/config.go

AuthConfig type added with TOML tags. Fields: Token, AllowedIPs. Composed
into Config under [auth].

cmd/<svc>/main.go × 7 (catalogd, embedd, gateway, ingestd, queryd, storaged,
vectord; mcpd is unaffected — stdio doesn't bind a port)

Each call site adds cfg.Auth as the 4th arg to shared.Run. No other changes —
the middleware applies uniformly via shared.Run.

internal/shared/auth_test.go (12 test funcs)

Empty-config pass-through, missing-token 401, wrong-token 401, correct-token
200, raw-token-without-Bearer-prefix 401, /health always public, IP allowlist
allow + reject, bare IP as /32, both layers when both configured, invalid
CIDR drop-with-warn, RemoteAddr shape extraction. The constant-time
comparison is verified by inspection (comments in auth.go) plus the existence
of the pass-through test (length-mismatch case).

Verified:
- go test -count=1 ./internal/shared/ — all green (was 21, now 33 funcs)
- just verify — vet + test + 9 smokes, 33s
- just proof contract — 53/0/1 unchanged

Smokes + proof harness keep working without any token configuration: the
default Auth is an empty struct → middleware is a no-op → existing tests pass
unchanged. To exercise the gate, operators set [auth].token in lakehouse.toml
(or, per the "Future" note in the ADR, via env var).

Closes audit findings:
- R-001 HIGH — fully mechanically closed (was: partial via bind gate)
- R-007 MED — fully mechanically closed (was: design-only ADR-003)

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
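For orientation, a minimal sketch of the auth.go shape described above, not
the committed source: RequireAuth, AuthConfig, Token, and AllowedIPs are
named in this message, while the allowed_ips TOML tag, the response strings,
and the exact control flow are illustrative.

package shared

import (
	"crypto/subtle"
	"log/slog"
	"net"
	"net/http"
	"strings"
)

// AuthConfig mirrors the [auth] TOML table. The allowed_ips tag is a
// guess; only [auth].token is spelled out in this message.
type AuthConfig struct {
	Token      string   `toml:"token"`
	AllowedIPs []string `toml:"allowed_ips"`
}

// RequireAuth returns chi-compatible middleware. Empty config is a
// pass-through (G0 dev mode).
func RequireAuth(cfg AuthConfig) func(http.Handler) http.Handler {
	// Pre-encode the full "Bearer <token>" wire string once; the closure
	// keeps it so each request costs one allocation (the header copy).
	want := []byte("Bearer " + cfg.Token)

	// Parse CIDRs at boot. Bare IPs become /32 (or /128 for IPv6);
	// invalid entries warn and drop rather than kill the binary.
	var allowed []*net.IPNet
	for _, entry := range cfg.AllowedIPs {
		s := entry
		if !strings.Contains(s, "/") {
			if strings.Contains(s, ":") {
				s += "/128"
			} else {
				s += "/32"
			}
		}
		_, ipNet, err := net.ParseCIDR(s)
		if err != nil {
			slog.Warn("auth: dropping invalid allowlist entry", "entry", entry, "err", err)
			continue
		}
		allowed = append(allowed, ipNet)
	}

	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path == "/health" { // probes stay public
				next.ServeHTTP(w, r)
				return
			}
			if len(allowed) > 0 {
				host, _, err := net.SplitHostPort(r.RemoteAddr)
				if err != nil {
					host = r.RemoteAddr // httptest may set a bare address
				}
				ip := net.ParseIP(host)
				ok := false
				for _, n := range allowed {
					if ip != nil && n.Contains(ip) {
						ok = true
						break
					}
				}
				if !ok {
					http.Error(w, "forbidden", http.StatusForbidden)
					return
				}
			}
			if cfg.Token != "" {
				// ConstantTimeCompare returns 0 on length mismatch, so a
				// wrong-length token rejects without a timing leak.
				got := []byte(r.Header.Get("Authorization"))
				if subtle.ConstantTimeCompare(got, want) != 1 {
					http.Error(w, "unauthorized", http.StatusUnauthorized)
					return
				}
			}
			next.ServeHTTP(w, r)
		})
	}
}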
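And the server.go composition, sketched under the same caveat: isLoopback is
a hypothetical stand-in for the bind-gate check from commit 6af0520, and the
real Run also owns the server lifecycle.

package shared

import (
	"fmt"
	"net"
	"net/http"

	"github.com/go-chi/chi/v5"
)

// Run is simplified here to show the gate composition only.
func Run(name, bind string, register func(chi.Router), auth AuthConfig) error {
	// requireAuthOnNonLoopback: a reachable bind with no token refuses
	// to start, closing R-001 + R-007 mechanically.
	if !isLoopback(bind) && auth.Token == "" {
		return fmt.Errorf("%s: refusing non-loopback bind %q without auth.token (R-001, R-007)", name, bind)
	}

	r := chi.NewRouter()
	// /health stays on the outer router: probes don't carry tokens.
	r.Get("/health", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	// Everything a binary registers sits behind the gate. An empty
	// AuthConfig makes RequireAuth transparent, so G0 dev is unchanged.
	r.Group(func(g chi.Router) {
		g.Use(RequireAuth(auth))
		register(g)
	})
	return http.ListenAndServe(bind, r)
}

// isLoopback is a stand-in; the real check ships with the bind-gate
// from commit 6af0520.
func isLoopback(bind string) bool {
	host, _, err := net.SplitHostPort(bind)
	if err != nil {
		return false
	}
	if host == "localhost" {
		return true
	}
	ip := net.ParseIP(host)
	return ip != nil && ip.IsLoopback()
}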
// queryd is the SQL execution layer. DuckDB engine (via cgo), reads
// Parquet directly from S3 via httpfs, registers catalog datasets as
// views, exposes POST /sql. The interesting glue is in
// internal/queryd/{db,registrar}.go; main.go wires the lifecycle.
package main

import (
	"context"
	"database/sql"
	"encoding/json"
	"errors"
	"flag"
	"log/slog"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/go-chi/chi/v5"

	"git.agentview.dev/profit/golangLAKEHOUSE/internal/catalogclient"
	"git.agentview.dev/profit/golangLAKEHOUSE/internal/queryd"
	"git.agentview.dev/profit/golangLAKEHOUSE/internal/secrets"
	"git.agentview.dev/profit/golangLAKEHOUSE/internal/shared"
)

const (
	primaryBucket   = "primary"
	maxSQLBodyBytes = 64 << 10 // 64 KiB cap on POST /sql body — SQL strings are not large
	defaultRefresh  = 30 * time.Second
)

func main() {
	configPath := flag.String("config", "lakehouse.toml", "path to TOML config")
	flag.Parse()

	cfg, err := shared.LoadConfig(*configPath)
	if err != nil {
		slog.Error("config", "err", err)
		os.Exit(1)
	}
	if cfg.Queryd.CatalogdURL == "" {
		slog.Error("config", "err", "queryd.catalogd_url is required")
		os.Exit(1)
	}
	if cfg.S3.Bucket == "" {
		slog.Error("config", "err", "s3.bucket is required")
		os.Exit(1)
	}

	refreshEvery := defaultRefresh
	if cfg.Queryd.RefreshEvery != "" {
		d, err := time.ParseDuration(cfg.Queryd.RefreshEvery)
		if err != nil {
			slog.Error("config", "err", "queryd.refresh_every is not a valid duration: "+err.Error())
			os.Exit(1)
		}
		refreshEvery = d
	}

	// Long-running ctx tied to the lifetime of the process for the
	// background refresh goroutine. Cancelled when shared.Run returns.
	procCtx, procCancel := context.WithCancel(context.Background())
	defer procCancel()

	prov, err := secrets.NewFileProvider(cfg.Queryd.SecretsPath, secrets.S3Credentials{
		AccessKeyID:     cfg.S3.AccessKeyID,
		SecretAccessKey: cfg.S3.SecretAccessKey,
	})
	if err != nil {
		slog.Error("secrets", "err", err)
		os.Exit(1)
	}

	db, err := queryd.OpenDB(procCtx, cfg.S3, prov, primaryBucket)
	if err != nil {
		slog.Error("duckdb open", "err", err)
		os.Exit(1)
	}
	defer db.Close()

	catalog := catalogclient.New(cfg.Queryd.CatalogdURL)
	registrar := queryd.NewRegistrar(db, catalog, cfg.S3.Bucket)

	// Initial refresh — log if non-empty so the operator sees what
	// got loaded. A failure here is non-fatal: catalogd may still be
	// coming up in the boot order; the TTL goroutine will retry.
	refreshCtx, refreshCancel := context.WithTimeout(procCtx, 10*time.Second)
	stats, err := registrar.Refresh(refreshCtx)
	refreshCancel()
	if err != nil {
		slog.Warn("initial refresh failed (will retry)", "err", err)
	} else {
		slog.Info("initial refresh", "created", stats.Created, "skipped", stats.Skipped)
	}

	// Background ticker — drives Refresh on the configured interval.
	go runRefreshLoop(procCtx, registrar, refreshEvery)

	h := &handlers{db: db}

	if err := shared.Run("queryd", cfg.Queryd.Bind, h.register, cfg.Auth); err != nil {
		slog.Error("server", "err", err)
		os.Exit(1)
	}
}

// runRefreshLoop drives Registrar.Refresh on a ticker. Cancellable
// via ctx. Logs any refresh that created/updated/dropped something
// (failures log at warn), so the log tracks when datasets land
// without being chatty on quiet runs.
func runRefreshLoop(ctx context.Context, r *queryd.Registrar, every time.Duration) {
	ticker := time.NewTicker(every)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			rctx, cancel := context.WithTimeout(ctx, 10*time.Second)
			stats, err := r.Refresh(rctx)
			cancel()
			if err != nil {
				slog.Warn("refresh failed", "err", err)
				continue
			}
			if stats.Created+stats.Updated+stats.Dropped > 0 {
				slog.Info("refresh",
					"created", stats.Created,
					"updated", stats.Updated,
					"dropped", stats.Dropped,
					"skipped", stats.Skipped)
			}
		}
	}
}

type handlers struct {
	db *sql.DB
}

func (h *handlers) register(r chi.Router) {
	r.Post("/sql", h.handleSQL)
}

// sqlRequest is the POST /sql body shape.
type sqlRequest struct {
	SQL string `json:"sql"`
}

// sqlResponse is the result envelope. Columns + rows is the compact
// form; verbose row-as-object form is post-G0 if anyone needs it.
type sqlResponse struct {
	Columns  []sqlColumn `json:"columns"`
	Rows     [][]any     `json:"rows"`
	RowCount int         `json:"row_count"`
}

type sqlColumn struct {
	Name string `json:"name"`
	Type string `json:"type"`
}

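// Illustrative exchange for the shapes above (not from the repo; the
// DuckDB type name shown is a plausible value, not a verified one):
//
//	POST /sql
//	{"sql": "SELECT 42 AS answer"}
//
//	HTTP/1.1 200 OK
//	{"columns":[{"name":"answer","type":"INTEGER"}],"rows":[[42]],"row_count":1}
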
func (h *handlers) handleSQL(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	r.Body = http.MaxBytesReader(w, r.Body, maxSQLBodyBytes)
	var req sqlRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		var maxErr *http.MaxBytesError
		if errors.As(err, &maxErr) || strings.Contains(err.Error(), "http: request body too large") {
			http.Error(w, "sql body too large", http.StatusRequestEntityTooLarge)
			return
		}
		http.Error(w, "decode body: "+err.Error(), http.StatusBadRequest)
		return
	}
	if strings.TrimSpace(req.SQL) == "" {
		http.Error(w, "sql is empty", http.StatusBadRequest)
		return
	}

	rows, err := h.db.QueryContext(r.Context(), req.SQL)
	if err != nil {
		// DuckDB errors are user-facing for ad-hoc SQL — expose them
		// at 400 so callers can see what went wrong (table not found,
		// syntax error, etc.). Internal infra issues would surface as
		// 500 once we distinguish them later.
		http.Error(w, "query: "+err.Error(), http.StatusBadRequest)
		return
	}
	defer rows.Close()

	cols, err := rows.Columns()
	if err != nil {
		http.Error(w, "columns: "+err.Error(), http.StatusInternalServerError)
		return
	}
	colTypes, err := rows.ColumnTypes()
	if err != nil {
		http.Error(w, "column types: "+err.Error(), http.StatusInternalServerError)
		return
	}

	resp := sqlResponse{
		Columns: make([]sqlColumn, len(cols)),
		Rows:    [][]any{},
	}
	for i, name := range cols {
		resp.Columns[i] = sqlColumn{Name: name, Type: colTypes[i].DatabaseTypeName()}
	}

	for rows.Next() {
		dest := make([]any, len(cols))
		ptrs := make([]any, len(cols))
		for i := range dest {
			ptrs[i] = &dest[i]
		}
		if err := rows.Scan(ptrs...); err != nil {
			http.Error(w, "scan: "+err.Error(), http.StatusInternalServerError)
			return
		}
		// encoding/json renders []byte as base64, not text; DuckDB
		// returns VARCHAR as []byte through database/sql. Convert here.
		for i, v := range dest {
			if b, ok := v.([]byte); ok {
				dest[i] = string(b)
			}
		}
		resp.Rows = append(resp.Rows, dest)
	}
	if err := rows.Err(); err != nil {
		http.Error(w, "rows err: "+err.Error(), http.StatusInternalServerError)
		return
	}
	resp.RowCount = len(resp.Rows)

	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		// Headers already sent — can't change the status code, but log
		// so an unsupported DuckDB column type (Decimal, INTERVAL, etc.)
		// surfaces in the operator log. Per scrum JSON-ERR (Opus).
		slog.Warn("sql encode response", "err", err)
	}
}