Validated G0 substrate against the production workers_500k.parquet dataset (18 cols × 500,000 rows). Findings, plus one applied fix:

Finding #1 (FIXED): ingestd's hardcoded 256 MiB cap rejected the 500K CSV (344 MiB) with 413. The cap fired correctly; no OOM. Extracted it to an [ingestd].max_ingest_bytes config field; the default stays 256 MiB, overridable per deployment for known-large workloads. With the cap bumped to 512 MiB, the 500K ingest succeeds in 3.12s with ingestd peak RSS at 209 MiB.

Finding #2 (deferred): ingestd doesn't release memory between ingests. The Go runtime is conservative about returning memory to the OS; for a long-running daemon this is fine.

Finding #3: DuckDB-via-httpfs is healthy at 500K rows: GROUP BY 45ms, count(*) 24ms, AVG 47ms, schema introspection 25ms. Scaling is sub-linear vs 100K, so the s3:// read path is not a bottleneck.

Finding #4: ADR-010 type inference correctly handled real staffing data: worker_id → BIGINT, numeric scores → DOUBLE, multi-line resume_text → VARCHAR. The 1000-row sample was sufficient.

Finding #5: Go's encoding/csv handles RFC 4180 quoted-comma fields and multi-line quoted text without LazyQuotes, confirming the D4 scrum's dismissal of Qwen's BLOCK on this point.

Net: the substrate handles production-scale data with one config knob. No correctness issues, no OOMs, no silent type errors. All 6 G0 smokes still PASS after the cap-config change.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
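For reference, the Finding #1 override is a two-line, deployment-local edit. An illustrative lakehouse.toml fragment (the key matches the new config field; the value is the 512 MiB used in validation):

    [ingestd]
    max_ingest_bytes = 536870912  # 512 MiB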
// Package shared also provides the TOML config loader. Per the ADR
// equivalent of Rust ADR-006 (TOML config over env vars), every
// service reads `lakehouse.toml` with sane defaults and env
// overrides. Config is hot-reload-unaware in G0; reload-on-SIGHUP
// is a G1+ concern.
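//
// A minimal lakehouse.toml, for illustration only (both values below are
// assumptions, not recommendations; DefaultConfig holds the real G0
// defaults):
//
//	[gateway]
//	bind = "127.0.0.1:3110"
//
//	[log]
//	level = "debug"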
package shared

import (
	"errors"
	"fmt"
	"io/fs"
	"log/slog"
	"os"

	"github.com/pelletier/go-toml/v2"
)

// Config is the unified Lakehouse config. Each service reads only
// the section it cares about, but they all share the same file so
// operators have one place to look.
type Config struct {
	Gateway  GatewayConfig `toml:"gateway"`
	Storaged ServiceConfig `toml:"storaged"`
	Catalogd CatalogConfig `toml:"catalogd"`
	Ingestd  IngestConfig  `toml:"ingestd"`
	Queryd   QuerydConfig  `toml:"queryd"`
	S3       S3Config      `toml:"s3"`
	Log      LogConfig     `toml:"log"`
}

// IngestConfig adds ingestd-specific knobs. ingestd needs to PUT
// parquet to storaged AND register manifests with catalogd, so it
// holds two upstream URLs in addition to its own bind.
//
// MaxIngestBytes caps the multipart body size. CSVs are typically
// 4-6× larger than the resulting Snappy-compressed Parquet, so 256
// MiB CSV → ~50 MiB Parquet — well under storaged's 256 MiB PUT
// cap. Real-scale validation (2026-04-29) showed 500K workers ×
// 18 cols = 344 MiB CSV → 71 MiB Parquet; bumping this knob to
// 512 MiB is the documented path for that workload.
type IngestConfig struct {
	Bind           string `toml:"bind"`
	StoragedURL    string `toml:"storaged_url"`
	CatalogdURL    string `toml:"catalogd_url"`
	MaxIngestBytes int64  `toml:"max_ingest_bytes"`
}
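
// Enforcement of MaxIngestBytes lives in ingestd's handler, not in this
// package. A sketch of the assumed wiring, using only the stdlib (the
// handler shape is hypothetical):
//
//	r.Body = http.MaxBytesReader(w, r.Body, cfg.Ingestd.MaxIngestBytes)
//	// reads past the cap now error out, which the handler maps to 413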

// GatewayConfig adds the upstream URLs the reverse proxy fronts.
// Each route family (/v1/storage, /v1/catalog, /v1/ingest, /v1/sql)
// has its own upstream so we can scale services independently or
// move them to different boxes without touching gateway code.
type GatewayConfig struct {
	Bind        string `toml:"bind"`
	StoragedURL string `toml:"storaged_url"`
	CatalogdURL string `toml:"catalogd_url"`
	IngestdURL  string `toml:"ingestd_url"`
	QuerydURL   string `toml:"queryd_url"`
}
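
// Schematically, the route → upstream table this config implies (the
// actual proxy wiring lives in gateway code):
//
//	/v1/storage → StoragedURL
//	/v1/catalog → CatalogdURL
//	/v1/ingest  → IngestdURL
//	/v1/sql     → QuerydURL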

// QuerydConfig adds queryd-specific knobs. queryd talks DuckDB
// directly to MinIO via DuckDB's httpfs extension (so no storaged
// URL needed), and reads the catalog over HTTP for view registration.
// SecretsPath defaults to /etc/lakehouse/secrets-go.toml — the same
// file storaged uses, since both services need the S3 credentials.
type QuerydConfig struct {
	Bind         string `toml:"bind"`
	CatalogdURL  string `toml:"catalogd_url"`
	SecretsPath  string `toml:"secrets_path"`
	RefreshEvery string `toml:"refresh_every"` // duration string, e.g. "30s"
}
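
// RefreshEvery stays a string so the TOML remains hand-editable; queryd
// is assumed to parse it roughly like this (sketch only):
//
//	every, err := time.ParseDuration(cfg.Queryd.RefreshEvery) // "30s" → 30 * time.Second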

// CatalogConfig adds catalogd-specific knobs on top of the standard
// bind. StoragedURL points at the storaged service for manifest
// persistence; G0 defaults to the localhost bind.
type CatalogConfig struct {
	Bind        string `toml:"bind"`
	StoragedURL string `toml:"storaged_url"`
}

// ServiceConfig is the per-binary bind config. Default Bind ""
// means "use the service's hardcoded G0 default" — see DefaultConfig.
type ServiceConfig struct {
	Bind string `toml:"bind"`
}

// S3Config holds S3-compatible storage settings. Endpoint blank →
// AWS default. Bucket "" → "lakehouse-primary".
type S3Config struct {
	Endpoint        string `toml:"endpoint"`
	Region          string `toml:"region"`
	Bucket          string `toml:"bucket"`
	AccessKeyID     string `toml:"access_key_id"`
	SecretAccessKey string `toml:"secret_access_key"`
	UsePathStyle    bool   `toml:"use_path_style"`
}

// LogConfig — slog level for now; structured fields land G1+.
type LogConfig struct {
	Level string `toml:"level"`
}
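
// Turning Level into a slog.Level is the consumer's job; since slog.Level
// implements encoding.TextUnmarshaler, the assumed decode is simply
// (sketch):
//
//	var lvl slog.Level
//	err := lvl.UnmarshalText([]byte(cfg.Log.Level)) // "info" → slog.LevelInfo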

// DefaultConfig returns the G0 dev defaults. Ports are shifted to
// 3110+ to coexist with the live Rust lakehouse on 3100/3201-3204
// during the migration. G5 cutover flips gateway back to 3100.
func DefaultConfig() Config {
	return Config{
		Gateway: GatewayConfig{
			Bind:        "127.0.0.1:3110",
			StoragedURL: "http://127.0.0.1:3211",
			CatalogdURL: "http://127.0.0.1:3212",
			IngestdURL:  "http://127.0.0.1:3213",
			QuerydURL:   "http://127.0.0.1:3214",
		},
		Storaged: ServiceConfig{Bind: "127.0.0.1:3211"},
		Catalogd: CatalogConfig{Bind: "127.0.0.1:3212", StoragedURL: "http://127.0.0.1:3211"},
		Ingestd: IngestConfig{
			Bind:           "127.0.0.1:3213",
			StoragedURL:    "http://127.0.0.1:3211",
			CatalogdURL:    "http://127.0.0.1:3212",
			MaxIngestBytes: 256 << 20, // 256 MiB; bump per deployment via lakehouse.toml
		},
		Queryd: QuerydConfig{
			Bind:         "127.0.0.1:3214",
			CatalogdURL:  "http://127.0.0.1:3212",
			SecretsPath:  "/etc/lakehouse/secrets-go.toml",
			RefreshEvery: "30s",
		},
		S3: S3Config{
			Endpoint:     "http://localhost:9000",
			Region:       "us-east-1",
			Bucket:       "lakehouse-primary",
			UsePathStyle: true,
		},
		Log: LogConfig{Level: "info"},
	}
}

// LoadConfig reads `lakehouse.toml` from path; if path is empty or
// the file doesn't exist, returns DefaultConfig. Any decode error is
// fatal (we don't want a misconfigured service silently falling back
// to defaults — that's the kind of bug you find at 2am).
//
// Per Opus + Qwen WARN #3: when path WAS given but the file is
// missing, log a warning so silent default-fallback doesn't hide
// misconfiguration. Empty path is fine (caller didn't ask for a
// file); non-empty + missing is suspicious.
func LoadConfig(path string) (Config, error) {
	cfg := DefaultConfig()
	if path == "" {
		return cfg, nil
	}
	b, err := os.ReadFile(path)
	if errors.Is(err, fs.ErrNotExist) {
		slog.Warn("config file not found, using defaults",
			"path", path,
			"hint", "create the file or pass -config /path/to/lakehouse.toml")
		return cfg, nil
	}
	if err != nil {
		return cfg, fmt.Errorf("read config: %w", err)
	}
	if err := toml.Unmarshal(b, &cfg); err != nil {
		return cfg, fmt.Errorf("parse config: %w", err)
	}
	return cfg, nil
}
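
// Note the overlay semantics LoadConfig gives us: cfg starts from
// DefaultConfig and toml.Unmarshal only writes keys present in the file,
// so a partial lakehouse.toml is a valid override. For example, a file
// containing nothing but
//
//	[queryd]
//	refresh_every = "60s"
//
// keeps every other default and changes only queryd's refresh cadence
// (the "60s" value is illustrative).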