// Package secrets resolves credentials for storaged + future bucket
// federation (G2). The G0 surface is one method — S3Credentials —
// looked up by logical bucket name. Multi-bucket lands in G2; until
// then every lookup returns the same credentials, but callers already
// pass the name so the API doesn't need to change later.
//
// FileProvider reads /etc/lakehouse/secrets.toml. If that file is
// absent OR doesn't contain credentials for the requested bucket, the
// provider falls back to the values supplied via the inline config
// (lakehouse.toml [s3] block). G0 is dev-only so the inline fallback
// is convenient; G1 will tighten this to "secrets file required".
package secrets

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"sync"

	"github.com/pelletier/go-toml/v2"
)

// S3Credentials is what storaged hands to aws-sdk-go-v2 to sign
// requests. Region/endpoint/bucket are config (non-secret) and live
// on shared.S3Config — those don't pass through this provider.
type S3Credentials struct {
	AccessKeyID     string `toml:"access_key_id"`
	SecretAccessKey string `toml:"secret_access_key"`
}

// Provider is the interface storaged depends on. Keeping this small
// is deliberate — every method here is a future migration point when
// secrets move to Vault / SOPS / SSM.
type Provider interface {
	// S3Credentials returns signing credentials for the named logical
	// bucket, or an error when none are configured.
	S3Credentials(bucket string) (S3Credentials, error)
}

// FileProvider is the G0 implementation. It loads the file once on
// construction; reload-on-SIGHUP is a G1 concern.
type FileProvider struct {
	path     string        // secrets file location, kept for error messages
	parsed   secretsFile   // contents of the TOML file (zero value if file was absent)
	fallback S3Credentials // inline creds from lakehouse.toml [s3]; empty AccessKeyID disables
	mu       sync.RWMutex  // guards parsed/fallback; write path arrives with G1 reload
}

// secretsFile mirrors the on-disk TOML shape:
//
//	[s3.primary]
//	access_key_id = "..."
//	secret_access_key = "..."
//
//	[s3.archive] # G2 multi-bucket
//	access_key_id = "..."
type secretsFile struct {
	S3 map[string]S3Credentials `toml:"s3"`
}
Any other read/parse error is fatal. func NewFileProvider(path string, fallback S3Credentials) (*FileProvider, error) { p := &FileProvider{path: path, fallback: fallback} b, err := os.ReadFile(path) if errors.Is(err, fs.ErrNotExist) { return p, nil } if err != nil { return nil, fmt.Errorf("read secrets %q: %w", path, err) } if err := toml.Unmarshal(b, &p.parsed); err != nil { return nil, fmt.Errorf("parse secrets %q: %w", path, err) } return p, nil } // S3Credentials resolves bucket → credentials. Lookup order: // 1. secrets file [s3.] section // 2. inline fallback (lakehouse.toml [s3]) // // If neither produces a non-empty AccessKeyID, returns an error so a // misconfigured deployment fails loud instead of trying anonymous S3. func (p *FileProvider) S3Credentials(bucket string) (S3Credentials, error) { p.mu.RLock() defer p.mu.RUnlock() if creds, ok := p.parsed.S3[bucket]; ok && creds.AccessKeyID != "" { return creds, nil } if p.fallback.AccessKeyID != "" { return p.fallback, nil } return S3Credentials{}, fmt.Errorf("no credentials for bucket %q (file=%q)", bucket, p.path) } // StaticProvider is a test/dev helper that returns the same creds for // every bucket. Use NewFileProvider in production code paths. type StaticProvider struct { Creds S3Credentials } func (p StaticProvider) S3Credentials(_ string) (S3Credentials, error) { if p.Creds.AccessKeyID == "" { return S3Credentials{}, errors.New("StaticProvider: no AccessKeyID") } return p.Creds, nil }