diff --git a/crates/storaged/src/registry.rs b/crates/storaged/src/registry.rs index 36f3b32..a77d6e5 100644 --- a/crates/storaged/src/registry.rs +++ b/crates/storaged/src/registry.rs @@ -361,6 +361,12 @@ async fn build_store( .with_secret_access_key(&creds.secret_key); if let Some(endpoint) = &bc.endpoint { builder = builder.with_endpoint(endpoint); + // MinIO and other S3-compatible services often run on plain + // HTTP. object_store refuses HTTP by default — opt in when + // a custom endpoint is configured (TLS endpoints work either way). + if endpoint.starts_with("http://") { + builder = builder.with_allow_http(true); + } } let s3 = builder.build() .map_err(|e| format!("init s3 bucket '{}': {e}", bc.name))?; diff --git a/crates/vectord-lance/Cargo.toml b/crates/vectord-lance/Cargo.toml index bdeed1f..f48a198 100644 --- a/crates/vectord-lance/Cargo.toml +++ b/crates/vectord-lance/Cargo.toml @@ -13,7 +13,13 @@ edition = "2024" # vectord-lance crate." This is that firewall. [dependencies] -lance = { version = "4.0", default-features = false } +# S3 support: the Lance 4.0 "aws" feature enables S3-compatible +# storage (MinIO included). Lance delegates to its internal +# object_store crate when given s3:// URIs and reads AWS_* env vars +# for credentials. The separate "dynamodb" feature would add +# DynamoDB-based commit locking for multi-writer S3; we are +# single-writer, so the base "aws" feature alone is enough.
+lance = { version = "4.0", default-features = false, features = ["aws"] } lance-index = { version = "4.0", default-features = false } lance-linalg = { version = "4.0", default-features = false } diff --git a/crates/vectord/src/lance_backend.rs b/crates/vectord/src/lance_backend.rs index 9f019c4..8002c3c 100644 --- a/crates/vectord/src/lance_backend.rs +++ b/crates/vectord/src/lance_backend.rs @@ -22,16 +22,19 @@ use vectord_lance::LanceVectorStore; use crate::index_registry::IndexRegistry; /// Convert a bucket+index pair into the URI Lance should use as the -/// dataset path. Local-only for MVP; S3 when we wire that backend. +/// dataset path. Supports both local (filesystem) and S3 buckets. /// -/// Path resolution mirrors lakehouse.toml's convention for local -/// buckets: ./data for primary, ./data/_rescue for rescue, ./data/_testing -/// for testing, ./data/_profiles/{sanitized} for profile:* buckets, and -/// ./data/_buckets/{sanitized} for everything else. Sanitization replaces -/// `:` with `_` so paths are filesystem-safe. +/// **Local buckets:** path resolution mirrors lakehouse.toml's convention. +/// Returns an absolute filesystem path. +/// +/// **S3 buckets:** returns `s3://{s3_bucket}/lance/{index_name}`. Lance's +/// internal object_store crate reads `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY` +/// / `AWS_ENDPOINT` from environment (or the S3 feature's default chain). +/// For MinIO: set `AWS_ENDPOINT=http://localhost:9000` and +/// `AWS_ALLOW_HTTP=true` before starting the gateway. /// /// Refuses unknown buckets so a typo doesn't silently land Lance data -/// in a directory the rest of the system can't see. +/// in a directory / prefix the rest of the system can't see. 
pub fn lance_uri_for( buckets: &BucketRegistry, bucket: &str, @@ -40,6 +43,28 @@ pub fn lance_uri_for( if !buckets.contains(bucket) { return Err(format!("bucket '{bucket}' not registered")); } + // S3 detection. The authoritative signal would be the bucket's + // backend="s3" setting in its BucketConfig, but the registry only + // exposes that through an async API and this function is + // synchronous. Instead we rely on a naming convention that + // lakehouse.toml makes explicit: a bucket whose name starts with + // "s3:" is an S3 target, and the remainder of the name is the + // S3 bucket name. + // + // Example: bucket "s3:lakehouse" and index "docs" yield + // s3://lakehouse/lance/docs. + // + // BucketConfig.bucket holds the actual S3 bucket name, so if the + // registry ever grows a synchronous accessor for it, that should + // replace this convention. Until then the "s3:" prefix is the + // single source of truth; keep the TOML bucket names and this + // check in agreement. + if bucket.starts_with("s3:") { + let s3_bucket = &bucket["s3:".len()..]; + return Ok(format!("s3://{s3_bucket}/lance/{index_name}")); + } + + // Local path resolution. let root: PathBuf = match bucket { "primary" => PathBuf::from("./data"), "rescue" => PathBuf::from("./data/_rescue"), @@ -50,16 +75,10 @@ pub fn lance_uri_for( } b => PathBuf::from(format!("./data/_buckets/{}", b.replace(':', "_"))), }; - let dataset_dir = root.join("lance").join(index_name); - // Pre-create the parent so Lance's first write doesn't trip on a - // missing ancestor. Lance handles the dataset directory itself. let _ = std::fs::create_dir_all(root.join("lance")); - // Canonicalize after the parent is guaranteed to exist; if the - // dataset dir hasn't been created yet, canonicalize the parent and - // tack on the leaf name.
let abs = match std::fs::canonicalize(&root) { Ok(p) => p.join("lance").join(index_name), - Err(_) => dataset_dir.clone(), + Err(_) => root.join("lance").join(index_name), }; Ok(abs.to_string_lossy().to_string()) } diff --git a/lakehouse.toml b/lakehouse.toml index a569b27..97688ce 100644 --- a/lakehouse.toml +++ b/lakehouse.toml @@ -24,6 +24,18 @@ name = "testing" backend = "local" root = "./data/_testing" +# S3 bucket via MinIO. The name "s3:lakehouse" is the convention +# lance_backend.rs uses to emit s3:// URIs for Lance datasets. +# Credentials resolved via environment (AWS_ACCESS_KEY_ID etc) or +# the secrets provider. +[[storage.buckets]] +name = "s3:lakehouse" +backend = "s3" +bucket = "lakehouse" +endpoint = "http://localhost:9000" +region = "us-east-1" +secret_ref = "minio-lakehouse" + [catalog] # Manifests persisted to object storage under this prefix manifest_prefix = "_catalog/manifests"