//! Vector storage as Parquet files.
//!
//! Each embedding index is stored as: source, doc_id, chunk_idx, chunk_text,
//! vector (binary blob). Vectors are stored as raw f32 bytes for compact
//! storage and fast loading.

use arrow::array::{ArrayRef, BinaryArray, Int32Array, RecordBatch, StringArray};
use arrow::datatypes::{DataType, Field, Schema};
use object_store::ObjectStore;
use std::sync::Arc;
use storaged::ops;

use crate::chunker::TextChunk;

/// A stored embedding: chunk text plus its vector.
#[derive(Debug, Clone)]
pub struct StoredEmbedding {
    pub source: String,
    pub doc_id: String,
    pub chunk_idx: u32,
    pub chunk_text: String,
    pub vector: Vec<f32>,
}

/// Store embeddings as a Parquet file in object storage.
pub async fn store_embeddings(
    store: &Arc<dyn ObjectStore>,
    index_name: &str,
    chunks: &[TextChunk],
    vectors: &[Vec<f64>], // from the embedding API (f64); stored as f32
) -> Result<String, String> {
    if chunks.len() != vectors.len() {
        return Err(format!(
            "chunk count ({}) != vector count ({})",
            chunks.len(),
            vectors.len()
        ));
    }
    let n = chunks.len();

    let sources: Vec<&str> = chunks.iter().map(|c| c.source.as_str()).collect();
    let doc_ids: Vec<&str> = chunks.iter().map(|c| c.doc_id.as_str()).collect();
    let chunk_idxs: Vec<i32> = chunks.iter().map(|c| c.chunk_idx as i32).collect();
    let texts: Vec<&str> = chunks.iter().map(|c| c.text.as_str()).collect();

    // Store each vector as raw little-endian f32 bytes (compact binary blob).
    let vector_bytes: Vec<Vec<u8>> = vectors
        .iter()
        .map(|v| {
            v.iter()
                .map(|&x| x as f32)
                .flat_map(|f| f.to_le_bytes())
                .collect()
        })
        .collect();
    let vector_refs: Vec<&[u8]> = vector_bytes.iter().map(|v| v.as_slice()).collect();

    let schema = Arc::new(Schema::new(vec![
        Field::new("source", DataType::Utf8, false),
        Field::new("doc_id", DataType::Utf8, false),
        Field::new("chunk_idx", DataType::Int32, false),
        Field::new("chunk_text", DataType::Utf8, false),
        Field::new("vector", DataType::Binary, false),
    ]));
    let arrays: Vec<ArrayRef> = vec![
        Arc::new(StringArray::from(sources)),
        Arc::new(StringArray::from(doc_ids)),
        Arc::new(Int32Array::from(chunk_idxs)),
        Arc::new(StringArray::from(texts)),
        Arc::new(BinaryArray::from(vector_refs)),
    ];
    let batch = RecordBatch::try_new(schema, arrays)
        .map_err(|e| format!("RecordBatch error: {e}"))?;

    let parquet = shared::arrow_helpers::record_batch_to_parquet(&batch)?;
    let key = format!("vectors/{index_name}.parquet");
    ops::put(store, &key, parquet).await?;
    tracing::info!("stored {n} embeddings in {key}");
    Ok(key)
}
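// A minimal sketch (not in the original module) verifying the byte layout
// used above: each f64 from the embedding API is narrowed to f32 and packed
// little-endian, so a d-dimensional vector occupies exactly 4*d bytes. The
// module and test names are illustrative; only std is assumed.
#[cfg(test)]
mod byte_layout_tests {
    #[test]
    fn f32_le_round_trip() {
        let original: Vec<f64> = vec![0.25, -1.5, 3.0];
        // Encode: narrow to f32, pack little-endian (as in store_embeddings).
        let bytes: Vec<u8> = original
            .iter()
            .map(|&x| x as f32)
            .flat_map(|f| f.to_le_bytes())
            .collect();
        assert_eq!(bytes.len(), original.len() * 4);
        // Decode: 4-byte chunks back to f32 (as in load_embeddings).
        let decoded: Vec<f32> = bytes
            .chunks_exact(4)
            .map(|b| f32::from_le_bytes([b[0], b[1], b[2], b[3]]))
            .collect();
        assert_eq!(decoded, vec![0.25f32, -1.5, 3.0]);
    }
}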
/// Load all embeddings from a vector index file.
pub async fn load_embeddings(
    store: &Arc<dyn ObjectStore>,
    index_name: &str,
) -> Result<Vec<StoredEmbedding>, String> {
    let key = format!("vectors/{index_name}.parquet");
    let data = ops::get(store, &key).await?;
    let (_, batches) = shared::arrow_helpers::parquet_to_record_batches(&data)?;

    let mut embeddings = Vec::new();
    for batch in &batches {
        let sources = batch
            .column(0)
            .as_any()
            .downcast_ref::<StringArray>()
            .ok_or("source column not string")?;
        let doc_ids = batch
            .column(1)
            .as_any()
            .downcast_ref::<StringArray>()
            .ok_or("doc_id column not string")?;
        let chunk_idxs = batch
            .column(2)
            .as_any()
            .downcast_ref::<Int32Array>()
            .ok_or("chunk_idx column not int")?;
        let texts = batch
            .column(3)
            .as_any()
            .downcast_ref::<StringArray>()
            .ok_or("chunk_text column not string")?;
        let vectors = batch
            .column(4)
            .as_any()
            .downcast_ref::<BinaryArray>()
            .ok_or("vector column not binary")?;

        for i in 0..batch.num_rows() {
            // Decode raw little-endian f32 bytes back into a vector.
            let vec_bytes = vectors.value(i);
            let vector: Vec<f32> = vec_bytes
                .chunks_exact(4)
                .map(|b| f32::from_le_bytes([b[0], b[1], b[2], b[3]]))
                .collect();
            embeddings.push(StoredEmbedding {
                source: sources.value(i).to_string(),
                doc_id: doc_ids.value(i).to_string(),
                chunk_idx: chunk_idxs.value(i) as u32,
                chunk_text: texts.value(i).to_string(),
                vector,
            });
        }
    }
    tracing::info!("loaded {} embeddings from {key}", embeddings.len());
    Ok(embeddings)
}
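
// A minimal consumer sketch, not part of the original API: rank loaded
// embeddings by cosine similarity against an f32 query vector. The names
// `cosine_similarity` and `nearest_chunks`, and the assumption that queries
// arrive as f32 slices, are illustrative; the real search path may differ.
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let norm_a: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let norm_b: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    if norm_a == 0.0 || norm_b == 0.0 {
        0.0
    } else {
        dot / (norm_a * norm_b)
    }
}

/// Hypothetical usage: return the top-k chunks for a query vector, paired
/// with their similarity scores, highest first.
pub fn nearest_chunks<'a>(
    embeddings: &'a [StoredEmbedding],
    query: &[f32],
    k: usize,
) -> Vec<(&'a StoredEmbedding, f32)> {
    let mut scored: Vec<(&StoredEmbedding, f32)> = embeddings
        .iter()
        .map(|e| (e, cosine_similarity(&e.vector, query)))
        .collect();
    scored.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
    scored.truncate(k);
    scored
}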