From d122703e9ac7bf1d8ca3bc39a6689239681230f2 Mon Sep 17 00:00:00 2001
From: root
Date: Fri, 24 Apr 2026 06:22:27 -0500
Subject: [PATCH] vectord: delete _run_embedding_job_legacy — 44 lines of
 explicit dead code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The function was labeled "Legacy single-pipeline embedding (replaced by
supervisor)" and carried a #[allow(dead_code)] attribute. Zero callers
across the workspace.

This is exactly what #[allow(dead_code)] silently signals: "I know this
is dead but I'm not committing to removing it" — so let's commit to
removing it.

An iter-memory grep for this pattern showed 5 remaining
#[allow(dead_code)] attributes in the workspace (1 here, 4 in
gateway/access.rs). The four in access.rs are waiting on P13-001
(queryd → AccessControl wiring) before removal — that's cross-crate
work. This one was self-contained.

Net: -44 lines of dead code + the marker comment. Workspace warnings
unchanged at 11.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 crates/vectord/src/service.rs | 45 -----------------------------------
 1 file changed, 45 deletions(-)

diff --git a/crates/vectord/src/service.rs b/crates/vectord/src/service.rs
index 5af0283..aa2ce91 100644
--- a/crates/vectord/src/service.rs
+++ b/crates/vectord/src/service.rs
@@ -470,51 +470,6 @@ async fn copy_key(
     storaged::ops::put(dst, key, data).await
 }
 
-// --- unused legacy function below, kept for reference ---
-
-#[allow(dead_code)]
-/// Legacy single-pipeline embedding (replaced by supervisor).
-async fn _run_embedding_job_legacy(
-    job_id: &str,
-    index_name: &str,
-    chunks: &[chunker::TextChunk],
-    ai_client: &AiClient,
-    store: &Arc<Store>,
-    tracker: &jobs::JobTracker,
-) -> Result<String, String> {
-    let batch_size = 32;
-    let mut all_vectors: Vec<Vec<f32>> = Vec::new();
-    let start = std::time::Instant::now();
-
-    for (i, batch) in chunks.chunks(batch_size).enumerate() {
-        let texts: Vec<String> = batch.iter().map(|c| c.text.clone()).collect();
-
-        let embed_resp = ai_client.embed(EmbedRequest {
-            texts,
-            model: None,
-        }).await.map_err(|e| format!("embed batch {} error: {e}", i))?;
-
-        all_vectors.extend(embed_resp.embeddings);
-
-        // Update progress
-        let elapsed = start.elapsed().as_secs_f32();
-        let rate = if elapsed > 0.0 { all_vectors.len() as f32 / elapsed } else { 0.0 };
-        tracker.update_embed_progress(job_id, all_vectors.len(), rate).await;
-
-        // Log every 100 batches
-        if (i + 1) % 100 == 0 {
-            let pct = (all_vectors.len() as f32 / chunks.len() as f32) * 100.0;
-            let eta = if rate > 0.0 { (chunks.len() - all_vectors.len()) as f32 / rate } else { 0.0 };
-            tracing::info!("job {job_id}: {}/{} chunks ({pct:.0}%), {rate:.0}/sec, ETA {eta:.0}s",
-                all_vectors.len(), chunks.len());
-        }
-    }
-
-    // Store
-    let key = store::store_embeddings(store, index_name, chunks, &all_vectors).await?;
-    Ok(key)
-}
-
 // --- Job Status ---
 
 async fn list_jobs(State(state): State<AppState>) -> impl IntoResponse {
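
Note on the lint mechanics this message relies on: rustc's dead_code
lint warns on any item with zero callers, and #[allow(dead_code)]
silences that warning while leaving the item in place. A minimal,
self-contained sketch of both states, with invented names (orphaned,
lint_demo.rs) that are not from this codebase:

    // Illustrative only; build with `rustc lint_demo.rs`.

    #[allow(dead_code)] // with this attribute: no warning
    fn orphaned() -> u32 {
        42
    }

    fn main() {
        // Nothing calls orphaned(). Delete the attribute above and
        // rustc emits: warning: function `orphaned` is never used
        println!("orphaned() has zero callers");
    }

Deleting the function outright, as this patch does, removes both the
dead item and the suppression, so the warning count stays unchanged.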