#!/usr/bin/env bash
# 03_ingest_csv_to_parquet.sh — GOLAKE-030.
# Ingests fixtures/csv/workers.csv via /v1/ingest, verifies the parquet
# object lands on storaged and catalogd registers a matching manifest.
# Leaves data in place so 04_query_correctness can SELECT against it.
#
# Required env (provided by lib/env.sh): PROOF_REPO_ROOT, PROOF_REPORT_DIR,
# PROOF_GATEWAY_URL.

# Deliberately no -e: assert failures are recorded by the proof framework,
# not by aborting the shell. -u and pipefail still catch typos and broken
# pipelines.
set -uo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/../lib/env.sh"
source "${SCRIPT_DIR}/../lib/http.sh"
source "${SCRIPT_DIR}/../lib/assert.sh"

CASE_ID="GOLAKE-030"
CASE_NAME="Ingest CSV → Parquet → catalog manifest"
CASE_TYPE="integration"
# Harness sources this file with --metadata-only to read CASE_* without
# running the case; `return` works when sourced, `exit` when executed.
if [ "${1:-}" = "--metadata-only" ]; then return 0 2>/dev/null || exit 0; fi

DATASET="proof_workers"
CSV_FIXTURE="${PROOF_REPO_ROOT}/tests/proof/fixtures/csv/workers.csv"

# Fail fast if the fixture is missing. Without set -e, sha256sum's failure
# below would otherwise be swallowed and CSV_SHA recorded as empty.
if [ ! -f "$CSV_FIXTURE" ]; then
  printf 'GOLAKE-030: missing fixture: %s\n' "$CSV_FIXTURE" >&2
  return 1 2>/dev/null || exit 1
fi

# Record fixture sha for the evidence chain. Build the JSON with jq -n so
# the value is properly escaped instead of string-interpolated into echo.
CSV_SHA=$(sha256sum "$CSV_FIXTURE" | awk '{print $1}')
mkdir -p "${PROOF_REPORT_DIR}/raw/outputs"
jq -n --arg sha "$CSV_SHA" '{fixture: "workers.csv", sha256: $sha}' \
  > "${PROOF_REPORT_DIR}/raw/outputs/${CASE_ID}_fixture.json"

# Idempotent prelude — schema-drift would 409, but identical-fp is fine.
# We can't easily delete a catalog entry; rely on idempotent re-ingest.
# If a prior run with different csv content registered DATASET, this
# would 409 — which would be a real finding worth surfacing.

# Ingest. /v1/ingest takes ?name= in the query and a multipart form
# with the CSV file under any field name (handler reads the first file).
# proof_post / proof_put set Content-Type + --data which conflict with
# multipart -F; use proof_call for direct curl-arg pass-through.
proof_call "$CASE_ID" "ingest" POST \
  "${PROOF_GATEWAY_URL}/v1/ingest?name=${DATASET}" \
  -F "file=@${CSV_FIXTURE}" >/dev/null
ingest_status=$(proof_status_of "$CASE_ID" "ingest")
proof_assert_eq "$CASE_ID" "ingest → 200" "200" "$ingest_status"

# Halt the rest of the case if ingest didn't succeed — the downstream
# claims would all fail for the same reason, no point recording N
# duplicate failures.
if [ "$ingest_status" != "200" ]; then
  proof_skip "$CASE_ID" "downstream claims skipped — ingest failed" \
    "see raw/http/${CASE_ID}/ingest.body for upstream error"
  return 0 2>/dev/null || exit 0
fi

ingest_body="${PROOF_REPORT_DIR}/raw/http/${CASE_ID}/ingest.body"
# Response shape: {manifest, existing, row_count, parquet_size, parquet_key}.
# All jq reads use `// empty` so a missing/null key yields "" rather than
# the literal string "null" — otherwise the non-empty assert on parquet_key
# would pass vacuously on a malformed response.
row_count=$(jq -r '.row_count // empty' "$ingest_body")
proof_assert_eq "$CASE_ID" "ingest reports row_count = 5" "5" "$row_count"

parquet_size=$(jq -r '.parquet_size // empty' "$ingest_body")
proof_assert_gt "$CASE_ID" "parquet_size > 0" "$parquet_size" "0"

parquet_key=$(jq -r '.parquet_key // empty' "$ingest_body")
proof_assert_ne "$CASE_ID" "parquet_key non-empty" "" "$parquet_key"
# Content-addressed keys are datasets/<name>/<sha256>.parquet per memory `c1e4113`.
proof_assert_contains "$CASE_ID" "parquet_key is content-addressed under datasets/${DATASET}/" \
  "datasets/${DATASET}/" "$parquet_key"

# Verify the parquet object actually exists on storaged.
proof_get "$CASE_ID" "storage_list" \
  "${PROOF_GATEWAY_URL}/v1/storage/list" >/dev/null
list_body=$(proof_body_of "$CASE_ID" "storage_list")
proof_assert_contains "$CASE_ID" "storaged LIST contains parquet_key" \
  "$parquet_key" "$list_body"

# Verify catalogd has a matching manifest.
proof_get "$CASE_ID" "catalog_manifest" \
  "${PROOF_GATEWAY_URL}/v1/catalog/manifest/${DATASET}" >/dev/null
proof_assert_eq "$CASE_ID" "catalog manifest GET → 200" "200" \
  "$(proof_status_of "$CASE_ID" "catalog_manifest")"

manifest_body="${PROOF_REPORT_DIR}/raw/http/${CASE_ID}/catalog_manifest.body"
manifest_row_count=$(jq -r '.row_count // empty' "$manifest_body")
proof_assert_eq "$CASE_ID" "manifest row_count = 5" "5" "$manifest_row_count"