#!/usr/bin/env bash # Re-create the SPE-1 Arrow dataset from scratch via CRCNS download. # # Usage (from the dataset root, e.g. systems/datasets/spe1/): # CRCNS_USERNAME=your_user CRCNS_PASSWORD=your_pass \ # bash scripts/prepare.sh # # If CRCNS_PASSWORD is unset you will be prompted interactively. # # Environment overrides: # CRCNS_USERNAME CRCNS account username (REQUIRED) # CRCNS_PASSWORD CRCNS account password (prompted if unset) # SPE1_RAW_DIR Where to store the downloaded raw data (default: ./.raw) # SPE1_OUT_DIR Where to write Arrow datasets (default: .) # SPE1_CELLS Space-separated cell IDs (default: all 12) # SPE1_DURATION Recording seconds per cell (default: 300) # PYTHON Python interpreter to use (default: python3) # # Required Python packages: # pip install requests scipy pandas openpyxl spikeinterface datasets numpy set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" DATASET_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" PYTHON="${PYTHON:-python3}" SPE1_OUT_DIR="${SPE1_OUT_DIR:-${DATASET_ROOT}}" SPE1_RAW_DIR="${SPE1_RAW_DIR:-${SPE1_OUT_DIR}/.raw}" SPE1_DURATION="${SPE1_DURATION:-300}" # Default: 11 Zhao et al. 2026 cells + c5 (longest WC-IC, used for V(t) plots). DEFAULT_CELLS="c5 c14 c15 c16 c19 c24 c26 c28 c29 c37 c45 c46" SPE1_CELLS="${SPE1_CELLS:-${DEFAULT_CELLS}}" CONVERTER="${SCRIPT_DIR}/convert_to_arrow.py" LOG_DIR="${SPE1_OUT_DIR}/.logs" DATA_DIR="${SPE1_RAW_DIR}/Recordings" CHAN_MAP="${SPE1_RAW_DIR}/chanMap.mat" SUMMARY_XLS="${SPE1_RAW_DIR}/Data Summary.xlsx" # CRCNS dataset paths. CRCNS_DATA_PREFIX="spe-1/data" CRCNS_ANCILLARY=( "spe-1/chanMap.mat" "spe-1/Data Summary.xlsx" ) mkdir -p "${SPE1_OUT_DIR}" "${LOG_DIR}" "${SPE1_RAW_DIR}" "${DATA_DIR}" log() { echo "[$(date '+%H:%M:%S')] $*"; } die() { echo "ERROR: $*" >&2; exit 1; } if [[ -z "${CRCNS_USERNAME:-}" ]]; then die "CRCNS_USERNAME is required. Register a free account at https://crcns.org/register and re-run with: CRCNS_USERNAME= CRCNS_PASSWORD= bash $0" fi if [[ -z "${CRCNS_PASSWORD:-}" ]]; then read -r -s -p "CRCNS password for ${CRCNS_USERNAME}: " CRCNS_PASSWORD echo export CRCNS_PASSWORD fi log "SPE-1 prepare pipeline (CRCNS)" log " Output: ${SPE1_OUT_DIR}" log " Raw data: ${SPE1_RAW_DIR}" log " Cells: ${SPE1_CELLS}" log " Duration: ${SPE1_DURATION}s per cell" # --------------------------------------------------------------------------- # Validate Python environment # --------------------------------------------------------------------------- "${PYTHON}" - <<'PYCHECK' import sys missing = [] for pkg in ["requests", "scipy", "spikeinterface", "datasets", "numpy", "pandas"]: try: __import__(pkg) except ImportError: missing.append(pkg) if missing: sys.exit("Missing packages: " + ", ".join(missing) + "\nInstall with: pip install " + " ".join(missing)) print(" All required packages present.") PYCHECK crcns_download() { # crcns_download local fn="$1" local out="$2" if [[ -s "${out}" ]]; then return 0 fi mkdir -p "$(dirname "${out}")" CRCNS_FN="${fn}" CRCNS_OUT="${out}" \ "${PYTHON}" - <<'PYDL' import os, sys, requests URL = "https://portal.nersc.gov/project/crcns/download/index.php" fn = os.environ["CRCNS_FN"] out = os.environ["CRCNS_OUT"] data = dict(username=os.environ["CRCNS_USERNAME"], password=os.environ["CRCNS_PASSWORD"], fn=fn, submit="Login") tmp = out + ".part" total = 0 with requests.Session() as s, requests.post( URL, data=data, stream=True, timeout=60 ) as r: r.raise_for_status() if r.headers.get("Content-Type", "").startswith("text/html"): sys.exit("CRCNS returned HTML (likely auth failure): " + r.text[:300].replace("\n", " ")) with open(tmp, "wb") as f: for chunk in r.iter_content(chunk_size=1 << 20): if chunk: f.write(chunk) total += len(chunk) if total % (256 << 20) < (1 << 20): print(f" ... {total / (1<<20):.0f} MiB", flush=True) os.replace(tmp, out) print(f" Done: {total / (1<<20):.1f} MiB -> {out}") PYDL } # --------------------------------------------------------------------------- # Step 1 - download ancillary files (chanMap + Data Summary) # --------------------------------------------------------------------------- for src in "${CRCNS_ANCILLARY[@]}"; do fname="$(basename "${src}")" target="${SPE1_RAW_DIR}/${fname}" if [[ ! -s "${target}" ]]; then log "Downloading ${fname} from CRCNS …" crcns_download "${src}" "${target}" fi done # Mirror the small ancillary files at the dataset root so consumers don't # need .raw/ to load probe geometry or ground-truth electrode assignments. cp -u "${CHAN_MAP}" "${SPE1_OUT_DIR}/chanMap.mat" cp -u "${SUMMARY_XLS}" "${SPE1_OUT_DIR}/Data Summary.xlsx" # --------------------------------------------------------------------------- # Step 2 - download + extract per-cell tar.gz archives # --------------------------------------------------------------------------- for cell in ${SPE1_CELLS}; do if [[ -n "$(find "${DATA_DIR}/${cell}" -maxdepth 1 -name '*npx_raw.bin' 2>/dev/null | head -1)" ]]; then log " ${cell}: already extracted — skipping" continue fi archive="${SPE1_RAW_DIR}/${cell}.tar.gz" if [[ ! -s "${archive}" ]]; then log "Downloading ${cell}.tar.gz from CRCNS …" crcns_download "${CRCNS_DATA_PREFIX}/${cell}.tar.gz" "${archive}" \ 2>&1 | tee "${LOG_DIR}/download_${cell}.log" if [[ ! -s "${archive}" ]]; then log " ERROR: download failed for ${cell}.tar.gz" rm -f "${archive}" "${archive}.part" continue fi fi log "Extracting ${cell}.tar.gz into ${DATA_DIR}/ …" tar -xzf "${archive}" -C "${DATA_DIR}" \ || { log " ERROR: extraction failed for ${cell}.tar.gz"; continue; } done # --------------------------------------------------------------------------- # Step 3 - convert each cell to Arrow # --------------------------------------------------------------------------- log "Converting cells to Arrow (one cell per subprocess to bound memory) …" CONVERT_ERRORS=0 SKIPPED_CELLS="" for cell in ${SPE1_CELLS}; do npx_bin=$(find "${DATA_DIR}/${cell}" -maxdepth 1 -name "*npx_raw.bin" 2>/dev/null | head -1) if [[ -z "${npx_bin}" ]]; then log " Skipping ${cell}: npx_raw.bin not present" SKIPPED_CELLS="${SKIPPED_CELLS} ${cell}" continue fi log " Converting ${cell} …" "${PYTHON}" "${CONVERTER}" \ --data-dir "${DATA_DIR}" \ --chan-map "${CHAN_MAP}" \ --summary "${SUMMARY_XLS}" \ --output "${SPE1_OUT_DIR}" \ --cells "${cell}" \ --duration "${SPE1_DURATION}" \ 2>&1 | tee "${LOG_DIR}/convert_${cell}.log" if [[ ${PIPESTATUS[0]} -ne 0 ]]; then log " ERROR: conversion failed for ${cell}" CONVERT_ERRORS=$((CONVERT_ERRORS + 1)) fi done if [[ -n "${SKIPPED_CELLS}" ]]; then log "Skipped (npx_raw.bin missing):${SKIPPED_CELLS}" fi if [[ ${CONVERT_ERRORS} -gt 0 ]]; then die "${CONVERT_ERRORS} cell(s) failed to convert. Check ${LOG_DIR}/" fi log "Pipeline complete. Datasets at: ${SPE1_OUT_DIR}"