#!/usr/bin/env bash
# Evaluate a series of pretraining checkpoints with `litgpt evaluate`.
# For each checkpoint step listed in CKPTS, runs the TASKS benchmark and
# saves results plus a combined stdout/stderr log to ${OUT_ROOT}/step-00000<m>/log.txt.
#
# pipefail is required here: without it, `litgpt ... | tee log` reports tee's
# exit status and a failed evaluation would go unnoticed.
set -euo pipefail

MODEL_ROOT="out/pretrain/qwen2_7b_question_focus_lr_plus" #"out/pretrain/qwen2_7b_question_focus"
TASKS="arxiv_mc"
BATCH=4
OUT_ROOT="out/eval/qwen2_7b_question_focus_lr_plus" #"out/eval/qwen2_7b_question_focus" #"out/eval/qwen2_arxiv_mc" #"out/eval/tinyllama_arxiv_mc"
# Checkpoint step suffixes; zero-padded so "step-00000${m}" matches the
# directory names produced during pretraining.
CKPTS=(010 020 030 040 050 060 070 080 090 100)

export CUDA_VISIBLE_DEVICES="4,5,6,7"

# Iterate the array directly — the numeric index served no other purpose.
for m in "${CKPTS[@]}"; do
  ckpt="${MODEL_ROOT}/step-00000${m}"
  out="${OUT_ROOT}/step-00000${m}"
  mkdir -p "$out"

  litgpt evaluate "${ckpt}" \
    --batch_size "${BATCH}" \
    --tasks "${TASKS}" \
    --out_dir "${out}" \
    2>&1 | tee "${out}/log.txt"
done

# Kept for reference: one-off evaluation of the base model root.
#litgpt evaluate "${MODEL_ROOT}" \
#  --batch_size ${BATCH} \
#  --tasks "${TASKS}" \
#  --out_dir "out/eval/qwen2_7b_question_focus/step-00000000" \
#  2>&1 | tee "${out}/log.txt"

echo "All evaluated."