# NOTE: the four tokens below ("text" / "stringlengths" / "0" / "445") are
# dataset-export column metadata, not shell code — commented out so the
# script parses; they were never meant to execute.
# text
# stringlengths
# 0
# 445
# Sweep configuration: knowledge sizes (k) and RAG top-k candidates.
# NOTE: requires `datasets` and `models` arrays (and `indices` for the RAG
# branch) to be defined earlier in this file.
# maxQuestions=("16" "24" "32" "48" "64" "80")
maxQuestions=("80")
top_k=("1" "3" "5" "10" "20")
# # all k = 7405 article, tokens = 10,038,084
# # when k = 16, tokens = 21,000
# # when k = 24, tokens = 32,667
# # when k = 32, tokens = 43,000
# # when k = 48, tokens = 64,000
# # when k = 64, tokens = 85,000
# # when k = 80, tokens = 106,000
for dataset in "${datasets[@]}"; do
  for model in "${models[@]}"; do
    for maxQuestion in "${maxQuestions[@]}"; do
      # k (maxKnowledge) is tied 1:1 to maxQuestion in this sweep.
      k=$maxQuestion
      # Ensure the results directory exists before the Python script tries
      # to write its output file there (fails on a fresh checkout otherwise).
      mkdir -p "./results/${dataset}/${maxQuestion}"
      # Run KVCACHE without cache
      echo "Running KVCACHE for $dataset, maxQuestion $maxQuestion, model $model"
      python ./kvcache.py --kvcache file --dataset "$dataset" --similarity bertscore \
        --maxKnowledge "$k" --maxQuestion "$maxQuestion" --usePrompt \
        --modelname "meta-llama/Llama-${model}-Instruct" \
        --output "./results/${dataset}/${maxQuestion}/result_${model}_k${k}_q${maxQuestion}_${dataset}_bertscore_kvcache_nokv.txt"
      # # Run KVCACHE
      # echo "Running KVCACHE for $dataset, maxQuestion $maxQuestion, model $model"
      # python ./kvcache.py --kvcache file --dataset "$dataset" --similarity bertscore \
      #   --maxKnowledge "$k" --maxQuestion "$maxQuestion" \
      #   --modelname "meta-llama/Llama-${model}-Instruct" \
      #   --output "./results/${dataset}/${maxQuestion}/result_${model}_k${k}_q${maxQuestion}_${dataset}_bertscore_kvcache.txt"
      # # Run RAG
      # for topk in "${top_k[@]}"; do
      #   for index in "${indices[@]}"; do
      #     echo "Running RAG with $index for $dataset, maxKnowledge $k, maxParagraph $p, maxQuestion $maxQuestion, model $model, topk ${topk}"
      #     python ./rag.py --index "$index" --dataset "$dataset" --similarity bertscore \
      #       --maxKnowledge "$k" --maxQuestion "$maxQuestion" --topk "$topk" \
      #       --modelname "meta-llama/Llama-${model}-Instruct" \
      #       --output "./results/${dataset}/${maxQuestion}/result_${model}_k${k}_q${maxQuestion}_${dataset}_bertscore_rag_Index_${index}.txt_top${topk}"
      #   done
      # done
    done
  done
done