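#!/usr/bin/env bash
# Batch fine-tuning runs: BOFT and OFT adapters, two seeds each (43 and 44).
# The date calls around each run give a rough wall-clock measurement.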
export OMINI_CONFIG=./config/commonsense.yaml
# echo "$OMINI_CONFIG"
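# Setting TOKENIZERS_PARALLELISM explicitly also silences the HF tokenizers fork warning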
export TOKENIZERS_PARALLELISM=true
# CUDA headers (cuda.h) from the conda environment
CUDA_INCLUDE_PATH="/home/work/miniconda3/envs/allm/include"
# Make them visible to the C/C++ compilers via CPATH and CPLUS_INCLUDE_PATH;
# the ${VAR:+...} expansion avoids a stray leading colon when the variable is unset
export CPATH="${CPATH:+$CPATH:}$CUDA_INCLUDE_PATH"
export CPLUS_INCLUDE_PATH="${CPLUS_INCLUDE_PATH:+$CPLUS_INCLUDE_PATH:}$CUDA_INCLUDE_PATH"
# echo "CPATH is set to: $CPATH"
# echo "CPLUS_INCLUDE_PATH is set to: $CPLUS_INCLUDE_PATH"
export WANDB_PROJECT="Llama2_7B_FT_Math40k_2"
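# Limit each math library to a single thread so the launched processes
# do not oversubscribe the CPU cores.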
export OMP_NUM_THREADS=1
export MKL_NUM_THREADS=1
export OPENBLAS_NUM_THREADS=1
export NUMEXPR_NUM_THREADS=1
date +"%F %T"
# Adapter variants (unused below; the runs are written out per adapter and seed)
TEXT=("oft" "boft" "loco" "hra")
# --run_text "$text" --dynamo_backend no
export ACCELERATE_DYNAMO_BACKEND="no"
# --trainer_args.max_steps=81 \
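# BOFT runs (seeds 44 and 43): per-device batch 32 with 2 gradient-accumulation
# steps gives an effective per-device batch of 64, matching the OFT runs below.
# The nested quotes in '"no"' presumably keep "no" a literal string for the
# config parser; with ACCELERATE_DYNAMO_BACKEND exported above, the explicit
# --dynamo_backend flag is likely redundant but harmless.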
accelerate launch --dynamo_backend no --main_process_port 41353 -m src.testLlama \
--config_path $OMINI_CONFIG --trainer_args.output_dir "./expsBOFT/seed44/" --trainer_args.learning_rate=8e-4 \
--run_text "boft" --trainer_args.per_device_train_batch_size 32 \
--rotation_adapter_config.num_rotations 1 --rotation_adapter_config.r 16 \
--trainer_args.gradient_accumulation_steps 2 \
--trainer_args.num_train_epochs 2.0 --data.dataset_split train \
--trainer_args.eval_strategy '"no"' \
--trainer_args.load_best_model_at_end False \
--trainer_args.save_strategy '"no"' \
--trainer_args.logging_steps 50 \
--trainer_args.report_to none --trainer_args.warmup_steps 100 \
--seed 44
date +"%F %T"
accelerate launch --dynamo_backend no --main_process_port 41353 -m src.testLlama \
--config_path $OMINI_CONFIG --trainer_args.output_dir "./expsBOFT/seed43/" --trainer_args.learning_rate=8e-4 \
--run_text "boft" --trainer_args.per_device_train_batch_size 32 \
--rotation_adapter_config.num_rotations 1 --rotation_adapter_config.r 16 \
--trainer_args.gradient_accumulation_steps 2 \
--trainer_args.num_train_epochs 2.0 --data.dataset_split train \
--trainer_args.eval_strategy '"no"' \
--trainer_args.load_best_model_at_end False \
--trainer_args.save_strategy '"no"' \
--trainer_args.logging_steps 50 \
--trainer_args.report_to none --trainer_args.warmup_steps 100 \
--seed 43
date +"%F %T"
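# OFT runs (seeds 43 and 44): per-device batch 64 with no gradient accumulation,
# so the effective batch matches the BOFT runs above.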
accelerate launch --main_process_port 41353 -m src.testLlama \
--config_path $OMINI_CONFIG --trainer_args.output_dir "./expsOFT/seed43/" --trainer_args.learning_rate=8e-4 \
--run_text "oft" --trainer_args.per_device_train_batch_size 64 \
--rotation_adapter_config.num_rotations 1 --rotation_adapter_config.r 16 \
--trainer_args.gradient_accumulation_steps 1 \
--trainer_args.num_train_epochs 2.0 --data.dataset_split train \
--trainer_args.eval_strategy '"no"' \
--trainer_args.load_best_model_at_end False \
--trainer_args.save_strategy '"no"' \
--trainer_args.logging_steps 50 \
--trainer_args.report_to none --trainer_args.warmup_steps 100 \
--seed 43
date +"%F %T"
accelerate launch --main_process_port 41353 -m src.testLlama \
--config_path $OMINI_CONFIG --trainer_args.output_dir "./expsOFT/seed44/" --trainer_args.learning_rate=8e-4 \
--run_text "oft" --trainer_args.per_device_train_batch_size 64 \
--rotation_adapter_config.num_rotations 1 --rotation_adapter_config.r 16 \
--trainer_args.gradient_accumulation_steps 1 \
--trainer_args.num_train_epochs 2.0 --data.dataset_split train \
--trainer_args.eval_strategy '"no"' \
--trainer_args.load_best_model_at_end False \
--trainer_args.save_strategy '"no"' \
--trainer_args.logging_steps 50 \
--trainer_args.report_to none --trainer_args.warmup_steps 100 \
--seed 44
date +"%F %T"