gregjanik committed on
Commit 20145da (verified) · parent 01ed052

Upload folder using huggingface_hub

Files changed (3)
  1. scripts/entrypoint.sh +146 -0
  2. scripts/infer.sh +24 -0
  3. scripts/train.sh +223 -0
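
For reference, a commit like this can be reproduced from a local checkout with the hf CLI that ships with huggingface_hub. A minimal sketch; the target repo id below is a placeholder, since this page does not show it:

    # Upload the local scripts/ folder to the repo's scripts/ path
    hf upload <user>/<repo> ./scripts scripts --commit-message "Upload folder using huggingface_hub"
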
scripts/entrypoint.sh ADDED
@@ -0,0 +1,146 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ # Force verbose download progress even in non-TTY environments
+ export HF_HUB_DISABLE_PROGRESS_BARS=0
+ export TRANSFORMERS_VERBOSITY=info
+ export HF_HUB_VERBOSITY=info
+ export WANDB_DISABLE_WEAVE="${WANDB_DISABLE_WEAVE:-true}"
+
+ echo "============================================"
+ echo " F13 Qwen 3.5 Fine-Tuning Container"
+ echo "============================================"
+
+ if [ -n "${RUNPOD_POD_ID:-}" ]; then
+     echo "Running on RunPod pod: ${RUNPOD_POD_ID}"
+ elif [ -n "${AWS_INSTANCE_ID:-}" ]; then
+     echo "Running on AWS instance: ${AWS_INSTANCE_ID}"
+ fi
+
+ NUM_NODES="${NUM_NODES:-1}"
+ NODE_RANK="${NODE_RANK:-0}"
+ if [ "${NUM_NODES}" -gt 1 ]; then
+     echo ""
+     echo "--- Multi-Node Cluster ---"
+     echo " Nodes: ${NUM_NODES}"
+     echo " This rank: ${NODE_RANK}"
+     echo " Master: ${MASTER_ADDR:-unknown}:${MASTER_PORT:-29500}"
+     echo " Node addr: ${NODE_ADDR:-unknown}"
+     echo " World size: ${WORLD_SIZE:-unknown}"
+     echo " GPUs/node: ${NUM_TRAINERS:-unknown}"
+
+     if [ -n "${PRIMARY_ADDR:-}" ]; then
+         export NCCL_SOCKET_IFNAME="${NCCL_SOCKET_IFNAME:-ens1}"
+         echo " NCCL iface: ${NCCL_SOCKET_IFNAME}"
+     fi
+     echo "--------------------------"
+     echo ""
+     echo "--- Network Interfaces ---"
+     ip -4 addr show 2>/dev/null || echo "(ip command not available)"
+     echo ""
+     echo "--- Connectivity Test ---"
+     if [ -n "${MASTER_ADDR:-}" ]; then
+         echo -n " TCP to ${MASTER_ADDR}:${MASTER_PORT:-29500} -> "
+         timeout 3 bash -c "echo > /dev/tcp/${MASTER_ADDR}/${MASTER_PORT:-29500}" 2>/dev/null && echo "OK" || echo "UNREACHABLE (expected before torchrun)"
+         echo -n " TCP to ${MASTER_ADDR}:8080 -> "
+         timeout 3 bash -c "echo > /dev/tcp/${MASTER_ADDR}/8080" 2>/dev/null && echo "OK" || echo "UNREACHABLE (master API may not be up yet)"
+     fi
+     echo "--------------------------"
+ fi
+
+ nvidia-smi || echo "Warning: nvidia-smi not available"
+ echo ""
+
+ # --- SSH Setup ---
+ SSH_PORT="${SSH_PORT:-22}"
+ mkdir -p /var/run/sshd /root/.ssh
+ chmod 700 /root/.ssh
+
+ if [ -n "${PUBLIC_KEY:-}" ]; then
+     echo "${PUBLIC_KEY}" > /root/.ssh/authorized_keys
+     chmod 600 /root/.ssh/authorized_keys
+     echo "SSH public key installed."
+ fi
+
+ if [ -n "${SSH_PASSWORD:-}" ]; then
+     echo "root:${SSH_PASSWORD}" | chpasswd
+     sed -i 's/#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
+     sed -i 's/#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
+     echo "SSH password auth enabled."
+ else
+     sed -i 's/#*PasswordAuthentication.*/PasswordAuthentication no/' /etc/ssh/sshd_config
+ fi
+
+ sed -i "s/#*Port .*/Port ${SSH_PORT}/" /etc/ssh/sshd_config
+
+ if [ ! -f /etc/ssh/ssh_host_rsa_key ]; then
+     ssh-keygen -A
+ fi
+
+ /usr/sbin/sshd
+ echo "SSH daemon started on port ${SSH_PORT}"
+
+ # --- HuggingFace / W&B ---
+ if [ -n "${HF_TOKEN:-}" ]; then
+     echo "HF_TOKEN is set — logging into HuggingFace Hub..."
+     hf auth login --token "${HF_TOKEN}" --add-to-git-credential 2>/dev/null || true
+     echo "HuggingFace login complete."
+ else
+     echo "HF_TOKEN not set — only public models/datasets available."
+ fi
+
+ if [ -n "${WANDB_API_KEY:-}" ]; then
+     echo "WANDB_API_KEY is set — W&B logging available."
+     wandb login "${WANDB_API_KEY}" 2>/dev/null || true
+ else
+     echo "WANDB_API_KEY not set — W&B logging disabled by default."
+ fi
+ echo ""
+
+ # --- Default dataset ---
+ if [ ! -f "/workspace/data/train.jsonl" ]; then
+     echo "No custom train.jsonl found, copying bundled default dataset."
+     mkdir -p /workspace/data
+     cp /opt/f13/data/train.jsonl /workspace/data/train.jsonl
+     cp /opt/f13/data/test.jsonl /workspace/data/test.jsonl
+ else
+     echo "Found custom dataset at /workspace/data/train.jsonl"
+ fi
+
+ # --- Pre-flight: verify key CUDA packages ---
+ echo "Verifying GPU-accelerated packages..."
+ python3 -c "
+ checks = []
+ try:
+     import flash_attn; checks.append(f'flash-attn {flash_attn.__version__}')
+ except Exception as e: checks.append(f'flash-attn MISSING: {e}')
+ try:
+     import fla; checks.append('flash-linear-attention OK')
+     from fla.ops.gated_delta_rule import fused_recurrent_gated_delta_rule
+     checks.append('  GatedDeltaNet FLA kernel OK')
+ except Exception as e: checks.append(f'flash-linear-attention MISSING: {e}')
+ try:
+     import causal_conv1d; checks.append('causal-conv1d OK')
+ except Exception as e: checks.append(f'causal-conv1d MISSING: {e}')
+ for c in checks: print(f'  [{\"OK\" if \"MISSING\" not in c else \"WARN\"}] {c}')
+ "
+ echo ""
+
+ # --- API Server ---
+ API_PORT="${API_PORT:-8080}"
+ echo ""
+ echo "Starting API server on port ${API_PORT}..."
+
+ if [ -z "${API_SECRET:-}" ]; then
+     echo "WARNING: API_SECRET is not set. All API calls will be rejected."
+     echo "         Set API_SECRET env var to enable the training API."
+ fi
+
+ if [ "${AUTO_TRAIN:-false}" = "true" ]; then
+     echo "AUTO_TRAIN=true — training will start after API server is up."
+ fi
+
+ exec uvicorn server.app:app \
+     --host 0.0.0.0 \
+     --port "${API_PORT}" \
+     --app-dir /opt/f13
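
For context, everything this entrypoint reads is environment-driven. A minimal launch sketch; the image tag f13-qwen:latest and the host port mappings are illustrative assumptions, while the env vars are the ones the script actually checks:

    docker run --gpus all \
        -p 8080:8080 -p 2222:22 \
        -e HF_TOKEN=hf_xxx \
        -e API_SECRET=change-me \
        -e PUBLIC_KEY="ssh-ed25519 AAAA... user@host" \
        -e AUTO_TRAIN=true \
        -v "$PWD/data:/workspace/data" \
        f13-qwen:latest   # hypothetical image tag

With no API_SECRET the server still starts but rejects all API calls, and without a custom /workspace/data/train.jsonl the bundled default dataset is copied in.
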
scripts/infer.sh ADDED
@@ -0,0 +1,24 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ OUTPUT_DIR="${OUTPUT_DIR:-/workspace/output}"
+ USE_HF="${USE_HF:-true}"
+
+ CHECKPOINT=$(ls -td "${OUTPUT_DIR}"/v*-*/checkpoint-* 2>/dev/null | head -1 || true)  # || true: an empty glob must not abort under set -e before the error message below
+
+ if [ -z "${CHECKPOINT}" ]; then
+     echo "Error: No checkpoint found in ${OUTPUT_DIR}"
+     echo "Train the model first with: bash /opt/scripts/train.sh"
+     exit 1
+ fi
+
+ echo "============================================"
+ echo " Inference with checkpoint: ${CHECKPOINT}"
+ echo "============================================"
+
+ swift infer \
+     --adapters "${CHECKPOINT}" \
+     --stream true \
+     --temperature 0 \
+     --max_new_tokens 2048 \
+     --use_hf "${USE_HF}"
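
Usage note: the script takes no arguments; it picks the newest checkpoint matching ${OUTPUT_DIR}/v*-*/checkpoint-*. A minimal sketch, assuming it is installed at /opt/scripts/infer.sh to match the /opt/scripts/train.sh path it references:

    # Interactive streaming inference against the latest checkpoint
    OUTPUT_DIR=/workspace/output bash /opt/scripts/infer.sh
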
scripts/train.sh ADDED
@@ -0,0 +1,223 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ # Verbose download progress in non-TTY environments (RunPod logs)
+ export HF_HUB_DISABLE_PROGRESS_BARS=0
+ export TRANSFORMERS_VERBOSITY=info
+ export HF_HUB_VERBOSITY=info
+
+ MODEL="${MODEL:-Qwen/Qwen3.5-4B}"
+ TRAIN_DATA="${TRAIN_DATA:-/workspace/data/train.jsonl}"
+ TEST_DATA="${TEST_DATA:-/workspace/data/test.jsonl}"
+ OUTPUT_DIR="${OUTPUT_DIR:-/workspace/output}"
+ NUM_EPOCHS="${NUM_EPOCHS:-3}"
+ BATCH_SIZE="${BATCH_SIZE:-1}"
+ GRAD_ACCUM="${GRAD_ACCUM:-4}"
+ LR="${LR:-2e-5}"
+ MAX_LENGTH="${MAX_LENGTH:-2048}"
+ SAVE_STEPS="${SAVE_STEPS:-10}"
+ EVAL_STEPS="${EVAL_STEPS:-${SAVE_STEPS}}"
+ SAVE_TOTAL_LIMIT="${SAVE_TOTAL_LIMIT:-5}"
+ LOGGING_STEPS="${LOGGING_STEPS:-5}"
+ USE_HF="${USE_HF:-true}"
+ TUNER_TYPE="${TUNER_TYPE:-full}"
+ WARMUP_RATIO="${WARMUP_RATIO:-0.1}"
+ LR_SCHEDULER_TYPE="${LR_SCHEDULER_TYPE:-cosine}"
+ WEIGHT_DECAY="${WEIGHT_DECAY:-0.1}"
+ MAX_GRAD_NORM="${MAX_GRAD_NORM:-1.0}"
+ OPTIMIZER="${OPTIMIZER:-adamw_torch}"
+ SEED="${SEED:-42}"
+ NEFTUNE_ALPHA="${NEFTUNE_ALPHA:-0}"
+ PACKING="${PACKING:-false}"
+ SHUFFLE_DATASET="${SHUFFLE_DATASET:-false}"
+ LAZY_TOKENIZE="${LAZY_TOKENIZE:-true}"
+ DATASET_NUM_PROC="${DATASET_NUM_PROC:-4}"
+ ATTN_IMPL="${ATTN_IMPL:-flash_attn}"
+ DEEPSPEED_CONFIG="${DEEPSPEED_CONFIG:-}"
+ WANDB_PROJECT="${WANDB_PROJECT:-}"
+ RESUME_FROM="${RESUME_FROM:-}"
+ USE_FLASH_CKPT="${USE_FLASH_CKPT:-false}"
+ EARLY_STOPPING_PATIENCE="${EARLY_STOPPING_PATIENCE:-}"
+ EARLY_STOPPING_THRESHOLD="${EARLY_STOPPING_THRESHOLD:-0.0}"
+
+ NUM_GPUS=$(nvidia-smi -L 2>/dev/null | wc -l || true)  # || true: a missing nvidia-smi must not abort under set -e
+ NUM_GPUS=${NUM_GPUS:-1}
+ if [ "${NUM_GPUS}" -lt 1 ]; then
+     NUM_GPUS=1
+ fi
+ NPROC_PER_NODE="${NPROC_PER_NODE:-${NUM_GPUS}}"
+
+ if [ "${NPROC_PER_NODE}" -gt 1 ] && [ -z "${DEEPSPEED_CONFIG}" ]; then
+     DEEPSPEED_CONFIG="zero3"
+ fi
+
+ GPU_IDS=$(seq -s, 0 $((NPROC_PER_NODE - 1)))
+ export CUDA_VISIBLE_DEVICES="${CUDA_VISIBLE_DEVICES:-${GPU_IDS}}"
+
+ echo "============================================"
+ echo " Qwen 3.5 Fine-Tuning with ms-swift"
+ echo "============================================"
+ echo "Model: ${MODEL}"
+ echo "Train data: ${TRAIN_DATA}"
+ echo "Test data: ${TEST_DATA}"
+ echo "Output: ${OUTPUT_DIR}"
+ echo "Tuner: ${TUNER_TYPE}"
+ echo "Epochs: ${NUM_EPOCHS}"
+ echo "Batch size: ${BATCH_SIZE}"
+ echo "Grad accum: ${GRAD_ACCUM}"
+ echo "LR: ${LR}"
+ echo "Max length: ${MAX_LENGTH}"
+ echo "GPUs: ${NPROC_PER_NODE} (CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES})"
+ echo "Eval steps: ${EVAL_STEPS}"
+ echo "Save limit: ${SAVE_TOTAL_LIMIT}"
+ echo "Warmup ratio: ${WARMUP_RATIO}"
+ echo "LR scheduler: ${LR_SCHEDULER_TYPE}"
+ echo "Weight decay: ${WEIGHT_DECAY}"
+ echo "Grad clip: ${MAX_GRAD_NORM}"
+ echo "Optimizer: ${OPTIMIZER}"
+ echo "Seed: ${SEED}"
+ echo "NEFTune: ${NEFTUNE_ALPHA}"
+ echo "Packing: ${PACKING}"
+ echo "Lazy tokenize: ${LAZY_TOKENIZE}"
+ echo "Dataset procs: ${DATASET_NUM_PROC}"
+ echo "Attn impl: ${ATTN_IMPL}"
+ echo "DeepSpeed: ${DEEPSPEED_CONFIG:-none}"
+ echo "W&B project: ${WANDB_PROJECT:-disabled}"
+ echo "Flash ckpt: ${USE_FLASH_CKPT}"
+ echo "Early stop: ${EARLY_STOPPING_PATIENCE:-disabled}"
+ echo "Resume from: ${RESUME_FROM:-none}"
+ echo "============================================"
+
+ EXTRA_ARGS=()
+
+ HAS_VAL=false
+ if [ -f "${TEST_DATA}" ] || [[ "${TEST_DATA}" == */* && ! "${TEST_DATA}" == /* ]]; then  # existing file, or a non-absolute dataset id like org/name
+     EXTRA_ARGS+=(--val_dataset "${TEST_DATA}")
+     HAS_VAL=true
+ fi
+
+ if [ -n "${EARLY_STOPPING_PATIENCE}" ] && [ "${HAS_VAL}" = "true" ]; then
+     EXTRA_ARGS+=(
+         --load_best_model_at_end true
+         --metric_for_best_model eval_loss
+         --greater_is_better false
+         --early_stopping_patience "${EARLY_STOPPING_PATIENCE}"
+     )
+     if [ "${EARLY_STOPPING_THRESHOLD}" != "0.0" ] && [ -n "${EARLY_STOPPING_THRESHOLD}" ]; then
+         EXTRA_ARGS+=(--early_stopping_threshold "${EARLY_STOPPING_THRESHOLD}")
+     fi
+     if [ "${SAVE_TOTAL_LIMIT}" -lt 2 ]; then
+         SAVE_TOTAL_LIMIT=2
+         echo "Bumped SAVE_TOTAL_LIMIT to 2 (required for load_best_model_at_end)"
+     fi
+ elif [ -n "${EARLY_STOPPING_PATIENCE}" ]; then
+     echo "WARNING: EARLY_STOPPING_PATIENCE ignored — no validation data configured"
+ fi
+
+ if [ -n "${DEEPSPEED_CONFIG}" ]; then
+     EXTRA_ARGS+=(--deepspeed "${DEEPSPEED_CONFIG}")
+ fi
+
+ if [ -n "${WANDB_PROJECT}" ] || [ -n "${WANDB_API_KEY:-}" ]; then
+     EXTRA_ARGS+=(--report_to wandb)
+ fi
+
+ if [ -n "${RESUME_FROM}" ]; then
+     if [ "${RESUME_FROM}" = "auto" ]; then
+         LATEST_CKPT=$(ls -td "${OUTPUT_DIR}"/*/checkpoint-* "${OUTPUT_DIR}"/checkpoint-* 2>/dev/null | head -1 || true)  # || true: no checkpoint yet must not abort under set -e
+         if [ -n "${LATEST_CKPT}" ]; then
+             echo "Auto-resume: found ${LATEST_CKPT}"
+             EXTRA_ARGS+=(--resume_from_checkpoint "${LATEST_CKPT}")
+         else
+             echo "Auto-resume: no checkpoint found, starting fresh"
+         fi
+     else
+         EXTRA_ARGS+=(--resume_from_checkpoint "${RESUME_FROM}")
+     fi
+ fi
+
+ export NPROC_PER_NODE
+
+ # Pre-flight: verify flash-linear-attention is available.
+ # Qwen 3.5 silently falls back to O(n²) GatedDeltaNet without it.
+ echo ""
+ echo "Pre-flight checks..."
+ python3 -c "
+ try:
+     import fla
+     from fla.ops.gated_delta_rule import fused_recurrent_gated_delta_rule
+     print('[Pre-flight] flash-linear-attention OK — GatedDeltaNet uses FLA kernels')
+ except ImportError as e:
+     print(f'[Pre-flight] WARNING: flash-linear-attention not importable: {e}')
+     print('  GatedDeltaNet layers will use naive O(n^2) fallback — expect 2-3x VRAM')
+ try:
+     import causal_conv1d
+     print('[Pre-flight] causal-conv1d OK')
+ except ImportError:
+     print('[Pre-flight] WARNING: causal-conv1d not available')
+ "
+
+ # Pre-download model with visible progress before ms-swift starts.
+ # ms-swift/transformers download logging is minimal in non-TTY envs.
+ echo ""
+ echo "Pre-downloading model ${MODEL} (if not cached)..."
+ python3 -c "
+ import os
+ os.environ['HF_HUB_DISABLE_PROGRESS_BARS'] = '0'
+ from huggingface_hub import snapshot_download, logging
+ logging.set_verbosity_info()
+ snapshot_download('${MODEL}', ignore_patterns=['*.gguf', '*.ggml'])
+ print('Model download complete.', flush=True)
+ "
+ echo ""
+
+ CMD_ARGS=(
+     --model "${MODEL}"
+     --dataset "${TRAIN_DATA}"
+     --tuner_type "${TUNER_TYPE}"
+     --torch_dtype bfloat16
+     --num_train_epochs "${NUM_EPOCHS}"
+     --per_device_train_batch_size "${BATCH_SIZE}"
+     --per_device_eval_batch_size "${BATCH_SIZE}"
+     --learning_rate "${LR}"
+     --gradient_accumulation_steps "${GRAD_ACCUM}"
+     --eval_strategy steps
+     --eval_steps "${EVAL_STEPS}"
+     --save_steps "${SAVE_STEPS}"
+     --save_total_limit "${SAVE_TOTAL_LIMIT}"
+     --logging_steps "${LOGGING_STEPS}"
+     --max_length "${MAX_LENGTH}"
+     --output_dir "${OUTPUT_DIR}"
+     --warmup_ratio "${WARMUP_RATIO}"
+     --lr_scheduler_type "${LR_SCHEDULER_TYPE}"
+     --weight_decay "${WEIGHT_DECAY}"
+     --max_grad_norm "${MAX_GRAD_NORM}"
+     --optim "${OPTIMIZER}"
+     --seed "${SEED}"
+     --dataloader_num_workers 4
+     --lazy_tokenize "${LAZY_TOKENIZE}"
+     --dataset_num_proc "${DATASET_NUM_PROC}"
+     --attn_impl "${ATTN_IMPL}"
+     --use_hf "${USE_HF}"
+     --gradient_checkpointing true
+     --use_flash_ckpt "${USE_FLASH_CKPT}"
+ )
+
+ if [ "${NEFTUNE_ALPHA}" != "0" ] && [ -n "${NEFTUNE_ALPHA}" ]; then
+     CMD_ARGS+=(--neftune_noise_alpha "${NEFTUNE_ALPHA}")
+ fi
+
+ if [ "${PACKING}" = "true" ]; then
+     CMD_ARGS+=(--packing true)
+ fi
+
+ if [ "${SHUFFLE_DATASET}" = "true" ]; then
+     CMD_ARGS+=(--dataset_shuffle true)
+ fi
+
+ swift sft "${CMD_ARGS[@]}" "${EXTRA_ARGS[@]}"
+
+ echo "============================================"
+ echo " Training complete!"
+ echo " Output saved to: ${OUTPUT_DIR}"
+ echo "============================================"