Datasets:
Upload models/tinyaya_v3_100k/training_summary.json with huggingface_hub
Browse files
models/tinyaya_v3_100k/training_summary.json
ADDED
|
{
  "command": "train.py --corpus-dir data/v3_group_23 --initial-vocab /workspace/hf_home/hub/models--CohereLabs--tiny-aya-global/snapshots/b2f40192d990d66a6f28a4b90d72817ab9c613da/tokenizer.json --per-lang-counts-method soft --em-iterations 10 --results-dir results_tinyaya_v3_100k --lang-batch-size 5 --max-lang-samples-per-lang 100000 --reuse-base",
  "timestamp": "2026-03-23T22:13:01.474209+00:00",
  "training_completed": true,
  "source": {
    "format": "corpus",
    "path": "/workspace/runpod/tinyAyaLid/data/v3_group_23",
    "max_samples": null,
    "total_samples": 5007276,
    "num_languages": 3,
    "samples_per_language": {
      "ell_Grek": 3745712,
      "kan_Knda": 677804,
      "tel_Telu": 583760
    }
  },
  "method": {
    "vocab_size": 261000,
    "base_training_method": "hf",
    "per_lang_counts_method": "soft",
    "byte_level": true,
    "seed": 42,
    "initial_vocab": "/workspace/hf_home/hub/models--CohereLabs--tiny-aya-global/snapshots/b2f40192d990d66a6f28a4b90d72817ab9c613da/tokenizer.json",
    "lang_batch_size": 5,
    "sampling": {
      "mode": "separate",
      "max_base_samples_per_lang": 10000,
      "max_lang_samples_per_lang": 100000,
      "shared_samples_per_lang": null
    },
    "reuse": {
      "reuse_corpus": true,
      "reuse_base": true,
      "skip_existing_langs": true
    }
  },
  "timing": {
    "total_seconds": 1053.14,
    "base_tokenizer_seconds": 0.0,
    "language_tokenizers_seconds": 1053.14,
    "base_tokenizer_reused": true
  },
  "output": {
    "results_dir": "/workspace/runpod/tinyAyaLid/results_tinyaya_v3_100k",
    "corpus_dir": "/workspace/runpod/tinyAyaLid/data/v3_group_23",
    "tokenizers_dir": "/workspace/runpod/tinyAyaLid/results_tinyaya_v3_100k/tokenizers",
    "base_tokenizer": "/workspace/runpod/tinyAyaLid/results_tinyaya_v3_100k/tokenizers/langspec_base_tokenizer.json",
    "num_languages_trained_this_run": 3,
    "language_tokenizers": {
      "base_path": "results_tinyaya_v3_100k/tokenizers/langspec_base_tokenizer.json",
      "language_paths": {
        "ell_Grek": "results_tinyaya_v3_100k/tokenizers/langspec_soft_ell_Grek.tokenizer.json",
        "kan_Knda": "results_tinyaya_v3_100k/tokenizers/langspec_soft_kan_Knda.tokenizer.json",
        "tel_Telu": "results_tinyaya_v3_100k/tokenizers/langspec_soft_tel_Telu.tokenizer.json"
      }
    }
  },
  "languages": [
    "ell_Grek",
    "kan_Knda",
    "tel_Telu"
  ]
}