Datasets:
Upload models/commonlid_50pct/training_summary.json with huggingface_hub
Browse files
models/commonlid_50pct/training_summary.json
ADDED
|
@@ -0,0 +1,75 @@
| 1 |
+
{
|
| 2 |
+
"command": "train.py --corpus-dir data/cl50_group_6 --initial-vocab /workspace/hf_home/hub/models--CohereLabs--tiny-aya-global/snapshots/b2f40192d990d66a6f28a4b90d72817ab9c613da/tokenizer.json --per-lang-counts-method soft --em-iterations 10 --results-dir results_commonlid_50pct --lang-batch-size 10 --reuse-base",
|
| 3 |
+
"timestamp": "2026-03-24T07:30:59.527633+00:00",
|
| 4 |
+
"training_completed": true,
|
| 5 |
+
"source": {
|
| 6 |
+
"format": "corpus",
|
| 7 |
+
"path": "/workspace/runpod/tinyAyaLid/data/cl50_group_6",
|
| 8 |
+
"max_samples": null,
|
| 9 |
+
"total_samples": 31714,
|
| 10 |
+
"num_languages": 7,
|
| 11 |
+
"samples_per_language": {
|
| 12 |
+
"arb": 21229,
|
| 13 |
+
"crh": 202,
|
| 14 |
+
"gcr": 55,
|
| 15 |
+
"jav": 828,
|
| 16 |
+
"nso": 49,
|
| 17 |
+
"swh": 8206,
|
| 18 |
+
"yor": 1145
|
| 19 |
+
}
|
| 20 |
+
},
|
| 21 |
+
"method": {
|
| 22 |
+
"vocab_size": 261000,
|
| 23 |
+
"base_training_method": "hf",
|
| 24 |
+
"per_lang_counts_method": "soft",
|
| 25 |
+
"byte_level": true,
|
| 26 |
+
"seed": 42,
|
| 27 |
+
"initial_vocab": "/workspace/hf_home/hub/models--CohereLabs--tiny-aya-global/snapshots/b2f40192d990d66a6f28a4b90d72817ab9c613da/tokenizer.json",
|
| 28 |
+
"lang_batch_size": 10,
|
| 29 |
+
"sampling": {
|
| 30 |
+
"mode": "base_only",
|
| 31 |
+
"max_base_samples_per_lang": 10000,
|
| 32 |
+
"max_lang_samples_per_lang": null,
|
| 33 |
+
"shared_samples_per_lang": null
|
| 34 |
+
},
|
| 35 |
+
"reuse": {
|
| 36 |
+
"reuse_corpus": true,
|
| 37 |
+
"reuse_base": true,
|
| 38 |
+
"skip_existing_langs": true
|
| 39 |
+
}
|
| 40 |
+
},
|
| 41 |
+
"timing": {
|
| 42 |
+
"total_seconds": 350.76,
|
| 43 |
+
"base_tokenizer_seconds": 0.0,
|
| 44 |
+
"language_tokenizers_seconds": 350.76,
|
| 45 |
+
"base_tokenizer_reused": true
|
| 46 |
+
},
|
| 47 |
+
"output": {
|
| 48 |
+
"results_dir": "/workspace/runpod/tinyAyaLid/results_commonlid_50pct",
|
| 49 |
+
"corpus_dir": "/workspace/runpod/tinyAyaLid/data/cl50_group_6",
|
| 50 |
+
"tokenizers_dir": "/workspace/runpod/tinyAyaLid/results_commonlid_50pct/tokenizers",
|
| 51 |
+
"base_tokenizer": "/workspace/runpod/tinyAyaLid/results_commonlid_50pct/tokenizers/langspec_base_tokenizer.json",
|
| 52 |
+
"num_languages_trained_this_run": 7,
|
| 53 |
+
"language_tokenizers": {
|
| 54 |
+
"base_path": "results_commonlid_50pct/tokenizers/langspec_base_tokenizer.json",
|
| 55 |
+
"language_paths": {
|
| 56 |
+
"arb": "results_commonlid_50pct/tokenizers/langspec_soft_arb.tokenizer.json",
|
| 57 |
+
"crh": "results_commonlid_50pct/tokenizers/langspec_soft_crh.tokenizer.json",
|
| 58 |
+
"gcr": "results_commonlid_50pct/tokenizers/langspec_soft_gcr.tokenizer.json",
|
| 59 |
+
"jav": "results_commonlid_50pct/tokenizers/langspec_soft_jav.tokenizer.json",
|
| 60 |
+
"nso": "results_commonlid_50pct/tokenizers/langspec_soft_nso.tokenizer.json",
|
| 61 |
+
"swh": "results_commonlid_50pct/tokenizers/langspec_soft_swh.tokenizer.json",
|
| 62 |
+
"yor": "results_commonlid_50pct/tokenizers/langspec_soft_yor.tokenizer.json"
|
| 63 |
+
}
|
| 64 |
+
}
|
| 65 |
+
},
|
| 66 |
+
"languages": [
|
| 67 |
+
"arb",
|
| 68 |
+
"crh",
|
| 69 |
+
"gcr",
|
| 70 |
+
"jav",
|
| 71 |
+
"nso",
|
| 72 |
+
"swh",
|
| 73 |
+
"yor"
|
| 74 |
+
]
|
| 75 |
+
}
|