OLMoE (November 2024)
Collection of artifacts for open mixture-of-experts language models (13 items).
Error code: DatasetGenerationError
Exception: TypeError
Message: Couldn't cast array of type
struct<human_eval: double, human_eval_cpp: double, human_eval_js: double, human_eval_return_simple: double, human_eval_return_complex: double, human_eval_25: double, human_eval_50: double, human_eval_75: double>
to
{'mmlu_zeroshot': Value(dtype='float64', id=None), 'hellaswag_zeroshot': Value(dtype='float64', id=None), 'jeopardy': Value(dtype='float64', id=None), 'triviaqa_sm_sub': Value(dtype='float64', id=None), 'gsm8k_cot': Value(dtype='float64', id=None), 'agi_eval_sat_math_cot': Value(dtype='float64', id=None), 'aqua_cot': Value(dtype='float64', id=None), 'svamp_cot': Value(dtype='float64', id=None), 'bigbench_qa_wikidata': Value(dtype='float64', id=None), 'arc_easy': Value(dtype='float64', id=None), 'arc_challenge': Value(dtype='float64', id=None), 'mmlu_fewshot': Value(dtype='float64', id=None), 'bigbench_misconceptions': Value(dtype='float64', id=None), 'copa': Value(dtype='float64', id=None), 'siqa': Value(dtype='float64', id=None), 'commonsense_qa': Value(dtype='float64', id=None), 'piqa': Value(dtype='float64', id=None), 'openbook_qa': Value(dtype='float64', id=None), 'bigbench_novel_concepts': Value(dtype='float64', id=None), 'bigbench_strange_stories': Value(dtype='float64', id=None), 'bigbench_strategy_qa': Value(dtype='float64', id=None), 'lambada_openai': Value(dtype='float64', id=None), 'hellaswag': Value(dtype='float64', id=None), 'winograd': Value(dtype='float64', id=None), 'winogrande': Value(dtype='float64', id=None), 'bigbench_conlang_translation': Value(dtype='float64', id=None), 'bigbench_language_identification': Value(dtype='float64', id=None), 'bigbench_conceptual_combinations': Value(dtype='float64', id=None), 'bigbench_elementary_math_qa': Value(dtype='float64', id=None), 'bigbench_dyck_languages': Value(dtype='float64', id=None), 'agi_eval_lsat_ar': Value(dtype='float64', id=None), 'bigbench_cs_algorithms': Value(dtype='float64', id=None), 'bigbench_logical_deduction': Value(dtype='float64', id=None), 'bigbench_operators': Value(dtype='float64', id=None), 'bigbench_repeat_copy_logic': Value(dtype='float64', id=None), 'simple_arithmetic_nospaces': Value(dtype='float64', id=None), 'simple_arithmetic_withspaces': Value(dtype='float64', id=None), 'math_qa': Value(dtype='float64', id=None), 'logi_qa': Value(dtype='float64', id=None), 'pubmed_qa_labeled': Value(dtype='float64', id=None), 'squad': Value(dtype='float64', id=None), 'agi_eval_lsat_rc': Value(dtype='float64', id=None), 'agi_eval_lsat_lr': Value(dtype='float64', id=None), 'coqa': Value(dtype='float64', id=None), 'bigbench_understanding_fables': Value(dtype='float64', id=None), 'boolq': Value(dtype='float64', id=None), 'agi_eval_sat_en': Value(dtype='float64', id=None), 'winogender_mc_female': Value(dtype='float64', id=None), 'winogender_mc_male': Value(dtype='float64', id=None), 'enterprise_pii_classification': Value(dtype='float64', id=None), 'bbq': Value(dtype='float64', id=None), 'gpqa_main': Value(dtype='float64', id=None), 'gpqa_diamond': Value(dtype='float64', id=None)}
Traceback: Traceback (most recent call last):
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2013, in _prepare_split_single
writer.write_table(table)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 585, in write_table
pa_table = table_cast(pa_table, self._schema)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2302, in table_cast
return cast_table_to_schema(table, schema)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2261, in cast_table_to_schema
arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2261, in <listcomp>
arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 1802, in wrapper
return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 1802, in <listcomp>
return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2020, in cast_array_to_feature
arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()]
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2020, in <listcomp>
arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()]
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 1804, in wrapper
return func(array, *args, **kwargs)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2122, in cast_array_to_feature
raise TypeError(f"Couldn't cast array of type\n{_short_str(array.type)}\nto\n{_short_str(feature)}")
TypeError: Couldn't cast array of type
struct<human_eval: double, human_eval_cpp: double, human_eval_js: double, human_eval_return_simple: double, human_eval_return_complex: double, human_eval_25: double, human_eval_50: double, human_eval_75: double>
to
{'mmlu_zeroshot': Value(dtype='float64', id=None), 'hellaswag_zeroshot': Value(dtype='float64', id=None), 'jeopardy': Value(dtype='float64', id=None), 'triviaqa_sm_sub': Value(dtype='float64', id=None), 'gsm8k_cot': Value(dtype='float64', id=None), 'agi_eval_sat_math_cot': Value(dtype='float64', id=None), 'aqua_cot': Value(dtype='float64', id=None), 'svamp_cot': Value(dtype='float64', id=None), 'bigbench_qa_wikidata': Value(dtype='float64', id=None), 'arc_easy': Value(dtype='float64', id=None), 'arc_challenge': Value(dtype='float64', id=None), 'mmlu_fewshot': Value(dtype='float64', id=None), 'bigbench_misconceptions': Value(dtype='float64', id=None), 'copa': Value(dtype='float64', id=None), 'siqa': Value(dtype='float64', id=None), 'commonsense_qa': Value(dtype='float64', id=None), 'piqa': Value(dtype='float64', id=None), 'openbook_qa': Value(dtype='float64', id=None), 'bigbench_novel_concepts': Value(dtype='float64', id=None), 'bigbench_strange_stories': Value(dtype='float64', id=None), 'bigbench_strategy_qa': Value(dtype='float64', id=None), 'lambada_openai': Value(dtype='float64', id=None), 'hellaswag': Value(dtype='float64', id=None), 'winograd': Value(dtype='float64', id=None), 'winogrande': Value(dtype='float64', id=None), 'bigbench_conlang_translation': Value(dtype='float64', id=None), 'bigbench_language_identification': Value(dtype='float64', id=None), 'bigbench_conceptual_combinations': Value(dtype='float64', id=None), 'bigbench_elementary_math_qa': Value(dtype='float64', id=None), 'bigbench_dyck_languages': Value(dtype='float64', id=None), 'agi_eval_lsat_ar': Value(dtype='float64', id=None), 'bigbench_cs_algorithms': Value(dtype='float64', id=None), 'bigbench_logical_deduction': Value(dtype='float64', id=None), 'bigbench_operators': Value(dtype='float64', id=None), 'bigbench_repeat_copy_logic': Value(dtype='float64', id=None), 'simple_arithmetic_nospaces': Value(dtype='float64', id=None), 'simple_arithmetic_withspaces': Value(dtype='float64', id=None), 'math_qa': Value(dtype='float64', id=None), 'logi_qa': Value(dtype='float64', id=None), 'pubmed_qa_labeled': Value(dtype='float64', id=None), 'squad': Value(dtype='float64', id=None), 'agi_eval_lsat_rc': Value(dtype='float64', id=None), 'agi_eval_lsat_lr': Value(dtype='float64', id=None), 'coqa': Value(dtype='float64', id=None), 'bigbench_understanding_fables': Value(dtype='float64', id=None), 'boolq': Value(dtype='float64', id=None), 'agi_eval_sat_en': Value(dtype='float64', id=None), 'winogender_mc_female': Value(dtype='float64', id=None), 'winogender_mc_male': Value(dtype='float64', id=None), 'enterprise_pii_classification': Value(dtype='float64', id=None), 'bbq': Value(dtype='float64', id=None), 'gpqa_main': Value(dtype='float64', id=None), 'gpqa_diamond': Value(dtype='float64', id=None)}
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1396, in compute_config_parquet_and_info_response
parquet_operations = convert_to_parquet(builder)
File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1045, in convert_to_parquet
builder.download_and_prepare(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1029, in download_and_prepare
self._download_and_prepare(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1124, in _download_and_prepare
self._prepare_split(split_generator, **prepare_split_kwargs)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1884, in _prepare_split
for job_id, done, content in self._prepare_split_single(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2040, in _prepare_split_single
raise DatasetGenerationError("An error occurred while generating the dataset") from e
datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset
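The root cause is visible in the message: the viewer infers a single Arrow schema for the `eval_metrics.icl` struct (the full benchmark key set from the `heavy` rows) and then hits rows from the `humaneval` runs whose `icl` struct contains only `human_eval*` keys, which cannot be cast to that schema. One workaround is to pad every row's metrics to the union of keys before building the dataset. Below is a minimal sketch of that idea, assuming the raw results are line-delimited JSON; the file name `results.jsonl` is a hypothetical stand-in for the actual files in this repo.

```python
import json

from datasets import Dataset

# Load the raw rows. "results.jsonl" is a hypothetical stand-in for
# the actual result files in this repo.
with open("results.jsonl") as f:
    rows = [json.loads(line) for line in f]

# Collect the union of metric keys across all rows.
all_keys = set()
for row in rows:
    all_keys.update(row["eval_metrics"]["icl"])

# Pad each row's metrics with None so every row has the same struct;
# Arrow then infers one nullable float64 field per metric.
for row in rows:
    icl = row["eval_metrics"]["icl"]
    for key in all_keys:
        icl.setdefault(key, None)

ds = Dataset.from_list(rows)  # all rows now cast to a single schema
```

With every `icl` struct padded to the union of keys, the cast succeeds. The preview below shows the two row shapes side by side: four `heavy` rows with the full benchmark metrics, and four `humaneval` rows whose non-HumanEval fields are missing or null.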
| name (string) | uuid (string) | model (string) | creation_date (string) | eval_metrics (dict) | missing tasks (string) | aggregated_task_categories_centered (dict) | aggregated_centered_results (float64) | aggregated_results (float64) | rw_small (float64) | rw_small_centered (float64) | 95%_CI_above (float64) | 95%_CI_above_centered (float64) | 99%_CI_above (float64) | 99%_CI_above_centered (float64) | low_variance_datasets (float64) | low_variance_datasets_centered (float64) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| heavy | 83bb37f6-ddd0-43d8-894e-446d345ff7c1 | /net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/main | 2024_08_06-17_10_22 | {"icl": {"mmlu_zeroshot": 0.4163723833728255, "hellaswag_zeroshot": 0.7599083781242371, "jeopardy": 0.4884525716304779, "triviaqa_sm_sub": 0.5419999957084656, "gsm8k_cot": 0.06444276124238968, "agi_eval_sat_math_cot": 0.05454545468091965, "aqua_cot": 0.02448979578912258, "svamp_cot":... | [] | {"commonsense reasoning": 0.4523028646368181, "language understanding": 0.4706549446387432, "reading comprehension": 0.39537327579761805, "safety": 0.03534456274726175, "symbolic problem solving": 0.15998990932590068, "world knowledge": 0.3526500500886761} | 0.313479 | 0.463348 | 0.714722 | 0.505301 | 0.56121 | 0.415861 | 0.572426 | 0.457942 | 0.563676 | 0.462567 |
| heavy | 70f3f719-28b5-46f5-b58e-bd89765d1e40 | /net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/step1220000-tokens5117B | 2024_08_06-17_05_21 | {"icl": {"mmlu_zeroshot": 0.4273025756865217, "hellaswag_zeroshot": 0.7597092390060425, "jeopardy": 0.4871806979179382, "triviaqa_sm_sub": 0.5303333401679993, "gsm8k_cot": 0.07354056090116501, "agi_eval_sat_math_cot": 0.040909089148044586, "aqua_cot": 0.02857142873108387, "svamp_cot"... | [] | {"commonsense reasoning": 0.46081640452671535, "language understanding": 0.4762512398893946, "reading comprehension": 0.36885401178478144, "safety": 0.03524814952503552, "symbolic problem solving": 0.15957477013304083, "world knowledge": 0.34119598718414534} | 0.30932 | 0.459965 | 0.715261 | 0.504311 | 0.558937 | 0.414268 | 0.570154 | 0.456544 | 0.565945 | 0.465203 |
| heavy | c260eedc-dbb5-4ff4-afa9-d163e8d7585b | /net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/step1223842-tokens5100B | 2024_08_06-16_39_52 | {"icl": {"mmlu_zeroshot": 0.43332746128241223, "hellaswag_zeroshot": 0.7701653242111206, "jeopardy": 0.5032301664352417, "triviaqa_sm_sub": 0.5586666464805603, "gsm8k_cot": 0.07429870963096619, "agi_eval_sat_math_cot": 0.06363636255264282, "aqua_cot": 0.020408162847161293, "svamp_cot... | [] | {"commonsense reasoning": 0.46523631517512387, "language understanding": 0.4930994285627118, "reading comprehension": 0.3874464948710642, "safety": 0.06361036679961463, "symbolic problem solving": 0.1715689478790787, "world knowledge": 0.36090664066069306} | 0.324591 | 0.471675 | 0.714308 | 0.502137 | 0.571701 | 0.429074 | 0.581191 | 0.468356 | 0.571693 | 0.472335 |
| heavy | 72557004-cff2-408c-b3e2-6a004215f09d | /net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/jetmoe-8b/main | 2024_08_06-16_55_05 | {"icl": {"mmlu_zeroshot": 0.4326617845840621, "hellaswag_zeroshot": 0.7868950366973877, "jeopardy": 0.45192375779151917, "triviaqa_sm_sub": 0.4986666738986969, "gsm8k_cot": 0.3025018870830536, "agi_eval_sat_math_cot": 0.11363636702299118, "aqua_cot": 0.05306122452020645, "svamp_cot":... | [] | {"commonsense reasoning": 0.4857313116330063, "language understanding": 0.4936472507617851, "reading comprehension": 0.3914801840458, "safety": 0.11027789386835968, "symbolic problem solving": 0.24344731914942666, "world knowledge": 0.3286514173125663} | 0.346155 | 0.48857 | 0.720248 | 0.535608 | 0.577331 | 0.443234 | 0.592481 | 0.489897 | 0.578146 | 0.485983 |
| /net/nfs.cirrascale/allennlp/davidw/proj/OLMoE/script/humaneval | dc03f2af-e3c1-479d-98bd-6efc73e684e4 | /net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/main | 2024_08_06-02_09_56 | {"icl": {"human_eval": 0.024390242993831635, "human_eval_cpp": 0.1304347813129425, "human_eval_js": 0, "human_eval_return_simple": 0.6486486196517944, "human_eval_return_complex": 0.14960630238056183, "human_eval_25": 0.03658536449074745, "human_eval_50": 0.08536585420370102, "human_... | ['mmlu_zeroshot', 'hellaswag_zeroshot', 'jeopardy', 'triviaqa_sm_sub', 'gsm8k_cot', 'agi_eval_sat_math_cot', 'aqua_cot', 'svamp_cot', 'bigbench_qa_wikidata', 'arc_easy', 'arc_challenge', 'mmlu_fewshot', 'bigbench_misconceptions', 'copa', 'siqa', 'commonsense_qa', 'piqa', 'openbook_qa', 'bigbench_novel_concepts', 'bigbe... | {"commonsense reasoning": null, "language understanding": null, "reading comprehension": null, "safety": null, "symbolic problem solving": null, "world knowledge": null} | null | null | null | null | null | null | null | null | null | null |
| /net/nfs.cirrascale/allennlp/davidw/proj/OLMoE/script/humaneval | d31d6b3f-15ee-4d96-ac2e-b2d780bf206e | /net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/step1220000-tokens5117B | 2024_08_06-02_14_00 | {"icl": {"human_eval": 0.006097560748457909, "human_eval_cpp": 0.12422360479831696, "human_eval_js": 0, "human_eval_return_simple": 0.7567567825317383, "human_eval_return_complex": 0.14173229038715363, "human_eval_25": 0.060975611209869385, "human_eval_50": 0.1036585345864296, "human... | ['mmlu_zeroshot', 'hellaswag_zeroshot', 'jeopardy', 'triviaqa_sm_sub', 'gsm8k_cot', 'agi_eval_sat_math_cot', 'aqua_cot', 'svamp_cot', 'bigbench_qa_wikidata', 'arc_easy', 'arc_challenge', 'mmlu_fewshot', 'bigbench_misconceptions', 'copa', 'siqa', 'commonsense_qa', 'piqa', 'openbook_qa', 'bigbench_novel_concepts', 'bigbe... | {"commonsense reasoning": null, "language understanding": null, "reading comprehension": null, "safety": null, "symbolic problem solving": null, "world knowledge": null} | null | null | null | null | null | null | null | null | null | null |
| /net/nfs.cirrascale/allennlp/davidw/proj/OLMoE/script/humaneval | 3a11e770-7075-42f3-81f6-c14a6db4953d | /net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/step1223842-tokens5100B | 2024_08_06-02_21_03 | {"icl": {"human_eval": 0.012195121496915817, "human_eval_cpp": 0.1304347813129425, "human_eval_js": 0, "human_eval_return_simple": 0.8648648858070374, "human_eval_return_complex": 0.14960630238056183, "human_eval_25": 0.04268292710185051, "human_eval_50": 0.09146341681480408, "human_... | ['mmlu_zeroshot', 'hellaswag_zeroshot', 'jeopardy', 'triviaqa_sm_sub', 'gsm8k_cot', 'agi_eval_sat_math_cot', 'aqua_cot', 'svamp_cot', 'bigbench_qa_wikidata', 'arc_easy', 'arc_challenge', 'mmlu_fewshot', 'bigbench_misconceptions', 'copa', 'siqa', 'commonsense_qa', 'piqa', 'openbook_qa', 'bigbench_novel_concepts', 'bigbe... | {"commonsense reasoning": null, "language understanding": null, "reading comprehension": null, "safety": null, "symbolic problem solving": null, "world knowledge": null} | null | null | null | null | null | null | null | null | null | null |
| /net/nfs.cirrascale/allennlp/davidw/proj/OLMoE/script/humaneval | 9a26c983-2b2b-46bc-bc2f-9e1d9c153710 | /net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/jetmoe-8b/main | 2024_08_05-22_40_40 | {"icl": {"human_eval": 0.32926830649375916, "human_eval_cpp": 0.2732919156551361, "human_eval_js": 0, "human_eval_return_simple": 0.7837837934494019, "human_eval_return_complex": 0.4488188922405243, "human_eval_25": 0.4390243887901306, "human_eval_50": 0.5243902206420898, "human_eval... | ['mmlu_zeroshot', 'hellaswag_zeroshot', 'jeopardy', 'triviaqa_sm_sub', 'gsm8k_cot', 'agi_eval_sat_math_cot', 'aqua_cot', 'svamp_cot', 'bigbench_qa_wikidata', 'arc_easy', 'arc_challenge', 'mmlu_fewshot', 'bigbench_misconceptions', 'copa', 'siqa', 'commonsense_qa', 'piqa', 'openbook_qa', 'bigbench_novel_concepts', 'bigbe... | {"commonsense reasoning": null, "language understanding": null, "reading comprehension": null, "safety": null, "symbolic problem solving": null, "world knowledge": null} | null | null | null | null | null | null | null | null | null | null |
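Once the rows load, the nested columns are easy to work with. A small usage sketch, assuming `ds` was built as in the normalization example above: it averages the centered task-category scores per checkpoint, skipping the `humaneval` rows whose categories are null.

```python
# Average the centered task-category scores for each checkpoint,
# skipping rows where the categories are null (the humaneval runs).
for row in ds:
    cats = row["aggregated_task_categories_centered"]
    scores = [v for v in cats.values() if v is not None]
    if scores:
        print(f"{row['model']}: mean centered category score = {sum(scores) / len(scores):.4f}")
```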