Dataset Preview
The full dataset viewer is not available (click to read why). Only showing a preview of the rows.
The dataset generation failed
Error code: DatasetGenerationError
Exception: ArrowNotImplementedError
Message: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.
Traceback: Traceback (most recent call last):
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1870, in _prepare_split_single
writer.write_table(table)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 620, in write_table
self._build_writer(inferred_schema=pa_table.schema)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 441, in _build_writer
self.pa_writer = self._WRITER_CLASS(self.stream, schema)
File "/src/services/worker/.venv/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 1010, in __init__
self.writer = _parquet.ParquetWriter(
File "pyarrow/_parquet.pyx", line 2157, in pyarrow._parquet.ParquetWriter.__cinit__
File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1886, in _prepare_split_single
num_examples, num_bytes = writer.finalize()
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 639, in finalize
self._build_writer(self.schema)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 441, in _build_writer
self.pa_writer = self._WRITER_CLASS(self.stream, schema)
File "/src/services/worker/.venv/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 1010, in __init__
self.writer = _parquet.ParquetWriter(
File "pyarrow/_parquet.pyx", line 2157, in pyarrow._parquet.ParquetWriter.__cinit__
File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1417, in compute_config_parquet_and_info_response
parquet_operations = convert_to_parquet(builder)
File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1049, in convert_to_parquet
builder.download_and_prepare(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 924, in download_and_prepare
self._download_and_prepare(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1000, in _download_and_prepare
self._prepare_split(split_generator, **prepare_split_kwargs)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1741, in _prepare_split
for job_id, done, content in self._prepare_split_single(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1897, in _prepare_split_single
raise DatasetGenerationError("An error occurred while generating the dataset") from e
datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset

Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
name string | backend dict | scenario dict | launcher dict | environment dict | print_report bool | log_report bool | load_model dict | prefill dict | decode dict | per_token dict |
|---|---|---|---|---|---|---|---|---|---|---|
2024-12-12-16:40:44/openvino | {
"name": "openvino",
"version": "2024.5.0",
"_target_": "optimum_benchmark.backends.openvino.backend.OVBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
... | {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 10,
"duration": 10,
"warmup_runs": 10,
"input_shapes": {
"batch_size": 2,
"sequence_length": 16
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": false,
"f... | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": null,
"numactl": true,
"numactl_kwargs": {
"cpunodebind": 0,
"membind": 0
},
"start_method": "spawn"
} | {
"cpu": " AMD EPYC 7R13 Processor",
"cpu_count": 64,
"cpu_ram_mb": 529717.02272,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.205-195.807.amzn2.x86_64-x86_64-with-glibc2.36",
"processor": "",
"python_version": "3.10.16",
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchma... | false | true | null | null | null | null |
null | null | null | null | null | null | null | {
"memory": {
"unit": "MB",
"max_ram": 2397.077504,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
15.132873546332121
],
"count": 1,
"total": 15.132873546332121,
"mean": 1... | {
"memory": {
"unit": "MB",
"max_ram": 2679.709696,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
0.14973318204283714,
0.18666228279471397,
0.027380209416151047,
0.0281... | {
"memory": {
"unit": "MB",
"max_ram": 2680.049664,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
1.4166215844452381,
1.5008420012891293,
1.3766007609665394,
1.27054375... | {
"memory": null,
"latency": {
"unit": "s",
"values": [
0.016260690987110138,
0.0998876541852951,
0.016087815165519714,
0.019835863262414932,
0.08787507191300392,
0.014099866151809692,
0.08814588561654091,
0.057777125388383865,
0.10743596032261848,
... |
2024-12-12-16:40:44/pytorch | {
"name": "pytorch",
"version": "2.5.1",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
... | {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 10,
"duration": 10,
"warmup_runs": 10,
"input_shapes": {
"batch_size": 2,
"sequence_length": 16
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": false,
"f... | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": null,
"numactl": true,
"numactl_kwargs": {
"cpunodebind": 0,
"membind": 0
},
"start_method": "spawn"
} | {
"cpu": " AMD EPYC 7R13 Processor",
"cpu_count": 64,
"cpu_ram_mb": 529717.02272,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.205-195.807.amzn2.x86_64-x86_64-with-glibc2.36",
"processor": "",
"python_version": "3.10.16",
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchma... | false | true | null | null | null | null |
null | null | null | null | null | null | null | {
"memory": {
"unit": "MB",
"max_ram": 1917.493248,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
1.2822308279573917
],
"count": 1,
"total": 1.2822308279573917,
"mean": 1... | {
"memory": {
"unit": "MB",
"max_ram": 1572.794368,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
0.17518383637070656,
0.1307593509554863,
0.11524972692131996,
0.173815... | {
"memory": {
"unit": "MB",
"max_ram": 1572.794368,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
2.0932118706405163,
2.1575567424297333,
2.111031912267208,
2.122941717... | {
"memory": null,
"latency": {
"unit": "s",
"values": [
0.07864363119006157,
0.03083161637187004,
0.08973810821771622,
0.07993792369961739,
0.03112538903951645,
0.08748161420226097,
0.08177611604332924,
0.031392719596624374,
0.07924764230847359,
0.... |
2024-12-12-16:43:18/openvino | {
"name": "openvino",
"version": "2024.5.0",
"_target_": "optimum_benchmark.backends.openvino.backend.OVBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
... | {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 10,
"duration": 10,
"warmup_runs": 10,
"input_shapes": {
"batch_size": 2,
"sequence_length": 16
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": false,
"f... | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": null,
"numactl": true,
"numactl_kwargs": {
"cpunodebind": 0,
"membind": 0
},
"start_method": "spawn"
} | {
"cpu": " AMD EPYC 7R13 Processor",
"cpu_count": 64,
"cpu_ram_mb": 529717.02272,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.205-195.807.amzn2.x86_64-x86_64-with-glibc2.36",
"processor": "",
"python_version": "3.10.16",
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchma... | false | true | null | null | null | null |
null | null | null | null | null | null | null | {
"memory": {
"unit": "MB",
"max_ram": 2096.529408,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
16.868647765368223
],
"count": 1,
"total": 16.868647765368223,
"mean": 1... | {
"memory": {
"unit": "MB",
"max_ram": 2336.976896,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
0.09469575062394142,
0.095076534897089,
0.2746337614953518,
0.01849853... | {
"memory": {
"unit": "MB",
"max_ram": 2336.976896,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
0.7958299443125725,
0.7852410674095154,
0.9105948507785797,
0.98392741... | {
"memory": null,
"latency": {
"unit": "s",
"values": [
0.052110690623521805,
0.011760182678699493,
0.00886562466621399,
0.08736258000135422,
0.007470604032278061,
0.006973788142204285,
0.013317998498678207,
0.06832902878522873,
0.008988596498966217,
... |
2024-12-12-16:43:18/pytorch | {
"name": "pytorch",
"version": "2.5.1",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
... | {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 10,
"duration": 10,
"warmup_runs": 10,
"input_shapes": {
"batch_size": 2,
"sequence_length": 16
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": false,
"f... | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": null,
"numactl": true,
"numactl_kwargs": {
"cpunodebind": 0,
"membind": 0
},
"start_method": "spawn"
} | {
"cpu": " AMD EPYC 7R13 Processor",
"cpu_count": 64,
"cpu_ram_mb": 529717.02272,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.205-195.807.amzn2.x86_64-x86_64-with-glibc2.36",
"processor": "",
"python_version": "3.10.16",
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchma... | false | true | null | null | null | null |
null | null | null | null | null | null | null | {
"memory": {
"unit": "MB",
"max_ram": 1768.452096,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
0.16886640712618828
],
"count": 1,
"total": 0.16886640712618828,
"mean":... | {
"memory": {
"unit": "MB",
"max_ram": 1475.366912,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
0.11910442635416985,
0.17319708317518234,
0.16478652879595757,
0.11591... | {
"memory": {
"unit": "MB",
"max_ram": 1475.366912,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
2.093795470893383,
1.9385161995887756,
1.9993691183626652,
1.998684812... | {
"memory": null,
"latency": {
"unit": "s",
"values": [
0.08753624185919762,
0.07851441949605942,
0.030955448746681213,
0.08736911788582802,
0.07869019731879234,
0.031381867825984955,
0.0881793163716793,
0.07993457466363907,
0.030889153480529785,
0... |
2024-12-12-17:51:29/openvino | {
"name": "openvino",
"version": "2024.5.0",
"_target_": "optimum_benchmark.backends.openvino.backend.OVBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
... | {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 10,
"duration": 10,
"warmup_runs": 10,
"input_shapes": {
"batch_size": 2,
"sequence_length": 16
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": false,
"f... | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": null,
"numactl": true,
"numactl_kwargs": {
"cpunodebind": 0,
"membind": 0
},
"start_method": "spawn"
} | {
"cpu": " AMD EPYC 7R13 Processor",
"cpu_count": 64,
"cpu_ram_mb": 529717.02272,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.205-195.807.amzn2.x86_64-x86_64-with-glibc2.36",
"processor": "",
"python_version": "3.10.16",
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchma... | false | true | null | null | null | null |
null | null | null | null | null | null | null | {"memory":{"unit":"MB","max_ram":2390.089728,"max_global_vram":null,"max_process_vram":null,"max_res(...TRUNCATED) | {"memory":{"unit":"MB","max_ram":2688.08192,"max_global_vram":null,"max_process_vram":null,"max_rese(...TRUNCATED) | {"memory":{"unit":"MB","max_ram":2688.08192,"max_global_vram":null,"max_process_vram":null,"max_rese(...TRUNCATED) | {"memory":null,"latency":{"unit":"s","values":[0.08446569368243217,0.016460534185171127,0.1711131371(...TRUNCATED) |
End of preview.
No dataset card yet
- Downloads last month
- 12