Saauan commited on
Commit
7239e9d
·
verified ·
1 Parent(s): 4a678ab

Upload 5 reports on code generation and optimization

Browse files
data/report_Inria_inference_code generation_publicCloud_2025-10-30T14:03:54.887286-698cfce8-cc52-4f09-8ab1-9aee2dd785dc.json ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "header": {
3
+ "licensing": "Creative Commons 4.0",
4
+ "formatVersion": "0.1",
5
+ "reportId": "2025-10-30T14:03:54.887286-698cfce8-cc52-4f09-8ab1-9aee2dd785dc",
6
+ "reportStatus": "final",
7
+ "reportDatetime": "2025-10-30T14:03:54.887331",
8
+ "publisher": {
9
+ "name": "Inria",
10
+ "confidentialityLevel": "public"
11
+ }
12
+ },
13
+ "task": {
14
+ "taskStage": "inference",
15
+ "taskFamily": "code generation",
16
+ "nbRequest": 4720,
17
+ "algorithms": [
18
+ {
19
+ "algorithmType": "llm",
20
+ "foundationModelName": "Qwen/Qwen2.5-Coder-14B-Instruct",
21
+ "framework": "vllm",
22
+ "frameworkVersion": "0.7.3",
23
+ "parametersNumber": 14
24
+ }
25
+ ],
26
+ "dataset": [
27
+ {
28
+ "dataUsage": "input",
29
+ "dataType": "token",
30
+ "dataQuantity": 583440,
31
+ "source": "public",
32
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
33
+ "owner": "evalplus"
34
+ },
35
+ {
36
+ "dataUsage": "output",
37
+ "dataType": "token",
38
+ "dataQuantity": 692470,
39
+ "source": "public",
40
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
41
+ "owner": "evalplus"
42
+ }
43
+ ],
44
+ "taskDescription": "Code generation from EvalPerf prompts - Continuous batching"
45
+ },
46
+ "measures": [
47
+ {
48
+ "measurementMethod": "perf",
49
+ "version": "5.10.237",
50
+ "cpuTrackingMode": "rapl",
51
+ "powerCalibrationMeasurement": 0.0035959666666666667,
52
+ "durationCalibrationMeasurement": 210.1,
53
+ "powerConsumption": 0.005637386111111111,
54
+ "measurementDuration": 227.6,
55
+ "measurementDateTime": "2025-04-26T16:23:00"
56
+ },
57
+ {
58
+ "measurementMethod": "nvidia-smi",
59
+ "version": "v535.183.06",
60
+ "gpuTrackingMode": "nvml",
61
+ "powerCalibrationMeasurement": 0.015566341666666665,
62
+ "durationCalibrationMeasurement": 210.1,
63
+ "powerConsumption": 0.0481359,
64
+ "measurementDuration": 227.6,
65
+ "measurementDateTime": "2025-04-26T16:23:00"
66
+ }
67
+ ],
68
+ "system": {
69
+ "os": "Linux",
70
+ "distribution": "Debian",
71
+ "distributionVersion": "11"
72
+ },
73
+ "software": {
74
+ "language": "python",
75
+ "version": "3.12.5"
76
+ },
77
+ "infrastructure": {
78
+ "infraType": "publicCloud",
79
+ "cloudProvider": "grid5000",
80
+ "cloudInstance": "chuc",
81
+ "components": [
82
+ {
83
+ "componentName": "Nvidia A100-SXM4-40GB",
84
+ "componentType": "gpu",
85
+ "nbComponent": 4,
86
+ "memorySize": 40,
87
+ "manufacturer": "Nvidia",
88
+ "share": 1
89
+ },
90
+ {
91
+ "componentName": "AMD EPYC 7513 (Zen 3)",
92
+ "componentType": "cpu",
93
+ "nbComponent": 1,
94
+ "manufacturer": "AMD",
95
+ "share": 1
96
+ },
97
+ {
98
+ "componentName": "ram",
99
+ "componentType": "ram",
100
+ "nbComponent": 1,
101
+ "memorySize": 512
102
+ }
103
+ ]
104
+ },
105
+ "environment": {
106
+ "country": "france",
107
+ "location": "lille"
108
+ },
109
+ "quality": "high"
110
+ }
data/report_Inria_inference_code generation_publicCloud_2025-10-30T14:03:54.888017-e5347a1e-1541-4a22-90ae-881167d81dfa.json ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "header": {
3
+ "licensing": "Creative Commons 4.0",
4
+ "formatVersion": "0.1",
5
+ "reportId": "2025-10-30T14:03:54.888017-e5347a1e-1541-4a22-90ae-881167d81dfa",
6
+ "reportStatus": "final",
7
+ "reportDatetime": "2025-10-30T14:03:54.888042",
8
+ "publisher": {
9
+ "name": "Inria",
10
+ "confidentialityLevel": "public"
11
+ }
12
+ },
13
+ "task": {
14
+ "taskStage": "inference",
15
+ "taskFamily": "code generation",
16
+ "nbRequest": 4720,
17
+ "algorithms": [
18
+ {
19
+ "algorithmType": "llm",
20
+ "foundationModelName": "Qwen/Qwen2.5-Coder-14B-Instruct",
21
+ "framework": "vllm",
22
+ "frameworkVersion": "0.7.3",
23
+ "parametersNumber": 14
24
+ }
25
+ ],
26
+ "dataset": [
27
+ {
28
+ "dataUsage": "input",
29
+ "dataType": "token",
30
+ "dataQuantity": 2771185,
31
+ "source": "public",
32
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
33
+ "owner": "evalplus"
34
+ },
35
+ {
36
+ "dataUsage": "output",
37
+ "dataType": "token",
38
+ "dataQuantity": 578510,
39
+ "source": "public",
40
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
41
+ "owner": "evalplus"
42
+ }
43
+ ],
44
+ "taskDescription": "Code optimization - Continuous batching"
45
+ },
46
+ "measures": [
47
+ {
48
+ "measurementMethod": "perf",
49
+ "version": "5.10.237",
50
+ "cpuTrackingMode": "rapl",
51
+ "powerCalibrationMeasurement": 0.0010283083333333333,
52
+ "durationCalibrationMeasurement": 60.0,
53
+ "powerConsumption": 0.033736777777777775,
54
+ "measurementDuration": 1323.0,
55
+ "measurementDateTime": "2025-04-26T16:23:00"
56
+ },
57
+ {
58
+ "measurementMethod": "nvidia-smi",
59
+ "version": "v535.183.06",
60
+ "gpuTrackingMode": "nvml",
61
+ "powerCalibrationMeasurement": 0.004474944444444445,
62
+ "durationCalibrationMeasurement": 60.0,
63
+ "powerConsumption": 0.33071247777777774,
64
+ "measurementDuration": 1323.0,
65
+ "measurementDateTime": "2025-04-26T16:23:00"
66
+ }
67
+ ],
68
+ "system": {
69
+ "os": "Linux",
70
+ "distribution": "Debian",
71
+ "distributionVersion": "11"
72
+ },
73
+ "software": {
74
+ "language": "python",
75
+ "version": "3.12.5"
76
+ },
77
+ "infrastructure": {
78
+ "infraType": "publicCloud",
79
+ "cloudProvider": "grid5000",
80
+ "cloudInstance": "chuc",
81
+ "components": [
82
+ {
83
+ "componentName": "Nvidia A100-SXM4-40GB",
84
+ "componentType": "gpu",
85
+ "nbComponent": 4,
86
+ "memorySize": 40,
87
+ "manufacturer": "Nvidia",
88
+ "share": 1
89
+ },
90
+ {
91
+ "componentName": "AMD EPYC 7513 (Zen 3)",
92
+ "componentType": "cpu",
93
+ "nbComponent": 1,
94
+ "manufacturer": "AMD",
95
+ "share": 1
96
+ },
97
+ {
98
+ "componentName": "ram",
99
+ "componentType": "ram",
100
+ "nbComponent": 1,
101
+ "memorySize": 512
102
+ }
103
+ ]
104
+ },
105
+ "environment": {
106
+ "country": "france",
107
+ "location": "lille"
108
+ },
109
+ "quality": "high"
110
+ }
data/report_Inria_inference_code generation_publicCloud_2025-10-30T14:03:54.888403-46605bb3-cc7f-4df2-a4db-2bd2152a48d3.json ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "header": {
3
+ "licensing": "Creative Commons 4.0",
4
+ "formatVersion": "0.1",
5
+ "reportId": "2025-10-30T14:03:54.888403-46605bb3-cc7f-4df2-a4db-2bd2152a48d3",
6
+ "reportStatus": "final",
7
+ "reportDatetime": "2025-10-30T14:03:54.888420",
8
+ "publisher": {
9
+ "name": "Inria",
10
+ "confidentialityLevel": "public"
11
+ }
12
+ },
13
+ "task": {
14
+ "taskStage": "inference",
15
+ "taskFamily": "code generation",
16
+ "nbRequest": 4720,
17
+ "algorithms": [
18
+ {
19
+ "algorithmType": "llm",
20
+ "foundationModelName": "Qwen/Qwen2.5-Coder-14B-Instruct",
21
+ "framework": "vllm",
22
+ "frameworkVersion": "0.7.3",
23
+ "parametersNumber": 14
24
+ }
25
+ ],
26
+ "dataset": [
27
+ {
28
+ "dataUsage": "input",
29
+ "dataType": "token",
30
+ "dataQuantity": 2906519,
31
+ "source": "public",
32
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
33
+ "owner": "evalplus"
34
+ },
35
+ {
36
+ "dataUsage": "output",
37
+ "dataType": "token",
38
+ "dataQuantity": 615092,
39
+ "source": "public",
40
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
41
+ "owner": "evalplus"
42
+ }
43
+ ],
44
+ "taskDescription": "Code optimization - Continuous batching"
45
+ },
46
+ "measures": [
47
+ {
48
+ "measurementMethod": "perf",
49
+ "version": "5.10.237",
50
+ "cpuTrackingMode": "rapl",
51
+ "powerCalibrationMeasurement": 0.0010273083333333334,
52
+ "durationCalibrationMeasurement": 60.1,
53
+ "powerConsumption": 0.033168591666666664,
54
+ "measurementDuration": 1290.3,
55
+ "measurementDateTime": "2025-04-26T16:23:00"
56
+ },
57
+ {
58
+ "measurementMethod": "nvidia-smi",
59
+ "version": "v535.183.06",
60
+ "gpuTrackingMode": "nvml",
61
+ "powerCalibrationMeasurement": 0.0043726611111111115,
62
+ "durationCalibrationMeasurement": 60.1,
63
+ "powerConsumption": 0.3306251805555555,
64
+ "measurementDuration": 1290.3,
65
+ "measurementDateTime": "2025-04-26T16:23:00"
66
+ }
67
+ ],
68
+ "system": {
69
+ "os": "Linux",
70
+ "distribution": "Debian",
71
+ "distributionVersion": "11"
72
+ },
73
+ "software": {
74
+ "language": "python",
75
+ "version": "3.12.5"
76
+ },
77
+ "infrastructure": {
78
+ "infraType": "publicCloud",
79
+ "cloudProvider": "grid5000",
80
+ "cloudInstance": "chuc",
81
+ "components": [
82
+ {
83
+ "componentName": "Nvidia A100-SXM4-40GB",
84
+ "componentType": "gpu",
85
+ "nbComponent": 4,
86
+ "memorySize": 40,
87
+ "manufacturer": "Nvidia",
88
+ "share": 1
89
+ },
90
+ {
91
+ "componentName": "AMD EPYC 7513 (Zen 3)",
92
+ "componentType": "cpu",
93
+ "nbComponent": 1,
94
+ "manufacturer": "AMD",
95
+ "share": 1
96
+ },
97
+ {
98
+ "componentName": "ram",
99
+ "componentType": "ram",
100
+ "nbComponent": 1,
101
+ "memorySize": 512
102
+ }
103
+ ]
104
+ },
105
+ "environment": {
106
+ "country": "france",
107
+ "location": "lille"
108
+ },
109
+ "quality": "high"
110
+ }
data/report_Inria_inference_code generation_publicCloud_2025-10-30T14:03:54.888708-fb6dd1ba-b75e-4bb8-ac30-25b947422c47.json ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "header": {
3
+ "licensing": "Creative Commons 4.0",
4
+ "formatVersion": "0.1",
5
+ "reportId": "2025-10-30T14:03:54.888708-fb6dd1ba-b75e-4bb8-ac30-25b947422c47",
6
+ "reportStatus": "final",
7
+ "reportDatetime": "2025-10-30T14:03:54.888724",
8
+ "publisher": {
9
+ "name": "Inria",
10
+ "confidentialityLevel": "public"
11
+ }
12
+ },
13
+ "task": {
14
+ "taskStage": "inference",
15
+ "taskFamily": "code generation",
16
+ "nbRequest": 4720,
17
+ "algorithms": [
18
+ {
19
+ "algorithmType": "llm",
20
+ "foundationModelName": "Qwen/Qwen2.5-Coder-14B-Instruct",
21
+ "framework": "vllm",
22
+ "frameworkVersion": "0.7.3",
23
+ "parametersNumber": 14
24
+ }
25
+ ],
26
+ "dataset": [
27
+ {
28
+ "dataUsage": "input",
29
+ "dataType": "token",
30
+ "dataQuantity": 2979921,
31
+ "source": "public",
32
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
33
+ "owner": "evalplus"
34
+ },
35
+ {
36
+ "dataUsage": "output",
37
+ "dataType": "token",
38
+ "dataQuantity": 654137,
39
+ "source": "public",
40
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
41
+ "owner": "evalplus"
42
+ }
43
+ ],
44
+ "taskDescription": "Code optimization - Continuous batching"
45
+ },
46
+ "measures": [
47
+ {
48
+ "measurementMethod": "perf",
49
+ "version": "5.10.237",
50
+ "cpuTrackingMode": "rapl",
51
+ "powerCalibrationMeasurement": 0.0010351222222222223,
52
+ "durationCalibrationMeasurement": 60.1,
53
+ "powerConsumption": 0.040236850000000005,
54
+ "measurementDuration": 1560.9,
55
+ "measurementDateTime": "2025-04-26T16:23:00"
56
+ },
57
+ {
58
+ "measurementMethod": "nvidia-smi",
59
+ "version": "v535.183.06",
60
+ "gpuTrackingMode": "nvml",
61
+ "powerCalibrationMeasurement": 0.004401880555555556,
62
+ "durationCalibrationMeasurement": 60.1,
63
+ "powerConsumption": 0.3983822166666666,
64
+ "measurementDuration": 1560.9,
65
+ "measurementDateTime": "2025-04-26T16:23:00"
66
+ }
67
+ ],
68
+ "system": {
69
+ "os": "Linux",
70
+ "distribution": "Debian",
71
+ "distributionVersion": "11"
72
+ },
73
+ "software": {
74
+ "language": "python",
75
+ "version": "3.12.5"
76
+ },
77
+ "infrastructure": {
78
+ "infraType": "publicCloud",
79
+ "cloudProvider": "grid5000",
80
+ "cloudInstance": "chuc",
81
+ "components": [
82
+ {
83
+ "componentName": "Nvidia A100-SXM4-40GB",
84
+ "componentType": "gpu",
85
+ "nbComponent": 4,
86
+ "memorySize": 40,
87
+ "manufacturer": "Nvidia",
88
+ "share": 1
89
+ },
90
+ {
91
+ "componentName": "AMD EPYC 7513 (Zen 3)",
92
+ "componentType": "cpu",
93
+ "nbComponent": 1,
94
+ "manufacturer": "AMD",
95
+ "share": 1
96
+ },
97
+ {
98
+ "componentName": "ram",
99
+ "componentType": "ram",
100
+ "nbComponent": 1,
101
+ "memorySize": 512
102
+ }
103
+ ]
104
+ },
105
+ "environment": {
106
+ "country": "france",
107
+ "location": "lille"
108
+ },
109
+ "quality": "high"
110
+ }
data/report_Inria_inference_code generation_publicCloud_2025-10-30T14:03:54.888999-e877f810-734d-4a74-bdb1-41c7a9e70de2.json ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "header": {
3
+ "licensing": "Creative Commons 4.0",
4
+ "formatVersion": "0.1",
5
+ "reportId": "2025-10-30T14:03:54.888999-e877f810-734d-4a74-bdb1-41c7a9e70de2",
6
+ "reportStatus": "final",
7
+ "reportDatetime": "2025-10-30T14:03:54.889014",
8
+ "publisher": {
9
+ "name": "Inria",
10
+ "confidentialityLevel": "public"
11
+ }
12
+ },
13
+ "task": {
14
+ "taskStage": "inference",
15
+ "taskFamily": "code generation",
16
+ "nbRequest": 4720,
17
+ "algorithms": [
18
+ {
19
+ "algorithmType": "llm",
20
+ "foundationModelName": "Qwen/Qwen2.5-Coder-14B-Instruct",
21
+ "framework": "vllm",
22
+ "frameworkVersion": "0.7.3",
23
+ "parametersNumber": 14
24
+ }
25
+ ],
26
+ "dataset": [
27
+ {
28
+ "dataUsage": "input",
29
+ "dataType": "token",
30
+ "dataQuantity": 3177286,
31
+ "source": "public",
32
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
33
+ "owner": "evalplus"
34
+ },
35
+ {
36
+ "dataUsage": "output",
37
+ "dataType": "token",
38
+ "dataQuantity": 699898,
39
+ "source": "public",
40
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
41
+ "owner": "evalplus"
42
+ }
43
+ ],
44
+ "taskDescription": "Code optimization - Continuous batching"
45
+ },
46
+ "measures": [
47
+ {
48
+ "measurementMethod": "perf",
49
+ "version": "5.10.237",
50
+ "cpuTrackingMode": "rapl",
51
+ "powerCalibrationMeasurement": 0.0010327833333333334,
52
+ "durationCalibrationMeasurement": 60.1,
53
+ "powerConsumption": 0.045946347222222225,
54
+ "measurementDuration": 1778.8,
55
+ "measurementDateTime": "2025-04-26T16:23:00"
56
+ },
57
+ {
58
+ "measurementMethod": "nvidia-smi",
59
+ "version": "v535.183.06",
60
+ "gpuTrackingMode": "nvml",
61
+ "powerCalibrationMeasurement": 0.004410644444444444,
62
+ "durationCalibrationMeasurement": 60.1,
63
+ "powerConsumption": 0.4179077888888889,
64
+ "measurementDuration": 1778.8,
65
+ "measurementDateTime": "2025-04-26T16:23:00"
66
+ }
67
+ ],
68
+ "system": {
69
+ "os": "Linux",
70
+ "distribution": "Debian",
71
+ "distributionVersion": "11"
72
+ },
73
+ "software": {
74
+ "language": "python",
75
+ "version": "3.12.5"
76
+ },
77
+ "infrastructure": {
78
+ "infraType": "publicCloud",
79
+ "cloudProvider": "grid5000",
80
+ "cloudInstance": "chuc",
81
+ "components": [
82
+ {
83
+ "componentName": "Nvidia A100-SXM4-40GB",
84
+ "componentType": "gpu",
85
+ "nbComponent": 4,
86
+ "memorySize": 40,
87
+ "manufacturer": "Nvidia",
88
+ "share": 1
89
+ },
90
+ {
91
+ "componentName": "AMD EPYC 7513 (Zen 3)",
92
+ "componentType": "cpu",
93
+ "nbComponent": 1,
94
+ "manufacturer": "AMD",
95
+ "share": 1
96
+ },
97
+ {
98
+ "componentName": "ram",
99
+ "componentType": "ram",
100
+ "nbComponent": 1,
101
+ "memorySize": 512
102
+ }
103
+ ]
104
+ },
105
+ "environment": {
106
+ "country": "france",
107
+ "location": "lille"
108
+ },
109
+ "quality": "high"
110
+ }