{
    "config": {
        "name": "cpu_inference_transformers_image-classification_google/vit-base-patch16-224",
        "backend": {
            "name": "pytorch",
            "version": "2.3.1+cpu",
            "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
            "task": "image-classification",
            "library": "transformers",
            "model_type": "vit",
            "model": "google/vit-base-patch16-224",
            "processor": "google/vit-base-patch16-224",
            "device": "cpu",
            "device_ids": null,
            "seed": 42,
            "inter_op_num_threads": null,
            "intra_op_num_threads": null,
            "model_kwargs": {},
            "processor_kwargs": {},
            "hub_kwargs": {},
            "no_weights": true,
            "device_map": null,
            "torch_dtype": null,
            "eval_mode": true,
            "to_bettertransformer": false,
            "low_cpu_mem_usage": null,
            "attn_implementation": null,
            "cache_implementation": null,
            "autocast_enabled": false,
            "autocast_dtype": null,
            "torch_compile": false,
            "torch_compile_target": "forward",
            "torch_compile_config": {},
            "quantization_scheme": null,
            "quantization_config": {},
            "deepspeed_inference": false,
            "deepspeed_inference_config": {},
            "peft_type": null,
            "peft_config": {}
        },
        "scenario": {
            "name": "inference",
            "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
            "iterations": 1,
            "duration": 1,
            "warmup_runs": 1,
            "input_shapes": {
                "batch_size": 1,
                "num_choices": 2,
                "sequence_length": 2
            },
            "new_tokens": null,
            "latency": true,
            "memory": true,
            "energy": true,
            "forward_kwargs": {},
            "generate_kwargs": {
                "max_new_tokens": 2,
                "min_new_tokens": 2
            },
            "call_kwargs": {
                "num_inference_steps": 2
            }
        },
        "launcher": {
            "name": "process",
            "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
            "device_isolation": false,
            "device_isolation_action": "error",
            "numactl": false,
            "numactl_kwargs": {},
            "start_method": "spawn"
        },
        "environment": {
            "cpu": " AMD EPYC 7763 64-Core Processor",
            "cpu_count": 4,
            "cpu_ram_mb": 16757.342208,
            "system": "Linux",
            "machine": "x86_64",
            "platform": "Linux-6.5.0-1023-azure-x86_64-with-glibc2.35",
            "processor": "x86_64",
            "python_version": "3.10.14",
            "optimum_benchmark_version": "0.3.1",
            "optimum_benchmark_commit": "2a33a472f309c43b5bd16946ef9cec843d02f70a",
            "transformers_version": "4.42.4",
            "transformers_commit": null,
            "accelerate_version": "0.32.1",
            "accelerate_commit": null,
            "diffusers_version": "0.29.2",
            "diffusers_commit": null,
            "optimum_version": null,
            "optimum_commit": null,
            "timm_version": "1.0.7",
            "timm_commit": null,
            "peft_version": null,
            "peft_commit": null
        }
    },
    "report": {
        "forward": {
            "memory": {
                "unit": "MB",
                "max_ram": 815.382528,
                "max_global_vram": null,
                "max_process_vram": null,
                "max_reserved": null,
                "max_allocated": null
            },
            "latency": {
                "unit": "s",
                "count": 4,
                "total": 1.0272348810000267,
                "mean": 0.2568087202500067,
                "stdev": 0.007817052402553875,
                "p50": 0.260123090999997,
                "p90": 0.262673669100019,
                "p95": 0.26307925005002347,
                "p99": 0.26340371481002706,
                "values": [
                    0.25946522399999594,
                    0.260780957999998,
                    0.26348483100002795,
                    0.2435038680000048
                ]
            },
            "throughput": {
                "unit": "samples/s",
                "value": 3.8939487686652026
            },
            "energy": {
                "unit": "kWh",
                "cpu": 9.147991604275175e-06,
                "ram": 3.8232736514987664e-07,
                "gpu": 0.0,
                "total": 9.530318969425051e-06
            },
            "efficiency": {
                "unit": "samples/kWh",
                "value": 104928.28238049292
            }
        }
    }
}
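
A minimal sketch of how this report could be consumed downstream, assuming the JSON above is saved locally under a hypothetical name such as benchmark_report.json: it reloads the file and re-derives the forward throughput from the mean latency and the configured batch size, which should match the reported value (≈3.894 samples/s).

```python
# Hypothetical helper (not part of the report itself): load the optimum-benchmark
# report and cross-check the reported forward throughput against the measured latency.
import json

with open("benchmark_report.json") as f:  # assumed filename
    data = json.load(f)

batch_size = data["config"]["scenario"]["input_shapes"]["batch_size"]
forward = data["report"]["forward"]

mean_latency_s = forward["latency"]["mean"]           # mean seconds per forward call
reported_throughput = forward["throughput"]["value"]  # samples/s as reported

# Throughput is samples processed per second: batch_size / mean latency.
recomputed_throughput = batch_size / mean_latency_s

print(f"reported:   {reported_throughput:.4f} samples/s")
print(f"recomputed: {recomputed_throughput:.4f} samples/s")
```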