{
    "name": "cpu_inference_transformers_fill-mask_google-bert/bert-base-uncased",
    "backend": {
        "name": "pytorch",
        "version": "2.4.1+cpu",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "task": "fill-mask",
        "library": "transformers",
        "model_type": "bert",
        "model": "google-bert/bert-base-uncased",
        "processor": "google-bert/bert-base-uncased",
        "device": "cpu",
        "device_ids": null,
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 1,
        "duration": 1,
        "warmup_runs": 1,
        "input_shapes": {
            "batch_size": 1,
            "num_choices": 2,
            "sequence_length": 2
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": true,
        "forward_kwargs": {},
        "generate_kwargs": {
            "max_new_tokens": 2,
            "min_new_tokens": 2
        },
        "call_kwargs": {
            "num_inference_steps": 2
        }
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": false,
        "device_isolation_action": null,
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7763 64-Core Processor",
        "cpu_count": 4,
        "cpu_ram_mb": 16766.7712,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-6.8.0-1014-azure-x86_64-with-glibc2.35",
        "processor": "x86_64",
        "python_version": "3.10.15",
        "optimum_benchmark_version": "0.5.0.dev0",
        "optimum_benchmark_commit": "0a6df5b9a995b4bb9aebaf68a47898df52335978",
        "transformers_version": "4.45.2",
        "transformers_commit": null,
        "accelerate_version": "1.0.0",
        "accelerate_commit": null,
        "diffusers_version": "0.30.3",
        "diffusers_commit": null,
        "optimum_version": null,
        "optimum_commit": null,
        "timm_version": "1.0.9",
        "timm_commit": null,
        "peft_version": null,
        "peft_commit": null
    },
    "print_report": true,
    "log_report": true
}