{
    "name": "cpu_inference_transformers_text-classification_FacebookAI/roberta-base",
    "backend": {
        "name": "pytorch",
        "version": "2.5.1+cpu",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "task": "text-classification",
        "library": "transformers",
        "model_type": "roberta",
        "model": "FacebookAI/roberta-base",
        "processor": "FacebookAI/roberta-base",
        "device": "cpu",
        "device_ids": null,
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 1,
        "duration": 1,
        "warmup_runs": 1,
        "input_shapes": {
            "batch_size": 2,
            "sequence_length": 16,
            "num_choices": 2
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": true,
        "forward_kwargs": {},
        "generate_kwargs": {
            "max_new_tokens": 2,
            "min_new_tokens": 2
        },
        "call_kwargs": {
            "num_inference_steps": 2
        }
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": false,
        "device_isolation_action": null,
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7763 64-Core Processor",
        "cpu_count": 4,
        "cpu_ram_mb": 16757.342208,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
        "processor": "x86_64",
        "python_version": "3.10.15",
        "optimum_benchmark_version": "0.5.0.dev0",
        "optimum_benchmark_commit": "9104793fa9ba932870d9ceb029cd628a1388b11e",
        "transformers_version": "4.46.3",
        "transformers_commit": null,
        "accelerate_version": "1.1.1",
        "accelerate_commit": null,
        "diffusers_version": "0.31.0",
        "diffusers_commit": null,
        "optimum_version": null,
        "optimum_commit": null,
        "timm_version": "1.0.11",
        "timm_commit": null,
        "peft_version": null,
        "peft_commit": null
    },
    "print_report": true,
    "log_report": true
}