{
    "name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
    "backend": {
        "name": "pytorch",
        "version": "2.4.0+cpu",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "task": "text-generation",
        "library": "transformers",
        "model_type": "gpt2",
        "model": "openai-community/gpt2",
        "processor": "openai-community/gpt2",
        "device": "cpu",
        "device_ids": null,
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 1,
        "duration": 1,
        "warmup_runs": 1,
        "input_shapes": {
            "batch_size": 1,
            "num_choices": 2,
            "sequence_length": 2
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": true,
        "forward_kwargs": {},
        "generate_kwargs": {
            "max_new_tokens": 2,
            "min_new_tokens": 2
        },
        "call_kwargs": {
            "num_inference_steps": 2
        }
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": false,
        "device_isolation_action": "error",
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7763 64-Core Processor",
        "cpu_count": 4,
        "cpu_ram_mb": 16757.338112,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
        "processor": "x86_64",
        "python_version": "3.10.14",
        "optimum_benchmark_version": "0.4.0",
        "optimum_benchmark_commit": "bbd38855156b81c8eada2deda937e3adeb3b496b",
        "transformers_version": "4.44.2",
        "transformers_commit": null,
        "accelerate_version": "0.33.0",
        "accelerate_commit": null,
        "diffusers_version": "0.30.1",
        "diffusers_commit": null,
        "optimum_version": null,
        "optimum_commit": null,
        "timm_version": "1.0.9",
        "timm_commit": null,
        "peft_version": null,
        "peft_commit": null
    }
}