{
    "name": "cpu_inference_transformers_image-classification_google/vit-base-patch16-224",
    "backend": {
        "name": "pytorch",
        "version": "2.3.1+cpu",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "task": "image-classification",
        "library": "transformers",
        "model_type": "vit",
        "model": "google/vit-base-patch16-224",
        "processor": "google/vit-base-patch16-224",
        "device": "cpu",
        "device_ids": null,
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 1,
        "duration": 1,
        "warmup_runs": 1,
        "input_shapes": {
            "batch_size": 1,
            "num_choices": 2,
            "sequence_length": 2
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": true,
        "forward_kwargs": {},
        "generate_kwargs": {
            "max_new_tokens": 2,
            "min_new_tokens": 2
        },
        "call_kwargs": {
            "num_inference_steps": 2
        }
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": false,
        "device_isolation_action": "error",
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7763 64-Core Processor",
        "cpu_count": 4,
        "cpu_ram_mb": 16757.342208,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-6.5.0-1023-azure-x86_64-with-glibc2.35",
        "processor": "x86_64",
        "python_version": "3.10.14",
        "optimum_benchmark_version": "0.3.1",
        "optimum_benchmark_commit": "3b109cf557e2496fe3c954817385781e6dc5cad1",
        "transformers_version": "4.42.4",
        "transformers_commit": null,
        "accelerate_version": "0.32.1",
        "accelerate_commit": null,
        "diffusers_version": "0.29.2",
        "diffusers_commit": null,
        "optimum_version": null,
        "optimum_commit": null,
        "timm_version": "1.0.7",
        "timm_commit": null,
        "peft_version": null,
        "peft_commit": null,
        "llama_cpp_version": null,
        "llama_cpp_commit": null
    }
}