{
    "name": "cpu_inference_transformers_token-classification_microsoft/deberta-v3-base",
    "backend": {
        "name": "pytorch",
        "version": "2.4.0+cpu",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "task": "token-classification",
        "library": "transformers",
        "model_type": "deberta-v2",
        "model": "microsoft/deberta-v3-base",
        "processor": "microsoft/deberta-v3-base",
        "device": "cpu",
        "device_ids": null,
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 1,
        "duration": 1,
        "warmup_runs": 1,
        "input_shapes": {
            "batch_size": 1,
            "num_choices": 2,
            "sequence_length": 2
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": true,
        "forward_kwargs": {},
        "generate_kwargs": {
            "max_new_tokens": 2,
            "min_new_tokens": 2
        },
        "call_kwargs": {
            "num_inference_steps": 2
        }
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": false,
        "device_isolation_action": "error",
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7763 64-Core Processor",
        "cpu_count": 4,
        "cpu_ram_mb": 16757.342208,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
        "processor": "x86_64",
        "python_version": "3.10.14",
        "optimum_benchmark_version": "0.4.0",
        "optimum_benchmark_commit": "70bf04e46123aae57236d23126d446ccb3802129",
        "transformers_version": "4.44.0",
        "transformers_commit": null,
        "accelerate_version": "0.33.0",
        "accelerate_commit": null,
        "diffusers_version": "0.30.0",
        "diffusers_commit": null,
        "optimum_version": null,
        "optimum_commit": null,
        "timm_version": "1.0.8",
        "timm_commit": null,
        "peft_version": null,
        "peft_commit": null
    }
}