{
    "config": {
        "name": "cpu_inference_transformers_multiple-choice_FacebookAI/roberta-base",
        "backend": {
            "name": "pytorch",
            "version": "2.3.0+cpu",
            "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
            "task": "multiple-choice",
            "model": "FacebookAI/roberta-base",
            "library": "transformers",
            "device": "cpu",
            "device_ids": null,
            "seed": 42,
            "inter_op_num_threads": null,
            "intra_op_num_threads": null,
            "hub_kwargs": {
                "revision": "main",
                "force_download": false,
                "local_files_only": false,
                "trust_remote_code": false
            },
            "no_weights": true,
            "device_map": null,
            "torch_dtype": null,
            "eval_mode": true,
            "to_bettertransformer": false,
            "low_cpu_mem_usage": null,
            "attn_implementation": null,
            "cache_implementation": null,
            "autocast_enabled": false,
            "autocast_dtype": null,
            "torch_compile": false,
            "torch_compile_target": "forward",
            "torch_compile_config": {},
            "quantization_scheme": null,
            "quantization_config": {},
            "deepspeed_inference": false,
            "deepspeed_inference_config": {},
            "peft_type": null,
            "peft_config": {}
        },
        "scenario": {
            "name": "inference",
            "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
            "iterations": 1,
            "duration": 1,
            "warmup_runs": 1,
            "input_shapes": {
                "batch_size": 1,
                "num_choices": 2,
                "sequence_length": 2
            },
            "new_tokens": null,
            "latency": true,
            "memory": true,
            "energy": true,
            "forward_kwargs": {},
            "generate_kwargs": {
                "max_new_tokens": 2,
                "min_new_tokens": 2
            },
            "call_kwargs": {
                "num_inference_steps": 2
            }
        },
        "launcher": {
            "name": "process",
            "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
            "device_isolation": false,
            "device_isolation_action": "error",
            "start_method": "spawn"
        },
        "environment": {
            "cpu": " AMD EPYC 7763 64-Core Processor",
            "cpu_count": 4,
            "cpu_ram_mb": 16757.346304,
            "system": "Linux",
            "machine": "x86_64",
            "platform": "Linux-6.5.0-1018-azure-x86_64-with-glibc2.35",
            "processor": "x86_64",
            "python_version": "3.10.14",
            "optimum_benchmark_version": "0.2.0",
            "optimum_benchmark_commit": "e5cc8d8069420e159473795b6d1ea703cadf2a8b",
            "transformers_version": "4.40.2",
            "transformers_commit": null,
            "accelerate_version": "0.30.1",
            "accelerate_commit": null,
            "diffusers_version": "0.27.2",
            "diffusers_commit": null,
            "optimum_version": null,
            "optimum_commit": null,
            "timm_version": "0.9.16",
            "timm_commit": null,
            "peft_version": null,
            "peft_commit": null
        }
    },
    "report": {
        "forward": {
            "memory": {
                "unit": "MB",
                "max_ram": 944.324608,
                "max_global_vram": null,
                "max_process_vram": null,
                "max_reserved": null,
                "max_allocated": null
            },
            "latency": {
                "unit": "s",
                "count": 22,
                "total": 1.0021623279998835,
                "mean": 0.04555283309090379,
                "stdev": 0.002152527790271481,
                "p50": 0.04556934599995088,
                "p90": 0.047566229999955564,
                "p95": 0.04844541275000722,
                "p99": 0.04864016364997667,
                "values": [
                    0.04868002899996782,
                    0.047005327000022135,
                    0.047594568999954845,
                    0.045089945000029275,
                    0.04849019400000998,
                    0.04686959299999671,
                    0.0466439619999619,
                    0.04731117899996207,
                    0.04695400000002792,
                    0.045120822000001226,
                    0.04527401899997585,
                    0.04565265699994825,
                    0.045714482999983375,
                    0.04548603499995352,
                    0.04514487800003053,
                    0.046206432000019504,
                    0.04494417199998679,
                    0.04462748000003103,
                    0.045348879000016495,
                    0.04433364800001982,
                    0.03991992599998184,
                    0.03975009900000259
                ]
            },
            "throughput": {
                "unit": "samples/s",
                "value": 21.952531426627882
            },
            "energy": {
                "unit": "kWh",
                "cpu": 1.5556824512970754e-06,
                "ram": 6.501517854298792e-08,
                "gpu": 0.0,
                "total": 1.6206976298400633e-06
            },
            "efficiency": {
                "unit": "samples/kWh",
                "value": 617018.2405330499
            }
        }
    }
}