{
    "config": {
        "name": "cpu_inference_transformers_text-classification_FacebookAI/roberta-base",
        "backend": {
            "name": "pytorch",
            "version": "2.3.1+cpu",
            "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
            "task": "text-classification",
            "library": "transformers",
            "model": "FacebookAI/roberta-base",
            "processor": "FacebookAI/roberta-base",
            "device": "cpu",
            "device_ids": null,
            "seed": 42,
            "inter_op_num_threads": null,
            "intra_op_num_threads": null,
            "model_kwargs": {},
            "processor_kwargs": {},
            "hub_kwargs": {},
            "no_weights": true,
            "device_map": null,
            "torch_dtype": null,
            "eval_mode": true,
            "to_bettertransformer": false,
            "low_cpu_mem_usage": null,
            "attn_implementation": null,
            "cache_implementation": null,
            "autocast_enabled": false,
            "autocast_dtype": null,
            "torch_compile": false,
            "torch_compile_target": "forward",
            "torch_compile_config": {},
            "quantization_scheme": null,
            "quantization_config": {},
            "deepspeed_inference": false,
            "deepspeed_inference_config": {},
            "peft_type": null,
            "peft_config": {}
        },
        "scenario": {
            "name": "inference",
            "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
            "iterations": 1,
            "duration": 1,
            "warmup_runs": 1,
            "input_shapes": {
                "batch_size": 1,
                "num_choices": 2,
                "sequence_length": 2
            },
            "new_tokens": null,
            "latency": true,
            "memory": true,
            "energy": true,
            "forward_kwargs": {},
            "generate_kwargs": {
                "max_new_tokens": 2,
                "min_new_tokens": 2
            },
            "call_kwargs": {
                "num_inference_steps": 2
            }
        },
        "launcher": {
            "name": "process",
            "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
            "device_isolation": false,
            "device_isolation_action": "error",
            "numactl": false,
            "numactl_kwargs": {},
            "start_method": "spawn"
        },
        "environment": {
            "cpu": " AMD EPYC 7763 64-Core Processor",
            "cpu_count": 4,
            "cpu_ram_mb": 16757.342208,
            "system": "Linux",
            "machine": "x86_64",
            "platform": "Linux-6.5.0-1022-azure-x86_64-with-glibc2.35",
            "processor": "x86_64",
            "python_version": "3.10.14",
            "optimum_benchmark_version": "0.3.0",
            "optimum_benchmark_commit": "2a75c0bc0d007cc875fa0f75ca41d02e46f917be",
            "transformers_version": "4.42.3",
            "transformers_commit": null,
            "accelerate_version": "0.31.0",
            "accelerate_commit": null,
            "diffusers_version": "0.29.2",
            "diffusers_commit": null,
            "optimum_version": null,
            "optimum_commit": null,
            "timm_version": "1.0.7",
            "timm_commit": null,
            "peft_version": null,
            "peft_commit": null
        }
    },
    "report": {
        "forward": {
            "memory": {
                "unit": "MB",
                "max_ram": 937.02144,
                "max_global_vram": null,
                "max_process_vram": null,
                "max_reserved": null,
                "max_allocated": null
            },
            "latency": {
                "unit": "s",
                "count": 28,
                "total": 1.005964189000224,
                "mean": 0.035927292464293714,
                "stdev": 0.0019956385117658793,
                "p50": 0.03657449450000172,
                "p90": 0.03719419330002438,
                "p95": 0.03753912139999897,
                "p99": 0.03829891815997428,
                "values": [
                    0.03772244300000693,
                    0.0367225699999949,
                    0.03636450400000513,
                    0.03578389099999413,
                    0.036718060999987756,
                    0.03658677699996815,
                    0.036936869000044226,
                    0.03639611300002343,
                    0.03625726300003862,
                    0.035124133000010715,
                    0.0371922760000416,
                    0.03627540700000509,
                    0.03657813099999885,
                    0.03691887700000507,
                    0.036936900000000605,
                    0.036548726000035,
                    0.03645045399997571,
                    0.03706417699999065,
                    0.03657085800000459,
                    0.036661146000028566,
                    0.03598892400003706,
                    0.0371986669999842,
                    0.03704812400002311,
                    0.0385121349999622,
                    0.033050086999992345,
                    0.0311335270000086,
                    0.030543047000037404,
                    0.03068010200001936
                ]
            },
            "throughput": {
                "unit": "samples/s",
                "value": 27.833992806272516
            },
            "energy": {
                "unit": "kWh",
                "cpu": 1.202320811724422e-06,
                "ram": 5.0246350621757134e-08,
                "gpu": 0.0,
                "total": 1.2525671623461792e-06
            },
            "efficiency": {
                "unit": "samples/kWh",
                "value": 798360.3834279861
            }
        }
    }
}