{
"name": "cpu_training_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.4.1+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": false
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": null,
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16757.338112,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"optimum_benchmark_version": "0.4.0",
"optimum_benchmark_commit": "c32b0c58c97c176ab4cf930864469754db9b3f23",
"transformers_version": "4.44.2",
"transformers_commit": null,
"accelerate_version": "0.34.2",
"accelerate_commit": null,
"diffusers_version": "0.30.3",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.9",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
}