import argparse

from optimum_benchmark.experiment import launch, ExperimentConfig
from optimum_benchmark.backends.pytorch.config import PyTorchConfig
from optimum_benchmark.launchers.torchrun.config import TorchrunConfig
from optimum_benchmark.benchmarks.inference.config import InferenceConfig
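
# Runs an optimum-benchmark inference experiment (latency and memory) for the given
# model and pushes the experiment config and benchmark report to the Hugging Face Hub.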

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run optimum-benchmark')
    parser.add_argument('--config-name', dest='experiment_name', type=str,
                        help='experiment name (e.g. text_classification)')
    parser.add_argument('--backend-model', dest='backend_model', type=str,
                        help='model name or Hub id to benchmark')
    parser.add_argument('--hydra-run-dir', dest='run_dir', type=str,
                        help='run directory')
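    # Example invocation (script name and model are illustrative):
    #   python run_benchmark.py --config-name text_classification \
    #       --backend-model distilbert-base-uncased --hydra-run-dir runs/debug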

    args = parser.parse_args()
    backend_model = args.backend_model
    run_dir = args.run_dir
    experiment_name = args.experiment_name
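    # NOTE: run_dir is parsed here but not used further below.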

    launcher_config = TorchrunConfig(nproc_per_node=2)  # launch via torchrun with 2 processes per node
    benchmark_config = InferenceConfig(latency=True, memory=True)  # measure inference latency and memory
    backend_config = PyTorchConfig(model=backend_model)  # PyTorch backend loading the requested model
    experiment_config = ExperimentConfig(
        experiment_name=experiment_name,
        benchmark=benchmark_config,
        launcher=launcher_config,
        backend=backend_config,
    )

    # run the benchmark and collect the results into a report
    benchmark_report = launch(experiment_config)

    # push artifacts to the hub
    experiment_config.push_to_hub("EnergyStarAI/benchmarksDebug")
    benchmark_report.push_to_hub("EnergyStarAI/benchmarksDebug")