{
  "results": {
    "arc_pt": {
      "acc": 0.24444444444444444,
      "acc_stderr": 0.012569442967524465,
      "acc_norm": 0.305982905982906,
      "acc_norm_stderr": 0.01347802974882896
    },
    "hellaswag_pt": {
      "acc": 0.35572651424856433,
      "acc_stderr": 0.004983557286529839,
      "acc_norm": 0.42832376205439376,
      "acc_norm_stderr": 0.005151187541296219
    },
    "truthfulqa_pt": {
      "mc1": 0.23857868020304568,
      "mc1_stderr": 0.015192910034567013,
      "mc2": 0.411729137477953,
      "mc2_stderr": 0.014880046850377518
    }
  },
  "versions": {
    "arc_pt": 0,
    "hellaswag_pt": 1,
    "truthfulqa_pt": 1
  },
  "config": {
    "model": "hf-auto",
    "model_args": "pretrained=/lustre/mlnvme/data/asen_hpc-mula/checkpoints-llama/slurm_job_17066349/step_21084",
    "batch_size": 1,
    "device": "cuda:0",
    "no_cache": false,
    "limit": null,
    "bootstrap_iters": 100000,
    "description_dict": {}
  }
}
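This is the output shape of the EleutherAI lm-evaluation-harness: the `config` block mirrors its CLI flags (`--model`, `--model_args`, `--batch_size`, `--device`, `--no_cache`, `--limit`), so a run like `python main.py --model hf-auto --model_args pretrained=<checkpoint> --tasks arc_pt,hellaswag_pt,truthfulqa_pt --device cuda:0 --batch_size 1` would produce a file of this form (the `*_pt` tasks are presumably a Portuguese adaptation of ARC, HellaSwag, and TruthfulQA). `acc_norm` is the harness's length-normalized accuracy, `mc1`/`mc2` are TruthfulQA's multiple-choice metrics, and each `*_stderr` field is a bootstrap standard error. As a minimal sketch for tabulating the metrics, assuming the JSON above is saved under the hypothetical filename `results.json`:

```python
import json

# Load the harness output. "results.json" is a placeholder name;
# point it at wherever this JSON was actually written.
with open("results.json") as f:
    report = json.load(f)

# Print each metric alongside its bootstrap standard error, when present.
for task, metrics in report["results"].items():
    for name, value in metrics.items():
        if name.endswith("_stderr"):
            continue  # standard errors are paired with their metric below
        stderr = metrics.get(f"{name}_stderr")
        if stderr is not None:
            print(f"{task:15s} {name:10s} {value:.4f} +/- {stderr:.4f}")
        else:
            print(f"{task:15s} {name:10s} {value:.4f}")
```

For `arc_pt`, for example, this prints `acc 0.2444 +/- 0.0126` and `acc_norm 0.3060 +/- 0.0135`.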