run_id | timestamp_utc | timestamp_day_hour_utc | model_name_or_path | unitxt_card | unitxt_recipe | quantization_type | quantization_bit_count | inference_runtime_s | generation_args | model_args | inference_engine | packages_versions | scores | num_gpu | device |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
afde2b1281839ba8e93a898a1b395537dbaec6575c4ac90648ea71f7cc3c0afe | 1,738,205,281,620 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.business_ethics | card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 31.943244 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.82, 'accuracy_ci_low': 0.73, 'accuracy_ci_high': 0.88, 'score_name': 'accuracy', 'score': 0.82, 'score_ci_high': 0.88, 'score_ci_low': 0.73, 'num_of_instances': 100} | 1 | a100_80gb |
b6c213f133d4469fb35d1beb41439e58e23388464cca76bd0fbb1b3edd19ee85 | 1,738,205,734,692 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.business_ethics | card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 58.104157 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.6, 'accuracy_ci_low': 0.5, 'accuracy_ci_high': 0.69, 'score_name': 'accuracy', 'score': 0.6, 'score_ci_high': 0.69, 'score_ci_low': 0.5, 'num_of_instances': 100} | 1 | a100_80gb |
d5a410855810376e62b16ed34f8608050e97aafe3ff588659b97e8d19fa5024c | 1,738,205,321,673 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.security_studies | card=cards.mmlu.security_studies,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 39.152262 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.67, 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.76, 'score_ci_low': 0.57, 'num_of_instances': 100} | 1 | a100_80gb |
979772fd890a5833bc7b4249f08eadbf173fc00955374450c2fc29fedbd07a74 | 1,738,205,485,806 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.security_studies | card=cards.mmlu.security_studies,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 44.055823 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.57, 'accuracy_ci_low': 0.46784838618019114, 'accuracy_ci_high': 0.67, 'score_name': 'accuracy', 'score': 0.57, 'score_ci_high': 0.67, 'score_ci_low': 0.46784838618019114, 'num_of_instances': 100} | 1 | a100_80gb |
1bf40cde94dbc6eef5e274336a2882151f8007a73b0b641353fe573626846cb6 | 1,738,205,352,756 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.economics | card=cards.mmlu_pro.economics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 29.402437 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.45, 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.55, 'score_name': 'accuracy', 'score': 0.45, 'score_ci_high': 0.55, 'score_ci_low': 0.36, 'num_of_instances': 100} | 1 | a100_80gb |
bd038550240ec551dea62d9947b2d191bd3942ca79fcba87d576f1319500446f | 1,738,205,374,339 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.other | card=cards.mmlu_pro.other,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_roman_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 20.33165 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.11, 'accuracy_ci_low': 0.06, 'accuracy_ci_high': 0.18, 'score_name': 'accuracy', 'score': 0.11, 'score_ci_high': 0.18, 'score_ci_low': 0.06, 'num_of_instances': 100} | 1 | a100_80gb |
2bee072566c888087491eae1c8f3184b425ca42f4ecbfee695629ed7a9bf50d6 | 1,738,205,405,352 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.sociology | card=cards.mmlu.sociology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 30.255831 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.66, 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.7636949094463171, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.7636949094463171, 'score_ci_low': 0.57, 'num_of_instances': 100} | 1 | a100_80gb |
2dea6defce780f56291f5e3088fbe75891388f2784d57b302b8f57cac36f9884 | 1,738,205,851,426 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.sociology | card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_greek_choicesSeparator_orLower_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 55.828814 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.68, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.68, 'score_ci_low': 0.48, 'num_of_instances': 100} | 1 | a100_80gb |
3e510cab6b603a74a635798836c8d1929ed40941312a27297889d58f11eef18a | 1,738,205,441,216 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.elementary_mathematics | card=cards.mmlu.elementary_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_capitals_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 34.924572 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.25, 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.34, 'score_name': 'accuracy', 'score': 0.25, 'score_ci_high': 0.34, 'score_ci_low': 0.17, 'num_of_instances': 100} | 1 | a100_80gb |
fbfb936ab71091325a1e698bcef0b90206c45d9617885059833634dd54e4bead | 1,738,205,532,856 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_psychology | card=cards.mmlu.professional_psychology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 45.313163 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.62, 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.71, 'score_name': 'accuracy', 'score': 0.62, 'score_ci_high': 0.71, 'score_ci_low': 0.51, 'num_of_instances': 100} | 1 | a100_80gb |
6b02f785d633f7e7b08a2247699ac67e1f6b746b35af0cbddac7da1b9de90d42 | 1,738,205,581,737 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.business | card=cards.mmlu_pro.business,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateHere.enumerator_greek_choicesSeparator_orLower_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 47.840601 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.17, 'accuracy_ci_low': 0.1, 'accuracy_ci_high': 0.26, 'score_name': 'accuracy', 'score': 0.17, 'score_ci_high': 0.26, 'score_ci_low': 0.1, 'num_of_instances': 100} | 1 | a100_80gb |
5ff45aa96557d186d770139351353dc2fefb74b3e311a5f24714ff656c1929eb | 1,738,205,794,915 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.abstract_algebra | card=cards.mmlu.abstract_algebra,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 59.302939 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.36, 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.46, 'score_name': 'accuracy', 'score': 0.36, 'score_ci_high': 0.46, 'score_ci_low': 0.27, 'num_of_instances': 100} | 1 | a100_80gb |
bb5a27a62013dd137ff2bc0bd5851fcfe00c1ebd9b4b9ca016c9df034e565274 | 1,738,205,926,857 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_geography | card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_roman_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 74.756265 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.73, 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.81, 'score_name': 'accuracy', 'score': 0.73, 'score_ci_high': 0.81, 'score_ci_low': 0.64, 'num_of_instances': 100} | 1 | a100_80gb |
b82512f63d3379458f79df9534ff3a48d85e8475f2b252f66e4c87969065c10a | 1,738,203,374,747 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_medicine | card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_greek_choicesSeparator_comma_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 344.675623 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.55, 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.55, 'score_ci_high': 0.65, 'score_ci_low': 0.46, 'num_of_instances': 100} | 1 | a100_80gb |
33168f5c3d2d0c328cc9acef30b3badc3b61e315bc6e24137c3e43947fe323b1 | 1,738,203,420,655 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_medicine | card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_roman_choicesSeparator_pipe_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 44.64054 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.53, 'accuracy_ci_low': 0.44, 'accuracy_ci_high': 0.63, 'score_name': 'accuracy', 'score': 0.53, 'score_ci_high': 0.63, 'score_ci_low': 0.44, 'num_of_instances': 100} | 1 | a100_80gb |
a45c8c1cfd0f8d337a308b2ee4f15d8bc85ccf84613fbe1d4e8db1739243e766 | 1,738,203,909,682 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_medicine | card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 36.159885 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.67, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.76, 'score_ci_low': 0.58, 'num_of_instances': 100} | 1 | a100_80gb |
640c80e7846d3ff1bc739903389e6bda540e3c9ee41ca437df4a6486bc0cc2c8 | 1,738,203,471,946 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.logical_fallacies | card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_greek_choicesSeparator_orLower_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 49.99798 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.54, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.64, 'score_name': 'accuracy', 'score': 0.54, 'score_ci_high': 0.64, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
901a08946f35122cfb362a6592629cb7c8c7db6484d0249ce5afbd4e94c8ff7e | 1,738,203,546,586 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.machine_learning | card=cards.mmlu.machine_learning,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 73.756691 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.33, 'accuracy_ci_low': 0.24, 'accuracy_ci_high': 0.43, 'score_name': 'accuracy', 'score': 0.33, 'score_ci_high': 0.43, 'score_ci_low': 0.24, 'num_of_instances': 100} | 1 | a100_80gb |
7ab5d7f8f122a41a01f73a2c538e8145f466c6fa152e3348ff11d7f6952fde0f | 1,738,203,620,310 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.security_studies | card=cards.mmlu.security_studies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_roman_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 72.641174 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.32, 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.41, 'score_name': 'accuracy', 'score': 0.32, 'score_ci_high': 0.41, 'score_ci_low': 0.23, 'num_of_instances': 100} | 1 | a100_80gb |
b9dd8bf933b4da0b55ef7d14dd66985aba6f6f6714c24b7ffec8bcd2bed06b15 | 1,738,203,687,071 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.miscellaneous | card=cards.mmlu.miscellaneous,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_numbers_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 65.924596 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.48989001646485025, 'accuracy_ci_high': 0.67, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.67, 'score_ci_low': 0.48989001646485025, 'num_of_instances': 100} | 1 | a100_80gb |
eae121e581dfc580802c6b49c610e6235f1c655caa17209c37284a865c2c6387 | 1,738,203,752,991 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.electrical_engineering | card=cards.mmlu.electrical_engineering,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 65.383535 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.67, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.67, 'score_ci_low': 0.48, 'num_of_instances': 100} | 1 | a100_80gb |
dd4f3568c7b0861b0700d2230120552a61ec1d5a2a45b3b7fb1ede9d58428ddd | 1,738,203,845,547 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.college_medicine | card=cards.mmlu.college_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 38.316546 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.49, 'accuracy_ci_low': 0.39, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.49, 'score_ci_high': 0.58, 'score_ci_low': 0.39, 'num_of_instances': 100} | 1 | a100_80gb |
63b8c7a29f14d25ad749608228c62c7daed2a6c90fcca27eb019056cd4508177 | 1,738,203,872,346 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.philosophy | card=cards.mmlu_pro.philosophy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesStructuredWithTopic.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 25.83806 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.21, 'accuracy_ci_low': 0.15, 'accuracy_ci_high': 0.31, 'score_name': 'accuracy', 'score': 0.21, 'score_ci_high': 0.31, 'score_ci_low': 0.15, 'num_of_instances': 100} | 1 | a100_80gb |
0d52be75b9dcd2c8ad641f153051beaf1ccea35abc20d33a3f1cb22a8f54f390 | 1,738,203,933,827 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.law | card=cards.mmlu_pro.law,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_roman_choicesSeparator_comma_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 22.873062 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.13, 'accuracy_ci_low': 0.08, 'accuracy_ci_high': 0.21, 'score_name': 'accuracy', 'score': 0.13, 'score_ci_high': 0.21, 'score_ci_low': 0.08, 'num_of_instances': 100} | 1 | a100_80gb |
aa8e0917df5fc6171d13c3f19667f6d69456c37615651850cd4d1a1e28be42e3 | 1,738,209,127,313 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.formal_logic | card=cards.mmlu.formal_logic,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 152.167226 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.44, 'accuracy_ci_low': 0.34, 'accuracy_ci_high': 0.54, 'score_name': 'accuracy', 'score': 0.44, 'score_ci_high': 0.54, 'score_ci_low': 0.34, 'num_of_instances': 100} | 1 | a100_80gb |
ad4e01f309fdbc01e52a7563b9544d55689d381acbd6e840739cc197d87b19fd | 1,738,209,309,562 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.us_foreign_policy | card=cards.mmlu.us_foreign_policy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 181.539279 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.8, 'accuracy_ci_low': 0.7194623006794447, 'accuracy_ci_high': 0.87, 'score_name': 'accuracy', 'score': 0.8, 'score_ci_high': 0.87, 'score_ci_low': 0.7194623006794447, 'num_of_instances': 100} | 1 | a100_80gb |
7f7c948478ef85f9089018cc2c4ceec1f5051a8f7b11f3f4a847d94e012ca4e2 | 1,738,209,132,213 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_accounting | card=cards.mmlu.professional_accounting,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 163.851988 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.54, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.63, 'score_name': 'accuracy', 'score': 0.54, 'score_ci_high': 0.63, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
7aa24ad97b3d59f10a9870bac980fcb027de01e389aa08a27fb81b4a1f993598 | 1,738,209,348,591 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.human_aging | card=cards.mmlu.human_aging,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 215.355528 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.55, 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.55, 'score_ci_high': 0.65, 'score_ci_low': 0.45, 'num_of_instances': 100} | 1 | a100_80gb |
ff3ee360f684abfd7dc6eaf2e8a399d49a25d97f37c0aa68a5afb931e555bbe1 | 1,738,208,903,451 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.us_foreign_policy | card=cards.mmlu.us_foreign_policy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 29.990567 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.81, 'accuracy_ci_low': 0.72, 'accuracy_ci_high': 0.88, 'score_name': 'accuracy', 'score': 0.81, 'score_ci_high': 0.88, 'score_ci_low': 0.72, 'num_of_instances': 100} | 1 | a100_80gb |
fa3560f83ad772c6109d48febc6c36da853658e69125ac4d04c3227a2113ba51 | 1,738,208,909,434 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.security_studies | card=cards.mmlu.security_studies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.164769 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.47, 'accuracy_ci_low': 0.37, 'accuracy_ci_high': 0.57, 'score_name': 'accuracy', 'score': 0.47, 'score_ci_high': 0.57, 'score_ci_low': 0.37, 'num_of_instances': 100} | 1 | a100_80gb |
852fdbe70b8b022f162a78c721f340b9696f5a527091b79ed01f5d749efb8f30 | 1,738,208,915,718 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.miscellaneous | card=cards.mmlu.miscellaneous,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_keyboard_choicesSeparator_space_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.476383 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.61, 'accuracy_ci_low': 0.52, 'accuracy_ci_high': 0.7, 'score_name': 'accuracy', 'score': 0.61, 'score_ci_high': 0.7, 'score_ci_low': 0.52, 'num_of_instances': 100} | 1 | a100_80gb |
d11774344f24246555b3772bcf5b8b71762286239b576fa1669cb90dd9a34ecf | 1,738,208,921,042 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.international_law | card=cards.mmlu.international_law,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.695033 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.5, 'accuracy_ci_low': 0.4, 'accuracy_ci_high': 0.59, 'score_name': 'accuracy', 'score': 0.5, 'score_ci_high': 0.59, 'score_ci_low': 0.4, 'num_of_instances': 100} | 1 | a100_80gb |
58de0ce3cf467b2df120d97c1e7b9a7128a70cd3263a1ef70f97b328fc454372 | 1,738,208,926,338 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_psychology | card=cards.mmlu.professional_psychology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_lowercase_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.604323 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.4, 'accuracy_ci_low': 0.3, 'accuracy_ci_high': 0.49, 'score_name': 'accuracy', 'score': 0.4, 'score_ci_high': 0.49, 'score_ci_low': 0.3, 'num_of_instances': 100} | 1 | a100_80gb |
b2f4494b50f594068b347362e846bf9c5a53e0438848a8e63ef3d21e1f0152e4 | 1,738,208,933,253 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.openbook_qa | card=cards.openbook_qa,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.232018 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.85, 'accuracy_ci_low': 0.77, 'accuracy_ci_high': 0.91, 'score_name': 'accuracy', 'score': 0.85, 'score_ci_high': 0.91, 'score_ci_low': 0.77, 'num_of_instances': 100} | 1 | a100_80gb |
f02e6552916b03afbdb2454765a0c275bf87d4bc199580097e3b59d6bb7f97b4 | 1,738,208,939,915 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_law | card=cards.mmlu.professional_law,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_greek_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.958459 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.47, 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.57, 'score_name': 'accuracy', 'score': 0.47, 'score_ci_high': 0.57, 'score_ci_low': 0.36, 'num_of_instances': 100} | 1 | a100_80gb |
77e2f7ee154b03d062e4451c02425cd146cd7c7dd5f32c21f595e577692af103 | 1,738,208,959,738 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_chemistry | card=cards.mmlu.college_chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 18.966929 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.56, 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.66, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.66, 'score_ci_low': 0.46, 'num_of_instances': 100} | 1 | a100_80gb |
ab0c823b17149ee90172af05f28683ffc5513f43387665635ec09e3d7b69eb55 | 1,738,208,965,216 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.logical_fallacies | card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_capitals_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.635083 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.7, 'accuracy_ci_low': 0.61, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.7, 'score_ci_high': 0.78, 'score_ci_low': 0.61, 'num_of_instances': 100} | 1 | a100_80gb |
a7eb0ceaad25565d3a8f3f016e36f4fd8504203c668f90c33caf09ecaad98fe0 | 1,738,208,975,331 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.elementary_mathematics | card=cards.mmlu.elementary_mathematics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.488804 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.55, 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.64, 'score_name': 'accuracy', 'score': 0.55, 'score_ci_high': 0.64, 'score_ci_low': 0.45, 'num_of_instances': 100} | 1 | a100_80gb |
2e2bbaa3f65cf075bd97739a53d72bf07569f9b98d756cc3945a9a5fef88d2bc | 1,738,209,112,666 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.business_ethics | card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 136.456843 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.54, 'accuracy_ci_low': 0.44, 'accuracy_ci_high': 0.64, 'score_name': 'accuracy', 'score': 0.54, 'score_ci_high': 0.64, 'score_ci_low': 0.44, 'num_of_instances': 100} | 1 | a100_80gb |
e05da5eabe6364abf36a7058d29007f8751d73de467b1503e978700442d470d5 | 1,738,208,696,071 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.security_studies | card=cards.mmlu.security_studies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.330251 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.56, 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.66, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.66, 'score_ci_low': 0.46, 'num_of_instances': 100} | 1 | a100_80gb |
c6ad1161fbee73a033681470cc2ea0f9b3e2b93d4d46172fa53d2a8b0bf8bd79 | 1,738,208,782,588 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.security_studies | card=cards.mmlu.security_studies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.932801 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.47, 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.57, 'score_name': 'accuracy', 'score': 0.47, 'score_ci_high': 0.57, 'score_ci_low': 0.38, 'num_of_instances': 100} | 1 | a100_80gb |
ae11d90f9957438639dff42d49a3626c9c8ac2e4b747e07a52a7f0b3eca6cfb8 | 1,738,208,708,522 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.formal_logic | card=cards.mmlu.formal_logic,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.596625 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.49, 'accuracy_ci_low': 0.4, 'accuracy_ci_high': 0.5836324917568834, 'score_name': 'accuracy', 'score': 0.49, 'score_ci_high': 0.5836324917568834, 'score_ci_low': 0.4, 'num_of_instances': 100} | 1 | a100_80gb |
2b5f13dd9fdbb03b4f63d2482ebbcf9fdf2142c27142055caff1c45d0e72b245 | 1,738,208,717,994 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.jurisprudence | card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_greek_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.483996 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.75, 'accuracy_ci_low': 0.66, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.75, 'score_ci_high': 0.83, 'score_ci_low': 0.66, 'num_of_instances': 100} | 1 | a100_80gb |
59f718ddae1438601c43861b2828f62b415da6b7598da8b4096a2d248612f6c1 | 1,738,208,723,465 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.econometrics | card=cards.mmlu.econometrics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.569209 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.42, 'accuracy_ci_low': 0.32, 'accuracy_ci_high': 0.52, 'score_name': 'accuracy', 'score': 0.42, 'score_ci_high': 0.52, 'score_ci_low': 0.32, 'num_of_instances': 100} | 1 | a100_80gb |
0012e32136df87eff53a18be241d4ba6501ab02b351e6e2a4ee7c5759f1c889c | 1,738,208,730,919 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.miscellaneous | card=cards.mmlu.miscellaneous,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.779324 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.75, 'accuracy_ci_low': 0.66, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.75, 'score_ci_high': 0.83, 'score_ci_low': 0.66, 'num_of_instances': 100} | 1 | a100_80gb |
e579bf2ac777e36b5ecd7bdac8c030d431562cc5cf4064d5fb0f0c807d3bdc13 | 1,738,208,757,650 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.philosophy | card=cards.mmlu_pro.philosophy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 26.063056 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.37, 'accuracy_ci_low': 0.29, 'accuracy_ci_high': 0.48, 'score_name': 'accuracy', 'score': 0.37, 'score_ci_high': 0.48, 'score_ci_low': 0.29, 'num_of_instances': 100} | 1 | a100_80gb |
0aa4a716a79bd7ce3d892a9efb72c9bc457ee200994b83eedfd6cd3485cd7793 | 1,738,208,764,047 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.economics | card=cards.mmlu_pro.economics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSACould.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.32272 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.34, 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.445341085536636, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.445341085536636, 'score_ci_low': 0.26, 'num_of_instances': 100} | 1 | a100_80gb |
2b84512f407b42ef8c9fd3a7077fcb30b938d184a7db4cf421e939e93d138bac | 1,738,208,769,546 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.hellaswag | card=cards.hellaswag,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.HellaSwag.MultipleChoiceTemplatesInstructionsState7.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.687003 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.54, 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.64, 'score_name': 'accuracy', 'score': 0.54, 'score_ci_high': 0.64, 'score_ci_low': 0.45, 'num_of_instances': 100} | 1 | a100_80gb |
77db99b2f29819a9268124c43560b96c2defaa8b5509232004c17cd769729a08 | 1,738,208,840,245 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.hellaswag | card=cards.hellaswag,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.HellaSwag.MultipleChoiceTemplatesInstructionsState4.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.426439 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.59, 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.69, 'score_name': 'accuracy', 'score': 0.59, 'score_ci_high': 0.69, 'score_ci_low': 0.49, 'num_of_instances': 100} | 1 | a100_80gb |
db0663d8a24ad261e4ce836af21aa4213a8e9aafb334f081f30d2ba7be74c94e | 1,738,208,776,774 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.history | card=cards.mmlu_pro.history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_capitals_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.520484 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.38, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.38, 'score_ci_low': 0.2, 'num_of_instances': 100} | 1 | a100_80gb |
97c5677a794aef63242263699d991c238f44436c5dd330789c9506f75ae3018e | 1,738,208,792,077 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.college_biology | card=cards.mmlu.college_biology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_capitals_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.643702 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.86, 'accuracy_ci_low': 0.78, 'accuracy_ci_high': 0.92, 'score_name': 'accuracy', 'score': 0.86, 'score_ci_high': 0.92, 'score_ci_low': 0.78, 'num_of_instances': 100} | 1 | a100_80gb |
ab1db29b060e5a714c6d6c897362d8d042ff3d7e79847918894784d524094eac | 1,738,208,802,446 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_accounting | card=cards.mmlu.professional_accounting,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.481503 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.48, 'accuracy_ci_low': 0.39, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.48, 'score_ci_high': 0.58, 'score_ci_low': 0.39, 'num_of_instances': 100} | 1 | a100_80gb |
480e9c1ef3a00f9b1a2bfb12c57071847cd0d079a322956b4160705b6856ffa3 | 1,738,208,808,395 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_government_and_politics | card=cards.mmlu.high_school_government_and_politics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_numbers_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.953137 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.91, 'accuracy_ci_low': 0.84, 'accuracy_ci_high': 0.96, 'score_name': 'accuracy', 'score': 0.91, 'score_ci_high': 0.96, 'score_ci_low': 0.84, 'num_of_instances': 100} | 1 | a100_80gb |
aba961d5f2a9234997e35c80e85c3c6df2d77a6cc8f92cf2d19923b89c3fc2fd | 1,738,208,817,544 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_european_history | card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.516677 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.74, 'accuracy_ci_low': 0.65, 'accuracy_ci_high': 0.82, 'score_name': 'accuracy', 'score': 0.74, 'score_ci_high': 0.82, 'score_ci_low': 0.65, 'num_of_instances': 100} | 1 | a100_80gb |
57e0f52af56605e8b98da62395c0e8a3750b84f7ea4e0b5247e4b54f4f9626f1 | 1,738,208,888,932 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_european_history | card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_greek_choicesSeparator_comma_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.035328 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.63, 'accuracy_ci_low': 0.53, 'accuracy_ci_high': 0.72, 'score_name': 'accuracy', 'score': 0.63, 'score_ci_high': 0.72, 'score_ci_low': 0.53, 'num_of_instances': 100} | 1 | a100_80gb |
6152c67803a616538eeffa5a1816d389d968e216799afa713cb14f3aac5879a8 | 1,738,208,906,782 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_european_history | card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.805433 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.62, 'accuracy_ci_low': 0.5191568435069958, 'accuracy_ci_high': 0.71, 'score_name': 'accuracy', 'score': 0.62, 'score_ci_high': 0.71, 'score_ci_low': 0.5191568435069958, 'num_of_instances': 100} | 1 | a100_80gb |
83b5aff0ea2e500678421dfd3424e65b033b56c97c833eaccb6bf3a7bc316aa8 | 1,738,208,822,202 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.medical_genetics | card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.803539 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.73, 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.82, 'score_name': 'accuracy', 'score': 0.73, 'score_ci_high': 0.82, 'score_ci_low': 0.64, 'num_of_instances': 100} | 1 | a100_80gb |
8f41e299264f15425c1134761dd2814dc1973605ec59965102ab9d59606bf9ba | 1,738,208,829,018 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_us_history | card=cards.mmlu.high_school_us_history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.273439 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.69, 'accuracy_ci_low': 0.59, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.69, 'score_ci_high': 0.78, 'score_ci_low': 0.59, 'num_of_instances': 100} | 1 | a100_80gb |
037699b1ba444a0599b794a83ca5da4d2bb5d1a3e04b7118f7c840dd0b6c4945 | 1,738,208,846,929 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.physics | card=cards.mmlu_pro.physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_numbers_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.596188 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.06, 'accuracy_ci_low': 0.02, 'accuracy_ci_high': 0.12, 'score_name': 'accuracy', 'score': 0.06, 'score_ci_high': 0.12, 'score_ci_low': 0.02, 'num_of_instances': 100} | 1 | a100_80gb |
dada416787a515edc59c6b2b04781783d57288251e726edbd79027fdcc2e8db8 | 1,738,208,863,951 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_world_history | card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 16.3438 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.75, 'accuracy_ci_low': 0.66, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.75, 'score_ci_high': 0.83, 'score_ci_low': 0.66, 'num_of_instances': 100} | 1 | a100_80gb |
18d0f6875459a4492b6375a48c9414095be2e39a9ea59874899f93873896be45 | 1,738,208,876,355 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.elementary_mathematics | card=cards.mmlu.elementary_mathematics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 10.798161 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.61, 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.7, 'score_name': 'accuracy', 'score': 0.61, 'score_ci_high': 0.7, 'score_ci_low': 0.51, 'num_of_instances': 100} | 1 | a100_80gb |
752e0c0c87017ce71083fc149a2fe9b00473f909ac7d7b261fb8f5ee714e845b | 1,738,208,881,305 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.moral_disputes | card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.129815 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.61, 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.7, 'score_name': 'accuracy', 'score': 0.61, 'score_ci_high': 0.7, 'score_ci_low': 0.51, 'num_of_instances': 100} | 1 | a100_80gb |
d7ce49986729df42d08153024f94008c89b06af09a6b2ca9c4a960958a88770c | 1,738,208,897,593 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.logical_fallacies | card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.813275 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.68, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.77, 'score_name': 'accuracy', 'score': 0.68, 'score_ci_high': 0.77, 'score_ci_low': 0.58, 'num_of_instances': 100} | 1 | a100_80gb |
a97b0fa276e98c10a9c742f45bad29bf32356cf55578d175a78c1b5c0a1b2daf | 1,738,208,913,083 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.biology | card=cards.mmlu_pro.biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSACould.enumerator_capitals_choicesSeparator_space_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.454488 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.36, 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.46, 'score_name': 'accuracy', 'score': 0.36, 'score_ci_high': 0.46, 'score_ci_low': 0.27, 'num_of_instances': 100} | 1 | a100_80gb |
a5cfb7380f5dc8d24e32f9184647b148158e3eb1075b5475557fce279c8a4fb3 | 1,738,208,917,924 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.us_foreign_policy | card=cards.mmlu.us_foreign_policy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_capitals_choicesSeparator_space_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.987682 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.69, 'accuracy_ci_low': 0.59, 'accuracy_ci_high': 0.77, 'score_name': 'accuracy', 'score': 0.69, 'score_ci_high': 0.77, 'score_ci_low': 0.59, 'num_of_instances': 100} | 1 | a100_80gb |
d722c452f4aecb989b4654d9e4f34ec8b00a6d156ea12575abad9a924bd0e960 | 1,738,208,969,692 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.us_foreign_policy | card=cards.mmlu.us_foreign_policy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.398983 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.8, 'accuracy_ci_low': 0.71, 'accuracy_ci_high': 0.87, 'score_name': 'accuracy', 'score': 0.8, 'score_ci_high': 0.87, 'score_ci_low': 0.71, 'num_of_instances': 100} | 1 | a100_80gb |
13f2c6ad28ef4acbb3591a50e051d138dcb6700104920d839feebdaded5492f7 | 1,738,208,922,743 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.astronomy | card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.229579 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.65, 'accuracy_ci_low': 0.55, 'accuracy_ci_high': 0.74, 'score_name': 'accuracy', 'score': 0.65, 'score_ci_high': 0.74, 'score_ci_low': 0.55, 'num_of_instances': 100} | 1 | a100_80gb |
2d834c6989e08ae1fa46579828cd105f621ee00758b54ef213305e0b2d29d60b | 1,738,208,937,974 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.high_school_computer_science | card=cards.mmlu.high_school_computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_numbers_choicesSeparator_comma_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 14.618003 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.65, 'accuracy_ci_low': 0.55, 'accuracy_ci_high': 0.74, 'score_name': 'accuracy', 'score': 0.65, 'score_ci_high': 0.74, 'score_ci_low': 0.55, 'num_of_instances': 100} | 1 | a100_80gb |
87fb5d5d0ad47352a2dbea4b57eb8c843b0ad589357f9f4f469dd7765a74f064 | 1,738,208,945,366 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.openbook_qa | card=cards.openbook_qa,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesInstructionsProSACould.enumerator_capitals_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.143901 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.75, 'accuracy_ci_low': 0.67, 'accuracy_ci_high': 0.84, 'score_name': 'accuracy', 'score': 0.75, 'score_ci_high': 0.84, 'score_ci_low': 0.67, 'num_of_instances': 100} | 1 | a100_80gb |
66df28d7cc3646f041d365c822842c381da27618f677c3ed296201e70a10d130 | 1,738,208,951,155 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.professional_psychology | card=cards.mmlu.professional_psychology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.23488 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.54, 'accuracy_ci_low': 0.44, 'accuracy_ci_high': 0.63, 'score_name': 'accuracy', 'score': 0.54, 'score_ci_high': 0.63, 'score_ci_low': 0.44, 'num_of_instances': 100} | 1 | a100_80gb |
3ad764be347e5dd188bb87485c6805451d079ba5c2ee7185f3b7371e58cfd36a | 1,738,208,960,540 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.electrical_engineering | card=cards.mmlu.electrical_engineering,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_orLower_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.752513 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.67, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.67, 'score_ci_low': 0.49, 'num_of_instances': 100} | 1 | a100_80gb |
4456e8bb0880a5da4bae77154268a0142ee7cba73e3053c0895e00b526aa5104 | 1,738,208,974,533 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.970438 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.26, 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.34, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.34, 'score_ci_low': 0.18, 'num_of_instances': 100} | 1 | a100_80gb |
345b1cead4d330fbce0024604d948b345337c60cb5f1b5c3204d3f3b3e60f04e | 1,738,209,201,018 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_roman_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 114.435216 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.38, 'accuracy_ci_low': 0.3, 'accuracy_ci_high': 0.49, 'score_name': 'accuracy', 'score': 0.38, 'score_ci_high': 0.49, 'score_ci_low': 0.3, 'num_of_instances': 100} | 1 | a100_80gb |
106ee84f9a1cf538b9c6b6830039c97d0759508edf6a848ce40c4ae1ef1b82b6 | 1,738,209,085,818 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu.virology | card=cards.mmlu.virology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_roman_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 110.759276 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.52, 'accuracy_ci_low': 0.41, 'accuracy_ci_high': 0.61, 'score_name': 'accuracy', 'score': 0.52, 'score_ci_high': 0.61, 'score_ci_low': 0.41, 'num_of_instances': 100} | 1 | a100_80gb |
97f30147468304b3fd399c180e9d3439623e562e40391a7be8dd961e7a63ac4e | 1,738,209,352,916 | 1,738,206,000,000 | meta-llama_Meta-Llama-3-8B-Instruct | cards.mmlu_pro.health | card=cards.mmlu_pro.health,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_pipe_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 151.377775 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.38, 'accuracy_ci_low': 0.29, 'accuracy_ci_high': 0.48, 'score_name': 'accuracy', 'score': 0.38, 'score_ci_high': 0.48, 'score_ci_low': 0.29, 'num_of_instances': 100} | 1 | a100_80gb |
d09b46bba2a810d8ef1726b51e20c8634d6171f9431924a6b159dbd363cc0b18 | 1,738,208,640,628 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_geography | card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_numbers_choicesSeparator_comma_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 33.904268 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.67, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.76, 'score_ci_low': 0.58, 'num_of_instances': 100} | 1 | a100_80gb |
fcea57a1f0fb779f88247a87390232a479d6b72a02bb5426109270946872a4a0 | 1,738,208,644,912 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_mathematics | card=cards.mmlu.high_school_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_roman_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.500743 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.18, 'accuracy_ci_low': 0.11, 'accuracy_ci_high': 0.2664660852519603, 'score_name': 'accuracy', 'score': 0.18, 'score_ci_high': 0.2664660852519603, 'score_ci_low': 0.11, 'num_of_instances': 100} | 1 | a100_80gb |
d3e74a9ad93f0bc56c8ad615dcb3fbee6a34567cce49f873892e30cc0364fe42 | 1,738,208,972,214 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_mathematics | card=cards.mmlu.high_school_mathematics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.676302 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.22, 'accuracy_ci_low': 0.14, 'accuracy_ci_high': 0.3, 'score_name': 'accuracy', 'score': 0.22, 'score_ci_high': 0.3, 'score_ci_low': 0.14, 'num_of_instances': 100} | 1 | a100_80gb |
eb0613a48b9c831bd92cbb17289b22c1ad25d96489569ce379b15ec9fefd0bc2 | 1,738,208,651,015 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.jurisprudence | card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.529728 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.62, 'accuracy_ci_low': 0.52, 'accuracy_ci_high': 0.71, 'score_name': 'accuracy', 'score': 0.62, 'score_ci_high': 0.71, 'score_ci_low': 0.52, 'num_of_instances': 100} | 1 | a100_80gb |
a64dcf8c5d44e067419c77893f8739e5581b2e58aa216982d2ef74b32bbd918b | 1,738,208,750,884 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.jurisprudence | card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_roman_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.908258 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.61, 'accuracy_ci_low': 0.52, 'accuracy_ci_high': 0.7, 'score_name': 'accuracy', 'score': 0.61, 'score_ci_high': 0.7, 'score_ci_low': 0.52, 'num_of_instances': 100} | 1 | a100_80gb |
bef49aaba69cc034786b9b1ca94dfd1a5c18131761938de630e0e098ab39bffc | 1,738,208,657,184 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.human_sexuality | card=cards.mmlu.human_sexuality,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.292605 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.68, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.68, 'score_ci_low': 0.49, 'num_of_instances': 100} | 1 | a100_80gb |
61313d77ac59ed53d71e93ce1a9076c588eb9c1f71490b7f6e909b883bffab51 | 1,738,208,834,746 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.human_sexuality | card=cards.mmlu.human_sexuality,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.09034 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.55, 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.55, 'score_ci_high': 0.65, 'score_ci_low': 0.45, 'num_of_instances': 100} | 1 | a100_80gb |
86a21a07a856c5bb84ad159d2ac8efdfb959bb4246cbe72eaf39bcef8b179c91 | 1,738,208,663,288 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.virology | card=cards.mmlu.virology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_roman_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.299625 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.69, 'accuracy_ci_low': 0.59, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.69, 'score_ci_high': 0.78, 'score_ci_low': 0.59, 'num_of_instances': 100} | 1 | a100_80gb |
b2067a5269194271fa62fb92c2a68e8a2d7cede44d5caf167fce325681826b52 | 1,738,208,805,111 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.virology | card=cards.mmlu.virology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.680387 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.34, 'accuracy_ci_low': 0.2529910828872453, 'accuracy_ci_high': 0.44, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.44, 'score_ci_low': 0.2529910828872453, 'num_of_instances': 100} | 1 | a100_80gb |
e072f2953e53e6ede75b3846e1de7b5b2480946043f5a82228b3125202f23982 | 1,738,208,668,307 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_psychology | card=cards.mmlu.professional_psychology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.239461 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.42, 'accuracy_ci_low': 0.33, 'accuracy_ci_high': 0.52, 'score_name': 'accuracy', 'score': 0.42, 'score_ci_high': 0.52, 'score_ci_low': 0.33, 'num_of_instances': 100} | 1 | a100_80gb |
25621917e888299b9e99b9785997a3fd7304e7a679a8fe7118d33680533e4c7b | 1,738,208,673,446 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.biology | card=cards.mmlu_pro.biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.450628 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.25, 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.35, 'score_name': 'accuracy', 'score': 0.25, 'score_ci_high': 0.35, 'score_ci_low': 0.17, 'num_of_instances': 100} | 1 | a100_80gb |
02294c6f9a0e7c8510b3734ddbbffe6bf3c1d07c1068800e0d33765d1953c0c0 | 1,738,208,687,833 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_law | card=cards.mmlu.professional_law,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 13.435408 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.41, 'accuracy_ci_low': 0.3171097670622741, 'accuracy_ci_high': 0.52, 'score_name': 'accuracy', 'score': 0.41, 'score_ci_high': 0.52, 'score_ci_low': 0.3171097670622741, 'num_of_instances': 100} | 1 | a100_80gb |
b82d30c4b3c58bdd8f8687bd3a1ba57b71e61029e6b3d4ad04f72ee2d94716e8 | 1,738,208,861,348 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_law | card=cards.mmlu.professional_law,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.530562 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.25, 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.34, 'score_name': 'accuracy', 'score': 0.25, 'score_ci_high': 0.34, 'score_ci_low': 0.17, 'num_of_instances': 100} | 1 | a100_80gb |
142ca4dc699d6a51553544c7717bb0335d79792f0fc97cc6c73325c0800db446 | 1,738,208,708,373 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.econometrics | card=cards.mmlu.econometrics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_greek_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 18.610314 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.18, 'accuracy_ci_low': 0.12, 'accuracy_ci_high': 0.27, 'score_name': 'accuracy', 'score': 0.18, 'score_ci_high': 0.27, 'score_ci_low': 0.12, 'num_of_instances': 100} | 1 | a100_80gb |
2f7c2fb7f385e90b695ad281f135c7d93c50bd7993ed8578876014ecbc3db6ad | 1,738,208,820,100 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.econometrics | card=cards.mmlu.econometrics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.212552 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.29, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.39, 'score_name': 'accuracy', 'score': 0.29, 'score_ci_high': 0.39, 'score_ci_low': 0.21, 'num_of_instances': 100} | 1 | a100_80gb |
3329eb23f9f8ec8e594807cdbf94c4347d65a78e87aecaf7bd6e293aef3728bf | 1,738,208,928,379 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.econometrics | card=cards.mmlu.econometrics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_capitals_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.415285 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.88, 'accuracy_ci_low': 0.81, 'accuracy_ci_high': 0.9362936834299296, 'score_name': 'accuracy', 'score': 0.88, 'score_ci_high': 0.9362936834299296, 'score_ci_low': 0.81, 'num_of_instances': 100} | 1 | a100_80gb |
0fe0238f84f980d4abd85d48f97454e3d09866be24ffe78d2bad6416a1b1a53b | 1,738,208,715,473 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.business_ethics | card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.463167 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.59, 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.68, 'score_name': 'accuracy', 'score': 0.59, 'score_ci_high': 0.68, 'score_ci_low': 0.49, 'num_of_instances': 100} | 1 | a100_80gb |
3e318502422fd998cca1556a05d285c63d19cfbe40b06de0efb628c98cd85f3b | 1,738,208,783,824 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.business_ethics | card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.53132 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.83, 'accuracy_ci_low': 0.75, 'accuracy_ci_high': 0.9, 'score_name': 'accuracy', 'score': 0.83, 'score_ci_high': 0.9, 'score_ci_low': 0.75, 'num_of_instances': 100} | 1 | a100_80gb |
eab390293816c2f54e4f154a3748b1e2b50572de72a8ca5a4d5f09236bd01c9b | 1,738,208,934,983 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.business_ethics | card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.252376 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.53, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.62, 'score_name': 'accuracy', 'score': 0.53, 'score_ci_high': 0.62, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
76cc5d3905653c1f41b8d12d37ccec4c76948e667b9749340f3ef2bd9aee72a3 | 1,738,208,724,655 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.engineering | card=cards.mmlu_pro.engineering,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.247718 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.2, 'accuracy_ci_low': 0.13, 'accuracy_ci_high': 0.28, 'score_name': 'accuracy', 'score': 0.2, 'score_ci_high': 0.28, 'score_ci_low': 0.13, 'num_of_instances': 100} | 1 | a100_80gb |
b647f9acdc89ee82de8475eb5ae87127c36af0b15d16b52fd43bac64b7580280 | 1,738,208,729,094 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.conceptual_physics | card=cards.mmlu.conceptual_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.270372 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.38, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.38, 'score_ci_low': 0.2, 'num_of_instances': 100} | 1 | a100_80gb |
fa5b5cffa766f0c75792d81e48268afc89feb6cd60aa262dc63a0fb8f2d7b56b | 1,738,208,869,463 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.conceptual_physics | card=cards.mmlu.conceptual_physics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_greek_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.229435 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.46, 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.46, 'score_ci_high': 0.58, 'score_ci_low': 0.38, 'num_of_instances': 100} | 1 | a100_80gb |
8d5bfc2aa100670112a39db24ede48e752724ecc37441fd233089621d73a1a9b | 1,738,208,736,692 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.ai2_arc.arc_challenge | card=cards.ai2_arc.arc_challenge,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.AI2_ARC.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.03431 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.67, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.76, 'score_ci_low': 0.58, 'num_of_instances': 100} | 1 | a100_80gb |
fdb841454fb0ea99ab45933787520dcf46ec9096c537a28e48b1233db9e6fc58 | 1,738,208,893,557 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.ai2_arc.arc_challenge | card=cards.ai2_arc.arc_challenge,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.AI2_ARC.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_roman_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.547322 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.48, 'accuracy_ci_low': 0.3802783002086529, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.48, 'score_ci_high': 0.58, 'score_ci_low': 0.3802783002086529, 'num_of_instances': 100} | 1 | a100_80gb |
6cd2b95044fed94e8c00977edf5beec3ed88e18826c21d7fc3afcec286e216da | 1,738,208,921,135 | 1,738,206,000,000 | meta-llama_Llama-3.2-3B-Instruct | cards.ai2_arc.arc_challenge | card=cards.ai2_arc.arc_challenge,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.AI2_ARC.MultipleChoiceTemplatesInstructionsStateHere.enumerator_capitals_choicesSeparator_comma_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.76261 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.66, 'accuracy_ci_low': 0.55, 'accuracy_ci_high': 0.75, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.75, 'score_ci_low': 0.55, 'num_of_instances': 100} | 1 | a100_80gb |
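Each row above is a single pipe-separated record whose later fields are themselves serialized structures: two JSON objects holding the generation and vLLM engine settings, a JSON object with package versions, and a Python-literal dict with the accuracy scores. For anyone working with this dump as raw text, the following is a minimal Python sketch; the `parse_row` helper and the positional field labels are illustrative assumptions inferred from the visible row contents, not an official loader for this dataset.

```python
# Minimal sketch for turning one raw row of this dump into usable values.
# Assumptions: rows are available verbatim as pipe-separated text, and the
# field positions match the rows shown above (inferred, not authoritative).
import ast
import json

def parse_row(line: str) -> dict:
    """Split a row on ' | ' and decode its serialized columns."""
    fields = [f.strip() for f in line.strip().rstrip("|").strip().split(" | ")]
    return {
        "run_id": fields[0],                       # leading hash
        "model": fields[3],                        # e.g. meta-llama_Llama-3.2-3B-Instruct
        "card": fields[4],                         # e.g. cards.mmlu.econometrics
        "recipe": fields[5],                       # full unitxt recipe string
        "runtime_s": float(fields[8]),             # inference runtime in seconds
        "generation_args": json.loads(fields[9]),  # JSON, double-quoted
        "model_args": json.loads(fields[10]),      # JSON, double-quoted
        "scores": ast.literal_eval(fields[13]),    # Python-literal dict, single-quoted
    }

# Usage: feed in one full line from the dump (elided here) and read the score.
# rec = parse_row(raw_line)
# print(rec["card"], rec["scores"]["accuracy"], rec["scores"]["accuracy_ci_low"])
```

Note that the score column uses single-quoted Python-dict syntax rather than JSON, which is why the sketch decodes it with `ast.literal_eval` instead of `json.loads`.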