run_id | timestamp_utc | timestamp_day_hour_utc | model_name_or_path | unitxt_card | unitxt_recipe | quantization_type | quantization_bit_count | inference_runtime_s | generation_args | model_args | inference_engine | packages_versions | scores | num_gpu | device |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9f400daf723e2e7253893174d5b711d8cbe72b3416ec35e25790a3574ba9d2ba | 1,738,203,439,217 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 13.817746 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.38, 'accuracy_ci_low': 0.29, 'accuracy_ci_high': 0.48, 'score_name': 'accuracy', 'score': 0.38, 'score_ci_high': 0.48, 'score_ci_low': 0.29, 'num_of_instances': 100} | 1 | a100_80gb |
1b9e21904ecdc188da954c2f23eec83cf19dd22597d89cbe5bc2fbf7cf59cb15 | 1,738,204,545,146 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 76.844325 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.26, 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.35, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.35, 'score_ci_low': 0.18, 'num_of_instances': 100} | 1 | a100_80gb |
ff95bb53f33eb9e65c81453b439644d265c054e13971399208ff0ee34d7987c6 | 1,738,205,043,380 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_roman_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 31.105345 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.26, 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.35, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.35, 'score_ci_low': 0.18, 'num_of_instances': 100} | 1 | a100_80gb |
0ca5cae1720fad226a93b8b966de3ee8416457ae9d3e4db707c0029b798ffa54 | 1,738,205,765,613 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_capitals_choicesSeparator_space_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 51.995716 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.3, 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.39, 'score_name': 'accuracy', 'score': 0.3, 'score_ci_high': 0.39, 'score_ci_low': 0.22, 'num_of_instances': 100} | 1 | a100_80gb |
9c6f35c958967604410c3567a39aec2f63341ddaf66a021ff725997142922272 | 1,738,203,490,805 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.public_relations | card=cards.mmlu.public_relations,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 50.394291 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.99, 'accuracy_ci_low': 0.95, 'accuracy_ci_high': 1.0, 'score_name': 'accuracy', 'score': 0.99, 'score_ci_high': 1.0, 'score_ci_low': 0.95, 'num_of_instances': 100} | 1 | a100_80gb |
eedd64b4c62a23e241b826d8e9567b1e66506d1bfeb1c83b1aea410e9fa12488 | 1,738,204,889,350 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.public_relations | card=cards.mmlu.public_relations,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_greek_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 19.390764 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.56, 'accuracy_ci_low': 0.45682502076003406, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.65, 'score_ci_low': 0.45682502076003406, 'num_of_instances': 100} | 1 | a100_80gb |
7a3935de74cbb8e690235cd5d9862f72e69d02a2803de1f0a55e80d679c483ad | 1,738,203,556,841 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_government_and_politics | card=cards.mmlu.high_school_government_and_politics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 65.228 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.75, 'accuracy_ci_low': 0.66, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.75, 'score_ci_high': 0.83, 'score_ci_low': 0.66, 'num_of_instances': 100} | 1 | a100_80gb |
e990368fb5b4586166f3052a190b7002d5dafc39e62056c781fef8d9c4b38395 | 1,738,203,624,000 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.econometrics | card=cards.mmlu.econometrics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 66.517862 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.25, 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.34, 'score_name': 'accuracy', 'score': 0.25, 'score_ci_high': 0.34, 'score_ci_low': 0.17, 'num_of_instances': 100} | 1 | a100_80gb |
e2dcdbac1b950f5954ff7b070fb7e57c904b70d1fb3b6b8f68473ffe7837c12a | 1,738,203,742,642 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.nutrition | card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 57.466014 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.33, 'accuracy_ci_low': 0.24, 'accuracy_ci_high': 0.42, 'score_name': 'accuracy', 'score': 0.33, 'score_ci_high': 0.42, 'score_ci_low': 0.24, 'num_of_instances': 100} | 1 | a100_80gb |
75c6ec1c7c3ccac00f1d5fb949857424bc89c8e90442b6b940ae4d1fb6912957 | 1,738,203,927,515 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.nutrition | card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 24.095107 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.48, 'accuracy_ci_low': 0.39, 'accuracy_ci_high': 0.59, 'score_name': 'accuracy', 'score': 0.48, 'score_ci_high': 0.59, 'score_ci_low': 0.39, 'num_of_instances': 100} | 1 | a100_80gb |
8722895fb9ed3b1cb0876ed6385b13a3c3e34ea8295a4a3cbc372c032067da6d | 1,738,205,110,359 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.nutrition | card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 29.733636 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.55, 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.55, 'score_ci_high': 0.65, 'score_ci_low': 0.45, 'num_of_instances': 100} | 1 | a100_80gb |
8e1780a51ef802f7b3b8f52075595d3b982c7bb5a713c14ccc316bb8f4bb776f | 1,738,203,799,115 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_macroeconomics | card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 55.868695 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.46, 'accuracy_ci_low': 0.37, 'accuracy_ci_high': 0.55, 'score_name': 'accuracy', 'score': 0.46, 'score_ci_high': 0.55, 'score_ci_low': 0.37, 'num_of_instances': 100} | 1 | a100_80gb |
afa9cd7a6ab0bc3abd1d91ae781eedc946e3c5579f89da3eccb123118bedaeef | 1,738,205,425,847 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_macroeconomics | card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 31.93941 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.36, 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.46, 'score_name': 'accuracy', 'score': 0.36, 'score_ci_high': 0.46, 'score_ci_low': 0.27, 'num_of_instances': 100} | 1 | a100_80gb |
32e2c3c980f93f8ec3e185e8ee5d86c654e3293e409f73bc631512d8df1e0220 | 1,738,203,839,467 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_microeconomics | card=cards.mmlu.high_school_microeconomics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_greek_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 39.727965 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.34, 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.44, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.44, 'score_ci_low': 0.26, 'num_of_instances': 100} | 1 | a100_80gb |
51d054db3ef63571288fbdf2b937ae367962c9d12f35addae9ad88c380a14111 | 1,738,205,656,925 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_microeconomics | card=cards.mmlu.high_school_microeconomics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 48.337127 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.47, 'accuracy_ci_high': 0.67, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.67, 'score_ci_low': 0.47, 'num_of_instances': 100} | 1 | a100_80gb |
225d23a39ca2181bb0d0f5d91a4eb3559d562b846f2536d7e6468528dbd69a2a | 1,738,203,873,058 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.college_computer_science | card=cards.mmlu.college_computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 32.981755 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.3, 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.4, 'score_name': 'accuracy', 'score': 0.3, 'score_ci_high': 0.4, 'score_ci_low': 0.22, 'num_of_instances': 100} | 1 | a100_80gb |
95e8f557a9ed0883478bd4ab43c0b54190832bcf7c8f6b907c1a7b9fff859132 | 1,738,204,988,325 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.college_computer_science | card=cards.mmlu.college_computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_roman_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 22.887208 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.76, 'accuracy_ci_low': 0.6733258635511192, 'accuracy_ci_high': 0.84, 'score_name': 'accuracy', 'score': 0.76, 'score_ci_high': 0.84, 'score_ci_low': 0.6733258635511192, 'num_of_instances': 100} | 1 | a100_80gb |
05a7dfebd1ba63bf023e154b991784a504dabcd9d30c6387070155606fb6ebd3 | 1,738,203,902,737 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.abstract_algebra | card=cards.mmlu.abstract_algebra,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 28.583069 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.27, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.36, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.36, 'score_ci_low': 0.19, 'num_of_instances': 100} | 1 | a100_80gb |
d868305e4268908b0c1a7d1819ad995f9bea3bdc93596fa0b841fbc992eb0165 | 1,738,203,950,553 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_physics | card=cards.mmlu.high_school_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_numbers_choicesSeparator_comma_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 22.450428 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.29, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.39, 'score_name': 'accuracy', 'score': 0.29, 'score_ci_high': 0.39, 'score_ci_low': 0.21, 'num_of_instances': 100} | 1 | a100_80gb |
16118feeb7a377f6eb6733d38d07dc96a4b0e128acf890831aed8f6f05840dfd | 1,738,205,538,771 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_physics | card=cards.mmlu.high_school_physics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 40.575189 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.33, 'accuracy_ci_low': 0.24, 'accuracy_ci_high': 0.43, 'score_name': 'accuracy', 'score': 0.33, 'score_ci_high': 0.43, 'score_ci_low': 0.24, 'num_of_instances': 100} | 1 | a100_80gb |
edf65344c24a01d68d45792858a21cbc4ec8c30d565bdf419ee89e1485fbdc0b | 1,738,203,971,262 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.college_biology | card=cards.mmlu.college_biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_capitals_choicesSeparator_comma_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 20.076337 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.63, 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.73, 'score_name': 'accuracy', 'score': 0.63, 'score_ci_high': 0.73, 'score_ci_low': 0.54, 'num_of_instances': 100} | 1 | a100_80gb |
493fc30a979ea4a408333a8a549d9804131d12bc8d7fb9309eedb5a75c3a4169 | 1,738,204,022,112 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 22.771725 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.26, 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.35, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.35, 'score_ci_low': 0.18, 'num_of_instances': 100} | 1 | a100_80gb |
75f75e9ff8613c2bf56d0869a8a8bfd06723674714a9f5602ed1699efdfe8b32 | 1,738,204,819,595 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_numbers_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 25.552147 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.34, 'accuracy_ci_low': 0.24, 'accuracy_ci_high': 0.44, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.44, 'score_ci_low': 0.24, 'num_of_instances': 100} | 1 | a100_80gb |
0316e6723638792c0696d0042fe37ea74cfccdd4158a69227baf711e66f48212 | 1,738,205,011,769 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 22.376907 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.24, 'accuracy_ci_low': 0.16, 'accuracy_ci_high': 0.33, 'score_name': 'accuracy', 'score': 0.24, 'score_ci_high': 0.33, 'score_ci_low': 0.16, 'num_of_instances': 100} | 1 | a100_80gb |
a21abd10f990e84b3e7adbbcee0a5ce98a98a16445817d529985f5bf56a67e2d | 1,738,204,048,045 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.law | card=cards.mmlu_pro.law,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateHere.enumerator_greek_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 25.456422 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.17, 'accuracy_ci_low': 0.1, 'accuracy_ci_high': 0.25, 'score_name': 'accuracy', 'score': 0.17, 'score_ci_high': 0.25, 'score_ci_low': 0.1, 'num_of_instances': 100} | 1 | a100_80gb |
491f985b8037765586bd5e85bd21c1807880d03b0322ac13b0fa6cf8ff42a348 | 1,738,204,939,642 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.law | card=cards.mmlu_pro.law,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 26.859978 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.19, 'accuracy_ci_low': 0.12, 'accuracy_ci_high': 0.28, 'score_name': 'accuracy', 'score': 0.19, 'score_ci_high': 0.28, 'score_ci_low': 0.12, 'num_of_instances': 100} | 1 | a100_80gb |
543b08b01cbd780c68e144512b14e580e5c053c015075d688784b3a83e3cb8b5 | 1,738,204,092,005 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_biology | card=cards.mmlu.high_school_biology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_keyboard_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 42.774663 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.87, 'accuracy_ci_low': 0.79, 'accuracy_ci_high': 0.93, 'score_name': 'accuracy', 'score': 0.87, 'score_ci_high': 0.93, 'score_ci_low': 0.79, 'num_of_instances': 100} | 1 | a100_80gb |
9726e381c0a589d55d24e6ad35c136508bfc28bfbfa0688c44b43e3e9b2a3aa9 | 1,738,204,145,435 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.clinical_knowledge | card=cards.mmlu.clinical_knowledge,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_roman_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 52.602367 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.79, 'accuracy_ci_low': 0.7, 'accuracy_ci_high': 0.87, 'score_name': 'accuracy', 'score': 0.79, 'score_ci_high': 0.87, 'score_ci_low': 0.7, 'num_of_instances': 100} | 1 | a100_80gb |
0777d6b47379c79714bbdd3c8f718560d55f3de03e4847ccebfeb6c7985a401b | 1,738,204,200,924 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.us_foreign_policy | card=cards.mmlu.us_foreign_policy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_roman_choicesSeparator_orLower_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 54.658835 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.72, 'accuracy_ci_low': 0.62, 'accuracy_ci_high': 0.8, 'score_name': 'accuracy', 'score': 0.72, 'score_ci_high': 0.8, 'score_ci_low': 0.62, 'num_of_instances': 100} | 1 | a100_80gb |
749dff45c58a3ad170b42a105536d7942dca4755c31f20ca4f34fd15ffc050b7 | 1,738,204,246,757 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.philosophy | card=cards.mmlu_pro.philosophy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 44.946781 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.06, 'accuracy_ci_low': 0.02246489648764566, 'accuracy_ci_high': 0.13, 'score_name': 'accuracy', 'score': 0.06, 'score_ci_high': 0.13, 'score_ci_low': 0.02246489648764566, 'num_of_instances': 100} | 1 | a100_80gb |
419439aa09bfc2204b8ade8ea3672bf99ef3bdf5a7fcfde1a3893c23c1e7675c | 1,738,205,309,830 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.philosophy | card=cards.mmlu_pro.philosophy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 28.373172 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.08, 'accuracy_ci_low': 0.04, 'accuracy_ci_high': 0.14169926053608947, 'score_name': 'accuracy', 'score': 0.08, 'score_ci_high': 0.14169926053608947, 'score_ci_low': 0.04, 'num_of_instances': 100} | 1 | a100_80gb |
34df57f92b6aa483b5f968b9ccbe838df20ad4aa90eadccb54947efd26ff099f | 1,738,204,306,907 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_medicine | card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 59.35997 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.39, 'accuracy_ci_low': 0.29, 'accuracy_ci_high': 0.48, 'score_name': 'accuracy', 'score': 0.39, 'score_ci_high': 0.48, 'score_ci_low': 0.29, 'num_of_instances': 100} | 1 | a100_80gb |
3b8724c93279d76e656bcfe3c305d3d0a40d8aaa05c3e257726676dc5dcd562b | 1,738,205,079,342 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_medicine | card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 35.301284 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.67, 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.76, 'score_ci_low': 0.57, 'num_of_instances': 100} | 1 | a100_80gb |
456cba93b946256858b716872340982e341d5dcefc86b8be2edf2cf058de00ae | 1,738,204,382,765 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.world_religions | card=cards.mmlu.world_religions,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_greek_choicesSeparator_newline_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 75.181085 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.68, 'accuracy_ci_low': 0.5646909145053445, 'accuracy_ci_high': 0.77, 'score_name': 'accuracy', 'score': 0.68, 'score_ci_high': 0.77, 'score_ci_low': 0.5646909145053445, 'num_of_instances': 100} | 1 | a100_80gb |
8479e8e9c5ef9c66b3cbdbdd672fb400ebe6943cdfbde4749b26478767afad4e | 1,738,204,467,659 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.formal_logic | card=cards.mmlu.formal_logic,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_roman_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 84.220497 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.24, 'accuracy_ci_low': 0.16, 'accuracy_ci_high': 0.32, 'score_name': 'accuracy', 'score': 0.24, 'score_ci_high': 0.32, 'score_ci_low': 0.16, 'num_of_instances': 100} | 1 | a100_80gb |
5a030386550aa1e2519699f773306f86e3eeaf0c64009f7a1396e85ba8d4afd0 | 1,738,204,606,131 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.jurisprudence | card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 60.29369 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.54, 'accuracy_ci_low': 0.44, 'accuracy_ci_high': 0.63, 'score_name': 'accuracy', 'score': 0.54, 'score_ci_high': 0.63, 'score_ci_low': 0.44, 'num_of_instances': 100} | 1 | a100_80gb |
629b7c3bfec31c7475656b895ee62b1b3c379449d28a6b454f8b97911f2170cd | 1,738,204,964,575 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.jurisprudence | card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 23.28618 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.56, 'accuracy_ci_low': 0.45746631616007666, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.65, 'score_ci_low': 0.45746631616007666, 'num_of_instances': 100} | 1 | a100_80gb |
7b5cd413461f14d0b257bb90d3c3577910e49cb6422b31627cac4308d2ee6ebd | 1,738,204,647,250 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.history | card=cards.mmlu_pro.history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 40.509431 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.22, 'accuracy_ci_low': 0.15, 'accuracy_ci_high': 0.31, 'score_name': 'accuracy', 'score': 0.22, 'score_ci_high': 0.31, 'score_ci_low': 0.15, 'num_of_instances': 100} | 1 | a100_80gb |
654d89b401d841120387ec1eddb98346518c0ef2672e521b26dbc0a1ceb110f3 | 1,738,204,698,857 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_world_history | card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 50.706503 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.31, 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.41, 'score_name': 'accuracy', 'score': 0.31, 'score_ci_high': 0.41, 'score_ci_low': 0.22, 'num_of_instances': 100} | 1 | a100_80gb |
251fd562357c78092ddaa6bef72864c1796926b7d64660006dcfcd1bb865e065 | 1,738,204,736,881 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_law | card=cards.mmlu.professional_law,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 37.170065 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.26, 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.3599120539299179, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.3599120539299179, 'score_ci_low': 0.18, 'num_of_instances': 100} | 1 | a100_80gb |
ec518e239e2399da38b524c0619961ba80e6f21f2299e94d050fd37655e3c751 | 1,738,204,767,887 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.security_studies | card=cards.mmlu.security_studies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_capitals_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 30.153031 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.44, 'accuracy_ci_low': 0.34, 'accuracy_ci_high': 0.54, 'score_name': 'accuracy', 'score': 0.44, 'score_ci_high': 0.54, 'score_ci_low': 0.34, 'num_of_instances': 100} | 1 | a100_80gb |
7b65826e9b411b9bdcb72e59b02d783662711ac945c33b2a09942fce817f90a8 | 1,738,204,792,765 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.psychology | card=cards.mmlu_pro.psychology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 24.077027 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.44, 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.54, 'score_name': 'accuracy', 'score': 0.44, 'score_ci_high': 0.54, 'score_ci_low': 0.35, 'num_of_instances': 100} | 1 | a100_80gb |
d9ec326c6fca3d09c2ebe77ab26ba4d4184b5dd0daf6df82560b0a4a48d8dfc0 | 1,738,204,843,967 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.business | card=cards.mmlu_pro.business,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateHere.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 23.83571 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.31, 'accuracy_ci_low': 0.225502709112505, 'accuracy_ci_high': 0.41, 'score_name': 'accuracy', 'score': 0.31, 'score_ci_high': 0.41, 'score_ci_low': 0.225502709112505, 'num_of_instances': 100} | 1 | a100_80gb |
68194e3451814b30b6cc40147e24281246e694e8e58e7b0c4a632eb83f52c6c5 | 1,738,204,869,386 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.college_mathematics | card=cards.mmlu.college_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_greek_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 24.335532 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.16, 'accuracy_ci_low': 0.1, 'accuracy_ci_high': 0.24, 'score_name': 'accuracy', 'score': 0.16, 'score_ci_high': 0.24, 'score_ci_low': 0.1, 'num_of_instances': 100} | 1 | a100_80gb |
289f8fde21413868c7a2d96e9a69402ed860a1ff0b5370d0958ac223d47215d5 | 1,738,204,912,010 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.computer_security | card=cards.mmlu.computer_security,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_roman_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 22.099863 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.59, 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.69, 'score_name': 'accuracy', 'score': 0.59, 'score_ci_high': 0.69, 'score_ci_low': 0.49, 'num_of_instances': 100} | 1 | a100_80gb |
2bf97088a3dcd8da07e41cacf9af158e4af57e80f313aa671ea8ee3829132e80 | 1,738,205,141,454 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.college_physics | card=cards.mmlu.college_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 30.121044 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.27, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.36, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.36, 'score_ci_low': 0.19, 'num_of_instances': 100} | 1 | a100_80gb |
b0edd1f4a11977c402989a623ede31b09a0f2c54864b27cf6615f2287b213c8e | 1,738,205,177,008 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.economics | card=cards.mmlu_pro.economics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 34.978654 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.34, 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.43781908937145997, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.43781908937145997, 'score_ci_low': 0.25, 'num_of_instances': 100} | 1 | a100_80gb |
7649ea394498a222557b395924c3cc08297a7189dbeb427382f72c5275d4f4f1 | 1,738,205,215,033 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_accounting | card=cards.mmlu.professional_accounting,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 36.776864 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.29, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.39, 'score_name': 'accuracy', 'score': 0.29, 'score_ci_high': 0.39, 'score_ci_low': 0.21, 'num_of_instances': 100} | 1 | a100_80gb |
fb4e30b1d4323cc8f7db9cbf576bcc2270dcc7e8ba111cc07e7591168c4e6b7c | 1,738,205,280,774 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.openbook_qa | card=cards.openbook_qa,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 65.092505 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.89, 'accuracy_ci_low': 0.82, 'accuracy_ci_high': 0.94, 'score_name': 'accuracy', 'score': 0.89, 'score_ci_high': 0.94, 'score_ci_low': 0.82, 'num_of_instances': 100} | 1 | a100_80gb |
bbd334914189f3386bdf4e78bcaedf5c5f37b5cf404b6e81aa1a779a77c27bd2 | 1,738,205,343,641 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.marketing | card=cards.mmlu.marketing,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 33.051184 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.76, 'accuracy_ci_low': 0.67, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.76, 'score_ci_high': 0.83, 'score_ci_low': 0.67, 'num_of_instances': 100} | 1 | a100_80gb |
589b74ca4932acef74e1d4fee47f388f966b570747e4c71f3a839ed8f10b2a3a | 1,738,205,497,405 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.marketing | card=cards.mmlu.marketing,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_capitals_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 37.131277 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.81, 'accuracy_ci_low': 0.73, 'accuracy_ci_high': 0.88, 'score_name': 'accuracy', 'score': 0.81, 'score_ci_high': 0.88, 'score_ci_low': 0.73, 'num_of_instances': 100} | 1 | a100_80gb |
13d8faae59585e313b15675339fc52237b753b03bc903af8b83f6c418651a43b | 1,738,205,368,539 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_geography | card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 24.115294 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.63, 'accuracy_ci_low': 0.53, 'accuracy_ci_high': 0.72, 'score_name': 'accuracy', 'score': 0.63, 'score_ci_high': 0.72, 'score_ci_low': 0.53, 'num_of_instances': 100} | 1 | a100_80gb |
6dfd3cf7e040f1e097bcd4d8739f9cf1964b488c5c36b286d05cacf339f48392 | 1,738,205,459,625 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.sociology | card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 33.168097 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.66, 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.75, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.75, 'score_ci_low': 0.56, 'num_of_instances': 100} | 1 | a100_80gb |
a0a3e5a7fdd7b005726b1877443c99a2629ef44779c5511d11d050489b48dbf9 | 1,738,205,607,537 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.astronomy | card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 67.745894 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.71, 'accuracy_ci_low': 0.61, 'accuracy_ci_high': 0.79, 'score_name': 'accuracy', 'score': 0.71, 'score_ci_high': 0.79, 'score_ci_low': 0.61, 'num_of_instances': 100} | 1 | a100_80gb |
24a90580255dbf115a8cdf2dd68400b2fdb2aa5dba115ac9542f764aa2189298 | 1,738,205,713,011 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.astronomy | card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 55.473506 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.4, 'accuracy_ci_low': 0.31, 'accuracy_ci_high': 0.5, 'score_name': 'accuracy', 'score': 0.4, 'score_ci_high': 0.5, 'score_ci_low': 0.31, 'num_of_instances': 100} | 1 | a100_80gb |
71c3d6f7c555afe6d175e0c6d72363afcfd597bdfefcb4446fe7160f68cca98b | 1,738,205,896,273 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_psychology | card=cards.mmlu.high_school_psychology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_numbers_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 77.863105 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.63, 'accuracy_ci_low': 0.53, 'accuracy_ci_high': 0.72, 'score_name': 'accuracy', 'score': 0.63, 'score_ci_high': 0.72, 'score_ci_low': 0.53, 'num_of_instances': 100} | 1 | a100_80gb |
3092eb08320030faf5661e30288a993cd0a6da526e31d9a7141283dc9f622d7f | 1,738,202,731,450 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.hellaswag | card=cards.hellaswag,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.HellaSwag.MultipleChoiceTemplatesInstructionsState4.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 124.826187 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.65, 'accuracy_ci_low': 0.55, 'accuracy_ci_high': 0.74, 'score_name': 'accuracy', 'score': 0.65, 'score_ci_high': 0.74, 'score_ci_low': 0.55, 'num_of_instances': 100} | 1 | a100_80gb |
037cd5c4e5a21da4b8839c60892582d8d927acaa75c1406cde7c663654e92ba3 | 1,738,204,081,055 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.hellaswag | card=cards.hellaswag,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.HellaSwag.MultipleChoiceTemplatesInstructionsState4.enumerator_keyboard_choicesSeparator_space_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 31.960559 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.38, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.38, 'score_ci_low': 0.2, 'num_of_instances': 100} | 1 | a100_80gb |
2f31680ea5f9c4d73525656347c4c313a6c3213581035953c3c1596912bc538f | 1,738,203,379,235 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.history | card=cards.mmlu_pro.history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateHere.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 109.391451 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.3, 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.4, 'score_name': 'accuracy', 'score': 0.3, 'score_ci_high': 0.4, 'score_ci_low': 0.22, 'num_of_instances': 100} | 1 | a100_80gb |
fdff70128dd1595d391286ab1557be5a1fa8cd9140b2b97f0372c5a89873f7f2 | 1,738,203,996,248 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.history | card=cards.mmlu_pro.history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 22.105521 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.23, 'accuracy_ci_low': 0.15, 'accuracy_ci_high': 0.31, 'score_name': 'accuracy', 'score': 0.23, 'score_ci_high': 0.31, 'score_ci_low': 0.15, 'num_of_instances': 100} | 1 | a100_80gb |
949f97d0a9bd4fb856673bda97353a117e72053fdf134e5dc188577965771e49 | 1,738,204,537,823 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.history | card=cards.mmlu_pro.history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 76.605521 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.24, 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.33, 'score_name': 'accuracy', 'score': 0.24, 'score_ci_high': 0.33, 'score_ci_low': 0.17, 'num_of_instances': 100} | 1 | a100_80gb |
ee6c8104f884ffc9db28dfe8e455ed70009dc98bc1381cdcd00ee66a10d4539b | 1,738,203,436,730 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.machine_learning | card=cards.mmlu.machine_learning,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 56.573159 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.36, 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.46, 'score_name': 'accuracy', 'score': 0.36, 'score_ci_high': 0.46, 'score_ci_low': 0.27, 'num_of_instances': 100} | 1 | a100_80gb |
e9f469019a26fa985064dead5346eada8d2abf466851a0d74620cd2fb75cd620 | 1,738,203,485,245 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.public_relations | card=cards.mmlu.public_relations,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_greek_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 47.936504 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.46, 'accuracy_ci_low': 0.37, 'accuracy_ci_high': 0.56, 'score_name': 'accuracy', 'score': 0.46, 'score_ci_high': 0.56, 'score_ci_low': 0.37, 'num_of_instances': 100} | 1 | a100_80gb |
449f18cc32ca48807ce527e401eab5fe202a871588e75befdcc738449cfe8c65 | 1,738,203,549,549 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.computer_security | card=cards.mmlu.computer_security,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 63.721557 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.68, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.68, 'score_ci_high': 0.76, 'score_ci_low': 0.58, 'num_of_instances': 100} | 1 | a100_80gb |
da32bd3a1689a4ad6edb838f5b488ab71737374c5ccdff3236e44348f676c37f | 1,738,203,844,582 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.computer_security | card=cards.mmlu.computer_security,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 36.788842 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.84, 'accuracy_ci_low': 0.75, 'accuracy_ci_high': 0.91, 'score_name': 'accuracy', 'score': 0.84, 'score_ci_high': 0.91, 'score_ci_low': 0.75, 'num_of_instances': 100} | 1 | a100_80gb |
b7c27b13797815f4286ef1726f834caf1863613d509821f61231c564c993c8be | 1,738,203,617,236 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.moral_disputes | card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 66.892447 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.36, 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.46, 'score_name': 'accuracy', 'score': 0.36, 'score_ci_high': 0.46, 'score_ci_low': 0.27, 'num_of_instances': 100} | 1 | a100_80gb |
b7955c083dee1b2e058ae936faea6ac410ba39b19f611df26397479a87d7437a | 1,738,203,682,748 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.college_physics | card=cards.mmlu.college_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 64.893092 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.38, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.38, 'score_ci_low': 0.2, 'num_of_instances': 100} | 1 | a100_80gb |
ff3172d6cec77b5f58b9a73b9712096645791b1b75f4f6a65ef67d53e4a1bf76 | 1,738,203,747,741 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_macroeconomics | card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 64.405856 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.48, 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.48, 'score_ci_high': 0.58, 'score_ci_low': 0.38, 'num_of_instances': 100} | 1 | a100_80gb |
d5623faf537903974e78545fbd94aa07d44317479737061ee78a118d7fe8c05b | 1,738,203,806,875 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.logical_fallacies | card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 58.31519 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.68, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.77, 'score_name': 'accuracy', 'score': 0.68, 'score_ci_high': 0.77, 'score_ci_low': 0.58, 'num_of_instances': 100} | 1 | a100_80gb |
c626123dcb451c1f738f6fa4c198234e2c42e2655a838245d7975660c810f5a1 | 1,738,203,871,081 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_us_history | card=cards.mmlu.high_school_us_history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 25.69893 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.5, 'accuracy_ci_low': 0.4, 'accuracy_ci_high': 0.6, 'score_name': 'accuracy', 'score': 0.5, 'score_ci_high': 0.6, 'score_ci_low': 0.4, 'num_of_instances': 100} | 1 | a100_80gb |
4130a9475b7f183f4d556402a728cdbff6217e243cf7fd0f3d85560814974dcf | 1,738,203,900,157 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.prehistory | card=cards.mmlu.prehistory,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_greek_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 28.248521 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.53, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.62, 'score_name': 'accuracy', 'score': 0.53, 'score_ci_high': 0.62, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
aea9a576b2c96e7fcb486b3b8c7034bb8301cc78f4bacd6416f48637b592c340 | 1,738,205,084,960 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.prehistory | card=cards.mmlu.prehistory,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 34.60386 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.52, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.62, 'score_name': 'accuracy', 'score': 0.52, 'score_ci_high': 0.62, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
82e6d326b2de7e731c8e12495d96a8007d7624577ce0cf1867f1699ae4fc8d0c | 1,738,205,178,177 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.prehistory | card=cards.mmlu.prehistory,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_roman_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 40.179559 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.55, 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.55, 'score_ci_high': 0.65, 'score_ci_low': 0.46, 'num_of_instances': 100} | 1 | a100_80gb |
44d690ab3cbf609b379c758d3ae0fa5407be2fee2d15bc2e7dc2eefbe0f9c3f3 | 1,738,203,927,497 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_accounting | card=cards.mmlu.professional_accounting,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_numbers_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 26.734748 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.24, 'accuracy_ci_low': 0.16, 'accuracy_ci_high': 0.33, 'score_name': 'accuracy', 'score': 0.24, 'score_ci_high': 0.33, 'score_ci_low': 0.16, 'num_of_instances': 100} | 1 | a100_80gb |
4557508ba1d1a366571483225461318a5375ed2f1c9849d37e6a253933c79f3c | 1,738,203,949,924 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.clinical_knowledge | card=cards.mmlu.clinical_knowledge,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_capitals_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 21.76333 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.59, 'accuracy_ci_low': 0.4948327005211808, 'accuracy_ci_high': 0.68, 'score_name': 'accuracy', 'score': 0.59, 'score_ci_high': 0.68, 'score_ci_low': 0.4948327005211808, 'num_of_instances': 100} | 1 | a100_80gb |
97f7c371b1eb3ceadda3c67fcdee62b42cb794c25a50b2efd1a4dea22a1d78a7 | 1,738,204,191,901 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.clinical_knowledge | card=cards.mmlu.clinical_knowledge,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_numbers_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 56.034238 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.52, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.61, 'score_name': 'accuracy', 'score': 0.52, 'score_ci_high': 0.61, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
748f75525e4cdae6f8673526a5075142aa4896ca85f78bff6b01635d23abee28 | 1,738,204,818,413 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.clinical_knowledge | card=cards.mmlu.clinical_knowledge,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_greek_choicesSeparator_comma_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 25.137875 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.57, 'accuracy_ci_low': 0.47, 'accuracy_ci_high': 0.67, 'score_name': 'accuracy', 'score': 0.57, 'score_ci_high': 0.67, 'score_ci_low': 0.47, 'num_of_instances': 100} | 1 | a100_80gb |
abca77a242337e364052f26adcff09ba991b3a6c8d1dfecec9b864d3880c35cb | 1,738,203,973,351 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_roman_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 22.634721 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.26, 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.34, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.34, 'score_ci_low': 0.18, 'num_of_instances': 100} | 1 | a100_80gb |
d75cb51f9ddd95c48e65ab2287b1a025e8cb064b9a82f19c78a25a9ad44789bf | 1,738,205,212,181 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 33.067527 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.36, 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.47, 'score_name': 'accuracy', 'score': 0.36, 'score_ci_high': 0.47, 'score_ci_low': 0.27, 'num_of_instances': 100} | 1 | a100_80gb |
632b396aacc70901265d4c507a66c9f33f3f6d3f7ad6982fc41b31541463feec | 1,738,204,048,570 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.miscellaneous | card=cards.mmlu.miscellaneous,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_numbers_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 32.380133 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.67, 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.75, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.75, 'score_ci_low': 0.57, 'num_of_instances': 100} | 1 | a100_80gb |
6f6ec85e1dc19f359a452aa7a5af959e38e7a2c87e6c0543dbd4c88d9b35d736 | 1,738,204,244,693 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.professional_medicine | card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 52.209124 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.62, 'accuracy_ci_low': 0.53, 'accuracy_ci_high': 0.71, 'score_name': 'accuracy', 'score': 0.62, 'score_ci_high': 0.71, 'score_ci_low': 0.53, 'num_of_instances': 100} | 1 | a100_80gb |
01e5e7b345b979471e5f76b89d9e14721c863d5033076304f014a3c00c69f1aa | 1,738,204,369,941 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.openbook_qa | card=cards.openbook_qa,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 124.558565 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.56, 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.65, 'score_ci_low': 0.46, 'num_of_instances': 100} | 1 | a100_80gb |
c1ce93a49d9b54a31bed0798796dba455dea9137935e720ba51b2e250202ff37 | 1,738,205,676,033 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.openbook_qa | card=cards.openbook_qa,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 93.174759 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.49, 'accuracy_ci_low': 0.39, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.49, 'score_ci_high': 0.58, 'score_ci_low': 0.39, 'num_of_instances': 100} | 1 | a100_80gb |
30d2ddb1e9afa9b4e5751088446d75dfdb6dcc83eacd6169b1cc0e2fbdc18283 | 1,738,204,460,604 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.jurisprudence | card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 90.1069 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.48, 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.48, 'score_ci_high': 0.58, 'score_ci_low': 0.38, 'num_of_instances': 100} | 1 | a100_80gb |
0f33137723ef90296c9fb78d9398b4de29ec9caadeeff17f9171aaf7db144f6a | 1,738,204,649,151 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.jurisprudence | card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_greek_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 54.447045 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.22, 'accuracy_ci_low': 0.15, 'accuracy_ci_high': 0.31, 'score_name': 'accuracy', 'score': 0.22, 'score_ci_high': 0.31, 'score_ci_low': 0.15, 'num_of_instances': 100} | 1 | a100_80gb |
82d6029d744dce108d32ac31240cdf9ce5ed51e6d8fc4364059a52fe4640f6a8 | 1,738,204,950,843 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.jurisprudence | card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_greek_choicesSeparator_space_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 21.867733 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.45, 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.5422826783324888, 'score_name': 'accuracy', 'score': 0.45, 'score_ci_high': 0.5422826783324888, 'score_ci_low': 0.36, 'num_of_instances': 100} | 1 | a100_80gb |
b3aa1913fa6482b95d9bbf4a21972e4958703aeefb63331d80b81855f9b56ea1 | 1,738,204,699,426 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.virology | card=cards.mmlu.virology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 49.656525 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.34, 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.44, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.44, 'score_ci_low': 0.25, 'num_of_instances': 100} | 1 | a100_80gb |
d1e5e350e70204b6578474d5c83d43fd3e598b74addc37cab121d963a1ee91c6 | 1,738,204,737,177 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.medical_genetics | card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_roman_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 37.196977 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.65, 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.74, 'score_name': 'accuracy', 'score': 0.65, 'score_ci_high': 0.74, 'score_ci_low': 0.56, 'num_of_instances': 100} | 1 | a100_80gb |
99c26d973ee09104dbbaa6db7507842f08b3fa3ef1f03e550197fd48de83e4f2 | 1,738,204,765,443 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.college_mathematics | card=cards.mmlu.college_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_keyboard_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 27.53323 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.21, 'accuracy_ci_low': 0.13, 'accuracy_ci_high': 0.29, 'score_name': 'accuracy', 'score': 0.21, 'score_ci_high': 0.29, 'score_ci_low': 0.13, 'num_of_instances': 100} | 1 | a100_80gb |
b705fe3a2dc5b3a3643a6b2440691fa9d81d224edf71b5ab0df7758bae6d7fc0 | 1,738,204,792,117 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.international_law | card=cards.mmlu.international_law,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 26.092493 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.59, 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.67, 'score_name': 'accuracy', 'score': 0.59, 'score_ci_high': 0.67, 'score_ci_low': 0.48, 'num_of_instances': 100} | 1 | a100_80gb |
f2610ebb4f520a7ab03d84d9700b90da2b3fbf6f8321220e3182b14d91f3edaf | 1,738,204,844,513 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.college_computer_science | card=cards.mmlu.college_computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 25.279172 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.4, 'accuracy_ci_low': 0.32, 'accuracy_ci_high': 0.51, 'score_name': 'accuracy', 'score': 0.4, 'score_ci_high': 0.51, 'score_ci_low': 0.32, 'num_of_instances': 100} | 1 | a100_80gb |
dd546653c0777d8389ec3b9cd89087a60c4e3b184b8a63e530095095dccb59f8 | 1,738,204,868,392 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.anatomy | card=cards.mmlu.anatomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 22.763646 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.48, 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.48, 'score_ci_high': 0.58, 'score_ci_low': 0.38, 'num_of_instances': 100} | 1 | a100_80gb |
f90e2a068d7144d11565a9400fd0eb01fe015d800e904971aa93222f6aa84885 | 1,738,204,889,994 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.management | card=cards.mmlu.management,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_roman_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 21.022671 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.38, 'accuracy_ci_low': 0.29, 'accuracy_ci_high': 0.48, 'score_name': 'accuracy', 'score': 0.38, 'score_ci_high': 0.48, 'score_ci_low': 0.29, 'num_of_instances': 100} | 1 | a100_80gb |
416af030a6704ff507c2e36e125db3f9b09ec52bad58f61885c4595394d43507 | 1,738,204,928,053 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.college_chemistry | card=cards.mmlu.college_chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 25.360572 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.89, 'accuracy_ci_low': 0.82, 'accuracy_ci_high': 0.94, 'score_name': 'accuracy', 'score': 0.89, 'score_ci_high': 0.94, 'score_ci_low': 0.82, 'num_of_instances': 100} | 1 | a100_80gb |
b22ce4fef7e8d9cdff6694ec1a89360864aca54e68e3004462a8cfc21015baad | 1,738,204,976,321 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.moral_scenarios | card=cards.mmlu.moral_scenarios,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_keyboard_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 24.861692 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.26, 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.35, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.35, 'score_ci_low': 0.17, 'num_of_instances': 100} | 1 | a100_80gb |
fba421033bc891e1e8b252bb7e1b247750230031d2dc03df55079bc29b6f0d99 | 1,738,205,017,914 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_mathematics | card=cards.mmlu.high_school_mathematics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 28.088346 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.32, 'accuracy_ci_low': 0.23529241455953043, 'accuracy_ci_high': 0.42, 'score_name': 'accuracy', 'score': 0.32, 'score_ci_high': 0.42, 'score_ci_low': 0.23529241455953043, 'num_of_instances': 100} | 1 | a100_80gb |
330af36924dc350b47693b284aa016de16a6089db0a42940fcdbb33549300121 | 1,738,205,049,596 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.human_sexuality | card=cards.mmlu.human_sexuality,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 30.824935 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.53, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.63, 'score_name': 'accuracy', 'score': 0.53, 'score_ci_high': 0.63, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
58bb63002d2091fe514684f0fa92643f50ce077e37e7018522eecbc3a56a1c3f | 1,738,205,110,689 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_chemistry | card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_space_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 24.779819 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.37, 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.47, 'score_name': 'accuracy', 'score': 0.37, 'score_ci_high': 0.47, 'score_ci_low': 0.27, 'num_of_instances': 100} | 1 | a100_80gb |
60af05940dfda62513a10762a101ab2e99a88d16f77af76cbcb2c9f70ff55da2 | 1,738,205,137,249 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu_pro.engineering | card=cards.mmlu_pro.engineering,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 25.719469 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.11, 'accuracy_ci_low': 0.06, 'accuracy_ci_high': 0.19, 'score_name': 'accuracy', 'score': 0.11, 'score_ci_high': 0.19, 'score_ci_low': 0.06, 'num_of_instances': 100} | 1 | a100_80gb |
32714a19a828c1ccb8a3bad7967d25817099c48c119205ac4c17e719059624ec | 1,738,205,249,054 | 1,738,202,400,000 | meta-llama_Llama-3.2-3B-Instruct | cards.mmlu.high_school_biology | card=cards.mmlu.high_school_biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_numbers_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 36.110758 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.61, 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.69, 'score_name': 'accuracy', 'score': 0.61, 'score_ci_high': 0.69, 'score_ci_low': 0.51, 'num_of_instances': 100} | 1 | a100_80gb |
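The records above pack the per-run metrics into a Python-style dict in the `scores` column (single-quoted, unlike the JSON-formatted `generation_args` and `model_args` fields). Below is a minimal sketch of how one might pull the accuracy numbers back out of rows laid out like these, assuming the same `" | "`-separated layout; the field positions and the helper names (`parse_record`, `summarize`) are illustrative, not part of the dump itself.

```python
# Minimal sketch: parse pipe-separated run records like the rows above and
# summarize accuracy per unitxt card. Assumes the card name is the 5th field
# and the scores dict is the 3rd field from the end, as in the rows shown.
import ast
from collections import defaultdict

def parse_record(line: str):
    """Split one dump row into fields and literal-eval its scores dict."""
    fields = [f.strip() for f in line.rstrip(" |").split(" | ")]
    card = fields[4]                       # e.g. cards.mmlu.high_school_biology
    scores = ast.literal_eval(fields[-3])  # single-quoted dict repr -> dict
    return card, scores

def summarize(lines):
    """Group runs by card and print mean accuracy with the reported CI bounds."""
    by_card = defaultdict(list)
    for line in lines:
        card, scores = parse_record(line)
        by_card[card].append(scores)
    for card, runs in sorted(by_card.items()):
        accs = [r["accuracy"] for r in runs]
        lows = [r["accuracy_ci_low"] for r in runs]
        highs = [r["accuracy_ci_high"] for r in runs]
        print(f"{card}: mean accuracy {sum(accs) / len(accs):.2f} "
              f"(CI spans {min(lows):.2f}-{max(highs):.2f}, {len(runs)} run(s))")
```

For example, feeding `summarize` the twelve rows above would report per-card accuracies such as 0.61 for `cards.mmlu.high_school_biology` and 0.11 for `cards.mmlu_pro.engineering`, each with the confidence-interval bounds already present in the `scores` column.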