{
  "results": {
    "temporibus-ipsam-5238_logiqa2_cot": {
      "acc,none": 0.3187022900763359,
      "acc_stderr,none": 0.01175636237340839,
      "alias": "temporibus-ipsam-5238_logiqa2_cot"
    },
    "temporibus-ipsam-5238_logiqa_cot": {
      "acc,none": 0.2987220447284345,
      "acc_stderr,none": 0.018307908005960653,
      "alias": "temporibus-ipsam-5238_logiqa_cot"
    },
    "temporibus-ipsam-5238_lsat-ar_cot": {
      "acc,none": 0.21739130434782608,
      "acc_stderr,none": 0.027256850838819964,
      "alias": "temporibus-ipsam-5238_lsat-ar_cot"
    },
    "temporibus-ipsam-5238_lsat-lr_cot": {
      "acc,none": 0.22745098039215686,
      "acc_stderr,none": 0.01858009962260333,
      "alias": "temporibus-ipsam-5238_lsat-lr_cot"
    },
    "temporibus-ipsam-5238_lsat-rc_cot": {
      "acc,none": 0.2899628252788104,
      "acc_stderr,none": 0.027716877855226904,
      "alias": "temporibus-ipsam-5238_lsat-rc_cot"
    }
  },
  "configs": {
    "temporibus-ipsam-5238_logiqa2_cot": {
      "task": "temporibus-ipsam-5238_logiqa2_cot",
      "group": "logikon-bench",
      "dataset_path": "cot-leaderboard/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "temporibus-ipsam-5238-logiqa2/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text_cot(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage. [Base your answer on the reasoning below.]\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n    \n    [Reasoning: <reasoning>]\n    \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\"    \n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "temporibus-ipsam-5238_logiqa_cot": {
      "task": "temporibus-ipsam-5238_logiqa_cot",
      "group": "logikon-bench",
      "dataset_path": "cot-leaderboard/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "temporibus-ipsam-5238-logiqa/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text_cot(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage. [Base your answer on the reasoning below.]\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n    \n    [Reasoning: <reasoning>]\n    \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\"    \n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "temporibus-ipsam-5238_lsat-ar_cot": {
      "task": "temporibus-ipsam-5238_lsat-ar_cot",
      "group": "logikon-bench",
      "dataset_path": "cot-leaderboard/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "temporibus-ipsam-5238-lsat-ar/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text_cot(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage. [Base your answer on the reasoning below.]\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n    \n    [Reasoning: <reasoning>]\n    \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\"    \n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "temporibus-ipsam-5238_lsat-lr_cot": {
      "task": "temporibus-ipsam-5238_lsat-lr_cot",
      "group": "logikon-bench",
      "dataset_path": "cot-leaderboard/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "temporibus-ipsam-5238-lsat-lr/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text_cot(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage. [Base your answer on the reasoning below.]\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n    \n    [Reasoning: <reasoning>]\n    \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\"    \n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "temporibus-ipsam-5238_lsat-rc_cot": {
      "task": "temporibus-ipsam-5238_lsat-rc_cot",
      "group": "logikon-bench",
      "dataset_path": "cot-leaderboard/cot-eval-traces",
      "dataset_kwargs": {
        "data_files": {
          "test": "temporibus-ipsam-5238-lsat-rc/test-00000-of-00001.parquet"
        }
      },
      "test_split": "test",
      "doc_to_text": "def doc_to_text_cot(doc) -> str:\n    \"\"\"\n    Answer the following question about the given passage. [Base your answer on the reasoning below.]\n    \n    Passage: <passage>\n    \n    Question: <question>\n    A. <choice1>\n    B. <choice2>\n    C. <choice3>\n    D. <choice4>\n    [E. <choice5>]\n    \n    [Reasoning: <reasoning>]\n    \n    Answer:\n    \"\"\"\n    k = len(doc[\"options\"])\n    choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n    prompt = \"Answer the following question about the given passage. Base your answer on the reasoning below.\\n\\n\"\n    prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n    prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n    for choice, option in zip(choices, doc[\"options\"]):\n        prompt += f\"{choice.upper()}. {option}\\n\"\n    prompt += \"\\n\"\n    prompt += \"Reasoning: \" + doc[\"reasoning_trace\"] + \"\\n\\n\"    \n    prompt += \"Answer:\"\n    return prompt\n",
      "doc_to_target": "{{answer}}",
      "doc_to_choice": "{{options}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    }
  },
  "versions": {
    "temporibus-ipsam-5238_logiqa2_cot": 0.0,
    "temporibus-ipsam-5238_logiqa_cot": 0.0,
    "temporibus-ipsam-5238_lsat-ar_cot": 0.0,
    "temporibus-ipsam-5238_lsat-lr_cot": 0.0,
    "temporibus-ipsam-5238_lsat-rc_cot": 0.0
  },
  "n-shot": {
    "temporibus-ipsam-5238_logiqa2_cot": 0,
    "temporibus-ipsam-5238_logiqa_cot": 0,
    "temporibus-ipsam-5238_lsat-ar_cot": 0,
    "temporibus-ipsam-5238_lsat-lr_cot": 0,
    "temporibus-ipsam-5238_lsat-rc_cot": 0
  },
  "config": {
    "model": "vllm",
    "model_args": "pretrained=Locutusque/Hercules-4.0-Mistral-v0.2-7B,revision=main,dtype=bfloat16,tensor_parallel_size=1,gpu_memory_utilization=0.8,trust_remote_code=true,max_length=2048",
    "batch_size": "auto",
    "batch_sizes": [],
    "device": null,
    "use_cache": null,
    "limit": null,
    "bootstrap_iters": 100000,
    "gen_kwargs": null
  },
  "git_hash": "741db1c"
}