For the bAbI data as used in [Scaling Data-Constrained Language Models](https://arxiv.org/abs/2305.16264), use commit e332ae8a626bb17178026dd14797abb9da31376e.
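If this dataset is hosted as a Hugging Face dataset repository (an assumption; the repo id below is a placeholder, not given by this card), the pinned commit can be loaded via the `revision` argument of `datasets.load_dataset`:

```python
from datasets import load_dataset

# Placeholder repo id -- substitute the actual dataset repository this file belongs to.
ds = load_dataset(
    "username/babi",
    revision="e332ae8a626bb17178026dd14797abb9da31376e",  # commit used for the data-constrained LM paper
)
```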
Creation (Copied & adapted from https://github.com/stanford-crfm/helm/blob/0eaaa62a2263ddb94e9850ee629423b010f57e4a/src/helm/benchmark/scenarios/babi_qa_scenario.py):
```python
# Download and unpack the original bAbI tasks (IPython/Jupyter shell commands).
!wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz
!tar -xf tasks_1-20_v1-2.tar.gz

import json
from typing import List

tasks = list(range(1, 21))
splits = ["train", "valid", "test"]


def process_path(path: str) -> str:
    """Turn a path string (task 19) from the original format 's,w' into a verbal, model-friendly format 'south west'."""
    steps: List[str] = path.split(",")
    directions = {"s": "south", "n": "north", "e": "east", "w": "west"}
    path = " ".join([directions[step] for step in steps])
    return path


for split in splits:
    with open(f"babi_{split}.jsonl", "w") as f_base:
        for task in tasks:
            split_path: str = f"./tasks_1-20_v1-2/en-valid/qa{task}_{split}.txt"
            with open(split_path, "r") as f:
                facts = list(f)
            story: List[str] = []
            for fact in facts:
                fid = int(fact.split(" ")[0])
                # A fact id of 1 marks the start of a new story.
                if fid == 1:
                    story = []
                fact = " ".join(fact.split(" ")[1:])
                is_question = "?" in fact
                if is_question:
                    question, answer = fact.split("\t")[:2]
                    question, answer = question.strip(), answer.strip()
                    # All tasks except task 19 have a verbal single-word answer (e.g. kitchen, apple, yes).
                    # Task 19 (path finding) has a non-verbal answer format (e.g. 's,w'), which we convert to words.
                    if task == 19:
                        answer = process_path(answer)
                    f_base.write(json.dumps({
                        "passage": "".join(story),
                        "question": question,
                        "answer": answer,
                        "task": task,
                    }) + "\n")
                    # Sanity check: the accumulated story should not itself contain a question.
                    if any("?" in s for s in story):
                        print("STORY", "".join(story))
                else:
                    story.append(fact)
```
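Each line of the resulting `babi_{split}.jsonl` files is one JSON object with `passage`, `question`, `answer`, and `task` fields. A minimal sketch of reading the output back (file names as produced by the script above):

```python
import json

# Read the validation split written by the creation script above.
with open("babi_valid.jsonl", "r") as f:
    examples = [json.loads(line) for line in f]

# Each record carries the fields written by the script: passage, question, answer, task.
first = examples[0]
print(first["task"], first["question"], "->", first["answer"])
```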