import json

import dask.dataframe as dd
import pandas as pd
from Bio import Entrez
from retry import retry
from tqdm import tqdm

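# NCBI asks Entrez users to identify themselves with an email address;
# supplying an API key raises the allowed request rate.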
with open("credentials.json") as f:
    credentials = json.load(f)

Entrez.email = credentials["email"]
Entrez.api_key = credentials["api_key"]

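# Input: a raw BioASQ-style QA export; outputs: Parquet passage and evaluation datasets.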
RAW_EVALUATION_DATASET = "./raw_data/training11b.json"
PATH_TO_PASSAGE_DATASET = "./data/passages.parquet"
PATH_TO_EVALUATION_DATASET = "./data/test.parquet"

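# Optional cap on the total number of passages to fetch; None disables the cap.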
MAX_PASSAGES = None


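# retry() with no arguments retries forever with no delay, which papers over
# transient Entrez/network failures.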
@retry()
def get_abstract(passage_id):
    with Entrez.efetch(
        db="pubmed", id=passage_id, rettype="abstract", retmode="text"
    ) as response:
        r = response.read()
        # The plain-text record is a series of blank-line-separated blocks
        # (citation, authors, abstract, ...); the abstract is usually the longest.
        r = r.split("\n\n")
        abstract = max(r, key=len)
        return abstract


if __name__ == "__main__":
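    # Load the question records from the raw JSON export.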
    with open(RAW_EVALUATION_DATASET) as f:
        eval_data = json.load(f)["questions"]

    eval_df = pd.DataFrame(eval_data, columns=["body", "documents", "ideal_answer"])
    eval_df = eval_df.rename(
        columns={
            "body": "question",
            "documents": "relevant_passage_ids",
            "ideal_answer": "answer",
        }
    )
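    # "ideal_answer" holds a list of reference answers; keep only the first.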
    eval_df.answer = eval_df.answer.apply(lambda x: x[0])

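    # Each document is given as a PubMed URL; reduce it to its numeric PubMed ID.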
    eval_df.relevant_passage_ids = eval_df.relevant_passage_ids.apply(
        lambda x: [int(url.split("/")[-1]) for url in x]
    )

    if MAX_PASSAGES:
        # Cap the download by keeping the questions with the fewest linked
        # passages until MAX_PASSAGES abstracts are selected (an assumed
        # selection heuristic).
        eval_df["passage_count"] = eval_df.relevant_passage_ids.apply(len)
        eval_df = eval_df.sort_values("passage_count")
        eval_df = eval_df[eval_df.passage_count.cumsum() <= MAX_PASSAGES]
        eval_df = eval_df.drop(columns=["passage_count"])

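    # Deduplicate the passage IDs within each question.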
    eval_df.relevant_passage_ids = eval_df.relevant_passage_ids.apply(
        lambda x: list(set(x))
    )

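    # Gather the unique passage IDs across all questions.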
    passage_ids = list(set().union(*eval_df.relevant_passage_ids))
    passages = pd.DataFrame(index=passage_ids)

    for i, passage_id in enumerate(tqdm(passages.index)):
        passages.loc[passage_id, "passage"] = get_abstract(passage_id)

        # Checkpoint every 1000 fetches so a crash does not lose all progress.
        if i % 1000 == 0:
            dd.from_pandas(passages, npartitions=1).to_parquet(PATH_TO_PASSAGE_DATASET)

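    # Rows whose fetched text is just the list marker "1. " had no abstract
    # available; split them off and keep only real passages.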
    unavailable_passages = passages[passages["passage"] == "1. "]
    passages = passages[passages["passage"] != "1. "]
    passages.index.name = "id"
    dd.from_pandas(passages, npartitions=1).to_parquet(PATH_TO_PASSAGE_DATASET)

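    # Drop references to the unavailable passages from the evaluation set as well.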
    unavailable_ids = unavailable_passages.index.tolist()
    eval_df["relevant_passage_ids"] = eval_df["relevant_passage_ids"].apply(
        lambda x: [i for i in x if i not in unavailable_ids]
    )
    eval_df.index.name = "id"
    eval_df = eval_df[["question", "answer", "relevant_passage_ids"]]
    dd.from_pandas(eval_df, npartitions=1).to_parquet(PATH_TO_EVALUATION_DATASET)