import datasets
from huggingface_hub import create_repo
from huggingface_hub.utils import HfHubHTTPError
from rank_bm25 import BM25Plus
from sklearn.base import BaseEstimator
from sklearn.model_selection import GridSearchCV

# Number of hard-negative documents to mine per query
N_NEGATIVE_DOCS = 10


def create_text(example: dict) -> str:
    """Concatenate a document's section, title and content into a single text field."""
    return "\n".join([example["section"], example["title"], example["content"]])


# Build the documents corpus: one "text" field per document, keyed by "doc_id"
documents = datasets.load_dataset("lyon-nlp/mteb-fr-retrieval-syntec-s2p", "documents")["test"]
documents = documents.add_column("text", [create_text(x) for x in documents])
documents = documents.rename_column("id", "doc_id")
documents = documents.remove_columns(["url", "title", "section", "content"])

queries = datasets.load_dataset("lyon-nlp/mteb-fr-retrieval-syntec-s2p", "queries")["test"]
queries = queries.rename_columns({"Question": "queries", "Article": "doc_id"})
# Wrap each doc_id in a list so every query maps to a list of relevant document ids
queries = queries.map(lambda x: {"doc_id": [x["doc_id"]]})
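
# Quick shape check (illustrative; the printed values depend on the dataset):
# each documents row should expose {"doc_id", "text"} and each queries row
# {"queries", "doc_id"}, with doc_id now a list of relevant document ids.
print("Sample document:", documents[0])
print("Sample query:", queries[0])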


class BM25Estimator(BaseEstimator):
    def __init__(self, corpus_dataset: datasets.Dataset, *, k1: float = 1.5, b: float = .75, delta: int = 1):
        """Initialize the BM25 estimator using the corpus dataset.

        The dataset must contain 2 columns:
        - "doc_id": the document ids
        - "text": the document texts

        Args:
            corpus_dataset (datasets.Dataset): the corpus of documents to index
            k1 (float, optional): BM25 term-frequency saturation parameter. Defaults to 1.5.
            b (float, optional): BM25 length-normalization parameter. Defaults to .75.
            delta (int, optional): BM25+ lower-bounding parameter. Defaults to 1.
        """
        self.is_fitted_ = False

        self.corpus_dataset = corpus_dataset
        self.k1 = k1
        self.b = b
        self.delta = delta
        self.bm25 = None

    def tokenize_corpus(self, corpus: list[str] | str) -> list[str] | list[list[str]]:
        """Tokenize a corpus of strings by lowercasing and splitting on whitespace.

        Args:
            corpus (list[str] | str): the strings to tokenize, or a single string

        Returns:
            list[str] | list[list[str]]: the token list for a single string,
                or the list of token lists for a corpus
        """
        if isinstance(corpus, str):
            return corpus.lower().split()

        return [c.lower().split() for c in corpus]

    def fit(self, X=None, y=None):
        """Fit the BM25 index on the dataset of documents.

        The X and y args are placeholders required by the sklearn API.
        """
        tokenized_corpus = self.tokenize_corpus(self.corpus_dataset["text"])
        self.bm25 = BM25Plus(
            corpus=tokenized_corpus,
            k1=self.k1,
            b=self.b,
            delta=self.delta,
        )
        self.is_fitted_ = True

        return self

    def predict(self, query: str, topN: int = 10) -> list[str]:
        """Return the ids of the best documents, most relevant first.

        Args:
            query (str): the query to match against the corpus
            topN (int, optional): the number of document ids to return. Defaults to 10.

        Returns:
            list[str]: the ids of the topN most relevant documents
        """
        if not self.is_fitted_:
            self.fit()

        tokenized_query = self.tokenize_corpus(query)
        best_docs = self.bm25.get_top_n(tokenized_query, self.corpus_dataset["text"], n=topN)
        # Map each retrieved text back to its doc_id (linear scan; fine for a small corpus)
        best_docs_ids = [self.corpus_dataset["doc_id"][self.corpus_dataset["text"].index(doc)] for doc in best_docs]

        return best_docs_ids

    def score(self, queries: list[str], relevant_docs: list[list[str]]) -> float:
        """Score the BM25 using the queries and relevant docs, with MRR as the metric.

        Args:
            queries (list[str]): list of queries
            relevant_docs (list[list[str]]): list of relevant document ids for each query
        """
        # Rank the whole corpus for each query, then binarize each ranking
        # against the query's relevant document ids
        best_docs_ids_preds = [self.predict(q, len(self.corpus_dataset)) for q in queries]
        best_docs_isrelevant = [
            [doc in rel_docs for doc in best_docs_ids_pred]
            for best_docs_ids_pred, rel_docs in zip(best_docs_ids_preds, relevant_docs)
        ]
        mrrs = [self._compute_mrr(preds) for preds in best_docs_isrelevant]
        mrr = sum(mrrs) / len(mrrs)

        return mrr

    def _compute_mrr(self, predictions: list[bool]) -> float:
        """Compute the MRR from a list of boolean predictions.

        The reciprocal ranks of all relevant documents are averaged, which
        reduces to the standard MRR when a query has a single relevant document.

        Example:
            predictions = [False, False, True, False] indicates that only the
            third-ranked document is relevant, giving an MRR of 1/3.

        Args:
            predictions (list[bool]): the binarized relevance of the ranked predictions

        Returns:
            float: the mrr
        """
        if any(predictions):
            reciprocal_ranks = [1 / (i + 1) for i, pred in enumerate(predictions) if pred]
            return sum(reciprocal_ranks) / len(reciprocal_ranks)
        return 0
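

# Minimal sanity check of the estimator before tuning. This is an illustrative
# sketch: the query string below is made up, not taken from the dataset, so it
# only verifies that retrieval runs end to end with the default parameters.
bm25_check = BM25Estimator(documents).fit()
print("Sanity check, top-3 doc ids:", bm25_check.predict("durée de la période d'essai", topN=3))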
print("Optimizing BM25 parameters...")
|
|
|
|
params = {
|
|
"k1":[1., 1.25, 1.5, 1.75],
|
|
"b": [.5, .75, 1.],
|
|
"delta": [0, 1, 2]
|
|
}
|
|

# Grid-search the BM25 hyperparameters, cross-validating over the queries
gscv = GridSearchCV(BM25Estimator(documents), params)
gscv.fit(queries["queries"], queries["doc_id"])

print("Best parameters:", gscv.best_params_)
print("Best MRR score:", gscv.best_score_)

# Build the reranking dataset: the relevant documents are positives and the top
# BM25 hits that are not relevant are kept as hard negatives. GridSearchCV refits
# the best configuration on the full data (refit=True by default), so
# best_estimator_ is ready to predict here.
reranking_dataset = datasets.Dataset.from_dict(
    {
        "query": queries["queries"],
        "positive": queries["doc_id"],
        "negative": [
            [doc_id for doc_id in gscv.best_estimator_.predict(q, N_NEGATIVE_DOCS) if doc_id not in relevant_ids]
            for q, relevant_ids in zip(queries["queries"], queries["doc_id"])
        ],
    }
)
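
# Spot-check one row of the new dataset (illustrative: the actual values depend
# on the dataset and the tuned BM25; this only verifies the
# query/positive/negative schema before pushing).
print("Sample reranking row:", reranking_dataset[0])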

repo_id = "lyon-nlp/mteb-fr-reranking-syntec-s2p"
try:
    create_repo(repo_id, repo_type="dataset")
except HfHubHTTPError:
    print("HF repo already exists")

reranking_dataset.push_to_hub(repo_id, config_name="queries", split="test")
documents.push_to_hub(repo_id, config_name="documents", split="test")