shuttie
committed on
Commit
•
250cec6
0
Parent(s):
initial commit
Browse files- .gitattributes +2 -0
- .gitignore +2 -0
- README.md +88 -0
- convert.py +120 -0
- data/train/train-00.jsonl.gz +3 -0
- data/train/train-01.jsonl.gz +3 -0
- data/train/train-02.jsonl.gz +3 -0
- data/train/train-03.jsonl.gz +3 -0
- data/train/train-04.jsonl.gz +3 -0
- data/train/train-05.jsonl.gz +3 -0
- requirements.txt +2 -0
.gitattributes
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
.venv
|
2 |
+
.mypy_cache
|
README.md
ADDED
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
language:
|
3 |
+
- en
|
4 |
+
license: apache-2.0
|
5 |
+
tags:
|
6 |
+
- text
|
7 |
+
pretty_name: MS MARCO hard negatives
|
8 |
+
size_categories:
|
9 |
+
- "100K<n<1M"
|
10 |
+
source_datasets:
|
11 |
+
- MSMARCO
|
12 |
+
task_categories:
|
13 |
+
- sentence-similarity
|
14 |
+
dataset_info:
|
15 |
+
config_name: default
|
16 |
+
features:
|
17 |
+
- name: query
|
18 |
+
dtype: string
|
19 |
+
- name: pos
|
20 |
+
list:
|
21 |
+
- name: doc
|
22 |
+
dtype: string
|
23 |
+
- name: score
|
24 |
+
dtype: float
|
25 |
+
- name: neg
|
26 |
+
list:
|
27 |
+
- name: doc
|
28 |
+
dtype: string
|
29 |
+
- name: score
|
30 |
+
dtype: float
|
31 |
+
splits:
|
32 |
+
- name: train
|
33 |
+
num_bytes: 89609915
|
34 |
+
num_examples: 502939
|
35 |
+
train-eval-index:
|
36 |
+
- config: default
|
37 |
+
task: sentence-similarity
|
38 |
+
splits:
|
39 |
+
train_split: train
|
40 |
+
eval_split: test
|
41 |
+
configs:
|
42 |
+
- config_name: default
|
43 |
+
data_files:
|
44 |
+
- split: train
|
45 |
+
path: "data/train/*"
|
46 |
+
---
|
47 |
+
|
48 |
+
# MS MARCO hard negatives dataset
|
49 |
+
|
50 |
+
A dataset in a [nixietune](https://github.com/nixiesearch/nixietune)-compatible format:
|
51 |
+
|
52 |
+
```json
|
53 |
+
{
|
54 |
+
"query": ")what was the immediate impact of the success of the manhattan project?",
|
55 |
+
"pos": [
|
56 |
+
{
|
57 |
+
"doc": "The presence of communication amid scientific minds was equally important to the success of the Manhattan Project as scientific intellect was. The only cloud hanging over the impressive achievement of the atomic researchers and engineers is what their success truly meant; hundreds of thousands of innocent lives obliterated.",
|
58 |
+
"score": 1
|
59 |
+
}
|
60 |
+
],
|
61 |
+
"neg": [
|
62 |
+
{
|
63 |
+
"doc": "Abstract. The pivotal engineering and scientific success of the Twentieth century was the Manhattan Project. The Manhattan Project assimilated concepts and leaders from all scientific fields and engineering disciplines to construct the first two atomic bombs.",
|
64 |
+
"score": 0.0
|
65 |
+
},
|
66 |
+
{
|
67 |
+
"doc": "The pivotal engineering and scientific success of the Twentieth century was the Manhattan Project. The Manhattan Project assimilated concepts and leaders from all scientific fields and engineering disciplines to construct the first two atomic bombs.",
|
68 |
+
"score": 0.0
|
69 |
+
}
|
70 |
+
]
|
71 |
+
}
|
72 |
+
```
|
73 |
+
|
74 |
+
This is the original [BeIR-msmarco](https://huggingface.co/datasets/BeIR/msmarco) joined with the [msmarco-hard-negatives](https://huggingface.co/datasets/sentence-transformers/msmarco-hard-negatives) dataset with the following splits:
|
75 |
+
* train: 502939 queries, only positives.
|
76 |
+
|
77 |
+
## Usage
|
78 |
+
|
79 |
+
```python
|
80 |
+
from datasets import load_dataset
|
81 |
+
|
82 |
+
data = load_dataset('nixiesearch/ms_marco_hard_negatives')
|
83 |
+
print(data["train"].features)
|
84 |
+
```
|
85 |
+
|
86 |
+
## License
|
87 |
+
|
88 |
+
Apache 2.0
|
convert.py
ADDED
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
import logging
import os
from dataclasses import dataclass, field
from itertools import islice
from typing import Dict, List

import numpy as np
from datasets import load_dataset, Features, Value, Sequence
from tqdm import tqdm
from transformers import HfArgumentParser
|
10 |
+
|
11 |
+
logger = logging.getLogger()
|
12 |
+
logger.setLevel(logging.INFO)
|
13 |
+
console_handler = logging.StreamHandler()
|
14 |
+
console_handler.setFormatter(
|
15 |
+
logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
|
16 |
+
)
|
17 |
+
logger.handlers = [console_handler]
|
18 |
+
|
19 |
+
|
20 |
+
@dataclass
class ConversionAgruments:
    """Command-line arguments for the conversion script (parsed by HfArgumentParser).

    NOTE(review): the class name misspells "Arguments"; kept as-is because
    main() references it by this name.
    """

    # Path to the msmarco-hard-negatives.jsonl file with mined negatives.
    hardneg: str = field(metadata={"help": "Path to msmarco-hard-negatives.jsonl file"})
    # Directory where the converted <split>.jsonl files are written.
    out: str = field(metadata={"help": "Output path"})
|
24 |
+
|
25 |
+
|
26 |
+
@dataclass
class QRel:
    """A single relevance judgement for one query from the qrels dataset."""

    # Corpus document id the judgement refers to.
    doc: int
    # Relevance score from the qrels split; process_raw treats score > 0 as positive.
    score: int
|
30 |
+
|
31 |
+
|
32 |
+
def load_msmarco(path: str, split) -> Dict[int, str]:
    """Load one subset of a BeIR-style dataset into an id -> text mapping.

    *split* is passed both as the dataset config name and the split name,
    which is how this loader selects e.g. the "queries" or "corpus" subset.
    """
    texts: Dict[int, str] = {}
    rows = load_dataset(path, split, split=split)
    for record in tqdm(rows, desc=f"loading {path} split={split}"):
        texts[int(record["_id"])] = record["text"]
    return texts
|
39 |
+
|
40 |
+
|
41 |
+
def load_qrel(path: str, split: str) -> Dict[int, List[QRel]]:
    """Load a qrels split, grouping relevance judgements by query id."""
    dataset = load_dataset(path, split=split)
    print(dataset.features)
    grouped: Dict[int, List[QRel]] = {}
    for row in tqdm(dataset, desc=f"loading {path} split={split}"):
        qid = int(row["query-id"])
        judgement = QRel(int(row["corpus-id"]), int(row["score"]))
        grouped.setdefault(qid, []).append(judgement)
    return grouped
|
53 |
+
|
54 |
+
|
55 |
+
def process_raw(
    qrels: Dict[int, List[QRel]],
    queries: Dict[int, str],
    corpus: Dict[int, str],
    hardneg: Dict[int, List[int]],
) -> List[Dict]:
    """Join qrels, query/corpus texts and hard negatives into training groups.

    Each group is {"query": <text>, "pos": [...], "neg": [...]}; positives are
    judgements with score > 0 whose document is present in *corpus*, negatives
    come from *hardneg* (score fixed to 0.0). Documents missing from *corpus*
    are silently dropped.
    """
    groups: List[Dict] = []
    for qid, judgements in tqdm(qrels.items(), desc="processing split"):
        positives = []
        for judgement in judgements:
            if judgement.doc in corpus and judgement.score > 0:
                positives.append({"doc": corpus[judgement.doc], "score": judgement.score})
        negatives = []
        for doc_id in hardneg.get(qid, []):
            if doc_id in corpus:
                negatives.append({"doc": corpus[doc_id], "score": 0.0})
        groups.append({"query": queries[qid], "pos": positives, "neg": negatives})
    return groups
|
76 |
+
|
77 |
+
|
78 |
+
def load_hardneg(path: str, max_negatives: int = 32, rrf_k: int = 60) -> Dict[int, List[int]]:
    """Load mined hard negatives and fuse the per-method rankings.

    Each JSONL row holds a query id and, under "neg", one ranked document list
    per mining method. The lists are merged with reciprocal-rank fusion
    (score += 1 / (rrf_k + rank)) and the best *max_negatives* document ids
    are kept per query.

    Args:
        path: Path to the msmarco-hard-negatives.jsonl file.
        max_negatives: Number of fused negatives kept per query (was hard-coded 32).
        rrf_k: RRF dampening constant (was hard-coded 60).

    Returns:
        Mapping of query id -> fused, truncated list of document ids.
    """
    result: Dict[int, List[int]] = {}
    with open(path, "r") as jsonfile:
        # total is only a progress-bar hint, matching the known file length.
        for line in tqdm(jsonfile, total=808731, desc="loading hard negatives"):
            row = json.loads(line)
            scores: Dict[int, float] = {}
            for method, docs in row["neg"].items():
                for rank, doc in enumerate(docs):
                    doc_id = int(doc)
                    scores[doc_id] = scores.get(doc_id, 0.0) + 1.0 / (rrf_k + rank)
            ranked = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
            result[int(row["qid"])] = [doc_id for doc_id, _ in ranked[:max_negatives]]
    return result
|
96 |
+
|
97 |
+
|
98 |
+
def main():
    """Convert BeIR MS MARCO plus mined hard negatives into JSONL files.

    Reads CLI options into ConversionAgruments, loads hard negatives, the
    train/dev qrels, and the query/corpus texts, then writes one
    <out>/<split>.jsonl file per split in the nixietune format.
    """
    parser = HfArgumentParser(ConversionAgruments)
    (args,) = parser.parse_args_into_dataclasses()
    print(f"Args: {args}")
    hardneg = load_hardneg(args.hardneg)
    qrels = {
        "train": load_qrel("BeIR/msmarco-qrels", split="train"),
        "dev": load_qrel("BeIR/msmarco-qrels", split="validation"),
    }
    queries = load_msmarco("BeIR/msmarco", split="queries")
    corpus = load_msmarco("BeIR/msmarco", split="corpus")
    print("processing done")
    # Create the output directory up front so the open() below cannot fail
    # with FileNotFoundError on a fresh checkout.
    os.makedirs(args.out, exist_ok=True)
    for split, data in qrels.items():
        dataset = process_raw(data, queries, corpus, hardneg)
        with open(f"{args.out}/{split}.jsonl", "w") as out:
            for item in dataset:
                json.dump(item, out)
                out.write("\n")
    print("done")


if __name__ == "__main__":
    main()
|
data/train/train-00.jsonl.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c6034fd0aff6eda2d9ac8a5c3e304aec0183f127141a4cd49cd22c0c4a936fc2
|
3 |
+
size 371800261
|
data/train/train-01.jsonl.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:183d9bf9fdad674a537d9ba566c11b84281d35f9a6e07e46736bc7869d3fefdb
|
3 |
+
size 373548650
|
data/train/train-02.jsonl.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c394b2ed33aca6ded6d9acf4504cf721b452aa7045b455951b387ac51870d2d1
|
3 |
+
size 373875202
|
data/train/train-03.jsonl.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:da8771ccb47e681181fc8c7aa23fcab74f0566e1f953a3aa454298cbc4c4a1f9
|
3 |
+
size 371204499
|
data/train/train-04.jsonl.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:150cccc543b38f0d8b612ba688b4ffada310c239d95c931644b8a9550a300e5c
|
3 |
+
size 368843006
|
data/train/train-05.jsonl.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:513aa76ff5395eebbd4acaa48ab31b3ae7682a952c0ff1309e15f23b2ed292c9
|
3 |
+
size 10942759
|
requirements.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
datasets
numpy
tqdm
transformers
|