Update README.md
README.md CHANGED
@@ -23,3 +23,37 @@ configs:
  - split: train
    path: data/train-*
---

This is a subset of the [wikimedia/wikipedia](https://huggingface.co/datasets/wikimedia/wikipedia) dataset.
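
It can be loaded like any other Hub dataset; a minimal example (the split name and columns follow from the config above and the creation code below):

```python
from datasets import load_dataset

# load the embedded subset from the Hub ("train" is the only split)
data = load_dataset("not-lain/wikipedia-small-3000-embedded", split="train")
print(data)  # columns: id, url, title, text, embedding
```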

Code for creating this dataset:

```python
from datasets import load_dataset, Dataset
from sentence_transformers import SentenceTransformer
from tqdm import tqdm

model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")

# load the dataset in streaming mode (no full download, and it's fast)
dataset = load_dataset(
    "wikimedia/wikipedia", "20231101.en", split="train", streaming=True
)

# select 3000 samples
data = Dataset.from_dict({})
for i, entry in enumerate(tqdm(dataset)):
    # each entry has the following columns:
    # ['id', 'url', 'title', 'text']
    data = data.add_item(entry)
    if i == 3000:
        break

# free memory
del dataset

# embed the article text
def embed(batch):
    return {"embedding": model.encode(batch["text"])}

data = data.map(embed)

# push to hub
data.push_to_hub("not-lain/wikipedia-small-3000-embedded")
```
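
Since each row already carries an `embedding` vector, the subset can be queried through the FAISS integration in `datasets`. A rough sketch, assuming `faiss-cpu` is installed and using an arbitrary example query:

```python
from datasets import load_dataset
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
data = load_dataset("not-lain/wikipedia-small-3000-embedded", split="train")

# index the precomputed vectors (requires `pip install faiss-cpu`)
data.add_faiss_index(column="embedding")

# encode a query with the same model and fetch the closest articles
query = model.encode("how do solar panels work?")
scores, results = data.get_nearest_examples("embedding", query, k=5)
print(results["title"])
```

Encoding queries with the same model that produced the stored embeddings keeps the query and document vectors in the same embedding space.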