asahi417 committed
Commit 30dd962
1 Parent(s): 93e4c03

model update

data/processed/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52ab26ed8774d096dd3a45ee4ad75bbdcf0b44ba57a8af59b339eaa6c867f925
+size 69866106

data/processed/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:481984d4253beff83e5cbd9e73030ab3579cf38bcd65ffadf3d4a84c56024a1e
+size 561598053

data/processed/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d9c505701981f912dbc64c1100d7fc2299a603c90d8dd739e7bc065ce36bfec
+size 70239570
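
The three ADDED files are Git LFS pointer stubs, not the JSONL data itself: each pointer records the spec version, the SHA-256 object ID, and the byte size of the real file, which Git LFS substitutes on checkout. A minimal sketch of reading those three fields back from an unsmudged checkout (parse_lfs_pointer is a hypothetical helper, not part of this repo):

from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    # A pointer file is three "key value" lines -- version, oid, size --
    # exactly the lines this commit adds for each split.
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

ptr = parse_lfs_pointer("data/processed/test.jsonl")
assert ptr["oid"].startswith("sha256:")
print(ptr["size"])  # 69866106 for test.jsonl in this commit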
process.py CHANGED
@@ -17,7 +17,7 @@ from datasets import load_dataset
 DATASET_NAME = "sberquad"
 DATASET_TYPES = None
 HIGHLIGHT_TOKEN = '<hl>'
-GENERATE_TEST_SPLIT = False
+GENERATE_TEST_SPLIT = True
 SPLITTER = spacy.load('ru_core_news_sm')
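The only change to process.py is flipping GENERATE_TEST_SPLIT from False to True, which plausibly tells the script to generate its own test split when rebuilding the processed files above. The script body is not shown in this diff, so the following is only a hedged sketch of the kind of logic such a flag typically gates; the train_test_split ratio and seed are illustrative assumptions, not values taken from process.py:

from datasets import load_dataset

DATASET_NAME = "sberquad"
GENERATE_TEST_SPLIT = True  # the value this commit sets

dataset = load_dataset(DATASET_NAME)
if GENERATE_TEST_SPLIT:
    # Carve a held-out test set out of train; 5% and seed=42 are
    # assumptions for illustration only.
    split = dataset["train"].train_test_split(test_size=0.05, seed=42)
    train, test = split["train"], split["test"]
else:
    train, test = dataset["train"], dataset.get("test")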
qg_ruquad.py CHANGED
@@ -7,9 +7,12 @@ logger = datasets.logging.get_logger(__name__)
 _DESCRIPTION = """[SberSQuAD](https://huggingface.co/datasets/sberquad) dataset for question generation (QG) task."""
 _URL = 'https://huggingface.co/datasets/asahi417/qg_ruquad/raw/main/data/processed'
 _URLS = {
-    str(datasets.Split.TEST): [f'{_URL}/test{i:02d}.jsonl' for i in range(40)],
-    str(datasets.Split.TRAIN): [f'{_URL}/train{i:02d}.jsonl' for i in range(76)],
-    str(datasets.Split.VALIDATION): [f'{_URL}/validation{i:02d}.jsonl' for i in range(9)],
+    #str(datasets.Split.TEST): [f'{_URL}/test{i:02d}.jsonl' for i in range(40)],
+    #str(datasets.Split.TRAIN): [f'{_URL}/train{i:02d}.jsonl' for i in range(76)],
+    #str(datasets.Split.VALIDATION): [f'{_URL}/validation{i:02d}.jsonl' for i in range(9)],
+    str(datasets.Split.TEST): [f'{_URL}/test.jsonl'],
+    str(datasets.Split.TRAIN): [f'{_URL}/train.jsonl'],
+    str(datasets.Split.VALIDATION): [f'{_URL}/validation.jsonl']
 }
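
With this change the loader script resolves each split to a single consolidated JSONL file instead of the numbered shards (test00.jsonl through test39.jsonl, and so on), matching the three files added above. Usage is unchanged; a quick sketch of loading through the standard datasets entry point (the printed fields depend on the data, so the comment is an assumption):

from datasets import load_dataset

# The loader script above maps str(datasets.Split.*) keys to these
# single-file URLs, so each split is fetched as one download.
ds = load_dataset("asahi417/qg_ruquad")
print(ds["train"][0])  # one QG example; contexts use the <hl> highlight token from process.py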