Ubuntu committed on
Commit: 38395f2
Parent: ff0f97e

add load script

Files changed (3)
  1. .gitattributes +1 -0
  2. README.md +73 -12
  3. wudao.py +145 -0
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ *.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,14 +1,4 @@
  ---
- configs:
- - config_name: zhs
-   default: true
-   data_files:
-   - split: train
-     path: data_zhs/*.json
- - config_name: zht
-   data_files:
-   - split: train
-     path: data_zht/*.json
  language:
  - zh
  ---
@@ -17,11 +7,82 @@ language:
  Not the original creator; this dataset is only mirrored here.

  ## Usage
+ ```bash
+ pip install patool wget opencc
+ ```
  ```python
  # Simplified Chinese
- load_dataset("/home/drc/dllm-training/wodu_dataset","zhs",split="train",streaming=True)
+ load_dataset("p208p2002/wudao",streaming=True,split="zhs")
  # Traditional Chinese (converted with OpenCC)
- load_dataset("/home/drc/dllm-training/wodu_dataset","zht",split="train",streaming=True)
+ load_dataset("p208p2002/wudao",streaming=True,split="zht")
+ ```
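A minimal sketch of consuming the streaming split (the field names follow the features declared in wudao.py; depending on the `datasets` version, `trust_remote_code=True` may additionally be required for script-based datasets):

```python
from itertools import islice
from datasets import load_dataset

# Peek at a few streamed records from the simplified-Chinese split.
ds = load_dataset("p208p2002/wudao", streaming=True, split="zhs")
for sample in islice(ds, 3):
    print(sample["dataType"], sample["title"])
```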
+ 
+ ## Clearing cached data
+ If the download fails, delete the cached data manually:
+ ```bash
+ rm -rf ~/.cache/wudao_dataset
+ ```
+ 
+ ## Data category statistics
+ ```json
+ {
+     "_total": 59100001,
+     "豆瓣话题": 209027,
+     "科技": 1278068,
+     "经济": 1096215,
+     "汽车": 1368193,
+     "娱乐": 1581947,
+     "农业": 1129758,
+     "军事": 420949,
+     "社会": 446228,
+     "游戏": 754703,
+     "教育": 1133453,
+     "体育": 660858,
+     "旅行": 821573,
+     "国际": 630386,
+     "房产": 387786,
+     "文化": 710648,
+     "法律": 36585,
+     "股票": 1205,
+     "博客": 15467790,
+     "日报": 16971,
+     "评论": 13867,
+     "孕育常识": 48291,
+     "健康": 15291,
+     "财经": 54656,
+     "医学问答": 314771,
+     "资讯": 1066180,
+     "科普文章": 60581,
+     "百科": 27273280,
+     "酒业": 287,
+     "经验": 609195,
+     "新闻": 846810,
+     "小红书攻略": 185379,
+     "生活": 23,
+     "网页文本": 115830,
+     "观点": 1268,
+     "海外": 4,
+     "户外": 5,
+     "美容": 7,
+     "理论": 247,
+     "天气": 540,
+     "文旅": 2999,
+     "信托": 62,
+     "保险": 70,
+     "水利资讯": 17,
+     "时尚": 1123,
+     "亲子": 39,
+     "百家号文章": 335591,
+     "黄金": 216,
+     "党建": 1,
+     "期货": 330,
+     "快讯": 41,
+     "国内": 15,
+     "国学": 614,
+     "公益": 15,
+     "能源": 7,
+     "创新": 6
+ }
  ```
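Since every record carries a `dataType` tag, the categories above can be used to pull a single domain out of the stream. A minimal sketch, assuming `IterableDataset.filter` is available in your `datasets` version (the tag values in the `zhs` split match the simplified-Chinese keys listed above):

```python
from datasets import load_dataset

# Keep only records tagged as medical Q&A ("医学问答") while streaming.
ds = load_dataset("p208p2002/wudao", streaming=True, split="zhs")
medical = ds.filter(lambda x: x["dataType"] == "医学问答")
print(next(iter(medical))["title"])
```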
 
  ## Original link
wudao.py ADDED
@@ -0,0 +1,145 @@
+ import datasets
+ import json
+ from glob import glob
+ import os
+ logger = datasets.logging.get_logger(__name__)
+ 
+ _CITATION = """\
+ @misc{c6a3fe684227415a9db8e21bac4a15ab,
+     author = {Zhao Xue and Hanyu Zhao and Sha Yuan and Yequan Wang},
+     title = {{WuDaoCorpora Text}},
+     year = 2022,
+     month = dec,
+     publisher = {Science Data Bank},
+     version = {V1},
+     doi = {10.57760/sciencedb.o00126.00004},
+     url = {https://doi.org/10.57760/sciencedb.o00126.00004}
+ }
+ """
+ 
+ _DESCRIPTION = """\
+ WuDaoCorpora Text is a large Chinese pretraining corpus constructed by the Beijing Academy of Artificial Intelligence (BAAI). The total data volume of the dataset exceeds 5 TB, including 200 GB of open data.
+ 
+ 
+ Compared with other pretraining corpora, WuDaoCorpora Text has the following advantages.
+ 
+ 1) During data collection, web pages are graded by quality according to the proportion of words on the page and the integrity of the DOM tree, and only high-quality pages are collected to ensure corpus quality.
+ 
+ 2) Through data cooperation with other institutions and web crawling, the dataset covers a wide range of Chinese text types, including news, comments, encyclopedias, forums, blogs, academic papers, etc.
+ 
+ 3) More than 20 cleaning rules are applied to obtain the final corpus from the 100 TB of original web page data. During cleaning, special attention is paid to removing private information to avoid the risk of privacy disclosure.
+ 
+ 4) The dataset contains 50+ data tags, such as education and law, which makes it convenient for users to extract domain-specific data for model training in that field.
+ 
+ 
+ Please comply with the following agreement if you use this dataset.
+ 
+ https://data.baai.ac.cn/resources/agreement/BAAIDataAgreement.pdf
+ """
+ 
+ _URL = "https://china.scidb.cn/download?fileId=63a30383fed6a8a9e8454302&traceId=a505523f-775b-4261-ad0c-406126824b4d"
+ 
+ 
+ class WuDaoConfig(datasets.BuilderConfig):
+     """BuilderConfig for WuDao."""
+ 
+     def __init__(self, **kwargs):
+         """BuilderConfig for WuDao.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(WuDaoConfig, self).__init__(**kwargs)
+ 
+ 
+ class WuDao(datasets.GeneratorBasedBuilder):
+ 
+     BUILDER_CONFIGS = [
+         WuDaoConfig(
+             name="default",
+             version=datasets.Version("1.0.0", ""),
+             description="Plain text",
+         ),
+     ]
+ 
+ 
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "uniqueKey": datasets.Value("string"),
+                     "titleUkey": datasets.Value("string"),
+                     "dataType": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "content": datasets.Value("string")
+                 }
+             ),
+             homepage="https://www.scidb.cn/en/detail?dataSetId=c6a3fe684227415a9db8e21bac4a15ab",
+             citation=_CITATION,
+         )
+ 
+     def _split_generators(self, dl_manager):
+ 
+         # the hf `datasets` download manager does not support the .rar format,
+         # so implement a simple download-and-extract pipeline by hand
+         import wget
+         from pathlib import Path
+         import patoolib
+ 
+         # os.name is "posix" on both Linux and macOS
+         assert os.name == "posix", "unsupported OS (Linux and macOS only)"
+ 
+         _cache_dir = os.path.join(Path.home(), ".cache/wudao_dataset")
+         print("dataset will be saved at:", _cache_dir)
+ 
+         _file_path = os.path.join(_cache_dir, "data.rar")
+         os.makedirs(_cache_dir, exist_ok=True)
+ 
+         # download and extract the archive if it is not present yet
+         if not os.path.isfile(_file_path):
+             wget.download(_URL, _file_path)
+             patoolib.extract_archive(_file_path, outdir=_cache_dir)
+ 
+         # rename the extracted shards to data_zhs/shard_<n>.json
+         files = glob(os.path.join(_cache_dir, "WuDaoCorpus2.0_base_200G", "*.json"))
+         os.makedirs(os.path.join(_cache_dir, "data_zhs"), exist_ok=True)
+ 
+         for f_idx, file_name in enumerate(files):
+             os.rename(file_name, os.path.join(_cache_dir, f"data_zhs/shard_{f_idx}.json"))
+ 
+         # remove the now-empty extraction directory and the downloaded archive
+         try:
+             os.removedirs(os.path.join(_cache_dir, "WuDaoCorpus2.0_base_200G"))
+             os.remove(_file_path)
+         except OSError:
+             pass
+ 
+         return [
+             datasets.SplitGenerator(name="zhs", gen_kwargs={"data_dir": _cache_dir, "lng": "zhs"}),
+             datasets.SplitGenerator(name="zht", gen_kwargs={"data_dir": _cache_dir, "lng": "zht"})
+         ]
+ 
+     def _generate_examples(self, data_dir, lng="zhs"):
+         """This function returns the examples in the raw (text) form."""
+         if lng == "zht":
+             # opencc is only needed for the traditional-Chinese split
+             import opencc
+             s2t = opencc.OpenCC("s2t.json")
+         filepaths = glob(os.path.join(data_dir, "data_zhs", "*.json"))
+ 
+         for filepath in filepaths:
+             with open(filepath, encoding="utf-8") as f:
+                 data = json.load(f)
+                 for x in data:
+                     if lng == "zhs":
+                         yield x["id"], x
+                     elif lng == "zht":
+                         # convert the simplified-Chinese fields to traditional on the fly
+                         yield x["id"], {
+                             "id": x["id"],
+                             "uniqueKey": x["uniqueKey"],
+                             "titleUkey": x["titleUkey"],
+                             "dataType": s2t.convert(x["dataType"]),
+                             "title": s2t.convert(x["title"]),
+                             "content": s2t.convert(x["content"])
+                         }
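For local testing, a minimal sketch of exercising this script directly from a checkout; whether `load_dataset` accepts a script path (and whether `trust_remote_code=True` is needed) depends on the installed `datasets` version, and either mode still downloads and extracts the full archive into `~/.cache/wudao_dataset`:

```python
from datasets import load_dataset

# Run the loading script from the current directory and stream the raw split.
ds = load_dataset("./wudao.py", split="zhs", streaming=True)
print(next(iter(ds))["content"][:100])
```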