Update edgar_corpus.py
Browse files- edgar_corpus.py +2 -5
edgar_corpus.py
CHANGED
@@ -52,13 +52,13 @@ _FEATURES = [
|
|
52 |
"section_15",
|
53 |
]
|
54 |
|
55 |
-
_URLS = {"full":"", **{"year_"+str(year):str(year)+"/" for year in range(1993,
|
56 |
|
57 |
class EdgarCorpus(datasets.GeneratorBasedBuilder):
|
58 |
|
59 |
BUILDER_CONFIGS = [
|
60 |
*[datasets.BuilderConfig(name="full", version=datasets.Version(_VERSION), description="The full dataset from 1993-2020")],
|
61 |
-
*[datasets.BuilderConfig(name="year_"+str(year), version=datasets.Version(_VERSION), description="The dataset containing only the year "+str(year)) for year in range(1993,
|
62 |
]
|
63 |
|
64 |
DEFAULT_CONFIG_NAME = "full"
|
@@ -83,7 +83,6 @@ class EdgarCorpus(datasets.GeneratorBasedBuilder):
|
|
83 |
# Add test/train/validate files to url bases
|
84 |
urls = {k+'_'+item: v+item+'.jsonl' for item in ['train', 'test', 'validate'] for k, v in urls.items()}
|
85 |
|
86 |
-
print(urls)
|
87 |
# We have the unzipped files by directory.
|
88 |
data_dir = dl_manager.download_and_extract(urls)
|
89 |
|
@@ -93,8 +92,6 @@ class EdgarCorpus(datasets.GeneratorBasedBuilder):
|
|
93 |
"train": {k: v for k, v in data_dir.items() if 'train' in k},
|
94 |
"validate": {k: v for k, v in data_dir.items() if 'validate' in k},
|
95 |
}
|
96 |
-
|
97 |
-
print(filepaths)
|
98 |
|
99 |
return [
|
100 |
datasets.SplitGenerator(
|
|
|
52 |
"section_15",
|
53 |
]
|
54 |
|
55 |
+
_URLS = {"full":"", **{"year_"+str(year):str(year)+"/" for year in range(1993,2021,1)}}
|
56 |
|
57 |
class EdgarCorpus(datasets.GeneratorBasedBuilder):
|
58 |
|
59 |
BUILDER_CONFIGS = [
|
60 |
*[datasets.BuilderConfig(name="full", version=datasets.Version(_VERSION), description="The full dataset from 1993-2020")],
|
61 |
+
*[datasets.BuilderConfig(name="year_"+str(year), version=datasets.Version(_VERSION), description="The dataset containing only the year "+str(year)) for year in range(1993, 2021, 1)]
|
62 |
]
|
63 |
|
64 |
DEFAULT_CONFIG_NAME = "full"
|
|
|
83 |
# Add test/train/validate files to url bases
|
84 |
urls = {k+'_'+item: v+item+'.jsonl' for item in ['train', 'test', 'validate'] for k, v in urls.items()}
|
85 |
|
|
|
86 |
# We have the unzipped files by directory.
|
87 |
data_dir = dl_manager.download_and_extract(urls)
|
88 |
|
|
|
92 |
"train": {k: v for k, v in data_dir.items() if 'train' in k},
|
93 |
"validate": {k: v for k, v in data_dir.items() if 'validate' in k},
|
94 |
}
|
|
|
|
|
95 |
|
96 |
return [
|
97 |
datasets.SplitGenerator(
|