williamgilpin committed
Commit
972b142
1 Parent(s): 628060d

Upload a dataloader file

Files changed (1)
  1. dysts.py +228 -0
dysts.py ADDED
@@ -0,0 +1,228 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Chaotic Dynamical Systems (Dysts) dataset."""
+ from dataclasses import dataclass
+
+ import pandas as pd
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{gilpin2023model,
+   title={Model scale versus domain knowledge in statistical forecasting of chaotic systems},
+   author={Gilpin, William},
+   journal={Physical Review Research},
+   volume={5},
+   number={4},
+   pages={043252},
+   year={2023},
+   publisher={APS}
+ }
+ """
+
+ _DESCRIPTION = """\
+ A collection of long multivariate time series, each generated by a chaotic
+ dynamical system. The subdirectories coarse, medium, and fine each contain 135 .csv
+ files, each holding a single multivariate time series of length 10,000. The
+ number of channels varies with the specific dynamical system.
+ """
+
+ _HOMEPAGE = "https://github.com/williamgilpin/dysts"
+
+ _LICENSE = "The Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/"
+
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ # _URLS = {
+ #     "h1": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/Dysts-small/Dystsh1.csv",
+ #     "h2": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/Dysts-small/Dystsh2.csv",
+ #     "m1": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/Dysts-small/Dystsm1.csv",
+ #     "m2": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/Dysts-small/Dystsm2.csv",
+ # }
+
+ @dataclass
+ class DystsBuilderConfig(datasets.BuilderConfig):
+     """Dysts builder config."""
+
+     prediction_length: int = 100
+     multivariate: bool = True
+
+
+ class Dysts(datasets.GeneratorBasedBuilder):
+     """Chaotic Dynamical Systems (Dysts) dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     # You will be able to load any one of the configurations in the following list with
+     # data = datasets.load_dataset('Dysts', 'coarse')
+     # data = datasets.load_dataset('Dysts', 'fine')
+     BUILDER_CONFIGS = [
+         DystsBuilderConfig(
+             name="coarse",
+             version=VERSION,
+             description="Time series sampled at a coarse resolution of 10 points per period.",
+         ),
+         DystsBuilderConfig(
+             name="medium",
+             version=VERSION,
+             description="Time series sampled at a medium resolution of 30 points per period.",
+         ),
+         DystsBuilderConfig(
+             name="fine",
+             version=VERSION,
+             description="Time series sampled at a fine resolution of 100 points per period.",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "medium"
+
+     def _info(self):
+         if self.config.multivariate:
+             features = datasets.Features(
+                 {
+                     "start": datasets.Value("timestamp[s]"),
+                     "target": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                     "feat_static_cat": datasets.Sequence(datasets.Value("uint64")),
+                     "item_id": datasets.Value("string"),
+                 }
+             )
+         else:
+             features = datasets.Features(
+                 {
+                     "start": datasets.Value("timestamp[s]"),
+                     "target": datasets.Sequence(datasets.Value("float32")),
+                     "feat_static_cat": datasets.Sequence(datasets.Value("uint64")),
+                     "feat_dynamic_real": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                     "item_id": datasets.Value("string"),
+                 }
+             )
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types;
+             # they are defined above because they differ between the two configurations.
+             features=features,
+             # If there's a common (input, target) tuple from the features, uncomment the
+             # supervised_keys line below and specify them. They'll be used if
+             # as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # urls = _URLS[self.config.name]
+         # filepath = dl_manager.download_and_extract(urls)
+         # Placeholder: the path to the data files for the selected configuration
+         # has not been wired up yet.
+         filepath = ""
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": filepath,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": filepath,
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": filepath,
+                     "split": "val",
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         data = pd.read_csv(filepath, parse_dates=True, index_col=0)
+         # data = np.loadtxt(f"./{granularity}/{equation_name}_{granularity}.csv", delimiter=",", skiprows=1)
+         start_date = data.index.min()
+
+         # if self.config.name in ["m1", "m2"]:
+         #     factor = 4  # 15-min frequency
+         # else:
+         #     factor = 1  # hourly frequency
+         # train_end_index = 12 * 30 * 24 * factor  # 1 year
+         # the first 7,000 of the 10,000 time steps are reserved for training
+         train_end_index = 7000
+
+         # validation windows cover steps 7,000-8,500; test windows cover 7,000-10,000
+         if split == "val":
+             end_index = train_end_index + 1500
+         else:
+             end_index = train_end_index + 3000
+
+         if self.config.multivariate:
+             if split in ["test", "val"]:
+                 # rolling windows of prediction_length for val and test
+                 for i, index in enumerate(
+                     range(
+                         train_end_index,
+                         end_index,
+                         self.config.prediction_length,
+                     )
+                 ):
+                     yield i, {
+                         "start": start_date,
+                         "target": data[: index + self.config.prediction_length].values.astype("float32").T,
+                         "feat_static_cat": [0],
+                         "item_id": "0",
+                     }
+             else:
+                 yield 0, {
+                     "start": start_date,
+                     "target": data[:train_end_index].values.astype("float32").T,
+                     "feat_static_cat": [0],
+                     "item_id": "0",
+                 }
+         else:
+             if split in ["test", "val"]:
+                 # rolling windows of prediction_length for val and test
+                 for i, index in enumerate(
+                     range(
+                         train_end_index,
+                         end_index,
+                         self.config.prediction_length,
+                     )
+                 ):
+                     # no channel is designated as the target, so as a placeholder the
+                     # last channel is used as the univariate target and all channels
+                     # are exposed as dynamic covariates
+                     target = data.values[: index + self.config.prediction_length, -1].astype("float32")
+                     feat_dynamic_real = data[: index + self.config.prediction_length].values.T.astype("float32")
+                     yield i, {
+                         "start": start_date,
+                         "target": target,
+                         "feat_dynamic_real": feat_dynamic_real,
+                         "feat_static_cat": [0],
+                         "item_id": "0",
+                     }
+             else:
+                 target = data.values[:train_end_index, -1].astype("float32")
+                 feat_dynamic_real = data[:train_end_index].values.T.astype("float32")
+                 yield 0, {
+                     "start": start_date,
+                     "target": target,
+                     "feat_dynamic_real": feat_dynamic_real,
+                     "feat_static_cat": [0],
+                     "item_id": "0",
+                 }
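
For context, here is a minimal sketch of how the loader could be exercised once `_split_generators` points at real data files. The local script path and the `trust_remote_code` flag are assumptions about how the script would be consumed, not part of this commit; the printed lengths follow from the split logic above.

# Hypothetical usage, assuming the script is saved locally as dysts.py and
# its CSV files are reachable.
from datasets import load_dataset

ds = load_dataset("./dysts.py", "medium", trust_remote_code=True)

# Train yields one example per series: a channels-first float32 array
# covering the first 7,000 time steps.
example = ds["train"][0]
print(example["item_id"], len(example["target"][0]))  # "0" 7000

# Validation and test are rolling windows, each extending the history by
# prediction_length (100 by default): lengths 7,100, 7,200, ... up to
# 8,500 for validation and 10,000 for test.
for window in ds["test"]:
    print(len(window["target"][0]))

This mirrors the rolling-window evaluation convention of the ETT-style loaders that the commented-out template code references.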