TobiTob committed on
Commit f2aabf8
1 Parent(s): a2625b9

Delete CityLearn.py

Files changed (1)
  1. CityLearn.py +0 -94
CityLearn.py DELETED
@@ -1,94 +0,0 @@
- import pickle
-
- import datasets
- import numpy as np
-
- _DESCRIPTION = """\
- This dataset is used to train a Decision Transformer for the CityLearn 2022 environment https://www.aicrowd.com/challenges/neurips-2022-citylearn-challenge
- """
-
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see the `_split_generators` method below).
- _BASE_URL = "https://huggingface.co/datasets/TobiTob/CityLearn/resolve/main"
- _URLS = {
-     "sequences": f"{_BASE_URL}/sequences.pkl",
- }
-
-
- class DecisionTransformerCityLearnDataset(datasets.GeneratorBasedBuilder):
-     """The dataset comprises tuples of (Observations, Actions, Rewards, Dones) sampled
-     by an expert policy for various continuous control tasks."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several subsets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the dataset with configurable options,
-     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig.
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configuration in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="sequences",
-             version=VERSION,
-             description="Test data sampled from an expert policy in the CityLearn environment",
-         ),
-     ]
-
-     def _info(self):
-         # These are the features of your dataset, like images or labels.
-         features = datasets.Features(
-             {
-                 "observations": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
-                 "actions": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
-                 "rewards": datasets.Sequence(datasets.Value("float32")),
-                 "dones": datasets.Sequence(datasets.Value("bool")),
-             }
-         )
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types.
-             features=features,
-             # If there's a common (input, target) tuple from the features, uncomment the
-             # supervised_keys line below and specify them. They'll be used if
-             # as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-         )
-
-     def _split_generators(self, dl_manager):
-         urls = _URLS[self.config.name]
-         data_dir = dl_manager.download_and_extract(urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples.
-                 gen_kwargs={
-                     "filepath": data_dir,
-                     "split": "train",
-                 },
-             )
-         ]
-
-     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
-     def _generate_examples(self, filepath, split):
-         with open(filepath, "rb") as f:
-             trajectories = pickle.load(f)
-
-         for idx, traj in enumerate(trajectories):
-             yield idx, {
-                 "observations": traj["observations"],
-                 "actions": traj["actions"],
-                 "rewards": np.expand_dims(traj["rewards"], axis=1),
-                 "dones": np.expand_dims(traj.get("dones", traj.get("terminals")), axis=1),
-             }
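
For context, a loading script like the one deleted here is consumed through the standard `datasets` API. The following is a minimal sketch of the call this builder served before this commit removed it; the `trust_remote_code=True` flag is an assumption, since recent `datasets` releases require it to execute a repository's custom loading script.

    import datasets

    # Load the "sequences" config defined in BUILDER_CONFIGS above.
    # trust_remote_code=True is assumed here; newer `datasets` versions
    # require it to run a repo's custom loading script such as CityLearn.py.
    data = datasets.load_dataset("TobiTob/CityLearn", "sequences", trust_remote_code=True)

    # The builder defines a single "train" split; each example is one trajectory.
    train = data["train"]
    print(train.features)                 # observations, actions, rewards, dones
    print(len(train[0]["observations"]))  # timesteps in the first trajectory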