TobiTob committed
Commit
b33ee2f
1 Parent(s): a2ee5a4

Create CityLearn.py

Files changed (1)
  1. CityLearn.py +147 -0
CityLearn.py ADDED
@@ -0,0 +1,147 @@
+ import pickle
+
+ import datasets
+ import numpy as np
+
+ _DESCRIPTION = """\
+ A subset of the D4RL dataset, used for training Decision Transformers
+ """
+
+ _HOMEPAGE = "https://github.com/rail-berkeley/d4rl"
+ _LICENSE = "Apache-2.0"
+
+
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _BASE_URL = "https://huggingface.co/datasets/edbeeching/decision_transformer_gym_replay/resolve/main/data"
+ _URLS = {
+     "halfcheetah-expert-v2": f"{_BASE_URL}/halfcheetah-expert-v2.pkl",
+     "halfcheetah-medium-replay-v2": f"{_BASE_URL}/halfcheetah-medium-replay-v2.pkl",
+     "halfcheetah-medium-v2": f"{_BASE_URL}/halfcheetah-medium-v2.pkl",
+     "hopper-expert-v2": f"{_BASE_URL}/hopper-expert-v2.pkl",
+     "hopper-medium-replay-v2": f"{_BASE_URL}/hopper-medium-replay-v2.pkl",
+     "hopper-medium-v2": f"{_BASE_URL}/hopper-medium-v2.pkl",
+     "walker2d-expert-v2": f"{_BASE_URL}/walker2d-expert-v2.pkl",
+     "walker2d-medium-replay-v2": f"{_BASE_URL}/walker2d-medium-replay-v2.pkl",
+     "walker2d-medium-v2": f"{_BASE_URL}/walker2d-medium-v2.pkl",
+ }
+
+
+ class DecisionTransformerGymDataset(datasets.GeneratorBasedBuilder):
+     """The dataset comprises tuples of (Observations, Actions, Rewards, Dones) sampled
+     by expert and medium policies for various continuous control tasks (halfcheetah, hopper, walker2d)"""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the dataset with configurable options,
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or another of the configurations in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="halfcheetah-expert-v2",
+             version=VERSION,
+             description="Data sampled from an expert policy in the halfcheetah Mujoco environment",
+         ),
+         datasets.BuilderConfig(
+             name="halfcheetah-medium-replay-v2",
+             version=VERSION,
+             description="Data from the replay buffer of a medium policy in the halfcheetah Mujoco environment",
+         ),
+         datasets.BuilderConfig(
+             name="halfcheetah-medium-v2",
+             version=VERSION,
+             description="Data sampled from a medium policy in the halfcheetah Mujoco environment",
+         ),
+         datasets.BuilderConfig(
+             name="hopper-expert-v2",
+             version=VERSION,
+             description="Data sampled from an expert policy in the hopper Mujoco environment",
+         ),
+         datasets.BuilderConfig(
+             name="hopper-medium-replay-v2",
+             version=VERSION,
+             description="Data from the replay buffer of a medium policy in the hopper Mujoco environment",
+         ),
+         datasets.BuilderConfig(
+             name="hopper-medium-v2",
+             version=VERSION,
+             description="Data sampled from a medium policy in the hopper Mujoco environment",
+         ),
+         datasets.BuilderConfig(
+             name="walker2d-expert-v2",
+             version=VERSION,
+             description="Data sampled from an expert policy in the walker2d Mujoco environment",
+         ),
+         datasets.BuilderConfig(
+             name="walker2d-medium-replay-v2",
+             version=VERSION,
+             description="Data from the replay buffer of a medium policy in the walker2d Mujoco environment",
+         ),
+         datasets.BuilderConfig(
+             name="walker2d-medium-v2",
+             version=VERSION,
+             description="Data sampled from a medium policy in the walker2d Mujoco environment",
+         ),
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 # Each example is one trajectory: per-timestep observation and action
+                 # vectors, plus flat sequences of rewards and episode-termination flags.
+                 "observations": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                 "actions": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                 "rewards": datasets.Sequence(datasets.Value("float32")),
+                 "dones": datasets.Sequence(datasets.Value("bool")),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types;
+             # the features are the same for every configuration of this dataset.
+             features=features,
+             # If there's a common (input, target) tuple from the features, uncomment the
+             # supervised_keys line below and specify them. They'll be used if
+             # as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls = _URLS[self.config.name]
+         data_dir = dl_manager.download_and_extract(urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir,
+                     "split": "train",
+                 },
+             )
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         with open(filepath, "rb") as f:
+             trajectories = pickle.load(f)
+
+         for idx, traj in enumerate(trajectories):
+             yield idx, {
+                 "observations": traj["observations"],
+                 "actions": traj["actions"],
+                 # expand_dims turns the flat (T,) arrays into (T, 1) columns
+                 "rewards": np.expand_dims(traj["rewards"], axis=1),
+                 # D4RL pickles may store the episode-end flags under "terminals"
+                 # instead of "dones", so fall back to that key
+                 "dones": np.expand_dims(traj.get("dones", traj.get("terminals")), axis=1),
+             }
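
Once the script is on the Hub, the configurations above can be loaded through the datasets library. A minimal usage sketch, assuming the repo id TobiTob/CityLearn (inferred from this commit page, not confirmed by the diff itself) and an older datasets release (newer versions require trust_remote_code=True to run loading scripts, and the newest drop script support entirely):

from datasets import load_dataset

# Any config name declared in BUILDER_CONFIGS works here.
data = load_dataset("TobiTob/CityLearn", "halfcheetah-expert-v2")

traj = data["train"][0]              # one trajectory per example
print(len(traj["observations"]))     # number of timesteps T
print(len(traj["observations"][0]))  # observation dimension
print(traj["rewards"][:5])           # first few per-timestep rewards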