aaronsu11 committed
Commit 5a99403 · verified · 1 Parent(s): 6d43a75

Upload folder using huggingface_hub
config.json ADDED
@@ -0,0 +1,64 @@
+ {
+   "action_dim": 32,
+   "action_head_cfg": {
+     "action_dim": 32,
+     "action_horizon": 16,
+     "add_pos_embed": true,
+     "backbone_embedding_dim": 2048,
+     "diffusion_model_cfg": {
+       "attention_head_dim": 48,
+       "cross_attention_dim": 2048,
+       "dropout": 0.2,
+       "final_dropout": true,
+       "interleave_self_attention": true,
+       "norm_type": "ada_norm",
+       "num_attention_heads": 32,
+       "num_layers": 16,
+       "output_dim": 1024,
+       "positional_embeddings": null
+     },
+     "hidden_size": 1024,
+     "input_embedding_dim": 1536,
+     "max_action_dim": 32,
+     "max_state_dim": 64,
+     "model_dtype": "float32",
+     "noise_beta_alpha": 1.5,
+     "noise_beta_beta": 1.0,
+     "noise_s": 0.999,
+     "num_inference_timesteps": 4,
+     "num_target_vision_tokens": 32,
+     "num_timestep_buckets": 1000,
+     "tune_diffusion_model": true,
+     "tune_projector": true,
+     "use_vlln": true,
+     "vl_self_attention_cfg": {
+       "attention_head_dim": 64,
+       "dropout": 0.2,
+       "final_dropout": true,
+       "num_attention_heads": 32,
+       "num_layers": 4,
+       "positional_embeddings": null
+     }
+   },
+   "action_horizon": 16,
+   "architectures": [
+     "GR00T_N1_5"
+   ],
+   "attn_implementation": null,
+   "backbone_cfg": {
+     "eagle_path": "NVEagle/eagle_er-qwen3_1_7B-Siglip2_400M_stage1_5_128gpu_er_v7_1mlp_nops",
+     "load_bf16": false,
+     "project_to_dim": null,
+     "reproject_vision": false,
+     "select_layer": 12,
+     "tune_llm": false,
+     "tune_visual": true,
+     "use_flash_attention": true
+   },
+   "compute_dtype": "bfloat16",
+   "hidden_size": 2048,
+   "model_dtype": "float32",
+   "model_type": "gr00t_n1_5",
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.51.3"
+ }
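
For quick inspection, here is a minimal sketch that reads the uploaded config.json and prints the fields most relevant to the policy head (action dimension, horizon, and diffusion inference steps). The local file path is an assumption about where the checkpoint folder was downloaded.

```python
import json

# A minimal sketch: inspect key fields of the uploaded config.json.
# The relative path assumes the checkpoint folder is the working directory.
with open("config.json") as f:
    cfg = json.load(f)

head = cfg["action_head_cfg"]
print(cfg["model_type"])                            # gr00t_n1_5
print(cfg["action_dim"], cfg["action_horizon"])     # 32, 16
print(head["num_inference_timesteps"])              # 4 denoising steps at inference
print(cfg["backbone_cfg"]["eagle_path"])            # vision-language backbone reference
```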
experiment_cfg/metadata.json ADDED
@@ -0,0 +1,195 @@
+ {
+   "new_embodiment": {
+     "statistics": {
+       "state": {
+         "single_arm": {
+           "max": [
+             29.970703125,
+             189.140625,
+             175.869140625,
+             101.513671875,
+             -13.623046875
+           ],
+           "min": [
+             -27.59765625,
+             19.951171875,
+             -5.185546875,
+             9.931640625,
+             -178.330078125
+           ],
+           "mean": [
+             2.5756192207336426,
+             100.96839141845703,
+             100.7521743774414,
+             63.33982467651367,
+             -89.53585815429688
+           ],
+           "std": [
+             7.502103805541992,
+             40.32708740234375,
+             42.81830978393555,
+             15.374650001525879,
+             24.642671585083008
+           ],
+           "q01": [
+             -16.611328125,
+             29.1796875,
+             19.16015625,
+             16.787109375,
+             -161.307421875
+           ],
+           "q99": [
+             26.103515625,
+             188.876953125,
+             175.693359375,
+             94.5703125,
+             -24.02578125000005
+           ]
+         },
+         "gripper": {
+           "max": [
+             53.15712356567383
+           ],
+           "min": [
+             0.6607929468154907
+           ],
+           "mean": [
+             13.722357749938965
+           ],
+           "std": [
+             11.730271339416504
+           ],
+           "q01": [
+             0.6607929468154907
+           ],
+           "q99": [
+             43.782673034667944
+           ]
+         }
+       },
+       "action": {
+         "single_arm": {
+           "max": [
+             30.234375,
+             188.7890625,
+             175.517578125,
+             103.447265625,
+             -13.447265625
+           ],
+           "min": [
+             -27.59765625,
+             20.302734375,
+             -8.4375,
+             9.052734375,
+             -182.021484375
+           ],
+           "mean": [
+             2.4082045555114746,
+             101.74885559082031,
+             98.38890838623047,
+             63.04119873046875,
+             -89.63679504394531
+           ],
+           "std": [
+             7.5216851234436035,
+             39.11914825439453,
+             42.43600082397461,
+             15.555571556091309,
+             24.663480758666992
+           ],
+           "q01": [
+             -16.962890625,
+             30.821484375,
+             17.9296875,
+             15.96796875,
+             -161.19140625
+           ],
+           "q99": [
+             26.103515625,
+             188.701171875,
+             175.517578125,
+             94.86210937499997,
+             -23.966015625000026
+           ]
+         },
+         "gripper": {
+           "max": [
+             53.507015228271484
+           ],
+           "min": [
+             0.4008015990257263
+           ],
+           "mean": [
+             12.447723388671875
+           ],
+           "std": [
+             12.622722625732422
+           ],
+           "q01": [
+             0.4008015990257263
+           ],
+           "q99": [
+             43.787574768066406
+           ]
+         }
+       }
+     },
+     "modalities": {
+       "video": {
+         "wrist": {
+           "resolution": [
+             640,
+             480
+           ],
+           "channels": 3,
+           "fps": 30.0
+         },
+         "front": {
+           "resolution": [
+             640,
+             480
+           ],
+           "channels": 3,
+           "fps": 30.0
+         }
+       },
+       "state": {
+         "single_arm": {
+           "absolute": true,
+           "rotation_type": null,
+           "shape": [
+             5
+           ],
+           "continuous": true
+         },
+         "gripper": {
+           "absolute": true,
+           "rotation_type": null,
+           "shape": [
+             1
+           ],
+           "continuous": true
+         }
+       },
+       "action": {
+         "single_arm": {
+           "absolute": true,
+           "rotation_type": null,
+           "shape": [
+             5
+           ],
+           "continuous": true
+         },
+         "gripper": {
+           "absolute": true,
+           "rotation_type": null,
+           "shape": [
+             1
+           ],
+           "continuous": true
+         }
+       }
+     },
+     "embodiment_tag": "new_embodiment"
+   }
+ }
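
The per-dimension statistics above (min/max, mean/std, q01/q99) are the kind typically used to normalize states and actions for a GR00T-style policy. Below is a minimal sketch of q01/q99 scaling to roughly [-1, 1]; whether this checkpoint's data pipeline uses the quantiles or the min/max bounds is an assumption, since the commit only records the statistics.

```python
import json
import numpy as np

# A minimal sketch of how per-dimension q01/q99 statistics like those in
# experiment_cfg/metadata.json are commonly used to scale robot states.
# The choice of quantile (rather than min/max) normalization is an assumption.
with open("experiment_cfg/metadata.json") as f:
    meta = json.load(f)

stats = meta["new_embodiment"]["statistics"]["state"]["single_arm"]
q01 = np.array(stats["q01"])
q99 = np.array(stats["q99"])

def normalize(state: np.ndarray) -> np.ndarray:
    """Map a raw 5-DoF arm state to roughly [-1, 1] using the stored quantiles."""
    scaled = 2.0 * (state - q01) / (q99 - q01) - 1.0
    return np.clip(scaled, -1.0, 1.0)

sample = np.array(stats["mean"])  # use the recorded mean as an example input
print(normalize(sample))
```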
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9d627543d432fc154eb10886f6b34eafe83e1da2bb9a70d85c079ffe4883578
+ size 4999367032
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efcff1b28784dd652d7e449495ee9ae634f8fe1c61cee8c968312793debfe8d4
+ size 2586508600
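
Both weight shards are stored as Git LFS pointers (the oid sha256 and size shown above), not as the binary payloads themselves. Here is a minimal sketch for verifying a downloaded shard against its pointer; the local filename assumes the shard was fetched into the working directory.

```python
import hashlib
import os

# A minimal sketch: verify a downloaded shard against its Git LFS pointer
# (expected sha256 and byte size taken from the pointer file above).
def verify(path: str, expected_sha256: str, expected_size: int) -> bool:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return (digest.hexdigest() == expected_sha256
            and os.path.getsize(path) == expected_size)

print(verify(
    "model-00001-of-00002.safetensors",
    "a9d627543d432fc154eb10886f6b34eafe83e1da2bb9a70d85c079ffe4883578",
    4999367032,
))
```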
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
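
The index file follows the standard Hugging Face weight_map layout, mapping each parameter name to one of the two shards. A minimal sketch that merges the shards into a single state dict under that assumption:

```python
import json
from safetensors.torch import load_file

# A minimal sketch: merge the two shards into one state dict using the
# standard "weight_map" layout of model.safetensors.index.json.
# Paths assume the checkpoint folder is the working directory.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

state_dict = {}
for shard in sorted(set(index["weight_map"].values())):
    state_dict.update(load_file(shard))

print(len(state_dict), "tensors loaded")
```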
 
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73b9d1e51b4fcf7be77e17f9eef1d372bc9f6445c25f6172775dd6627bdde15e
+ size 8550325978
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c301fda8889665dca81c11f1f9226252c30e8ab62035a5b4a83d6caea962468f
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e1bbcbcfdf9accd8e1abcab22ef125a1dc411c7592c23482627ce094bc3a32b
+ size 1064
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
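
optimizer.pt, scheduler.pt, rng_state.pth, and trainer_state.json are the trainer's resume state rather than part of the deployable model. A minimal sketch for inspecting them follows; the weights_only=False flag and the global_step/epoch fields are assumptions based on typical Hugging Face Trainer checkpoints, and pickled files should only be loaded from trusted sources.

```python
import json
import torch

# A minimal sketch: inspect the trainer resume state saved with this checkpoint.
# optimizer.pt and scheduler.pt are pickled, so weights_only=False is assumed to
# be required on recent PyTorch versions; only load checkpoints you trust.
opt_state = torch.load("optimizer.pt", map_location="cpu", weights_only=False)
sched_state = torch.load("scheduler.pt", map_location="cpu", weights_only=False)
print(list(opt_state.keys()), list(sched_state.keys()))

# trainer_state.json is plain JSON; global_step/epoch are typical Trainer fields.
with open("trainer_state.json") as f:
    trainer_state = json.load(f)
print(trainer_state.get("global_step"), trainer_state.get("epoch"))
```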