TootToot committed
Commit 5a64b34
1 Parent(s): b170c42
Huggy.onnx CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e10454a12e659a94e3b1499f4a3633485c2e3244973debb6449c896645fb018a
+ oid sha256:503641489fbc8ca045a31f901aec032798fad785d12795907d92607278d2a528
  size 2271327
Huggy/Huggy-1199993.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44b787f3c82ef7127a7a17185fcb31e21b22a91095fa661a45efc63f18db5d2b
+ size 2271327
Huggy/Huggy-1199993.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0ebe5199120a0e34f4efa10529bd02f196eb2d4e66cd1884d1300ea992e5ad4
+ size 13503717
Huggy/Huggy-1399936.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee8da639546d2c291e7be2504616b58e9163764a90581eb39cb70a254a7afbbf
+ size 2271327
Huggy/Huggy-1399936.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0941d29a1c098880dde8306e63a70aede9b4d6d5f65228ea620bfe2fb7c2fce4
+ size 13503717
Huggy/Huggy-1599458.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8103102d49db8c46393c17faebc1ce5b0392280a167896fa6c1eaab444a49ea
+ size 2271327
Huggy/Huggy-1599458.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a4011a9ee7746a6765ede70308e3f05bedde2d4baf01ac016b9fbb1eec4a129
+ size 13503717
Huggy/Huggy-1799968.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ecf66d505e0a4b368b9d782fd256ffd70f0795677f0b6a0123adcfe315fc240d
+ size 2271327
Huggy/Huggy-1799968.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b7167289f970d8558d3f5270eb5455bd7bead35c6e22b6b9359f2e41f44c834
+ size 13503717
Huggy/Huggy-199894.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f29bb2482459d127fec14aecf369d04083efa5ba94c9f9178428452b6b5b75a
+ size 2271327
Huggy/Huggy-199894.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e731c720bcc00bbb68ac668c35801e36082ccdecc4e190a170efd6208e7c6fa8
+ size 13503717
Huggy/Huggy-1999906.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cb710757e51a005e8c788ed91fea50daedddca66c0bce76e440ea482d8a964f
+ size 2271327
Huggy/Huggy-1999906.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:660ce2e43660d19c05fd84946800dfd1c37bf44dc0163a32ca4c4ea884ead731
+ size 13503717
Huggy/Huggy-2000015.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:503641489fbc8ca045a31f901aec032798fad785d12795907d92607278d2a528
+ size 2271327
Huggy/Huggy-2000015.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc0754d1a85ebf731c9c218dfce61c950fa0c8adbddffb002238ac7cfd20016d
+ size 13503717
Huggy/Huggy-399998.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ed8ebec81f5858bc7e30d30d06d901a49f2cbf7bdb44b732f644c45a592c4d8
+ size 2271327
Huggy/Huggy-399998.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1efd9be1274f231af310b50644a540ee8ba9f79ae4cc81e3cd69c55bf9bb2e2
+ size 13503717
Huggy/Huggy-599933.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:610c82ba1eb1be787329a7bc4c74b2800c8ef66bf5bf7d55df2f958a1692f5ee
+ size 2271327
Huggy/Huggy-599933.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:924179ecd982428651dd0664806603407e9efa10b806580a3a8de6cabb1b39b8
+ size 13503717
Huggy/Huggy-799918.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5a8cdd916cb573db626dc1c1cae49c4d218d5869558ce047987303c5723ede3
+ size 2271327
Huggy/Huggy-799918.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8aed558f692cf31b00fa602a054d80e4416d6da31d1cfee28336c0a70369b8a6
+ size 13503717
Huggy/Huggy-999955.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c83f8ecbe487b4b292e505afdaab107d2418a2b0f5c8b1cd7def6472c516b42
+ size 2271327
Huggy/Huggy-999955.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:194a9db365859e8819c84e3552ef57ea15bd2ba60fb712cb1b9c8eba3c6f6e32
+ size 13503717
Huggy/checkpoint.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:173d55b89f73220f688c28c170acef11dabf83f1c7880152f777824447794f20
+ oid sha256:fc0754d1a85ebf731c9c218dfce61c950fa0c8adbddffb002238ac7cfd20016d
  size 13503717
Huggy/events.out.tfevents.1688395923.0153706f5978.3684.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d33b4844e56347d67fbda0845de978f512d96ecd6c863e2fad976b4cb404dc95
+ size 418257
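
Each entry above is a Git LFS pointer (spec version, `sha256` oid, byte size) rather than the binary itself. A minimal sketch, not part of this commit, for checking a locally pulled file against its pointer; the pointer values are copied from the `Huggy/Huggy-2000015.onnx` entry above and the local path is an assumption about where the file was downloaded:

```python
import hashlib
from pathlib import Path

# Pointer fields copied from the diff above (Huggy/Huggy-2000015.onnx);
# the local path is an assumed download location.
EXPECTED_OID = "503641489fbc8ca045a31f901aec032798fad785d12795907d92607278d2a528"
EXPECTED_SIZE = 2271327
local_file = Path("Huggy/Huggy-2000015.onnx")

data = local_file.read_bytes()
actual_oid = hashlib.sha256(data).hexdigest()

# A file resolved by git-lfs should match both the recorded size and the sha256 oid.
assert len(data) == EXPECTED_SIZE, f"size mismatch: {len(data)}"
assert actual_oid == EXPECTED_OID, f"oid mismatch: {actual_oid}"
print("LFS pointer verified")
```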
README.md CHANGED
@@ -8,21 +8,27 @@ tags:
  ---
 
  # **ppo** Agent playing **Huggy**
- This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
-
+ This is a trained model of a **ppo** agent playing **Huggy**
+ using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
+
  ## Usage (with ML-Agents)
- The Documentation: https://github.com/huggingface/ml-agents#get-started
- We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:
+ The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/
 
+ We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:
+ - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your
+ browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction
+ - A *longer tutorial* to understand how ML-Agents works:
+ https://huggingface.co/learn/deep-rl-course/unit5/introduction
 
  ### Resume the training
- ```
+ ```bash
  mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
  ```
+
  ### Watch your Agent play
- You can watch your agent **playing directly in your browser:**.
-
- 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
+ You can watch your agent **playing directly in your browser**:
+
+ 1. If the environment is part of ML-Agents official environments, go to https://huggingface.co/unity
  2. Step 1: Find your model_id: TootToot/ppo-Huggy
  3. Step 2: Select your *.nn /*.onnx file
  4. Click on Watch the agent play 👀
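
As a quick complement to the browser demo in the card, the sketch below downloads the exported `Huggy.onnx` from this repository and lists its input/output tensors with `onnxruntime`. This is a minimal sketch outside the commit itself; it assumes `huggingface_hub` and `onnxruntime` are installed and deliberately avoids guessing observation shapes:

```python
# Sketch: inspect the exported ONNX policy from this repository.
# Assumes `pip install huggingface_hub onnxruntime`; not part of the model card.
from huggingface_hub import hf_hub_download
import onnxruntime as ort

model_path = hf_hub_download(repo_id="TootToot/ppo-Huggy", filename="Huggy.onnx")
session = ort.InferenceSession(model_path)

# Print the observation/action tensor names and shapes the policy expects.
for inp in session.get_inputs():
    print("input :", inp.name, inp.shape, inp.type)
for out in session.get_outputs():
    print("output:", out.name, out.shape, out.type)
```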
run_logs/Player-0.log CHANGED
@@ -34,7 +34,7 @@ ALSA lib pcm.c:2642:(snd_pcm_open_noupdate) Unknown PCM default
  FMOD failed to initialize the output device.: "Error initializing output device. " (60)
  FMOD initialized on nosound output
  Begin MonoManager ReloadAssembly
- - Completed reload, in 0.125 seconds
+ - Completed reload, in 0.085 seconds
  ERROR: Shader Hidden/Universal Render Pipeline/Blit shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Hidden/Universal Render Pipeline/CopyDepth shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Hidden/Universal Render Pipeline/ScreenSpaceShadows shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
@@ -78,14 +78,14 @@ ERROR: Shader Universal Render Pipeline/Lit shader is not supported on this GPU
  WARNING: Shader Unsupported: 'Universal Render Pipeline/Lit' - All subshaders removed
  WARNING: Shader Did you use #pragma only_renderers and omit this platform?
  WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
- UnloadTime: 0.896492 ms
+ UnloadTime: 0.787447 ms
  requesting resize 84 x 84
  Setting up 1 worker threads for Enlighten.
  Memory Statistics:
  [ALLOC_TEMP_TLS] TLS Allocator
  StackAllocators :
  [ALLOC_TEMP_MAIN]
- Peak usage frame count: [4.0 KB-8.0 KB]: 26755 frames, [2.0 MB-4.0 MB]: 1 frames
+ Peak usage frame count: [4.0 KB-8.0 KB]: 26745 frames, [2.0 MB-4.0 MB]: 1 frames
  Initial Block Size 4.0 MB
  Current Block Size 4.0 MB
  Peak Allocated Bytes 3.6 MB
@@ -93,7 +93,7 @@ Memory Statistics:
  [ALLOC_TEMP_Loading.AsyncRead]
  Initial Block Size 64.0 KB
  Current Block Size 64.0 KB
- Peak Allocated Bytes 128 B
+ Peak Allocated Bytes 136 B
  Overflow Count 0
  [ALLOC_TEMP_Loading.PreloadManager]
  Initial Block Size 256.0 KB
@@ -201,22 +201,22 @@ Memory Statistics:
  Peak Allocated Bytes 0 B
  Overflow Count 0
  [ALLOC_DEFAULT] Dual Thread Allocator
- Peak main deferred allocation count 307
+ Peak main deferred allocation count 326
  [ALLOC_BUCKET]
  Large Block size 4.0 MB
  Used Block count 1
  Peak Allocated bytes 1.4 MB
  [ALLOC_DEFAULT_MAIN]
- Peak usage frame count: [16.0 MB-32.0 MB]: 26756 frames
+ Peak usage frame count: [16.0 MB-32.0 MB]: 26746 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
- Peak Allocated memory 23.5 MB
+ Peak Allocated memory 23.3 MB
  Peak Large allocation bytes 16.0 MB
  [ALLOC_DEFAULT_THREAD]
- Peak usage frame count: [2.0 MB-4.0 MB]: 26756 frames
+ Peak usage frame count: [2.0 MB-4.0 MB]: 26746 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
- Peak Allocated memory 2.6 MB
+ Peak Allocated memory 2.8 MB
  Peak Large allocation bytes 0 B
  [ALLOC_TEMP_JOB_1_FRAME]
  Initial Block Size 2.0 MB
@@ -245,13 +245,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.4 MB
  [ALLOC_GFX_MAIN]
- Peak usage frame count: [32.0 KB-64.0 KB]: 26755 frames, [64.0 KB-128.0 KB]: 1 frames
+ Peak usage frame count: [32.0 KB-64.0 KB]: 26745 frames, [64.0 KB-128.0 KB]: 1 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
  Peak Allocated memory 65.6 KB
  Peak Large allocation bytes 0 B
  [ALLOC_GFX_THREAD]
- Peak usage frame count: [64.0 KB-128.0 KB]: 26756 frames
+ Peak usage frame count: [64.0 KB-128.0 KB]: 26746 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
  Peak Allocated memory 81.8 KB
@@ -263,13 +263,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.4 MB
  [ALLOC_CACHEOBJECTS_MAIN]
- Peak usage frame count: [1.0 MB-2.0 MB]: 26755 frames, [16.0 MB-32.0 MB]: 1 frames
+ Peak usage frame count: [1.0 MB-2.0 MB]: 26745 frames, [16.0 MB-32.0 MB]: 1 frames
  Requested Block Size 4.0 MB
  Peak Block count 2
  Peak Allocated memory 30.6 MB
  Peak Large allocation bytes 24.9 MB
  [ALLOC_CACHEOBJECTS_THREAD]
- Peak usage frame count: [0.5 MB-1.0 MB]: 26755 frames, [2.0 MB-4.0 MB]: 1 frames
+ Peak usage frame count: [0.5 MB-1.0 MB]: 26745 frames, [2.0 MB-4.0 MB]: 1 frames
  Requested Block Size 4.0 MB
  Peak Block count 1
  Peak Allocated memory 2.6 MB
@@ -281,13 +281,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.4 MB
  [ALLOC_TYPETREE_MAIN]
- Peak usage frame count: [0-1.0 KB]: 26756 frames
+ Peak usage frame count: [0-1.0 KB]: 26746 frames
  Requested Block Size 2.0 MB
  Peak Block count 1
  Peak Allocated memory 1.0 KB
  Peak Large allocation bytes 0 B
  [ALLOC_TYPETREE_THREAD]
- Peak usage frame count: [4.0 KB-8.0 KB]: 26756 frames
+ Peak usage frame count: [4.0 KB-8.0 KB]: 26746 frames
  Requested Block Size 2.0 MB
  Peak Block count 1
  Peak Allocated memory 7.3 KB
run_logs/timers.json CHANGED
@@ -2,135 +2,135 @@
  "name": "root",
  "gauges": {
  "Huggy.Policy.Entropy.mean": {
- "value": 1.403124451637268,
- "min": 1.403124451637268,
- "max": 1.4271317720413208,
+ "value": 1.3996235132217407,
+ "min": 1.3996235132217407,
+ "max": 1.4283359050750732,
  "count": 40
  },
  "Huggy.Policy.Entropy.sum": {
- "value": 70533.6640625,
- "min": 68424.6484375,
- "max": 77078.6953125,
+ "value": 69827.21875,
+ "min": 68443.4375,
+ "max": 77023.0078125,
  "count": 40
  },
  "Huggy.Environment.EpisodeLength.mean": {
- "value": 86.58377425044091,
- "min": 76.67554858934169,
- "max": 387.94573643410854,
+ "value": 80.47797716150082,
+ "min": 80.47797716150082,
+ "max": 401.6825396825397,
  "count": 40
  },
  "Huggy.Environment.EpisodeLength.sum": {
- "value": 49093.0,
- "min": 48919.0,
- "max": 50141.0,
+ "value": 49333.0,
+ "min": 49157.0,
+ "max": 50612.0,
  "count": 40
  },
  "Huggy.Step.mean": {
- "value": 1999930.0,
- "min": 49738.0,
- "max": 1999930.0,
+ "value": 1999906.0,
+ "min": 49988.0,
+ "max": 1999906.0,
  "count": 40
  },
  "Huggy.Step.sum": {
- "value": 1999930.0,
- "min": 49738.0,
- "max": 1999930.0,
+ "value": 1999906.0,
+ "min": 49988.0,
+ "max": 1999906.0,
  "count": 40
  },
  "Huggy.Policy.ExtrinsicValueEstimate.mean": {
- "value": 2.4486279487609863,
- "min": 0.07911801338195801,
- "max": 2.50622296333313,
+ "value": 2.385777473449707,
+ "min": 0.009915120899677277,
+ "max": 2.430847644805908,
  "count": 40
  },
  "Huggy.Policy.ExtrinsicValueEstimate.sum": {
- "value": 1388.3720703125,
- "min": 10.127105712890625,
- "max": 1549.3192138671875,
+ "value": 1462.4815673828125,
+ "min": 1.2393901348114014,
+ "max": 1462.4815673828125,
  "count": 40
  },
  "Huggy.Environment.CumulativeReward.mean": {
- "value": 3.8139860671033303,
- "min": 1.795990341110155,
- "max": 3.985260358931106,
+ "value": 3.647996749224515,
+ "min": 1.9233169865608215,
+ "max": 3.8755954584141947,
  "count": 40
  },
  "Huggy.Environment.CumulativeReward.sum": {
- "value": 2162.5301000475883,
- "min": 229.88676366209984,
- "max": 2452.622166156769,
+ "value": 2236.2220072746277,
+ "min": 240.4146233201027,
+ "max": 2236.2220072746277,
  "count": 40
  },
  "Huggy.Policy.ExtrinsicReward.mean": {
- "value": 3.8139860671033303,
- "min": 1.795990341110155,
- "max": 3.985260358931106,
+ "value": 3.647996749224515,
+ "min": 1.9233169865608215,
+ "max": 3.8755954584141947,
  "count": 40
  },
  "Huggy.Policy.ExtrinsicReward.sum": {
- "value": 2162.5301000475883,
- "min": 229.88676366209984,
- "max": 2452.622166156769,
+ "value": 2236.2220072746277,
+ "min": 240.4146233201027,
+ "max": 2236.2220072746277,
  "count": 40
  },
  "Huggy.Losses.PolicyLoss.mean": {
- "value": 0.020092773633142418,
- "min": 0.012592659465735779,
- "max": 0.020587260076717937,
+ "value": 0.014439888263586907,
+ "min": 0.013108634809678837,
+ "max": 0.021196249868565546,
  "count": 40
  },
  "Huggy.Losses.PolicyLoss.sum": {
- "value": 0.06027832089942725,
- "min": 0.025185318931471558,
- "max": 0.061761780230153815,
+ "value": 0.028879776527173814,
+ "min": 0.02645938691130141,
+ "max": 0.057462945113366,
  "count": 40
  },
  "Huggy.Losses.ValueLoss.mean": {
- "value": 0.06541144243545002,
- "min": 0.023115050295988718,
- "max": 0.0654540087406834,
+ "value": 0.06863893251866102,
+ "min": 0.022788701392710206,
+ "max": 0.06882662878682216,
  "count": 40
  },
  "Huggy.Losses.ValueLoss.sum": {
- "value": 0.19623432730635007,
- "min": 0.046230100591977435,
- "max": 0.19623432730635007,
+ "value": 0.13727786503732203,
+ "min": 0.04557740278542041,
+ "max": 0.19475689940154553,
  "count": 40
  },
  "Huggy.Policy.LearningRate.mean": {
- "value": 3.6101987966333274e-06,
- "min": 3.6101987966333274e-06,
- "max": 0.000295346026551325,
+ "value": 4.459073513674998e-06,
+ "min": 4.459073513674998e-06,
+ "max": 0.00029528970157010004,
  "count": 40
  },
  "Huggy.Policy.LearningRate.sum": {
- "value": 1.0830596389899982e-05,
- "min": 1.0830596389899982e-05,
- "max": 0.0008440482186505998,
+ "value": 8.918147027349996e-06,
+ "min": 8.918147027349996e-06,
+ "max": 0.00084357466880845,
  "count": 40
  },
  "Huggy.Policy.Epsilon.mean": {
- "value": 0.10120336666666664,
- "min": 0.10120336666666664,
- "max": 0.19844867499999996,
+ "value": 0.101486325,
+ "min": 0.101486325,
+ "max": 0.1984299,
  "count": 40
  },
  "Huggy.Policy.Epsilon.sum": {
- "value": 0.30361009999999994,
- "min": 0.20756005,
- "max": 0.5813493999999999,
+ "value": 0.20297265,
+ "min": 0.20297265,
+ "max": 0.58119155,
  "count": 40
  },
  "Huggy.Policy.Beta.mean": {
- "value": 7.004799666666658e-05,
- "min": 7.004799666666658e-05,
- "max": 0.0049225888825000005,
+ "value": 8.416761749999997e-05,
+ "min": 8.416761749999997e-05,
+ "max": 0.00492165201,
  "count": 40
  },
  "Huggy.Policy.Beta.sum": {
- "value": 0.00021014398999999977,
- "min": 0.00021014398999999977,
- "max": 0.014069335059999997,
+ "value": 0.00016833523499999994,
+ "min": 0.00016833523499999994,
+ "max": 0.014061458345,
  "count": 40
  },
  "Huggy.IsTraining.mean": {
@@ -148,67 +148,67 @@
  },
  "metadata": {
  "timer_format_version": "0.1.0",
- "start_time_seconds": "1683819497",
- "python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
+ "start_time_seconds": "1688395922",
+ "python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
  "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
  "mlagents_version": "0.31.0.dev0",
  "mlagents_envs_version": "0.31.0.dev0",
  "communication_protocol_version": "1.5.0",
  "pytorch_version": "1.11.0+cu102",
  "numpy_version": "1.21.2",
- "end_time_seconds": "1683821844"
+ "end_time_seconds": "1688398281"
  },
- "total": 2346.97648214,
+ "total": 2358.6050041510002,
  "count": 1,
- "self": 0.43777926000029765,
+ "self": 0.43864498500033733,
  "children": {
  "run_training.setup": {
- "total": 0.06755743899998379,
+ "total": 0.043092682000065,
  "count": 1,
- "self": 0.06755743899998379
+ "self": 0.043092682000065
  },
  "TrainerController.start_learning": {
- "total": 2346.471145441,
+ "total": 2358.123266484,
  "count": 1,
- "self": 4.112770355036901,
+ "self": 4.171306707058648,
  "children": {
  "TrainerController._reset_env": {
- "total": 4.468887656000106,
+ "total": 4.10922480499994,
  "count": 1,
- "self": 4.468887656000106
+ "self": 4.10922480499994
  },
  "TrainerController.advance": {
- "total": 2337.7474697519633,
- "count": 232893,
- "self": 4.29859309899939,
+ "total": 2349.7315278719416,
+ "count": 231917,
+ "self": 4.388288699918121,
  "children": {
  "env_step": {
- "total": 1815.0060323550993,
- "count": 232893,
- "self": 1535.6669886021568,
+ "total": 1835.5190091201075,
+ "count": 231917,
+ "self": 1546.0329148400522,
  "children": {
  "SubprocessEnvManager._take_step": {
- "total": 276.6194126289438,
- "count": 232893,
- "self": 16.138814581058455,
+ "total": 286.8296068860244,
+ "count": 231917,
+ "self": 16.348750633011832,
  "children": {
  "TorchPolicy.evaluate": {
- "total": 260.48059804788534,
- "count": 222960,
- "self": 260.48059804788534
+ "total": 270.4808562530126,
+ "count": 222882,
+ "self": 270.4808562530126
  }
  }
  },
  "workers": {
- "total": 2.7196311239985107,
- "count": 232893,
+ "total": 2.6564873940309326,
+ "count": 231917,
  "self": 0.0,
  "children": {
  "worker_root": {
- "total": 2338.7060938279888,
- "count": 232893,
+ "total": 2350.545042976117,
+ "count": 231917,
  "is_parallel": true,
- "self": 1081.6073131250346,
+ "self": 1085.682557866081,
  "children": {
  "run_training.setup": {
  "total": 0.0,
@@ -217,48 +217,48 @@
  "self": 0.0,
  "children": {
  "steps_from_proto": {
- "total": 0.0012246179999237938,
+ "total": 0.0008917809999502424,
  "count": 1,
  "is_parallel": true,
- "self": 0.0003168209999557803,
+ "self": 0.0002753239999719881,
  "children": {
  "_process_rank_one_or_two_observation": {
- "total": 0.0009077969999680136,
+ "total": 0.0006164569999782543,
  "count": 2,
  "is_parallel": true,
- "self": 0.0009077969999680136
+ "self": 0.0006164569999782543
  }
  }
  },
  "UnityEnvironment.step": {
- "total": 0.02829281199990419,
+ "total": 0.02835647599999902,
  "count": 1,
  "is_parallel": true,
- "self": 0.00031350799997653667,
+ "self": 0.00031673499995577004,
  "children": {
  "UnityEnvironment._generate_step_input": {
- "total": 0.000174994999952105,
+ "total": 0.0002383620000045994,
  "count": 1,
  "is_parallel": true,
- "self": 0.000174994999952105
+ "self": 0.0002383620000045994
  },
  "communicator.exchange": {
- "total": 0.027110171999993327,
+ "total": 0.02707606300009502,
  "count": 1,
  "is_parallel": true,
- "self": 0.027110171999993327
+ "self": 0.02707606300009502
  },
  "steps_from_proto": {
- "total": 0.00069413699998222,
+ "total": 0.000725315999943632,
  "count": 1,
  "is_parallel": true,
- "self": 0.00021510900000976108,
+ "self": 0.00021000799995363195,
  "children": {
  "_process_rank_one_or_two_observation": {
- "total": 0.00047902799997245893,
+ "total": 0.0005153079999900001,
  "count": 2,
  "is_parallel": true,
- "self": 0.00047902799997245893
+ "self": 0.0005153079999900001
  }
  }
  }
@@ -267,34 +267,34 @@
  }
  },
  "UnityEnvironment.step": {
- "total": 1257.0987807029542,
- "count": 232892,
+ "total": 1264.8624851100362,
+ "count": 231916,
  "is_parallel": true,
- "self": 38.27769059905904,
+ "self": 39.275736042906146,
  "children": {
  "UnityEnvironment._generate_step_input": {
- "total": 77.01381891188248,
- "count": 232892,
+ "total": 78.83619503009243,
+ "count": 231916,
  "is_parallel": true,
- "self": 77.01381891188248
+ "self": 78.83619503009243
  },
  "communicator.exchange": {
- "total": 1050.2927384460377,
- "count": 232892,
+ "total": 1052.4197886509828,
+ "count": 231916,
  "is_parallel": true,
- "self": 1050.2927384460377
+ "self": 1052.4197886509828
  },
  "steps_from_proto": {
- "total": 91.51453274597498,
- "count": 232892,
+ "total": 94.33076538605496,
+ "count": 231916,
  "is_parallel": true,
- "self": 33.91195356080425,
+ "self": 33.390641049996475,
  "children": {
  "_process_rank_one_or_two_observation": {
- "total": 57.602579185170725,
- "count": 465784,
+ "total": 60.94012433605849,
+ "count": 463832,
  "is_parallel": true,
- "self": 57.602579185170725
+ "self": 60.94012433605849
  }
  }
  }
@@ -307,31 +307,31 @@
  }
  },
  "trainer_advance": {
- "total": 518.4428442978646,
- "count": 232893,
- "self": 6.360000005963343,
+ "total": 509.8242300519157,
+ "count": 231917,
+ "self": 6.384156880928458,
  "children": {
  "process_trajectory": {
- "total": 132.92127038290084,
- "count": 232893,
- "self": 131.50784617590114,
+ "total": 131.67724607898367,
+ "count": 231917,
+ "self": 130.32106857398344,
  "children": {
  "RLTrainer._checkpoint": {
- "total": 1.4134242069997072,
+ "total": 1.3561775050002325,
  "count": 10,
- "self": 1.4134242069997072
+ "self": 1.3561775050002325
  }
  }
  },
  "_update_policy": {
- "total": 379.16157390900037,
- "count": 97,
- "self": 319.6634817490009,
+ "total": 371.7628270920036,
+ "count": 96,
+ "self": 313.0295590090092,
  "children": {
  "TorchPPOOptimizer.update": {
- "total": 59.49809215999949,
- "count": 2910,
- "self": 59.49809215999949
+ "total": 58.733268082994414,
+ "count": 2880,
+ "self": 58.733268082994414
  }
  }
  }
@@ -340,19 +340,19 @@
  }
  },
  "trainer_threads": {
- "total": 1.3270000636111945e-06,
+ "total": 8.089996299531776e-07,
  "count": 1,
- "self": 1.3270000636111945e-06
+ "self": 8.089996299531776e-07
  },
  "TrainerController._save_models": {
- "total": 0.142016350999711,
+ "total": 0.11120629100014412,
  "count": 1,
- "self": 0.002214623999861942,
+ "self": 0.0019907540004169277,
  "children": {
  "RLTrainer._checkpoint": {
- "total": 0.13980172699984905,
+ "total": 0.10921553699972719,
  "count": 1,
- "self": 0.13980172699984905
+ "self": 0.10921553699972719
  }
  }
  }
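
`run_logs/timers.json` is the ML-Agents profiling dump diffed above; each entry under `gauges` carries a `value`, `min`, `max`, and `count`. A minimal sketch, assuming a local checkout of this repo so the file path resolves, that reads one gauge back out:

```python
# Sketch: read the cumulative-reward gauge out of the profiling dump shown above.
# Assumes a local checkout where run_logs/timers.json exists.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

gauge = timers["gauges"]["Huggy.Environment.CumulativeReward.mean"]
print(f"mean reward gauge: value={gauge['value']:.3f}, "
      f"min={gauge['min']:.3f}, max={gauge['max']:.3f}, count={gauge['count']}")
```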
run_logs/training_status.json CHANGED
@@ -2,112 +2,112 @@
  "Huggy": {
  "checkpoints": [
  {
- "steps": 199770,
- "file_path": "results/Huggy/Huggy/Huggy-199770.onnx",
- "reward": 3.631713208505663,
- "creation_time": 1683819737.7751646,
+ "steps": 199894,
+ "file_path": "results/Huggy/Huggy/Huggy-199894.onnx",
+ "reward": 3.419936793915769,
+ "creation_time": 1688396156.6679292,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-199770.pt"
+ "results/Huggy/Huggy/Huggy-199894.pt"
  ]
  },
  {
- "steps": 399894,
- "file_path": "results/Huggy/Huggy/Huggy-399894.onnx",
- "reward": 4.060420019011343,
- "creation_time": 1683819967.7910883,
+ "steps": 399998,
+ "file_path": "results/Huggy/Huggy/Huggy-399998.onnx",
+ "reward": 3.814316924025373,
+ "creation_time": 1688396388.517391,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-399894.pt"
+ "results/Huggy/Huggy/Huggy-399998.pt"
  ]
  },
  {
- "steps": 599974,
- "file_path": "results/Huggy/Huggy/Huggy-599974.onnx",
- "reward": 4.151750664961965,
- "creation_time": 1683820199.9652004,
+ "steps": 599933,
+ "file_path": "results/Huggy/Huggy/Huggy-599933.onnx",
+ "reward": 2.530343929926554,
+ "creation_time": 1688396624.8762076,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-599974.pt"
+ "results/Huggy/Huggy/Huggy-599933.pt"
  ]
  },
  {
- "steps": 799973,
- "file_path": "results/Huggy/Huggy/Huggy-799973.onnx",
- "reward": 3.615187671055665,
- "creation_time": 1683820432.4321713,
+ "steps": 799918,
+ "file_path": "results/Huggy/Huggy/Huggy-799918.onnx",
+ "reward": 3.622871805737902,
+ "creation_time": 1688396858.601711,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-799973.pt"
+ "results/Huggy/Huggy/Huggy-799918.pt"
  ]
  },
  {
- "steps": 999938,
- "file_path": "results/Huggy/Huggy/Huggy-999938.onnx",
- "reward": 4.024213735197411,
- "creation_time": 1683820667.7281137,
+ "steps": 999955,
+ "file_path": "results/Huggy/Huggy/Huggy-999955.onnx",
+ "reward": 3.9664319665808425,
+ "creation_time": 1688397097.4261496,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-999938.pt"
+ "results/Huggy/Huggy/Huggy-999955.pt"
  ]
  },
  {
- "steps": 1199759,
- "file_path": "results/Huggy/Huggy/Huggy-1199759.onnx",
- "reward": 3.8020275682210922,
- "creation_time": 1683820901.2514317,
+ "steps": 1199993,
+ "file_path": "results/Huggy/Huggy/Huggy-1199993.onnx",
+ "reward": 4.4545183430115385,
+ "creation_time": 1688397336.8310432,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-1199759.pt"
+ "results/Huggy/Huggy/Huggy-1199993.pt"
  ]
  },
  {
- "steps": 1399930,
- "file_path": "results/Huggy/Huggy/Huggy-1399930.onnx",
- "reward": 3.7888139292361243,
- "creation_time": 1683821131.4652562,
+ "steps": 1399936,
+ "file_path": "results/Huggy/Huggy/Huggy-1399936.onnx",
+ "reward": 3.7061835756427364,
+ "creation_time": 1688397571.3089862,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-1399930.pt"
+ "results/Huggy/Huggy/Huggy-1399936.pt"
  ]
  },
  {
- "steps": 1599617,
- "file_path": "results/Huggy/Huggy/Huggy-1599617.onnx",
- "reward": 3.8905157059899174,
- "creation_time": 1683821364.84571,
+ "steps": 1599458,
+ "file_path": "results/Huggy/Huggy/Huggy-1599458.onnx",
+ "reward": 3.8235666255156198,
+ "creation_time": 1688397810.47235,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-1599617.pt"
+ "results/Huggy/Huggy/Huggy-1599458.pt"
  ]
  },
  {
- "steps": 1799953,
- "file_path": "results/Huggy/Huggy/Huggy-1799953.onnx",
- "reward": 3.6511306976278624,
- "creation_time": 1683821607.4474437,
+ "steps": 1799968,
+ "file_path": "results/Huggy/Huggy/Huggy-1799968.onnx",
+ "reward": 3.6107782125473022,
+ "creation_time": 1688398046.2368243,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-1799953.pt"
+ "results/Huggy/Huggy/Huggy-1799968.pt"
  ]
  },
  {
- "steps": 1999930,
- "file_path": "results/Huggy/Huggy/Huggy-1999930.onnx",
- "reward": 4.0515340692118595,
- "creation_time": 1683821843.4603624,
+ "steps": 1999906,
+ "file_path": "results/Huggy/Huggy/Huggy-1999906.onnx",
+ "reward": 3.579855168124904,
+ "creation_time": 1688398280.5472698,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-1999930.pt"
+ "results/Huggy/Huggy/Huggy-1999906.pt"
  ]
  },
  {
- "steps": 2000017,
- "file_path": "results/Huggy/Huggy/Huggy-2000017.onnx",
- "reward": 4.114608782988328,
- "creation_time": 1683821843.6074064,
+ "steps": 2000015,
+ "file_path": "results/Huggy/Huggy/Huggy-2000015.onnx",
+ "reward": 3.581122413600162,
+ "creation_time": 1688398280.6621647,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-2000017.pt"
+ "results/Huggy/Huggy/Huggy-2000015.pt"
  ]
  }
  ],
  "final_checkpoint": {
- "steps": 2000017,
+ "steps": 2000015,
  "file_path": "results/Huggy/Huggy.onnx",
- "reward": 4.114608782988328,
- "creation_time": 1683821843.6074064,
+ "reward": 3.581122413600162,
+ "creation_time": 1688398280.6621647,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-2000017.pt"
+ "results/Huggy/Huggy/Huggy-2000015.pt"
  ]
  }
  },
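
`run_logs/training_status.json` holds the checkpoint bookkeeping shown above: a `checkpoints` list and a `final_checkpoint`, each with `steps`, `file_path`, `reward`, `creation_time`, and `auxillary_file_paths`. A minimal sketch, assuming a local checkout of this repo so the file path resolves, that summarises those entries:

```python
# Sketch: summarise the checkpoints recorded in the file diffed above.
# Assumes a local checkout where run_logs/training_status.json exists.
import json

with open("run_logs/training_status.json") as f:
    status = json.load(f)

huggy = status["Huggy"]
for ckpt in huggy["checkpoints"]:
    print(f"{ckpt['steps']:>8} steps  reward={ckpt['reward']:.3f}  {ckpt['file_path']}")

final = huggy["final_checkpoint"]
print(f"final: {final['steps']} steps, reward={final['reward']:.3f}, {final['file_path']}")
```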