{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0800565481185913,
"min": 1.0800565481185913,
"max": 2.865978240966797,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10300.4990234375,
"min": 10300.4990234375,
"max": 27921.345703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.852461814880371,
"min": 0.7957610487937927,
"max": 12.852461814880371,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2506.22998046875,
"min": 75.59729766845703,
"max": 2573.947509765625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 4378.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.065277744479353,
"min": 0.060407346636387874,
"max": 0.07510465324009477,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.261110977917412,
"min": 0.14982889127899326,
"max": 0.36340564895791055,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2090834575686969,
"min": 0.17593101335360722,
"max": 0.3001500289229786,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8363338302747876,
"min": 0.35186202670721445,
"max": 1.4312542606802547,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.0260976579999984e-06,
"min": 7.0260976579999984e-06,
"max": 0.000287526004158,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.8104390631999994e-05,
"min": 2.8104390631999994e-05,
"max": 0.0013138800620399998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10234199999999999,
"min": 0.10234199999999999,
"max": 0.19584200000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40936799999999995,
"min": 0.39168400000000003,
"max": 0.93796,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001268658,
"min": 0.0001268658,
"max": 0.004792515800000001,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005074632,
"min": 0.0005074632,
"max": 0.021904204,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.818181818181817,
"min": 4.545454545454546,
"max": 25.818181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1136.0,
"min": 100.0,
"max": 1406.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.818181818181817,
"min": 4.545454545454546,
"max": 25.818181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1136.0,
"min": 100.0,
"max": 1406.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684800213",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684800702"
},
"total": 489.64451215400004,
"count": 1,
"self": 0.791335937000099,
"children": {
"run_training.setup": {
"total": 0.06397809800000687,
"count": 1,
"self": 0.06397809800000687
},
"TrainerController.start_learning": {
"total": 488.78919811899993,
"count": 1,
"self": 0.5855051090049983,
"children": {
"TrainerController._reset_env": {
"total": 5.076384251000036,
"count": 1,
"self": 5.076384251000036
},
"TrainerController.advance": {
"total": 482.8961280199949,
"count": 17758,
"self": 0.2937743290030994,
"children": {
"env_step": {
"total": 482.6023536909918,
"count": 17758,
"self": 352.62906332699896,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.69049512899733,
"count": 17758,
"self": 1.8027949619893207,
"children": {
"TorchPolicy.evaluate": {
"total": 127.887700167008,
"count": 17758,
"self": 127.887700167008
}
}
},
"workers": {
"total": 0.28279523499548986,
"count": 17758,
"self": 0.0,
"children": {
"worker_root": {
"total": 486.88140983100345,
"count": 17758,
"is_parallel": true,
"self": 228.5440531170055,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003403717000026063,
"count": 1,
"is_parallel": true,
"self": 0.0009043510000310562,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002499365999995007,
"count": 10,
"is_parallel": true,
"self": 0.002499365999995007
}
}
},
"UnityEnvironment.step": {
"total": 0.12036650600003895,
"count": 1,
"is_parallel": true,
"self": 0.0006001710000873572,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000554515999965588,
"count": 1,
"is_parallel": true,
"self": 0.000554515999965588
},
"communicator.exchange": {
"total": 0.11297735400000875,
"count": 1,
"is_parallel": true,
"self": 0.11297735400000875
},
"steps_from_proto": {
"total": 0.006234464999977263,
"count": 1,
"is_parallel": true,
"self": 0.0013472159999423639,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004887249000034899,
"count": 10,
"is_parallel": true,
"self": 0.004887249000034899
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 258.33735671399796,
"count": 17757,
"is_parallel": true,
"self": 9.931823146993338,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.587392548996036,
"count": 17757,
"is_parallel": true,
"self": 5.587392548996036
},
"communicator.exchange": {
"total": 206.69902286500394,
"count": 17757,
"is_parallel": true,
"self": 206.69902286500394
},
"steps_from_proto": {
"total": 36.11911815300465,
"count": 17757,
"is_parallel": true,
"self": 7.059555132006551,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.059563020998098,
"count": 177570,
"is_parallel": true,
"self": 29.059563020998098
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016971100001228479,
"count": 1,
"self": 0.00016971100001228479,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 478.99355130495525,
"count": 462471,
"is_parallel": true,
"self": 10.77299956690166,
"children": {
"process_trajectory": {
"total": 262.6289624830535,
"count": 462471,
"is_parallel": true,
"self": 260.8337015720536,
"children": {
"RLTrainer._checkpoint": {
"total": 1.7952609109999003,
"count": 4,
"is_parallel": true,
"self": 1.7952609109999003
}
}
},
"_update_policy": {
"total": 205.5915892550001,
"count": 88,
"is_parallel": true,
"self": 78.40621398899913,
"children": {
"TorchPPOOptimizer.update": {
"total": 127.18537526600096,
"count": 4485,
"is_parallel": true,
"self": 127.18537526600096
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.23101102800001172,
"count": 1,
"self": 0.0018925560000297992,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22911847199998192,
"count": 1,
"self": 0.22911847199998192
}
}
}
}
}
}
}