{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1063027381896973,
"min": 1.0979952812194824,
"max": 2.871321439743042,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10562.978515625,
"min": 10562.978515625,
"max": 29499.95703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.447012901306152,
"min": 0.4224646985530853,
"max": 12.447012901306152,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2427.16748046875,
"min": 81.9581527709961,
"max": 2496.465087890625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0680802553817878,
"min": 0.06014252547291678,
"max": 0.07632606763831527,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2723210215271512,
"min": 0.2503388118462217,
"max": 0.35971706249185054,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2085458694354576,
"min": 0.1165344900047571,
"max": 0.27813386223187636,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8341834777418304,
"min": 0.4661379600190284,
"max": 1.3857629413113877,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.954545454545453,
"min": 3.772727272727273,
"max": 24.954545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1098.0,
"min": 166.0,
"max": 1330.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.954545454545453,
"min": 3.772727272727273,
"max": 24.954545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1098.0,
"min": 166.0,
"max": 1330.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690473569",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690474067"
},
"total": 497.945676546,
"count": 1,
"self": 0.835398519000023,
"children": {
"run_training.setup": {
"total": 0.0556766830000015,
"count": 1,
"self": 0.0556766830000015
},
"TrainerController.start_learning": {
"total": 497.054601344,
"count": 1,
"self": 0.5769783049886428,
"children": {
"TrainerController._reset_env": {
"total": 5.8502916709999795,
"count": 1,
"self": 5.8502916709999795
},
"TrainerController.advance": {
"total": 490.38094480701136,
"count": 18219,
"self": 0.2856185400062259,
"children": {
"env_step": {
"total": 490.09532626700513,
"count": 18219,
"self": 355.31409618100474,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.4851476739949,
"count": 18219,
"self": 1.9607658549967368,
"children": {
"TorchPolicy.evaluate": {
"total": 132.52438181899817,
"count": 18219,
"self": 132.52438181899817
}
}
},
"workers": {
"total": 0.29608241200548946,
"count": 18219,
"self": 0.0,
"children": {
"worker_root": {
"total": 495.3952223919936,
"count": 18219,
"is_parallel": true,
"self": 235.66279135298976,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0054974759999595335,
"count": 1,
"is_parallel": true,
"self": 0.003976731999898675,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015207440000608585,
"count": 10,
"is_parallel": true,
"self": 0.0015207440000608585
}
}
},
"UnityEnvironment.step": {
"total": 0.0373264899999981,
"count": 1,
"is_parallel": true,
"self": 0.0007138590000295153,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00034540199999355536,
"count": 1,
"is_parallel": true,
"self": 0.00034540199999355536
},
"communicator.exchange": {
"total": 0.03385186299999532,
"count": 1,
"is_parallel": true,
"self": 0.03385186299999532
},
"steps_from_proto": {
"total": 0.002415365999979713,
"count": 1,
"is_parallel": true,
"self": 0.0004329140000436382,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019824519999360746,
"count": 10,
"is_parallel": true,
"self": 0.0019824519999360746
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 259.73243103900387,
"count": 18218,
"is_parallel": true,
"self": 11.222122656995907,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.464670690003572,
"count": 18218,
"is_parallel": true,
"self": 5.464670690003572
},
"communicator.exchange": {
"total": 205.80449946900194,
"count": 18218,
"is_parallel": true,
"self": 205.80449946900194
},
"steps_from_proto": {
"total": 37.24113822300245,
"count": 18218,
"is_parallel": true,
"self": 6.833339087970387,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.407799135032064,
"count": 182180,
"is_parallel": true,
"self": 30.407799135032064
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011804100006429508,
"count": 1,
"self": 0.00011804100006429508,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 486.3396166559678,
"count": 455920,
"is_parallel": true,
"self": 10.784608052924625,
"children": {
"process_trajectory": {
"total": 265.83560848004277,
"count": 455920,
"is_parallel": true,
"self": 264.55699971204285,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2786087679999127,
"count": 4,
"is_parallel": true,
"self": 1.2786087679999127
}
}
},
"_update_policy": {
"total": 209.7194001230004,
"count": 90,
"is_parallel": true,
"self": 83.03952556699795,
"children": {
"TorchPPOOptimizer.update": {
"total": 126.67987455600246,
"count": 4587,
"is_parallel": true,
"self": 126.67987455600246
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.24626851999994415,
"count": 1,
"self": 0.0014649199998757467,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2448036000000684,
"count": 1,
"self": 0.2448036000000684
}
}
}
}
}
}
}