{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.871245861053467,
"min": 2.871245861053467,
"max": 2.871245861053467,
"count": 1
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 29467.595703125,
"min": 29467.595703125,
"max": 29467.595703125,
"count": 1
},
"SnowballTarget.Step.mean": {
"value": 9952.0,
"min": 9952.0,
"max": 9952.0,
"count": 1
},
"SnowballTarget.Step.sum": {
"value": 9952.0,
"min": 9952.0,
"max": 9952.0,
"count": 1
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.19350029528141022,
"min": 0.19350029528141022,
"max": 0.19350029528141022,
"count": 1
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 37.539058685302734,
"min": 37.539058685302734,
"max": 37.539058685302734,
"count": 1
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 1
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 8756.0,
"count": 1
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06077112006061956,
"min": 0.06077112006061956,
"max": 0.06077112006061956,
"count": 1
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24308448024247825,
"min": 0.24308448024247825,
"max": 0.24308448024247825,
"count": 1
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.12860633954223172,
"min": 0.12860633954223172,
"max": 0.12860633954223172,
"count": 1
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.5144253581689269,
"min": 0.5144253581689269,
"max": 0.5144253581689269,
"count": 1
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 0.00022940005411999998,
"min": 0.00022940005411999998,
"max": 0.00022940005411999998,
"count": 1
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0009176002164799999,
"min": 0.0009176002164799999,
"max": 0.0009176002164799999,
"count": 1
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.14588,
"min": 0.14588,
"max": 0.14588,
"count": 1
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.58352,
"min": 0.58352,
"max": 0.58352,
"count": 1
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.002299412,
"min": 0.002299412,
"max": 0.002299412,
"count": 1
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.009197648,
"min": 0.009197648,
"max": 0.009197648,
"count": 1
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 3.2045454545454546,
"min": 3.2045454545454546,
"max": 3.2045454545454546,
"count": 1
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 141.0,
"min": 141.0,
"max": 141.0,
"count": 1
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 3.2045454545454546,
"min": 3.2045454545454546,
"max": 3.2045454545454546,
"count": 1
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 141.0,
"min": 141.0,
"max": 141.0,
"count": 1
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690757484",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690757516"
},
"total": 32.29701657800001,
"count": 1,
"self": 0.4326129780001793,
"children": {
"run_training.setup": {
"total": 0.03678964599998835,
"count": 1,
"self": 0.03678964599998835
},
"TrainerController.start_learning": {
"total": 31.827613953999844,
"count": 1,
"self": 0.052688158000137264,
"children": {
"TrainerController._reset_env": {
"total": 5.295655569000019,
"count": 1,
"self": 5.295655569000019
},
"TrainerController.advance": {
"total": 26.31787561400006,
"count": 934,
"self": 0.016715557000452463,
"children": {
"env_step": {
"total": 26.301160056999606,
"count": 934,
"self": 19.308190097999386,
"children": {
"SubprocessEnvManager._take_step": {
"total": 6.976216229000556,
"count": 934,
"self": 0.09144807400070931,
"children": {
"TorchPolicy.evaluate": {
"total": 6.884768154999847,
"count": 934,
"self": 6.884768154999847
}
}
},
"workers": {
"total": 0.016753729999663847,
"count": 934,
"self": 0.0,
"children": {
"worker_root": {
"total": 31.585048957997287,
"count": 934,
"is_parallel": true,
"self": 17.413109775998464,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006592164999801753,
"count": 1,
"is_parallel": true,
"self": 0.004779910999786807,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018122540000149456,
"count": 10,
"is_parallel": true,
"self": 0.0018122540000149456
}
}
},
"UnityEnvironment.step": {
"total": 0.051494462000164276,
"count": 1,
"is_parallel": true,
"self": 0.0005400480001753749,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037356899997575965,
"count": 1,
"is_parallel": true,
"self": 0.00037356899997575965
},
"communicator.exchange": {
"total": 0.04809759399995528,
"count": 1,
"is_parallel": true,
"self": 0.04809759399995528
},
"steps_from_proto": {
"total": 0.0024832510000578623,
"count": 1,
"is_parallel": true,
"self": 0.0004327730002842145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020504779997736478,
"count": 10,
"is_parallel": true,
"self": 0.0020504779997736478
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 14.171939181998823,
"count": 933,
"is_parallel": true,
"self": 0.5773068859887189,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.3079306910046853,
"count": 933,
"is_parallel": true,
"self": 0.3079306910046853
},
"communicator.exchange": {
"total": 11.208800530999952,
"count": 933,
"is_parallel": true,
"self": 11.208800530999952
},
"steps_from_proto": {
"total": 2.0779010740054673,
"count": 933,
"is_parallel": true,
"self": 0.3804511670175543,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1.697449906987913,
"count": 9330,
"is_parallel": true,
"self": 1.697449906987913
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00024163799980669864,
"count": 1,
"self": 0.00024163799980669864,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 26.099451395997903,
"count": 25800,
"is_parallel": true,
"self": 0.614665287010439,
"children": {
"process_trajectory": {
"total": 15.276181148987234,
"count": 25800,
"is_parallel": true,
"self": 15.276181148987234
},
"_update_policy": {
"total": 10.20860496000023,
"count": 4,
"is_parallel": true,
"self": 4.436457219998601,
"children": {
"TorchPPOOptimizer.update": {
"total": 5.772147740001628,
"count": 201,
"is_parallel": true,
"self": 5.772147740001628
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.16115297499982262,
"count": 1,
"self": 0.0009271329997773137,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1602258420000453,
"count": 1,
"self": 0.1602258420000453
}
}
}
}
}
}
}