{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9578749537467957,
"min": 0.9578749537467957,
"max": 2.8622751235961914,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9166.86328125,
"min": 9166.86328125,
"max": 29469.984375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.566094398498535,
"min": 0.4249665439128876,
"max": 12.566094398498535,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2450.388427734375,
"min": 82.44351196289062,
"max": 2520.633056640625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0649105204721826,
"min": 0.059168840741525244,
"max": 0.07416072042696589,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2596420818887304,
"min": 0.23667536296610098,
"max": 0.37080360213482944,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18414587184202438,
"min": 0.12566335704953718,
"max": 0.2828918762066785,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7365834873680975,
"min": 0.5026534281981487,
"max": 1.4144593810333925,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.613636363636363,
"min": 3.5681818181818183,
"max": 24.927272727272726,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1083.0,
"min": 157.0,
"max": 1371.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.613636363636363,
"min": 3.5681818181818183,
"max": 24.927272727272726,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1083.0,
"min": 157.0,
"max": 1371.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690130910",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690131487"
},
"total": 576.372605736,
"count": 1,
"self": 0.4418644920000361,
"children": {
"run_training.setup": {
"total": 0.040566742999999406,
"count": 1,
"self": 0.040566742999999406
},
"TrainerController.start_learning": {
"total": 575.8901745009999,
"count": 1,
"self": 0.7577037469928882,
"children": {
"TrainerController._reset_env": {
"total": 6.461294677000069,
"count": 1,
"self": 6.461294677000069
},
"TrainerController.advance": {
"total": 568.495681685007,
"count": 18202,
"self": 0.39214859399908164,
"children": {
"env_step": {
"total": 568.1035330910079,
"count": 18202,
"self": 413.6486970400098,
"children": {
"SubprocessEnvManager._take_step": {
"total": 154.072196902006,
"count": 18202,
"self": 2.348685619015896,
"children": {
"TorchPolicy.evaluate": {
"total": 151.7235112829901,
"count": 18202,
"self": 151.7235112829901
}
}
},
"workers": {
"total": 0.38263914899209794,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 573.7220324020074,
"count": 18202,
"is_parallel": true,
"self": 266.22545352900386,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007567658999960258,
"count": 1,
"is_parallel": true,
"self": 0.00541351000015311,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021541489998071484,
"count": 10,
"is_parallel": true,
"self": 0.0021541489998071484
}
}
},
"UnityEnvironment.step": {
"total": 0.03887141799998517,
"count": 1,
"is_parallel": true,
"self": 0.0006153179999728309,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044006699999954435,
"count": 1,
"is_parallel": true,
"self": 0.00044006699999954435
},
"communicator.exchange": {
"total": 0.03523614699997779,
"count": 1,
"is_parallel": true,
"self": 0.03523614699997779
},
"steps_from_proto": {
"total": 0.0025798860000350032,
"count": 1,
"is_parallel": true,
"self": 0.0005214759999034868,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020584100001315164,
"count": 10,
"is_parallel": true,
"self": 0.0020584100001315164
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 307.4965788730035,
"count": 18201,
"is_parallel": true,
"self": 13.040845756993235,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.561002471009147,
"count": 18201,
"is_parallel": true,
"self": 6.561002471009147
},
"communicator.exchange": {
"total": 243.2619037970053,
"count": 18201,
"is_parallel": true,
"self": 243.2619037970053
},
"steps_from_proto": {
"total": 44.63282684799583,
"count": 18201,
"is_parallel": true,
"self": 8.836483108989228,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.7963437390066,
"count": 182010,
"is_parallel": true,
"self": 35.7963437390066
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016600200001448684,
"count": 1,
"self": 0.00016600200001448684,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 563.2455618920003,
"count": 545179,
"is_parallel": true,
"self": 13.411427696099622,
"children": {
"process_trajectory": {
"total": 313.68326283790077,
"count": 545179,
"is_parallel": true,
"self": 311.0956161099008,
"children": {
"RLTrainer._checkpoint": {
"total": 2.5876467279999815,
"count": 4,
"is_parallel": true,
"self": 2.5876467279999815
}
}
},
"_update_policy": {
"total": 236.1508713579999,
"count": 90,
"is_parallel": true,
"self": 91.04940088400281,
"children": {
"TorchPPOOptimizer.update": {
"total": 145.1014704739971,
"count": 4587,
"is_parallel": true,
"self": 145.1014704739971
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.17532839000000422,
"count": 1,
"self": 0.0009122659998865856,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17441612400011763,
"count": 1,
"self": 0.17441612400011763
}
}
}
}
}
}
}