{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8920056819915771,
"min": 0.8920056819915771,
"max": 2.847839832305908,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9655.0693359375,
"min": 8133.8291015625,
"max": 31827.458984375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 3.360323429107666,
"min": 0.25281116366386414,
"max": 3.374553918838501,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 339.3926696777344,
"min": 24.52268409729004,
"max": 339.3926696777344,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.365384615384617,
"min": 4.0,
"max": 26.56,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1371.0,
"min": 176.0,
"max": 1423.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.365384615384617,
"min": 4.0,
"max": 26.56,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1371.0,
"min": 176.0,
"max": 1423.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.047540761303353066,
"min": 0.044641229197914065,
"max": 0.051767029237762476,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.23770380651676531,
"min": 0.1791434840697548,
"max": 0.25883514618881237,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.13847568919882175,
"min": 0.07153503330191598,
"max": 0.1827829159796238,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6923784459941088,
"min": 0.2861401332076639,
"max": 0.913914579898119,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.488097503999996e-06,
"min": 7.488097503999996e-06,
"max": 0.0002920740026419999,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.7440487519999984e-05,
"min": 3.7440487519999984e-05,
"max": 0.0013900080366639998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10249599999999999,
"min": 0.10249599999999999,
"max": 0.19735800000000003,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5124799999999999,
"min": 0.42978400000000005,
"max": 0.963336,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00013455039999999994,
"min": 0.00013455039999999994,
"max": 0.0048681641999999995,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0006727519999999997,
"min": 0.0006727519999999997,
"max": 0.0231704664,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694362327",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1694362911"
},
"total": 583.600179932,
"count": 1,
"self": 0.424705738999819,
"children": {
"run_training.setup": {
"total": 0.040271712000048865,
"count": 1,
"self": 0.040271712000048865
},
"TrainerController.start_learning": {
"total": 583.1352024810001,
"count": 1,
"self": 0.6494099940264277,
"children": {
"TrainerController._reset_env": {
"total": 4.687932924000052,
"count": 1,
"self": 4.687932924000052
},
"TrainerController.advance": {
"total": 577.6398468719738,
"count": 18356,
"self": 0.2868464799651065,
"children": {
"env_step": {
"total": 577.3530003920087,
"count": 18356,
"self": 370.46875594598623,
"children": {
"SubprocessEnvManager._take_step": {
"total": 206.60218714101325,
"count": 18356,
"self": 1.6797750560265285,
"children": {
"TorchPolicy.evaluate": {
"total": 204.92241208498672,
"count": 18356,
"self": 204.92241208498672
}
}
},
"workers": {
"total": 0.28205730500917525,
"count": 18356,
"self": 0.0,
"children": {
"worker_root": {
"total": 581.5697969669911,
"count": 18356,
"is_parallel": true,
"self": 334.3999187599966,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022547870000835246,
"count": 1,
"is_parallel": true,
"self": 0.0005270829999517446,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00172770400013178,
"count": 10,
"is_parallel": true,
"self": 0.00172770400013178
}
}
},
"UnityEnvironment.step": {
"total": 0.10927042999992409,
"count": 1,
"is_parallel": true,
"self": 0.0006206059999840363,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038260099995568453,
"count": 1,
"is_parallel": true,
"self": 0.00038260099995568453
},
"communicator.exchange": {
"total": 0.10682816899998215,
"count": 1,
"is_parallel": true,
"self": 0.10682816899998215
},
"steps_from_proto": {
"total": 0.0014390540000022156,
"count": 1,
"is_parallel": true,
"self": 0.00043369000036364014,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010053639996385755,
"count": 10,
"is_parallel": true,
"self": 0.0010053639996385755
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 247.16987820699455,
"count": 18355,
"is_parallel": true,
"self": 10.44284237900797,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.169266344012158,
"count": 18355,
"is_parallel": true,
"self": 5.169266344012158
},
"communicator.exchange": {
"total": 195.99508465898998,
"count": 18355,
"is_parallel": true,
"self": 195.99508465898998
},
"steps_from_proto": {
"total": 35.562684824984444,
"count": 18355,
"is_parallel": true,
"self": 6.528902865963801,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.033781959020644,
"count": 183550,
"is_parallel": true,
"self": 29.033781959020644
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.001049689999945258,
"count": 1,
"self": 0.001049689999945258,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 578.1794194920101,
"count": 10547,
"is_parallel": true,
"self": 0.324051139013136,
"children": {
"process_trajectory": {
"total": 56.35117638199631,
"count": 10547,
"is_parallel": true,
"self": 54.6480896269959,
"children": {
"RLTrainer._checkpoint": {
"total": 1.7030867550004132,
"count": 4,
"is_parallel": true,
"self": 1.7030867550004132
}
}
},
"_update_policy": {
"total": 521.5041919710006,
"count": 91,
"is_parallel": true,
"self": 242.01861293000502,
"children": {
"TorchPPOOptimizer.update": {
"total": 279.4855790409956,
"count": 7280,
"is_parallel": true,
"self": 279.4855790409956
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15696300099989458,
"count": 1,
"self": 0.0011381809999875259,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15582481999990705,
"count": 1,
"self": 0.15582481999990705
}
}
}
}
}
}
}