{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2865307331085205,
"min": 0.2865307331085205,
"max": 1.4149564504623413,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8540.908203125,
"min": 8540.908203125,
"max": 42924.1171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989915.0,
"min": 29952.0,
"max": 989915.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989915.0,
"min": 29952.0,
"max": 989915.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.47274520993232727,
"min": -0.19388015568256378,
"max": 0.5382140278816223,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 127.16846466064453,
"min": -45.9495964050293,
"max": 148.5470733642578,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007173711899667978,
"min": 0.002500066999346018,
"max": 0.6946797370910645,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.9297285079956055,
"min": 0.6725180149078369,
"max": 164.63909912109375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0655152004212141,
"min": 0.0655152004212141,
"max": 0.07491459351251326,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9827280063182116,
"min": 0.5238796527927002,
"max": 1.0753788167009284,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015727340592679363,
"min": 0.00020684231200680908,
"max": 0.016012362470298924,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23591010889019043,
"min": 0.0022752654320749,
"max": 0.23591010889019043,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.519957493379998e-06,
"min": 7.519957493379998e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011279936240069997,
"min": 0.00011279936240069997,
"max": 0.0032556471147844,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250662,
"min": 0.10250662,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375993000000001,
"min": 1.3886848,
"max": 2.5275456000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000260411338,
"min": 0.000260411338,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00390617007,
"min": 0.00390617007,
"max": 0.10855303844,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00717059476301074,
"min": 0.00717059476301074,
"max": 0.518469512462616,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10755892097949982,
"min": 0.10665935277938843,
"max": 3.629286527633667,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 372.2098765432099,
"min": 339.2168674698795,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30149.0,
"min": 15984.0,
"max": 32383.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.478707298454715,
"min": -1.0000000521540642,
"max": 1.6380190343729086,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 121.25399847328663,
"min": -31.99920167028904,
"max": 137.59359888732433,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.478707298454715,
"min": -1.0000000521540642,
"max": 1.6380190343729086,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 121.25399847328663,
"min": -31.99920167028904,
"max": 137.59359888732433,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02804278771651386,
"min": 0.027479513129427296,
"max": 11.071777628734708,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.2995085927541368,
"min": 2.2995085927541368,
"max": 177.14844205975533,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693754577",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693756755"
},
"total": 2178.7150572440005,
"count": 1,
"self": 0.5268930880006337,
"children": {
"run_training.setup": {
"total": 0.05170946399994136,
"count": 1,
"self": 0.05170946399994136
},
"TrainerController.start_learning": {
"total": 2178.136454692,
"count": 1,
"self": 1.3922886390969325,
"children": {
"TrainerController._reset_env": {
"total": 4.195742909000046,
"count": 1,
"self": 4.195742909000046
},
"TrainerController.advance": {
"total": 2172.451186598903,
"count": 63746,
"self": 1.3794617288954214,
"children": {
"env_step": {
"total": 1518.4770248770392,
"count": 63746,
"self": 1409.3409154951294,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.29411246392146,
"count": 63746,
"self": 4.615468237866935,
"children": {
"TorchPolicy.evaluate": {
"total": 103.67864422605453,
"count": 62557,
"self": 103.67864422605453
}
}
},
"workers": {
"total": 0.8419969179883537,
"count": 63746,
"self": 0.0,
"children": {
"worker_root": {
"total": 2173.4163545009897,
"count": 63746,
"is_parallel": true,
"self": 876.9521411079927,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016933300000800955,
"count": 1,
"is_parallel": true,
"self": 0.000495365000460879,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011979649996192165,
"count": 8,
"is_parallel": true,
"self": 0.0011979649996192165
}
}
},
"UnityEnvironment.step": {
"total": 0.05049276600016128,
"count": 1,
"is_parallel": true,
"self": 0.000588400000196998,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005149620001247968,
"count": 1,
"is_parallel": true,
"self": 0.0005149620001247968
},
"communicator.exchange": {
"total": 0.047484227000040846,
"count": 1,
"is_parallel": true,
"self": 0.047484227000040846
},
"steps_from_proto": {
"total": 0.0019051769997986412,
"count": 1,
"is_parallel": true,
"self": 0.0003731109998170723,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001532065999981569,
"count": 8,
"is_parallel": true,
"self": 0.001532065999981569
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1296.464213392997,
"count": 63745,
"is_parallel": true,
"self": 33.85473085606782,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.758275278991505,
"count": 63745,
"is_parallel": true,
"self": 22.758275278991505
},
"communicator.exchange": {
"total": 1135.4116592869839,
"count": 63745,
"is_parallel": true,
"self": 1135.4116592869839
},
"steps_from_proto": {
"total": 104.43954797095375,
"count": 63745,
"is_parallel": true,
"self": 20.338374040010194,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.10117393094356,
"count": 509960,
"is_parallel": true,
"self": 84.10117393094356
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 652.5946999929681,
"count": 63746,
"self": 2.6568288640100945,
"children": {
"process_trajectory": {
"total": 108.50572413695545,
"count": 63746,
"self": 108.2961990409558,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20952509599965197,
"count": 2,
"self": 0.20952509599965197
}
}
},
"_update_policy": {
"total": 541.4321469920026,
"count": 447,
"self": 352.09000334299526,
"children": {
"TorchPPOOptimizer.update": {
"total": 189.3421436490073,
"count": 22779,
"self": 189.3421436490073
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.149998732027598e-07,
"count": 1,
"self": 9.149998732027598e-07
},
"TrainerController._save_models": {
"total": 0.09723562999988644,
"count": 1,
"self": 0.0014273089996095223,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09580832100027692,
"count": 1,
"self": 0.09580832100027692
}
}
}
}
}
}
}