{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.37521347403526306,
"min": 0.36693716049194336,
"max": 1.4712775945663452,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11220.3837890625,
"min": 11119.6640625,
"max": 44632.67578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989881.0,
"min": 29952.0,
"max": 989881.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989881.0,
"min": 29952.0,
"max": 989881.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7008219957351685,
"min": -0.11797916889190674,
"max": 0.7192474007606506,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 203.23837280273438,
"min": -28.432979583740234,
"max": 207.5098114013672,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.016824286431074142,
"min": -0.022372625768184662,
"max": 0.1762920767068863,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.879043102264404,
"min": -6.130099296569824,
"max": 42.48638916015625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06935427494352639,
"min": 0.06620572407107737,
"max": 0.07411827280794933,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9709598492093694,
"min": 0.4878644189284177,
"max": 1.0581663122672276,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013820216292515397,
"min": 0.0007203140909728926,
"max": 0.01666263421695873,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19348302809521556,
"min": 0.008643769091674712,
"max": 0.24795924529704885,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.5524260539857145e-06,
"min": 7.5524260539857145e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001057339647558,
"min": 0.0001057339647558,
"max": 0.003632654289115299,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251744285714284,
"min": 0.10251744285714284,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352441999999999,
"min": 1.3886848,
"max": 2.6108847,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002614925414285715,
"min": 0.0002614925414285715,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003660895580000001,
"min": 0.003660895580000001,
"max": 0.12110738152999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009786483831703663,
"min": 0.009786483831703663,
"max": 0.3853275775909424,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13701076805591583,
"min": 0.13701076805591583,
"max": 2.6972930431365967,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 273.037037037037,
"min": 255.4695652173913,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29488.0,
"min": 15984.0,
"max": 32292.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6733559479407214,
"min": -1.0000000521540642,
"max": 1.7404396381100704,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 182.39579832553864,
"min": -30.322801634669304,
"max": 201.89099802076817,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6733559479407214,
"min": -1.0000000521540642,
"max": 1.7404396381100704,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 182.39579832553864,
"min": -30.322801634669304,
"max": 201.89099802076817,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028104824934583564,
"min": 0.026106195667955984,
"max": 7.89645841345191,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0634259178696084,
"min": 3.002212501814938,
"max": 126.34333461523056,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706263057",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1706265475"
},
"total": 2418.5291520540004,
"count": 1,
"self": 0.4937112890006574,
"children": {
"run_training.setup": {
"total": 0.052030088000265096,
"count": 1,
"self": 0.052030088000265096
},
"TrainerController.start_learning": {
"total": 2417.9834106769995,
"count": 1,
"self": 1.825907090124474,
"children": {
"TrainerController._reset_env": {
"total": 2.0745065450000766,
"count": 1,
"self": 2.0745065450000766
},
"TrainerController.advance": {
"total": 2413.991288580875,
"count": 64248,
"self": 1.9445191926961343,
"children": {
"env_step": {
"total": 1763.7244095521964,
"count": 64248,
"self": 1610.373990484085,
"children": {
"SubprocessEnvManager._take_step": {
"total": 152.22948918414022,
"count": 64248,
"self": 5.661690075173283,
"children": {
"TorchPolicy.evaluate": {
"total": 146.56779910896694,
"count": 62560,
"self": 146.56779910896694
}
}
},
"workers": {
"total": 1.1209298839712574,
"count": 64248,
"self": 0.0,
"children": {
"worker_root": {
"total": 2411.8606322089618,
"count": 64248,
"is_parallel": true,
"self": 940.4462911188457,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018126790000678739,
"count": 1,
"is_parallel": true,
"self": 0.0005416140002125758,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001271064999855298,
"count": 8,
"is_parallel": true,
"self": 0.001271064999855298
}
}
},
"UnityEnvironment.step": {
"total": 0.05332142399993245,
"count": 1,
"is_parallel": true,
"self": 0.0006225530000847357,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048462999984622,
"count": 1,
"is_parallel": true,
"self": 0.00048462999984622
},
"communicator.exchange": {
"total": 0.05032278899989251,
"count": 1,
"is_parallel": true,
"self": 0.05032278899989251
},
"steps_from_proto": {
"total": 0.001891452000108984,
"count": 1,
"is_parallel": true,
"self": 0.0003878540001096553,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015035979999993287,
"count": 8,
"is_parallel": true,
"self": 0.0015035979999993287
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1471.414341090116,
"count": 64247,
"is_parallel": true,
"self": 38.449438196089886,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.813824333921275,
"count": 64247,
"is_parallel": true,
"self": 27.813824333921275
},
"communicator.exchange": {
"total": 1290.9877652590562,
"count": 64247,
"is_parallel": true,
"self": 1290.9877652590562
},
"steps_from_proto": {
"total": 114.16331330104867,
"count": 64247,
"is_parallel": true,
"self": 24.35430929709719,
"children": {
"_process_rank_one_or_two_observation": {
"total": 89.80900400395149,
"count": 513976,
"is_parallel": true,
"self": 89.80900400395149
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 648.3223598359823,
"count": 64248,
"self": 3.2962348980208844,
"children": {
"process_trajectory": {
"total": 136.8373864219502,
"count": 64248,
"self": 136.6399273169509,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19745910499932506,
"count": 2,
"self": 0.19745910499932506
}
}
},
"_update_policy": {
"total": 508.1887385160112,
"count": 455,
"self": 298.6700501180212,
"children": {
"TorchPPOOptimizer.update": {
"total": 209.51868839799,
"count": 22833,
"self": 209.51868839799
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.96999915514607e-07,
"count": 1,
"self": 9.96999915514607e-07
},
"TrainerController._save_models": {
"total": 0.0917074640001374,
"count": 1,
"self": 0.001428682000550907,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0902787819995865,
"count": 1,
"self": 0.0902787819995865
}
}
}
}
}
}
}