{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.47957688570022583,
"min": 0.47957688570022583,
"max": 1.4426153898239136,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14433.345703125,
"min": 14433.345703125,
"max": 43763.1796875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4865909814834595,
"min": -0.11183732002973557,
"max": 0.4865909814834595,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 131.3795623779297,
"min": -26.50544548034668,
"max": 131.3795623779297,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0484333410859108,
"min": -0.0484333410859108,
"max": 0.3659016489982605,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -13.07700252532959,
"min": -13.07700252532959,
"max": 86.71868896484375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07141206910490015,
"min": 0.06447300411632632,
"max": 0.07229203153330689,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9997689674686021,
"min": 0.4661333402063451,
"max": 1.0607779120327903,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014826165117916562,
"min": 0.0001747241915129916,
"max": 0.015093749388976304,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20756631165083186,
"min": 0.0022714144896688908,
"max": 0.22640624083464456,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.308868992314285e-06,
"min": 7.308868992314285e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001023241658924,
"min": 0.0001023241658924,
"max": 0.0032592062135979993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243625714285716,
"min": 0.10243625714285716,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4341076000000001,
"min": 1.3691136000000002,
"max": 2.5723956,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025338208857142864,
"min": 0.00025338208857142864,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003547349240000001,
"min": 0.003547349240000001,
"max": 0.10866155979999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00899767316877842,
"min": 0.00899767316877842,
"max": 0.38825416564941406,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12596742808818817,
"min": 0.12596742808818817,
"max": 2.7177791595458984,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 380.2763157894737,
"min": 380.2763157894737,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28901.0,
"min": 15984.0,
"max": 32700.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.514423664169092,
"min": -1.0000000521540642,
"max": 1.514423664169092,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 115.09619847685099,
"min": -32.000001668930054,
"max": 115.09619847685099,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.514423664169092,
"min": -1.0000000521540642,
"max": 1.514423664169092,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 115.09619847685099,
"min": -32.000001668930054,
"max": 115.09619847685099,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.035546273898370076,
"min": 0.035546273898370076,
"max": 7.836827971972525,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7015168162761256,
"min": 2.7015168162761256,
"max": 125.3892475515604,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716928119",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsRND --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1716930225"
},
"total": 2106.35570868,
"count": 1,
"self": 0.5081409900003564,
"children": {
"run_training.setup": {
"total": 0.057067859999961,
"count": 1,
"self": 0.057067859999961
},
"TrainerController.start_learning": {
"total": 2105.79049983,
"count": 1,
"self": 1.426072901970656,
"children": {
"TrainerController._reset_env": {
"total": 3.3348488899999893,
"count": 1,
"self": 3.3348488899999893
},
"TrainerController.advance": {
"total": 2100.915546093029,
"count": 63535,
"self": 1.4472120960695065,
"children": {
"env_step": {
"total": 1449.6211711179808,
"count": 63535,
"self": 1314.444100573939,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.30099373104713,
"count": 63535,
"self": 4.792444272101875,
"children": {
"TorchPolicy.evaluate": {
"total": 129.50854945894525,
"count": 62550,
"self": 129.50854945894525
}
}
},
"workers": {
"total": 0.876076812994711,
"count": 63535,
"self": 0.0,
"children": {
"worker_root": {
"total": 2100.498114184982,
"count": 63535,
"is_parallel": true,
"self": 910.3144311209251,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005710314999987531,
"count": 1,
"is_parallel": true,
"self": 0.004195181999989472,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001515132999998059,
"count": 8,
"is_parallel": true,
"self": 0.001515132999998059
}
}
},
"UnityEnvironment.step": {
"total": 0.07314961100007622,
"count": 1,
"is_parallel": true,
"self": 0.0006358810001074744,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000439314999994167,
"count": 1,
"is_parallel": true,
"self": 0.000439314999994167
},
"communicator.exchange": {
"total": 0.07047092300001623,
"count": 1,
"is_parallel": true,
"self": 0.07047092300001623
},
"steps_from_proto": {
"total": 0.0016034919999583508,
"count": 1,
"is_parallel": true,
"self": 0.0003223259998321737,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001281166000126177,
"count": 8,
"is_parallel": true,
"self": 0.001281166000126177
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1190.183683064057,
"count": 63534,
"is_parallel": true,
"self": 34.34774518101881,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.05077775602456,
"count": 63534,
"is_parallel": true,
"self": 23.05077775602456
},
"communicator.exchange": {
"total": 1034.3826115150046,
"count": 63534,
"is_parallel": true,
"self": 1034.3826115150046
},
"steps_from_proto": {
"total": 98.40254861200913,
"count": 63534,
"is_parallel": true,
"self": 19.874349921879457,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.52819869012967,
"count": 508272,
"is_parallel": true,
"self": 78.52819869012967
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 649.8471628789787,
"count": 63535,
"self": 2.6537656110031094,
"children": {
"process_trajectory": {
"total": 130.80140233797124,
"count": 63535,
"self": 130.49082929597125,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31057304199998725,
"count": 2,
"self": 0.31057304199998725
}
}
},
"_update_policy": {
"total": 516.3919949300043,
"count": 446,
"self": 306.78031163497576,
"children": {
"TorchPPOOptimizer.update": {
"total": 209.61168329502857,
"count": 22845,
"self": 209.61168329502857
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4049996934772935e-06,
"count": 1,
"self": 1.4049996934772935e-06
},
"TrainerController._save_models": {
"total": 0.11403054000038537,
"count": 1,
"self": 0.0014645070000369742,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1125660330003484,
"count": 1,
"self": 0.1125660330003484
}
}
}
}
}
}
}