{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6250206828117371,
"min": 0.5980008244514465,
"max": 1.3863393068313599,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18520.61328125,
"min": 18143.96875,
"max": 42055.98828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989918.0,
"min": 29952.0,
"max": 989918.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989918.0,
"min": 29952.0,
"max": 989918.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.36092233657836914,
"min": -0.11305046826601028,
"max": 0.36092233657836914,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 94.92257690429688,
"min": -26.79296112060547,
"max": 94.93196105957031,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.016433294862508774,
"min": -0.02295013889670372,
"max": 0.5422762632369995,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.321956634521484,
"min": -5.967036247253418,
"max": 128.51947021484375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07004198653918685,
"min": 0.06461773636770117,
"max": 0.07306524962972698,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0506297980878028,
"min": 0.4691999833191205,
"max": 1.0506297980878028,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013513708031953927,
"min": 0.0005638603203181045,
"max": 0.016610162635472426,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2027056204793089,
"min": 0.005074742882862941,
"max": 0.23254227689661394,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.580417473226666e-06,
"min": 7.580417473226666e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011370626209839999,
"min": 0.00011370626209839999,
"max": 0.0035086250304583997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252677333333335,
"min": 0.10252677333333335,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5379016000000003,
"min": 1.3886848,
"max": 2.5695416000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000262424656,
"min": 0.000262424656,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039363698400000004,
"min": 0.0039363698400000004,
"max": 0.11697720584,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0121944360435009,
"min": 0.0121944360435009,
"max": 0.48069414496421814,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1829165369272232,
"min": 0.17437925934791565,
"max": 3.364859104156494,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 476.0,
"min": 434.03076923076924,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31416.0,
"min": 15984.0,
"max": 33096.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.311815121070002,
"min": -1.0000000521540642,
"max": 1.473627661053951,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 86.57979799062014,
"min": -30.673001691699028,
"max": 95.78579796850681,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.311815121070002,
"min": -1.0000000521540642,
"max": 1.473627661053951,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 86.57979799062014,
"min": -30.673001691699028,
"max": 95.78579796850681,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06038140391981208,
"min": 0.05606997727882117,
"max": 9.462827701121569,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9851726587075973,
"min": 3.644548523123376,
"max": 151.4052432179451,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690702538",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690704684"
},
"total": 2146.665549881,
"count": 1,
"self": 0.7774393679997047,
"children": {
"run_training.setup": {
"total": 0.031940595000151006,
"count": 1,
"self": 0.031940595000151006
},
"TrainerController.start_learning": {
"total": 2145.856169918,
"count": 1,
"self": 1.3880385020811445,
"children": {
"TrainerController._reset_env": {
"total": 4.234977170000093,
"count": 1,
"self": 4.234977170000093
},
"TrainerController.advance": {
"total": 2140.0772520659184,
"count": 63447,
"self": 1.3538839539182845,
"children": {
"env_step": {
"total": 1478.6054591480013,
"count": 63447,
"self": 1369.4531770049402,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.33309268902735,
"count": 63447,
"self": 4.684526995035412,
"children": {
"TorchPolicy.evaluate": {
"total": 103.64856569399194,
"count": 62545,
"self": 103.64856569399194
}
}
},
"workers": {
"total": 0.8191894540336762,
"count": 63447,
"self": 0.0,
"children": {
"worker_root": {
"total": 2140.9765999859874,
"count": 63447,
"is_parallel": true,
"self": 884.6423490559114,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017320550000476942,
"count": 1,
"is_parallel": true,
"self": 0.000543917000413785,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011881379996339092,
"count": 8,
"is_parallel": true,
"self": 0.0011881379996339092
}
}
},
"UnityEnvironment.step": {
"total": 0.04741278599999532,
"count": 1,
"is_parallel": true,
"self": 0.0005648250000831467,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004826950000733632,
"count": 1,
"is_parallel": true,
"self": 0.0004826950000733632
},
"communicator.exchange": {
"total": 0.04449043099998562,
"count": 1,
"is_parallel": true,
"self": 0.04449043099998562
},
"steps_from_proto": {
"total": 0.0018748349998531921,
"count": 1,
"is_parallel": true,
"self": 0.00039123299984566984,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014836020000075223,
"count": 8,
"is_parallel": true,
"self": 0.0014836020000075223
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1256.334250930076,
"count": 63446,
"is_parallel": true,
"self": 34.37239634092407,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.89053177104506,
"count": 63446,
"is_parallel": true,
"self": 22.89053177104506
},
"communicator.exchange": {
"total": 1098.3270838920764,
"count": 63446,
"is_parallel": true,
"self": 1098.3270838920764
},
"steps_from_proto": {
"total": 100.74423892603045,
"count": 63446,
"is_parallel": true,
"self": 20.005360574964698,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.73887835106575,
"count": 507568,
"is_parallel": true,
"self": 80.73887835106575
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 660.1179089639988,
"count": 63447,
"self": 2.543370839988029,
"children": {
"process_trajectory": {
"total": 109.49104920101263,
"count": 63447,
"self": 109.15883235301226,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3322168480003711,
"count": 2,
"self": 0.3322168480003711
}
}
},
"_update_policy": {
"total": 548.0834889229982,
"count": 446,
"self": 356.67423549105047,
"children": {
"TorchPPOOptimizer.update": {
"total": 191.40925343194772,
"count": 22752,
"self": 191.40925343194772
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2730001799354795e-06,
"count": 1,
"self": 1.2730001799354795e-06
},
"TrainerController._save_models": {
"total": 0.15590090700015935,
"count": 1,
"self": 0.001823420000164333,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15407748699999502,
"count": 1,
"self": 0.15407748699999502
}
}
}
}
}
}
}