ppo-PyramidsRND/run_logs/timers.json
Genis
First Push
eaac506
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.49001896381378174,
"min": 0.49001896381378174,
"max": 1.5034692287445068,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14645.6865234375,
"min": 14645.6865234375,
"max": 45609.2421875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989891.0,
"min": 29952.0,
"max": 989891.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989891.0,
"min": 29952.0,
"max": 989891.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3086230456829071,
"min": -0.09220867604017258,
"max": 0.3086230456829071,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 80.85923767089844,
"min": -22.2222900390625,
"max": 80.85923767089844,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.00924289133399725,
"min": -0.00924289133399725,
"max": 0.1579740196466446,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.421637535095215,
"min": -2.421637535095215,
"max": 37.439842224121094,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06914126428948972,
"min": 0.06517837084824005,
"max": 0.07285535915814668,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0371189643423457,
"min": 0.47732801350795706,
"max": 1.0505344594906396,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011857123350442287,
"min": 0.00029982233889350754,
"max": 0.012120653791060438,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1778568502566343,
"min": 0.003897690405615598,
"max": 0.1778568502566343,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4828375057533364e-06,
"min": 7.4828375057533364e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011224256258630004,
"min": 0.00011224256258630004,
"max": 0.0033832094722635996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024942466666667,
"min": 0.1024942466666667,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5374137000000005,
"min": 1.3886848,
"max": 2.5277364000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025917524200000017,
"min": 0.00025917524200000017,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038876286300000024,
"min": 0.0038876286300000024,
"max": 0.11280086635999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007337408140301704,
"min": 0.007315743248909712,
"max": 0.20766839385032654,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11006112396717072,
"min": 0.10242040455341339,
"max": 1.4536787271499634,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 527.5833333333334,
"min": 527.5833333333334,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31655.0,
"min": 15984.0,
"max": 32729.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1723133134345214,
"min": -1.0000000521540642,
"max": 1.1723133134345214,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 70.33879880607128,
"min": -30.731001660227776,
"max": 70.33879880607128,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1723133134345214,
"min": -1.0000000521540642,
"max": 1.1723133134345214,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 70.33879880607128,
"min": -30.731001660227776,
"max": 70.33879880607128,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04111563407835395,
"min": 0.04111563407835395,
"max": 7.148975944379345,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4669380447012372,
"min": 2.316392977605574,
"max": 114.38361511006951,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690314777",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690316881"
},
"total": 2103.512406506,
"count": 1,
"self": 0.532003005999286,
"children": {
"run_training.setup": {
"total": 0.03505809200009935,
"count": 1,
"self": 0.03505809200009935
},
"TrainerController.start_learning": {
"total": 2102.9453454080003,
"count": 1,
"self": 1.4242780520585256,
"children": {
"TrainerController._reset_env": {
"total": 4.034055599999874,
"count": 1,
"self": 4.034055599999874
},
"TrainerController.advance": {
"total": 2097.322601750942,
"count": 63317,
"self": 1.402749899922128,
"children": {
"env_step": {
"total": 1413.8305108029597,
"count": 63317,
"self": 1297.8667542019907,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.1542121799207,
"count": 63317,
"self": 4.712077865981428,
"children": {
"TorchPolicy.evaluate": {
"total": 110.44213431393928,
"count": 62564,
"self": 110.44213431393928
}
}
},
"workers": {
"total": 0.8095444210482583,
"count": 63317,
"self": 0.0,
"children": {
"worker_root": {
"total": 2098.1707575230316,
"count": 63317,
"is_parallel": true,
"self": 912.6269584240035,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017652750000252126,
"count": 1,
"is_parallel": true,
"self": 0.000537261999625116,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012280130004000966,
"count": 8,
"is_parallel": true,
"self": 0.0012280130004000966
}
}
},
"UnityEnvironment.step": {
"total": 0.046720362000087334,
"count": 1,
"is_parallel": true,
"self": 0.0005681690001893003,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004731819999506115,
"count": 1,
"is_parallel": true,
"self": 0.0004731819999506115
},
"communicator.exchange": {
"total": 0.04389173999993545,
"count": 1,
"is_parallel": true,
"self": 0.04389173999993545
},
"steps_from_proto": {
"total": 0.0017872710000119696,
"count": 1,
"is_parallel": true,
"self": 0.0003627640005561261,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014245069994558435,
"count": 8,
"is_parallel": true,
"self": 0.0014245069994558435
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1185.543799099028,
"count": 63316,
"is_parallel": true,
"self": 33.81992554412932,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.51350517788933,
"count": 63316,
"is_parallel": true,
"self": 22.51350517788933
},
"communicator.exchange": {
"total": 1029.5202450829786,
"count": 63316,
"is_parallel": true,
"self": 1029.5202450829786
},
"steps_from_proto": {
"total": 99.69012329403085,
"count": 63316,
"is_parallel": true,
"self": 19.978628200085495,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.71149509394536,
"count": 506528,
"is_parallel": true,
"self": 79.71149509394536
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 682.0893410480601,
"count": 63317,
"self": 2.48643520702376,
"children": {
"process_trajectory": {
"total": 109.0537140320348,
"count": 63317,
"self": 108.72710298603488,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3266110459999254,
"count": 2,
"self": 0.3266110459999254
}
}
},
"_update_policy": {
"total": 570.5491918090015,
"count": 448,
"self": 350.8069666059655,
"children": {
"TorchPPOOptimizer.update": {
"total": 219.742225203036,
"count": 22797,
"self": 219.742225203036
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.630002750782296e-07,
"count": 1,
"self": 9.630002750782296e-07
},
"TrainerController._save_models": {
"total": 0.1644090419999884,
"count": 1,
"self": 0.003010126999924978,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16139891500006343,
"count": 1,
"self": 0.16139891500006343
}
}
}
}
}
}
}