ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3976372480392456,
"min": 1.3976372480392456,
"max": 1.4256682395935059,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69747.6875,
"min": 68747.4765625,
"max": 78026.1875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.26876090750436,
"min": 80.95901639344262,
"max": 380.4736842105263,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49432.0,
"min": 48902.0,
"max": 50603.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999888.0,
"min": 49986.0,
"max": 1999888.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999888.0,
"min": 49986.0,
"max": 1999888.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.490478992462158,
"min": 0.0772077813744545,
"max": 2.490478992462158,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1427.04443359375,
"min": 10.191427230834961,
"max": 1459.9599609375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9424733188763965,
"min": 1.7976881941272453,
"max": 3.9424733188763965,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2259.037211716175,
"min": 237.2948416247964,
"max": 2280.4073798060417,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9424733188763965,
"min": 1.7976881941272453,
"max": 3.9424733188763965,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2259.037211716175,
"min": 237.2948416247964,
"max": 2280.4073798060417,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015412659205564219,
"min": 0.013705016078246346,
"max": 0.01955843808342858,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.046237977616692655,
"min": 0.02860700830351561,
"max": 0.05867531425028574,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05829698741436005,
"min": 0.02432165074472626,
"max": 0.061103122557202974,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17489096224308015,
"min": 0.04864330148945252,
"max": 0.18131309226155282,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5801488066500017e-06,
"min": 3.5801488066500017e-06,
"max": 0.00029532622655792503,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0740446419950005e-05,
"min": 1.0740446419950005e-05,
"max": 0.0008440662186445999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10119335,
"min": 0.10119335,
"max": 0.198442075,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30358005,
"min": 0.20753875,
"max": 0.5813554000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.954816500000004e-05,
"min": 6.954816500000004e-05,
"max": 0.004922259542500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002086444950000001,
"min": 0.0002086444950000001,
"max": 0.01406963446,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689587861",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689590581"
},
"total": 2719.984942533,
"count": 1,
"self": 0.49159898499965493,
"children": {
"run_training.setup": {
"total": 0.04442108000012013,
"count": 1,
"self": 0.04442108000012013
},
"TrainerController.start_learning": {
"total": 2719.448922468,
"count": 1,
"self": 4.935879685907366,
"children": {
"TrainerController._reset_env": {
"total": 4.833218006999914,
"count": 1,
"self": 4.833218006999914
},
"TrainerController.advance": {
"total": 2709.5448541750934,
"count": 232346,
"self": 5.034703652055214,
"children": {
"env_step": {
"total": 2129.0727974530632,
"count": 232346,
"self": 1794.8700363611329,
"children": {
"SubprocessEnvManager._take_step": {
"total": 330.9553491219399,
"count": 232346,
"self": 18.571023672823458,
"children": {
"TorchPolicy.evaluate": {
"total": 312.38432544911643,
"count": 222918,
"self": 312.38432544911643
}
}
},
"workers": {
"total": 3.2474119699904804,
"count": 232346,
"self": 0.0,
"children": {
"worker_root": {
"total": 2710.911075688041,
"count": 232346,
"is_parallel": true,
"self": 1244.8224625140022,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009885859999485547,
"count": 1,
"is_parallel": true,
"self": 0.0002947460000086721,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006938399999398825,
"count": 2,
"is_parallel": true,
"self": 0.0006938399999398825
}
}
},
"UnityEnvironment.step": {
"total": 0.06016489700004968,
"count": 1,
"is_parallel": true,
"self": 0.0003532089997406729,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022675800005345081,
"count": 1,
"is_parallel": true,
"self": 0.00022675800005345081
},
"communicator.exchange": {
"total": 0.05881205700006831,
"count": 1,
"is_parallel": true,
"self": 0.05881205700006831
},
"steps_from_proto": {
"total": 0.0007728730001872464,
"count": 1,
"is_parallel": true,
"self": 0.00020182400021440117,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005710489999728452,
"count": 2,
"is_parallel": true,
"self": 0.0005710489999728452
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1466.0886131740388,
"count": 232345,
"is_parallel": true,
"self": 42.95473616915706,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 92.02454393499693,
"count": 232345,
"is_parallel": true,
"self": 92.02454393499693
},
"communicator.exchange": {
"total": 1221.7943250769097,
"count": 232345,
"is_parallel": true,
"self": 1221.7943250769097
},
"steps_from_proto": {
"total": 109.3150079929751,
"count": 232345,
"is_parallel": true,
"self": 41.04799620499011,
"children": {
"_process_rank_one_or_two_observation": {
"total": 68.26701178798498,
"count": 464690,
"is_parallel": true,
"self": 68.26701178798498
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 575.437353069975,
"count": 232346,
"self": 7.300154834013938,
"children": {
"process_trajectory": {
"total": 153.92435629396095,
"count": 232346,
"self": 152.53637366596058,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3879826280003726,
"count": 10,
"self": 1.3879826280003726
}
}
},
"_update_policy": {
"total": 414.21284194200007,
"count": 97,
"self": 361.5798773969984,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.63296454500164,
"count": 2910,
"self": 52.63296454500164
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0409999049443286e-06,
"count": 1,
"self": 1.0409999049443286e-06
},
"TrainerController._save_models": {
"total": 0.13496955899972818,
"count": 1,
"self": 0.002036918999692716,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13293264000003546,
"count": 1,
"self": 0.13293264000003546
}
}
}
}
}
}
}