{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4074827432632446,
"min": 1.4074827432632446,
"max": 1.4285556077957153,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70003.96875,
"min": 69261.875,
"max": 76662.03125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 81.29304635761589,
"min": 81.29304635761589,
"max": 405.43548387096774,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49101.0,
"min": 48856.0,
"max": 50274.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999938.0,
"min": 49863.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999938.0,
"min": 49863.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.413158893585205,
"min": 0.06357457488775253,
"max": 2.4889261722564697,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1462.374267578125,
"min": 7.819672584533691,
"max": 1462.374267578125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7099732121225237,
"min": 1.7809921183237216,
"max": 4.00607226097887,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2248.2437665462494,
"min": 219.06203055381775,
"max": 2263.1318060159683,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7099732121225237,
"min": 1.7809921183237216,
"max": 4.00607226097887,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2248.2437665462494,
"min": 219.06203055381775,
"max": 2263.1318060159683,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016736365075727615,
"min": 0.014144914484738062,
"max": 0.020535371990020698,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.050209095227182844,
"min": 0.030483250932108297,
"max": 0.059815653421052654,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05818153661158349,
"min": 0.022464406831810873,
"max": 0.06182031215478977,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17454460983475048,
"min": 0.044928813663621746,
"max": 0.1780934318900108,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6661987779666605e-06,
"min": 3.6661987779666605e-06,
"max": 0.00029530732656422496,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0998596333899982e-05,
"min": 1.0998596333899982e-05,
"max": 0.0008438463187179,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10122203333333335,
"min": 0.10122203333333335,
"max": 0.198435775,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30366610000000005,
"min": 0.2075789,
"max": 0.5812821,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.097946333333325e-05,
"min": 7.097946333333325e-05,
"max": 0.004921945172499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021293838999999974,
"min": 0.00021293838999999974,
"max": 0.01406597679,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675213127",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675215434"
},
"total": 2306.972684537,
"count": 1,
"self": 0.43762978999984625,
"children": {
"run_training.setup": {
"total": 0.1671187610000402,
"count": 1,
"self": 0.1671187610000402
},
"TrainerController.start_learning": {
"total": 2306.3679359860002,
"count": 1,
"self": 4.031882575894997,
"children": {
"TrainerController._reset_env": {
"total": 11.147135011000046,
"count": 1,
"self": 11.147135011000046
},
"TrainerController.advance": {
"total": 2291.083530334105,
"count": 232363,
"self": 4.342702607066258,
"children": {
"env_step": {
"total": 1774.1463892380398,
"count": 232363,
"self": 1474.267267304049,
"children": {
"SubprocessEnvManager._take_step": {
"total": 297.1378240920199,
"count": 232363,
"self": 15.459900670995182,
"children": {
"TorchPolicy.evaluate": {
"total": 281.67792342102473,
"count": 222918,
"self": 68.8864196549876,
"children": {
"TorchPolicy.sample_actions": {
"total": 212.79150376603712,
"count": 222918,
"self": 212.79150376603712
}
}
}
}
},
"workers": {
"total": 2.7412978419708907,
"count": 232363,
"self": 0.0,
"children": {
"worker_root": {
"total": 2298.537688903023,
"count": 232363,
"is_parallel": true,
"self": 1099.6149110259603,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00209108799998603,
"count": 1,
"is_parallel": true,
"self": 0.00042048400001704067,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016706039999689892,
"count": 2,
"is_parallel": true,
"self": 0.0016706039999689892
}
}
},
"UnityEnvironment.step": {
"total": 0.02714494700001069,
"count": 1,
"is_parallel": true,
"self": 0.0002707519999489705,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000180617000012262,
"count": 1,
"is_parallel": true,
"self": 0.000180617000012262
},
"communicator.exchange": {
"total": 0.02583438900001056,
"count": 1,
"is_parallel": true,
"self": 0.02583438900001056
},
"steps_from_proto": {
"total": 0.0008591890000388958,
"count": 1,
"is_parallel": true,
"self": 0.0004124320000755688,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00044675699996332696,
"count": 2,
"is_parallel": true,
"self": 0.00044675699996332696
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1198.9227778770628,
"count": 232362,
"is_parallel": true,
"self": 33.79130265907111,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 71.96523324295794,
"count": 232362,
"is_parallel": true,
"self": 71.96523324295794
},
"communicator.exchange": {
"total": 1004.7940709899722,
"count": 232362,
"is_parallel": true,
"self": 1004.7940709899722
},
"steps_from_proto": {
"total": 88.37217098506159,
"count": 232362,
"is_parallel": true,
"self": 36.406620973915494,
"children": {
"_process_rank_one_or_two_observation": {
"total": 51.96555001114609,
"count": 464724,
"is_parallel": true,
"self": 51.96555001114609
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 512.5944384889992,
"count": 232363,
"self": 6.299093360095696,
"children": {
"process_trajectory": {
"total": 162.81159942890503,
"count": 232363,
"self": 161.4947480069057,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3168514219993313,
"count": 10,
"self": 1.3168514219993313
}
}
},
"_update_policy": {
"total": 343.4837456999985,
"count": 97,
"self": 288.4110998899904,
"children": {
"TorchPPOOptimizer.update": {
"total": 55.07264581000811,
"count": 2910,
"self": 55.07264581000811
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.79000105871819e-07,
"count": 1,
"self": 9.79000105871819e-07
},
"TrainerController._save_models": {
"total": 0.1053870860000643,
"count": 1,
"self": 0.001967186000001675,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10341990000006263,
"count": 1,
"self": 0.10341990000006263
}
}
}
}
}
}
}