ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4120174646377563,
"min": 1.4120174646377563,
"max": 1.4300819635391235,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70761.84375,
"min": 68456.2421875,
"max": 77089.8984375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 116.49172576832152,
"min": 93.68431001890359,
"max": 418.425,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49276.0,
"min": 48904.0,
"max": 50235.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999994.0,
"min": 49853.0,
"max": 1999994.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999994.0,
"min": 49853.0,
"max": 1999994.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.216273069381714,
"min": 0.15009836852550507,
"max": 2.398073673248291,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 937.4835205078125,
"min": 17.861705780029297,
"max": 1239.380126953125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.385334826812113,
"min": 1.7914845009310907,
"max": 3.966238077591967,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1431.9966317415237,
"min": 213.1866556107998,
"max": 1999.51824092865,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.385334826812113,
"min": 1.7914845009310907,
"max": 3.966238077591967,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1431.9966317415237,
"min": 213.1866556107998,
"max": 1999.51824092865,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01578161908416102,
"min": 0.014083611667835309,
"max": 0.019649320510749628,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03156323816832204,
"min": 0.028167223335670618,
"max": 0.057878246104034275,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04077782935152451,
"min": 0.020137291091183823,
"max": 0.05739097156458431,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.08155565870304902,
"min": 0.040274582182367645,
"max": 0.17217291469375293,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.5576234808250014e-06,
"min": 4.5576234808250014e-06,
"max": 0.00029536162654612494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.115246961650003e-06,
"min": 9.115246961650003e-06,
"max": 0.0008442622685792499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10151917500000004,
"min": 0.10151917500000004,
"max": 0.19845387500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2030383500000001,
"min": 0.2030383500000001,
"max": 0.58142075,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.580683250000003e-05,
"min": 8.580683250000003e-05,
"max": 0.004922848362500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017161366500000007,
"min": 0.00017161366500000007,
"max": 0.014072895424999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693840960",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693843348"
},
"total": 2388.49761759,
"count": 1,
"self": 0.4650630769997406,
"children": {
"run_training.setup": {
"total": 0.04066057299996828,
"count": 1,
"self": 0.04066057299996828
},
"TrainerController.start_learning": {
"total": 2387.9918939400004,
"count": 1,
"self": 4.302971724051986,
"children": {
"TrainerController._reset_env": {
"total": 4.1014509309998175,
"count": 1,
"self": 4.1014509309998175
},
"TrainerController.advance": {
"total": 2379.4669169239487,
"count": 230965,
"self": 4.489100149676688,
"children": {
"env_step": {
"total": 1841.4582821770744,
"count": 230965,
"self": 1555.457514090028,
"children": {
"SubprocessEnvManager._take_step": {
"total": 283.0804348539805,
"count": 230965,
"self": 16.42806258303949,
"children": {
"TorchPolicy.evaluate": {
"total": 266.652372270941,
"count": 223041,
"self": 266.652372270941
}
}
},
"workers": {
"total": 2.920333233065776,
"count": 230965,
"self": 0.0,
"children": {
"worker_root": {
"total": 2380.4667379350312,
"count": 230965,
"is_parallel": true,
"self": 1110.2326767861537,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008547990000806749,
"count": 1,
"is_parallel": true,
"self": 0.0002490550002676173,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006057439998130576,
"count": 2,
"is_parallel": true,
"self": 0.0006057439998130576
}
}
},
"UnityEnvironment.step": {
"total": 0.04885623099994518,
"count": 1,
"is_parallel": true,
"self": 0.00040953200004878454,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023021700008030166,
"count": 1,
"is_parallel": true,
"self": 0.00023021700008030166
},
"communicator.exchange": {
"total": 0.047292839999954595,
"count": 1,
"is_parallel": true,
"self": 0.047292839999954595
},
"steps_from_proto": {
"total": 0.000923641999861502,
"count": 1,
"is_parallel": true,
"self": 0.0002340139999432722,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006896279999182298,
"count": 2,
"is_parallel": true,
"self": 0.0006896279999182298
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1270.2340611488776,
"count": 230964,
"is_parallel": true,
"self": 40.06141107193844,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.86869896700478,
"count": 230964,
"is_parallel": true,
"self": 79.86869896700478
},
"communicator.exchange": {
"total": 1053.4387828700435,
"count": 230964,
"is_parallel": true,
"self": 1053.4387828700435
},
"steps_from_proto": {
"total": 96.86516823989086,
"count": 230964,
"is_parallel": true,
"self": 34.21716544091328,
"children": {
"_process_rank_one_or_two_observation": {
"total": 62.648002798977586,
"count": 461928,
"is_parallel": true,
"self": 62.648002798977586
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 533.5195345971977,
"count": 230965,
"self": 6.636400055219838,
"children": {
"process_trajectory": {
"total": 130.6542383979779,
"count": 230965,
"self": 129.18314595397692,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4710924440009876,
"count": 10,
"self": 1.4710924440009876
}
}
},
"_update_policy": {
"total": 396.2288961439999,
"count": 96,
"self": 336.3267796100015,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.902116533998424,
"count": 2880,
"self": 59.902116533998424
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.030000021273736e-06,
"count": 1,
"self": 1.030000021273736e-06
},
"TrainerController._save_models": {
"total": 0.12055333099988275,
"count": 1,
"self": 0.002393748000031337,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11815958299985141,
"count": 1,
"self": 0.11815958299985141
}
}
}
}
}
}
}