{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4027817249298096,
"min": 1.4027817249298096,
"max": 1.4284096956253052,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69779.9765625,
"min": 68802.9765625,
"max": 77806.671875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 79.1875,
"min": 73.96701649175412,
"max": 387.05426356589146,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49413.0,
"min": 48871.0,
"max": 50008.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999895.0,
"min": 49543.0,
"max": 1999895.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999895.0,
"min": 49543.0,
"max": 1999895.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4494645595550537,
"min": 0.07293373346328735,
"max": 2.529693126678467,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1528.4658203125,
"min": 9.335517883300781,
"max": 1637.9024658203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7747426947148948,
"min": 1.7633754962589592,
"max": 4.080721586002537,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2355.4394415020943,
"min": 225.71206352114677,
"max": 2605.602060496807,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7747426947148948,
"min": 1.7633754962589592,
"max": 4.080721586002537,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2355.4394415020943,
"min": 225.71206352114677,
"max": 2605.602060496807,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016374832711183746,
"min": 0.013268728806482007,
"max": 0.01942460829644309,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04912449813355124,
"min": 0.02718201812240295,
"max": 0.058138287525313595,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05701457233064705,
"min": 0.021488225553184748,
"max": 0.057700087688863276,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17104371699194115,
"min": 0.042976451106369495,
"max": 0.17258416377007962,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.725698758133329e-06,
"min": 3.725698758133329e-06,
"max": 0.00029534175155275,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1177096274399987e-05,
"min": 1.1177096274399987e-05,
"max": 0.0008440011186662999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1012418666666667,
"min": 0.1012418666666667,
"max": 0.19844725000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037256000000001,
"min": 0.20765134999999998,
"max": 0.5813337000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.19691466666666e-05,
"min": 7.19691466666666e-05,
"max": 0.0049225177749999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021590743999999983,
"min": 0.00021590743999999983,
"max": 0.014068551629999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679225176",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679227477"
},
"total": 2300.967634187,
"count": 1,
"self": 0.5640737310000077,
"children": {
"run_training.setup": {
"total": 0.11031351300005099,
"count": 1,
"self": 0.11031351300005099
},
"TrainerController.start_learning": {
"total": 2300.2932469429998,
"count": 1,
"self": 4.128819083079179,
"children": {
"TrainerController._reset_env": {
"total": 9.258084617000009,
"count": 1,
"self": 9.258084617000009
},
"TrainerController.advance": {
"total": 2286.7220728339207,
"count": 233111,
"self": 4.407250434867819,
"children": {
"env_step": {
"total": 1779.7992772391099,
"count": 233111,
"self": 1497.4192030480842,
"children": {
"SubprocessEnvManager._take_step": {
"total": 279.64035529808314,
"count": 233111,
"self": 16.609753448036372,
"children": {
"TorchPolicy.evaluate": {
"total": 263.03060185004676,
"count": 222974,
"self": 263.03060185004676
}
}
},
"workers": {
"total": 2.7397188929425056,
"count": 233111,
"self": 0.0,
"children": {
"worker_root": {
"total": 2292.4672259849576,
"count": 233111,
"is_parallel": true,
"self": 1073.9134691999939,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000915394000003289,
"count": 1,
"is_parallel": true,
"self": 0.00027463200001420773,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006407619999890812,
"count": 2,
"is_parallel": true,
"self": 0.0006407619999890812
}
}
},
"UnityEnvironment.step": {
"total": 0.029132501999924898,
"count": 1,
"is_parallel": true,
"self": 0.0003312579998464571,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001957810000021709,
"count": 1,
"is_parallel": true,
"self": 0.0001957810000021709
},
"communicator.exchange": {
"total": 0.027924774000098296,
"count": 1,
"is_parallel": true,
"self": 0.027924774000098296
},
"steps_from_proto": {
"total": 0.0006806889999779742,
"count": 1,
"is_parallel": true,
"self": 0.00020623599993996322,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00047445300003801094,
"count": 2,
"is_parallel": true,
"self": 0.00047445300003801094
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1218.5537567849638,
"count": 233110,
"is_parallel": true,
"self": 37.559992849976425,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.47943532806494,
"count": 233110,
"is_parallel": true,
"self": 75.47943532806494
},
"communicator.exchange": {
"total": 1018.5707852569114,
"count": 233110,
"is_parallel": true,
"self": 1018.5707852569114
},
"steps_from_proto": {
"total": 86.94354335001105,
"count": 233110,
"is_parallel": true,
"self": 32.7851617918775,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.158381558133556,
"count": 466220,
"is_parallel": true,
"self": 54.158381558133556
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 502.5155451599429,
"count": 233111,
"self": 6.4414604459917655,
"children": {
"process_trajectory": {
"total": 141.59358714595305,
"count": 233111,
"self": 140.26038551195336,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3332016339996926,
"count": 10,
"self": 1.3332016339996926
}
}
},
"_update_policy": {
"total": 354.4804975679981,
"count": 97,
"self": 297.2859683379868,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.19452923001131,
"count": 2910,
"self": 57.19452923001131
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2719997357635293e-06,
"count": 1,
"self": 1.2719997357635293e-06
},
"TrainerController._save_models": {
"total": 0.1842691370002285,
"count": 1,
"self": 0.0033437350002714084,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1809254019999571,
"count": 1,
"self": 0.1809254019999571
}
}
}
}
}
}
}