{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3992754220962524,
"min": 1.3992754220962524,
"max": 1.4226139783859253,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71234.3125,
"min": 67608.96875,
"max": 78485.8828125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.080204778157,
"min": 79.86407766990291,
"max": 379.95454545454544,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49857.0,
"min": 49040.0,
"max": 50154.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999772.0,
"min": 49586.0,
"max": 1999772.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999772.0,
"min": 49586.0,
"max": 1999772.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.429859161376953,
"min": 0.005918058566749096,
"max": 2.5066163539886475,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1423.8974609375,
"min": 0.7752656936645508,
"max": 1522.224365234375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7579705776208088,
"min": 1.7261705382634664,
"max": 3.9528932315962657,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2202.170758485794,
"min": 226.12834051251411,
"max": 2362.9064193964005,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7579705776208088,
"min": 1.7261705382634664,
"max": 3.9528932315962657,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2202.170758485794,
"min": 226.12834051251411,
"max": 2362.9064193964005,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016666996888428307,
"min": 0.014074512087770724,
"max": 0.019543459454628948,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05000099066528492,
"min": 0.029360867010351892,
"max": 0.05684906305359619,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.053139806662996614,
"min": 0.024828965651492278,
"max": 0.060315325607856116,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15941941998898984,
"min": 0.049657931302984556,
"max": 0.17903639910121757,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6433987855666602e-06,
"min": 3.6433987855666602e-06,
"max": 0.0002953527015491001,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.093019635669998e-05,
"min": 1.093019635669998e-05,
"max": 0.0008442798185733998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10121443333333335,
"min": 0.10121443333333335,
"max": 0.19845089999999993,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30364330000000006,
"min": 0.20756190000000002,
"max": 0.5814265999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.060022333333324e-05,
"min": 7.060022333333324e-05,
"max": 0.004922699910000002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021180066999999973,
"min": 0.00021180066999999973,
"max": 0.014073187339999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672693157",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672695538"
},
"total": 2380.4885732169996,
"count": 1,
"self": 0.3888938829995823,
"children": {
"run_training.setup": {
"total": 0.11154564100002062,
"count": 1,
"self": 0.11154564100002062
},
"TrainerController.start_learning": {
"total": 2379.9881336930002,
"count": 1,
"self": 4.271297382997545,
"children": {
"TrainerController._reset_env": {
"total": 7.6753644209999266,
"count": 1,
"self": 7.6753644209999266
},
"TrainerController.advance": {
"total": 2367.9159381720033,
"count": 232818,
"self": 4.327540198052247,
"children": {
"env_step": {
"total": 1875.3511557710146,
"count": 232818,
"self": 1573.1488223400006,
"children": {
"SubprocessEnvManager._take_step": {
"total": 299.4434823340488,
"count": 232818,
"self": 15.511486690052152,
"children": {
"TorchPolicy.evaluate": {
"total": 283.93199564399663,
"count": 223039,
"self": 69.94427837103228,
"children": {
"TorchPolicy.sample_actions": {
"total": 213.98771727296435,
"count": 223039,
"self": 213.98771727296435
}
}
}
}
},
"workers": {
"total": 2.7588510969653726,
"count": 232818,
"self": 0.0,
"children": {
"worker_root": {
"total": 2371.630582670061,
"count": 232818,
"is_parallel": true,
"self": 1077.2432030539032,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002254163999964476,
"count": 1,
"is_parallel": true,
"self": 0.0003312090000235912,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019229549999408846,
"count": 2,
"is_parallel": true,
"self": 0.0019229549999408846
}
}
},
"UnityEnvironment.step": {
"total": 0.029465662000006887,
"count": 1,
"is_parallel": true,
"self": 0.0003162579999980153,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019091799993020686,
"count": 1,
"is_parallel": true,
"self": 0.00019091799993020686
},
"communicator.exchange": {
"total": 0.028202404000012393,
"count": 1,
"is_parallel": true,
"self": 0.028202404000012393
},
"steps_from_proto": {
"total": 0.0007560820000662716,
"count": 1,
"is_parallel": true,
"self": 0.00025802900006510754,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000498053000001164,
"count": 2,
"is_parallel": true,
"self": 0.000498053000001164
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1294.3873796161577,
"count": 232817,
"is_parallel": true,
"self": 36.1119800023082,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 85.42845444498005,
"count": 232817,
"is_parallel": true,
"self": 85.42845444498005
},
"communicator.exchange": {
"total": 1073.1969761208657,
"count": 232817,
"is_parallel": true,
"self": 1073.1969761208657
},
"steps_from_proto": {
"total": 99.64996904800353,
"count": 232817,
"is_parallel": true,
"self": 43.26156298906699,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.38840605893654,
"count": 465634,
"is_parallel": true,
"self": 56.38840605893654
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 488.2372422029365,
"count": 232818,
"self": 6.2543722870658485,
"children": {
"process_trajectory": {
"total": 162.8182278938698,
"count": 232818,
"self": 161.60791244986967,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2103154440001163,
"count": 10,
"self": 1.2103154440001163
}
}
},
"_update_policy": {
"total": 319.16464202200086,
"count": 97,
"self": 265.77658504899796,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.388056973002904,
"count": 2910,
"self": 53.388056973002904
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.189999693451682e-06,
"count": 1,
"self": 1.189999693451682e-06
},
"TrainerController._save_models": {
"total": 0.1255325269999048,
"count": 1,
"self": 0.0022850399996059423,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12324748700029886,
"count": 1,
"self": 0.12324748700029886
}
}
}
}
}
}
}