{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.406314730644226,
"min": 1.406314730644226,
"max": 1.4278645515441895,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69452.2578125,
"min": 67543.8359375,
"max": 80283.9140625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 84.44020797227036,
"min": 84.44020797227036,
"max": 396.0,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48722.0,
"min": 48722.0,
"max": 50292.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999998.0,
"min": 49977.0,
"max": 1999998.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999998.0,
"min": 49977.0,
"max": 1999998.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.399977207183838,
"min": 0.15671218931674957,
"max": 2.4390816688537598,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1387.186767578125,
"min": 19.74573516845703,
"max": 1396.0731201171875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.753371951901789,
"min": 1.8141337323283393,
"max": 3.921487186606049,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2169.448988199234,
"min": 228.58085027337074,
"max": 2216.7584421634674,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.753371951901789,
"min": 1.8141337323283393,
"max": 3.921487186606049,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2169.448988199234,
"min": 228.58085027337074,
"max": 2216.7584421634674,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01676567306082385,
"min": 0.012776230747113005,
"max": 0.019688798255325917,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05029701918247155,
"min": 0.02555246149422601,
"max": 0.05906639476597775,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.057317224269111955,
"min": 0.02036927007138729,
"max": 0.05884669106453658,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17195167280733586,
"min": 0.04073854014277458,
"max": 0.17195167280733586,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5065988311666687e-06,
"min": 3.5065988311666687e-06,
"max": 0.000295308976563675,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0519796493500006e-05,
"min": 1.0519796493500006e-05,
"max": 0.0008440360686546499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10116883333333333,
"min": 0.10116883333333333,
"max": 0.19843632500000008,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035065,
"min": 0.20750280000000004,
"max": 0.5813453500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.832478333333336e-05,
"min": 6.832478333333336e-05,
"max": 0.004921972617500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020497435000000009,
"min": 0.00020497435000000009,
"max": 0.014069132965,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693833369",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693835922"
},
"total": 2552.7420876270003,
"count": 1,
"self": 0.437822287000472,
"children": {
"run_training.setup": {
"total": 0.04720084200016572,
"count": 1,
"self": 0.04720084200016572
},
"TrainerController.start_learning": {
"total": 2552.2570644979996,
"count": 1,
"self": 4.636234637268444,
"children": {
"TrainerController._reset_env": {
"total": 4.353393673000028,
"count": 1,
"self": 4.353393673000028
},
"TrainerController.advance": {
"total": 2543.1397690327317,
"count": 231910,
"self": 4.748089664641157,
"children": {
"env_step": {
"total": 1974.6226884790408,
"count": 231910,
"self": 1674.0541549440518,
"children": {
"SubprocessEnvManager._take_step": {
"total": 297.5322049218089,
"count": 231910,
"self": 17.57167980066879,
"children": {
"TorchPolicy.evaluate": {
"total": 279.9605251211401,
"count": 222932,
"self": 279.9605251211401
}
}
},
"workers": {
"total": 3.0363286131801033,
"count": 231910,
"self": 0.0,
"children": {
"worker_root": {
"total": 2544.356158946934,
"count": 231910,
"is_parallel": true,
"self": 1168.3027842358765,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008813280001049861,
"count": 1,
"is_parallel": true,
"self": 0.00024986900007206714,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000631459000032919,
"count": 2,
"is_parallel": true,
"self": 0.000631459000032919
}
}
},
"UnityEnvironment.step": {
"total": 0.030465621999610448,
"count": 1,
"is_parallel": true,
"self": 0.00036651899927164777,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023503000011260156,
"count": 1,
"is_parallel": true,
"self": 0.00023503000011260156
},
"communicator.exchange": {
"total": 0.0290380470000855,
"count": 1,
"is_parallel": true,
"self": 0.0290380470000855
},
"steps_from_proto": {
"total": 0.0008260260001407005,
"count": 1,
"is_parallel": true,
"self": 0.00022165700011100853,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000604369000029692,
"count": 2,
"is_parallel": true,
"self": 0.000604369000029692
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1376.0533747110576,
"count": 231909,
"is_parallel": true,
"self": 42.35610204997192,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.0055643187925,
"count": 231909,
"is_parallel": true,
"self": 84.0055643187925
},
"communicator.exchange": {
"total": 1145.79003962128,
"count": 231909,
"is_parallel": true,
"self": 1145.79003962128
},
"steps_from_proto": {
"total": 103.90166872101327,
"count": 231909,
"is_parallel": true,
"self": 36.86490287804463,
"children": {
"_process_rank_one_or_two_observation": {
"total": 67.03676584296863,
"count": 463818,
"is_parallel": true,
"self": 67.03676584296863
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 563.7689908890497,
"count": 231910,
"self": 7.047530162159092,
"children": {
"process_trajectory": {
"total": 137.85116756688922,
"count": 231910,
"self": 136.45868348889053,
"children": {
"RLTrainer._checkpoint": {
"total": 1.392484077998688,
"count": 10,
"self": 1.392484077998688
}
}
},
"_update_policy": {
"total": 418.87029316000144,
"count": 97,
"self": 357.54254890901666,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.32774425098478,
"count": 2910,
"self": 61.32774425098478
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.990000424091704e-07,
"count": 1,
"self": 8.990000424091704e-07
},
"TrainerController._save_models": {
"total": 0.12766625599942927,
"count": 1,
"self": 0.0021173819986870512,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12554887400074222,
"count": 1,
"self": 0.12554887400074222
}
}
}
}
}
}
}