{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7245991230010986,
"min": 1.6280959844589233,
"max": 1.803169846534729,
"count": 690
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34050.484375,
"min": 16252.23046875,
"max": 41848.55078125,
"count": 690
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 72.42028985507247,
"min": 41.73728813559322,
"max": 107.25,
"count": 690
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19988.0,
"min": 7296.0,
"max": 21240.0,
"count": 690
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1652.446273309946,
"min": 1623.0626194903318,
"max": 1693.4569155917834,
"count": 690
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 228037.58571677256,
"min": 114220.06391457033,
"max": 392609.14351578726,
"count": 690
},
"SoccerTwos.Step.mean": {
"value": 24009948.0,
"min": 17119989.0,
"max": 24009948.0,
"count": 690
},
"SoccerTwos.Step.sum": {
"value": 24009948.0,
"min": 17119989.0,
"max": 24009948.0,
"count": 690
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.03957207128405571,
"min": -0.12997280061244965,
"max": 0.07670627534389496,
"count": 690
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -5.381801605224609,
"min": -20.535701751708984,
"max": 13.755611419677734,
"count": 690
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03876207023859024,
"min": -0.1288316398859024,
"max": 0.07834520936012268,
"count": 690
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.271641731262207,
"min": -20.355398178100586,
"max": 13.634573936462402,
"count": 690
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 690
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 690
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.046152938814724195,
"min": -0.36934179067611694,
"max": 0.3203398482243818,
"count": 690
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 6.27679967880249,
"min": -62.43619990348816,
"max": 46.91000020503998,
"count": 690
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.046152938814724195,
"min": -0.36934179067611694,
"max": 0.3203398482243818,
"count": 690
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 6.27679967880249,
"min": -62.43619990348816,
"max": 46.91000020503998,
"count": 690
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 690
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 690
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014841787061747406,
"min": 0.011989216684984665,
"max": 0.023097134284519902,
"count": 334
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014841787061747406,
"min": 0.011989216684984665,
"max": 0.023097134284519902,
"count": 334
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10486778641740481,
"min": 0.08783568665385247,
"max": 0.13843777825435002,
"count": 334
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10486778641740481,
"min": 0.08783568665385247,
"max": 0.13843777825435002,
"count": 334
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.1062960721552372,
"min": 0.08942758962512017,
"max": 0.14068217327197394,
"count": 334
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.1062960721552372,
"min": 0.08942758962512017,
"max": 0.14068217327197394,
"count": 334
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 334
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 334
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 334
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 334
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.006000000000000002,
"min": 0.006000000000000002,
"max": 0.006000000000000002,
"count": 334
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.006000000000000002,
"min": 0.006000000000000002,
"max": 0.006000000000000002,
"count": 334
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688309449",
"python_version": "3.9.16 (main, May 17 2023, 17:49:16) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pepe\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1688336916"
},
"total": 27466.885090400003,
"count": 1,
"self": 1.1405648000036308,
"children": {
"run_training.setup": {
"total": 0.14519139999999986,
"count": 1,
"self": 0.14519139999999986
},
"TrainerController.start_learning": {
"total": 27465.5993342,
"count": 1,
"self": 11.204596699637477,
"children": {
"TrainerController._reset_env": {
"total": 9.80414069999125,
"count": 36,
"self": 9.80414069999125
},
"TrainerController.advance": {
"total": 27444.38761920037,
"count": 476298,
"self": 11.01459670057011,
"children": {
"env_step": {
"total": 7679.875852500507,
"count": 476298,
"self": 5911.034287698587,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1761.6579132017594,
"count": 476298,
"self": 67.78359530101602,
"children": {
"TorchPolicy.evaluate": {
"total": 1693.8743179007433,
"count": 865678,
"self": 1693.8743179007433
}
}
},
"workers": {
"total": 7.183651600160358,
"count": 476297,
"self": 0.0,
"children": {
"worker_root": {
"total": 27439.14209659975,
"count": 476297,
"is_parallel": true,
"self": 22733.734255899493,
"children": {
"steps_from_proto": {
"total": 0.06647450001096544,
"count": 72,
"is_parallel": true,
"self": 0.014102900009500985,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.05237160000146446,
"count": 288,
"is_parallel": true,
"self": 0.05237160000146446
}
}
},
"UnityEnvironment.step": {
"total": 4705.341366200246,
"count": 476297,
"is_parallel": true,
"self": 237.28460120103773,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 193.13705809924238,
"count": 476297,
"is_parallel": true,
"self": 193.13705809924238
},
"communicator.exchange": {
"total": 3466.292329999416,
"count": 476297,
"is_parallel": true,
"self": 3466.292329999416
},
"steps_from_proto": {
"total": 808.62737690055,
"count": 952594,
"is_parallel": true,
"self": 167.28985730025352,
"children": {
"_process_rank_one_or_two_observation": {
"total": 641.3375196002964,
"count": 3810376,
"is_parallel": true,
"self": 641.3375196002964
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 19753.497169999293,
"count": 476297,
"self": 87.17292039913082,
"children": {
"process_trajectory": {
"total": 2177.9227992001324,
"count": 476297,
"self": 2175.370412100131,
"children": {
"RLTrainer._checkpoint": {
"total": 2.5523871000011695,
"count": 14,
"self": 2.5523871000011695
}
}
},
"_update_policy": {
"total": 17488.40145040003,
"count": 335,
"self": 1199.457120199988,
"children": {
"TorchPOCAOptimizer.update": {
"total": 16288.944330200042,
"count": 10050,
"self": 16288.944330200042
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2000018614344299e-06,
"count": 1,
"self": 1.2000018614344299e-06
},
"TrainerController._save_models": {
"total": 0.2029763999998977,
"count": 1,
"self": 0.06067310000071302,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14230329999918467,
"count": 1,
"self": 0.14230329999918467
}
}
}
}
}
}
}