{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.3727089166641235,
"min": 1.3140296936035156,
"max": 3.295708417892456,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 27498.10546875,
"min": 15181.9248046875,
"max": 141096.8125,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 54.67777777777778,
"min": 38.95161290322581,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19684.0,
"min": 13088.0,
"max": 27332.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1636.4102133046044,
"min": 1199.0788229743996,
"max": 1752.9235374383593,
"count": 4963
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 294553.8383948288,
"min": 2405.4962485687115,
"max": 395485.79316487187,
"count": 4963
},
"SoccerTwos.Step.mean": {
"value": 49999798.0,
"min": 9682.0,
"max": 49999798.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999798.0,
"min": 9682.0,
"max": 49999798.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.014387241564691067,
"min": -0.14452321827411652,
"max": 0.1730378270149231,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 2.604090690612793,
"min": -28.471073150634766,
"max": 24.14034652709961,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.013612337410449982,
"min": -0.14541640877723694,
"max": 0.17362451553344727,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 2.4638330936431885,
"min": -28.64703369140625,
"max": 24.57199478149414,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.019559117641238216,
"min": -0.6490666667620341,
"max": 0.5342000017740897,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -3.5402002930641174,
"min": -65.82560014724731,
"max": 67.29599988460541,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.019559117641238216,
"min": -0.6490666667620341,
"max": 0.5342000017740897,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -3.5402002930641174,
"min": -65.82560014724731,
"max": 67.29599988460541,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01983731445943704,
"min": 0.00970174377822938,
"max": 0.026898910638798647,
"count": 2423
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01983731445943704,
"min": 0.00970174377822938,
"max": 0.026898910638798647,
"count": 2423
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10150654291113217,
"min": 5.244935891823843e-05,
"max": 0.13294473936160406,
"count": 2423
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10150654291113217,
"min": 5.244935891823843e-05,
"max": 0.13294473936160406,
"count": 2423
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10232037877043089,
"min": 4.760037991218269e-05,
"max": 0.1355833689371745,
"count": 2423
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10232037877043089,
"min": 4.760037991218269e-05,
"max": 0.1355833689371745,
"count": 2423
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2423
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2423
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2423
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2423
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2423
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2423
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1708762459",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:34:57) [MSC v.1936 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\s\\scoop\\persist\\mambaforge\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1708872312"
},
"total": 109852.8059483,
"count": 1,
"self": 1.728658700012602,
"children": {
"run_training.setup": {
"total": 0.08473060000687838,
"count": 1,
"self": 0.08473060000687838
},
"TrainerController.start_learning": {
"total": 109850.99255899998,
"count": 1,
"self": 67.65651694999542,
"children": {
"TrainerController._reset_env": {
"total": 8.078181099554058,
"count": 250,
"self": 8.078181099554058
},
"TrainerController.advance": {
"total": 109775.12425345043,
"count": 3449697,
"self": 65.48773235431872,
"children": {
"env_step": {
"total": 46783.386701313255,
"count": 3449697,
"self": 36225.92944563663,
"children": {
"SubprocessEnvManager._take_step": {
"total": 10516.50654537452,
"count": 3449697,
"self": 342.5663258137065,
"children": {
"TorchPolicy.evaluate": {
"total": 10173.940219560813,
"count": 6283050,
"self": 10173.940219560813
}
}
},
"workers": {
"total": 40.95071030210238,
"count": 3449697,
"self": 0.0,
"children": {
"worker_root": {
"total": 109765.78408021497,
"count": 3449697,
"is_parallel": true,
"self": 80961.24994871527,
"children": {
"steps_from_proto": {
"total": 0.3555603991262615,
"count": 500,
"is_parallel": true,
"self": 0.07527199713513255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.2802884019911289,
"count": 2000,
"is_parallel": true,
"self": 0.2802884019911289
}
}
},
"UnityEnvironment.step": {
"total": 28804.17857110058,
"count": 3449697,
"is_parallel": true,
"self": 1392.6681076682871,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1054.9750639710692,
"count": 3449697,
"is_parallel": true,
"self": 1054.9750639710692
},
"communicator.exchange": {
"total": 21824.719060978445,
"count": 3449697,
"is_parallel": true,
"self": 21824.719060978445
},
"steps_from_proto": {
"total": 4531.8163384827785,
"count": 6899394,
"is_parallel": true,
"self": 975.826527640922,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3555.9898108418565,
"count": 27597576,
"is_parallel": true,
"self": 3555.9898108418565
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 62926.24981978285,
"count": 3449697,
"self": 502.0779718386475,
"children": {
"process_trajectory": {
"total": 10868.461099245527,
"count": 3449697,
"self": 10857.966346045258,
"children": {
"RLTrainer._checkpoint": {
"total": 10.49475320026977,
"count": 100,
"self": 10.49475320026977
}
}
},
"_update_policy": {
"total": 51555.71074869868,
"count": 2423,
"self": 6170.351245703176,
"children": {
"TorchPOCAOptimizer.update": {
"total": 45385.3595029955,
"count": 72690,
"self": 45385.3595029955
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.999494068324566e-07,
"count": 1,
"self": 9.999494068324566e-07
},
"TrainerController._save_models": {
"total": 0.13360650005051866,
"count": 1,
"self": 0.03155570005765185,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10205079999286681,
"count": 1,
"self": 0.10205079999286681
}
}
}
}
}
}
}