{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7002971172332764,
"min": 1.657752275466919,
"max": 3.2957510948181152,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 37052.875,
"min": 12312.439453125,
"max": 161134.03125,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 58.03333333333333,
"min": 43.15929203539823,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20892.0,
"min": 14532.0,
"max": 27444.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1632.4033644469034,
"min": 1180.6144231738417,
"max": 1656.4278040139045,
"count": 926
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 293832.6056004426,
"min": 2361.2288463476834,
"max": 350746.16807630984,
"count": 926
},
"SoccerTwos.Step.mean": {
"value": 9999796.0,
"min": 9738.0,
"max": 9999796.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999796.0,
"min": 9738.0,
"max": 9999796.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.018543794751167297,
"min": -0.12188930809497833,
"max": 0.17353613674640656,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 3.3193392753601074,
"min": -24.37786102294922,
"max": 33.569091796875,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.02035484090447426,
"min": -0.12399794906377792,
"max": 0.17788521945476532,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 3.6435165405273438,
"min": -24.799589157104492,
"max": 33.97198486328125,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.09736648118695733,
"min": -0.5714285714285714,
"max": 0.44485713967255186,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 17.428600132465363,
"min": -53.2616001367569,
"max": 60.725200176239014,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.09736648118695733,
"min": -0.5714285714285714,
"max": 0.44485713967255186,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 17.428600132465363,
"min": -53.2616001367569,
"max": 60.725200176239014,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014148270753988374,
"min": 0.010953907556055735,
"max": 0.025983653173898346,
"count": 479
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014148270753988374,
"min": 0.010953907556055735,
"max": 0.025983653173898346,
"count": 479
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09655588641762733,
"min": 7.285407794673423e-07,
"max": 0.11943604374925296,
"count": 479
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09655588641762733,
"min": 7.285407794673423e-07,
"max": 0.11943604374925296,
"count": 479
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0975018228093783,
"min": 6.803876469045159e-07,
"max": 0.12117945080002149,
"count": 479
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0975018228093783,
"min": 6.803876469045159e-07,
"max": 0.12117945080002149,
"count": 479
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 479
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 479
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 479
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 479
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 479
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 479
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701141981",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/opt/conda/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos3.0 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701158522"
},
"total": 16540.47173334984,
"count": 1,
"self": 0.2186872186139226,
"children": {
"run_training.setup": {
"total": 0.011327958200126886,
"count": 1,
"self": 0.011327958200126886
},
"TrainerController.start_learning": {
"total": 16540.241718173027,
"count": 1,
"self": 9.591305868700147,
"children": {
"TrainerController._reset_env": {
"total": 2.313428184017539,
"count": 50,
"self": 2.313428184017539
},
"TrainerController.advance": {
"total": 16528.231910101138,
"count": 677374,
"self": 9.871557503007352,
"children": {
"env_step": {
"total": 8804.48451926466,
"count": 677374,
"self": 6811.329781570472,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1987.2831985726953,
"count": 677374,
"self": 79.03832132695243,
"children": {
"TorchPolicy.evaluate": {
"total": 1908.2448772457428,
"count": 1265734,
"self": 1908.2448772457428
}
}
},
"workers": {
"total": 5.871539121493697,
"count": 677374,
"self": 0.0,
"children": {
"worker_root": {
"total": 16525.264180932194,
"count": 677374,
"is_parallel": true,
"self": 10888.15431030374,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002077253069728613,
"count": 2,
"is_parallel": true,
"self": 0.0005489694885909557,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015282835811376572,
"count": 8,
"is_parallel": true,
"self": 0.0015282835811376572
}
}
},
"UnityEnvironment.step": {
"total": 0.027516332920640707,
"count": 1,
"is_parallel": true,
"self": 0.0006829011254012585,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00035718875005841255,
"count": 1,
"is_parallel": true,
"self": 0.00035718875005841255
},
"communicator.exchange": {
"total": 0.024528629146516323,
"count": 1,
"is_parallel": true,
"self": 0.024528629146516323
},
"steps_from_proto": {
"total": 0.001947613898664713,
"count": 2,
"is_parallel": true,
"self": 0.0003392621874809265,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016083517111837864,
"count": 8,
"is_parallel": true,
"self": 0.0016083517111837864
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5637.043595241848,
"count": 677373,
"is_parallel": true,
"self": 272.9911315282807,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 186.87309860251844,
"count": 677373,
"is_parallel": true,
"self": 186.87309860251844
},
"communicator.exchange": {
"total": 4397.816531648394,
"count": 677373,
"is_parallel": true,
"self": 4397.816531648394
},
"steps_from_proto": {
"total": 779.3628334626555,
"count": 1354746,
"is_parallel": true,
"self": 146.1865314510651,
"children": {
"_process_rank_one_or_two_observation": {
"total": 633.1763020115905,
"count": 5418984,
"is_parallel": true,
"self": 633.1763020115905
}
}
}
}
},
"steps_from_proto": {
"total": 0.06627538660541177,
"count": 98,
"is_parallel": true,
"self": 0.012375973630696535,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.05389941297471523,
"count": 392,
"is_parallel": true,
"self": 0.05389941297471523
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 7713.87583333347,
"count": 677374,
"self": 83.29706115927547,
"children": {
"process_trajectory": {
"total": 1536.3217827118933,
"count": 677374,
"self": 1534.389495048672,
"children": {
"RLTrainer._checkpoint": {
"total": 1.9322876632213593,
"count": 20,
"self": 1.9322876632213593
}
}
},
"_update_policy": {
"total": 6094.256989462301,
"count": 479,
"self": 993.652314550709,
"children": {
"TorchPOCAOptimizer.update": {
"total": 5100.604674911592,
"count": 14370,
"self": 5100.604674911592
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.111819624900818e-07,
"count": 1,
"self": 8.111819624900818e-07
},
"TrainerController._save_models": {
"total": 0.10507320798933506,
"count": 1,
"self": 0.001164108980447054,
"children": {
"RLTrainer._checkpoint": {
"total": 0.103909099008888,
"count": 1,
"self": 0.103909099008888
}
}
}
}
}
}
}