poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.294991970062256,
"min": 3.294991970062256,
"max": 3.295747995376587,
"count": 3
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 59889.7734375,
"min": 59889.7734375,
"max": 105463.9375,
"count": 3
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 619.5,
"min": 492.5,
"max": 885.75,
"count": 3
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19824.0,
"min": 11820.0,
"max": 28344.0,
"count": 3
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1200.2216125847149,
"min": 1198.512895997199,
"max": 1200.2216125847149,
"count": 3
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 12002.21612584715,
"min": 4795.014370276737,
"max": 12002.21612584715,
"count": 3
},
"SoccerTwos.Step.mean": {
"value": 29579.0,
"min": 9188.0,
"max": 29579.0,
"count": 3
},
"SoccerTwos.Step.sum": {
"value": 29579.0,
"min": 9188.0,
"max": 29579.0,
"count": 3
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.04330986738204956,
"min": 0.04330986738204956,
"max": 0.04582947865128517,
"count": 3
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.692957878112793,
"min": 0.5041242837905884,
"max": 0.7332154512405396,
"count": 3
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.05042207986116409,
"min": 0.04577548801898956,
"max": 0.05042207986116409,
"count": 3
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.8067532777786255,
"min": 0.5035683512687683,
"max": 0.8067532777786255,
"count": 3
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 3
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 3
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.08561250194907188,
"min": -0.36363636363636365,
"max": 0.11101250350475311,
"count": 3
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 1.3698000311851501,
"min": -4.0,
"max": 1.7762000560760498,
"count": 3
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.08561250194907188,
"min": -0.36363636363636365,
"max": 0.11101250350475311,
"count": 3
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 1.3698000311851501,
"min": -4.0,
"max": 1.7762000560760498,
"count": 3
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0021249399287626147,
"min": 0.0021249399287626147,
"max": 0.0021249399287626147,
"count": 1
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0021249399287626147,
"min": 0.0021249399287626147,
"max": 0.0021249399287626147,
"count": 1
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.007496952001626293,
"min": 0.007496952001626293,
"max": 0.007496952001626293,
"count": 1
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.007496952001626293,
"min": 0.007496952001626293,
"max": 0.007496952001626293,
"count": 1
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.004695787249753873,
"min": 0.004695787249753873,
"max": 0.004695787249753873,
"count": 1
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.004695787249753873,
"min": 0.004695787249753873,
"max": 0.004695787249753873,
"count": 1
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000004,
"min": 0.20000000000000004,
"max": 0.20000000000000004,
"count": 1
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000004,
"min": 0.20000000000000004,
"max": 0.20000000000000004,
"count": 1
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 1
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717276638",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Jeste\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --torch-device cuda --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0",
"numpy_version": "1.24.3",
"end_time_seconds": "1717276694"
},
"total": 55.94161929999973,
"count": 1,
"self": 0.005248999994364567,
"children": {
"run_training.setup": {
"total": 0.07311640000261832,
"count": 1,
"self": 0.07311640000261832
},
"TrainerController.start_learning": {
"total": 55.863253900002746,
"count": 1,
"self": 0.042421900034241844,
"children": {
"TrainerController._reset_env": {
"total": 4.344175300000643,
"count": 1,
"self": 4.344175300000643
},
"TrainerController.advance": {
"total": 51.28443339997466,
"count": 2670,
"self": 0.037352400344389025,
"children": {
"env_step": {
"total": 43.60035759991297,
"count": 2670,
"self": 23.51200740026252,
"children": {
"SubprocessEnvManager._take_step": {
"total": 20.063391899806447,
"count": 2670,
"self": 0.22031719956430607,
"children": {
"TorchPolicy.evaluate": {
"total": 19.84307470024214,
"count": 5318,
"self": 19.84307470024214
}
}
},
"workers": {
"total": 0.024958299844001886,
"count": 2669,
"self": 0.0,
"children": {
"worker_root": {
"total": 52.217857700066816,
"count": 2669,
"is_parallel": true,
"self": 33.69345090002753,
"children": {
"steps_from_proto": {
"total": 0.0015405000012833625,
"count": 2,
"is_parallel": true,
"self": 0.00031939999462338164,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012211000066599809,
"count": 8,
"is_parallel": true,
"self": 0.0012211000066599809
}
}
},
"UnityEnvironment.step": {
"total": 18.522866300038004,
"count": 2669,
"is_parallel": true,
"self": 1.0449094994692132,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.8874818002150278,
"count": 2669,
"is_parallel": true,
"self": 0.8874818002150278
},
"communicator.exchange": {
"total": 13.233655100215401,
"count": 2669,
"is_parallel": true,
"self": 13.233655100215401
},
"steps_from_proto": {
"total": 3.356819900138362,
"count": 5338,
"is_parallel": true,
"self": 0.6450732003140729,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.711746699824289,
"count": 21352,
"is_parallel": true,
"self": 2.711746699824289
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 7.6467233997173025,
"count": 2669,
"self": 0.2873427996528335,
"children": {
"process_trajectory": {
"total": 3.6102151000668528,
"count": 2669,
"self": 3.6102151000668528
},
"_update_policy": {
"total": 3.7491654999976163,
"count": 1,
"self": 2.3679731999945943,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1.381192300003022,
"count": 3,
"self": 1.381192300003022
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.999930625781417e-07,
"count": 1,
"self": 9.999930625781417e-07
},
"TrainerController._save_models": {
"total": 0.1922223000001395,
"count": 1,
"self": 0.0077162000015960075,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1845060999985435,
"count": 1,
"self": 0.1845060999985435
}
}
}
}
}
}
}