{
  "name": "root",
  "gauges": {
    "SoccerTwos.Policy.Entropy.mean": { "value": 3.1395070552825928, "min": 3.066038131713867, "max": 3.2957417964935303, "count": 201 },
    "SoccerTwos.Policy.Entropy.sum": { "value": 49327.93359375, "min": 21108.859375, "max": 135422.828125, "count": 201 },
    "SoccerTwos.Environment.EpisodeLength.mean": { "value": 503.5, "min": 363.0769230769231, "max": 999.0, "count": 201 },
    "SoccerTwos.Environment.EpisodeLength.sum": { "value": 20140.0, "min": 16336.0, "max": 23668.0, "count": 201 },
    "SoccerTwos.Self-play.ELO.mean": { "value": 1203.78092630216, "min": 1200.7703392512578, "max": 1217.0971680566179, "count": 177 },
    "SoccerTwos.Self-play.ELO.sum": { "value": 9630.24741041728, "min": 2405.578845254795, "max": 29010.63673285787, "count": 177 },
    "SoccerTwos.Step.mean": { "value": 2009878.0, "min": 9012.0, "max": 2009878.0, "count": 201 },
    "SoccerTwos.Step.sum": { "value": 2009878.0, "min": 9012.0, "max": 2009878.0, "count": 201 },
    "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.018383052200078964, "min": -0.03868962079286575, "max": 0.015536283142864704, "count": 201 },
    "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -0.3676610589027405, "min": -0.5621010661125183, "max": 0.2796531021595001, "count": 201 },
    "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.015726890414953232, "min": -0.03880360350012779, "max": 0.015799133107066154, "count": 201 },
    "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -0.31453779339790344, "min": -0.5768371820449829, "max": 0.2843843996524811, "count": 201 },
    "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 201 },
    "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 201 },
    "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": -0.32251999974250795, "min": -0.5474363619630987, "max": 0.3670124923810363, "count": 201 },
    "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": -6.450399994850159, "min": -12.043599963188171, "max": 5.8721998780965805, "count": 201 },
    "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": -0.32251999974250795, "min": -0.5474363619630987, "max": 0.3670124923810363, "count": 201 },
    "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": -6.450399994850159, "min": -12.043599963188171, "max": 5.8721998780965805, "count": 201 },
    "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 201 },
    "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 201 },
    "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.016290840944566298, "min": 0.011766680127281385, "max": 0.022022569570253836, "count": 93 },
    "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.016290840944566298, "min": 0.011766680127281385, "max": 0.022022569570253836, "count": 93 },
    "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.011315703919778267, "min": 5.496953682874542e-06, "max": 0.012770030088722706, "count": 93 },
    "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.011315703919778267, "min": 5.496953682874542e-06, "max": 0.012770030088722706, "count": 93 },
    "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.011372741435964902, "min": 5.790270355040168e-06, "max": 0.01297680124019583, "count": 93 },
    "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.011372741435964902, "min": 5.790270355040168e-06, "max": 0.01297680124019583, "count": 93 },
    "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 93 },
    "SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 93 },
    "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 93 },
    "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 93 },
    "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 93 },
    "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 93 }
  },
  "metadata": {
    "timer_format_version": "0.1.0",
    "start_time_seconds": "1691382239",
    "python_version": "3.9.13 (tags/v3.9.13:6de2ca5, May 17 2022, 16:36:42) [MSC v.1929 64 bit (AMD64)]",
    "command_line_arguments": "C:\\Users\\takdeniz\\Desktop\\RL\\venv\\scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
    "mlagents_version": "0.31.0.dev0",
    "mlagents_envs_version": "0.31.0.dev0",
    "communication_protocol_version": "1.5.0",
    "pytorch_version": "2.0.1+cpu",
    "numpy_version": "1.21.2",
    "end_time_seconds": "1691390074"
  },
  "total": 7835.7060439,
  "count": 1,
  "self": 0.04268279999996594,
  "children": {
    "run_training.setup": { "total": 0.12746289999999982, "count": 1, "self": 0.12746289999999982 },
    "TrainerController.start_learning": {
      "total": 7835.5358982,
      "count": 1,
      "self": 3.5472111001481608,
      "children": {
        "TrainerController._reset_env": { "total": 5.138324599999709, "count": 11, "self": 5.138324599999709 },
        "TrainerController.advance": {
          "total": 7826.6302802998525,
          "count": 131061,
          "self": 3.8947386997078866,
          "children": {
            "env_step": {
              "total": 2952.9264771999447,
              "count": 131061,
              "self": 2263.2747595999285,
              "children": {
                "SubprocessEnvManager._take_step": {
                  "total": 687.39800070018,
                  "count": 131061,
                  "self": 22.938811600458962,
                  "children": {
                    "TorchPolicy.evaluate": { "total": 664.459189099721, "count": 260010, "self": 664.459189099721 }
                  }
                },
                "workers": {
                  "total": 2.253716899836041,
                  "count": 131061,
                  "self": 0.0,
                  "children": {
                    "worker_root": {
                      "total": 7774.229142500052,
                      "count": 131061,
                      "is_parallel": true,
                      "self": 5970.363229400253,
                      "children": {
                        "steps_from_proto": {
                          "total": 0.028926999999655756,
                          "count": 22,
                          "is_parallel": true,
                          "self": 0.00533080000106434,
                          "children": {
                            "_process_rank_one_or_two_observation": { "total": 0.023596199998591416, "count": 88, "is_parallel": true, "self": 0.023596199998591416 }
                          }
                        },
                        "UnityEnvironment.step": {
                          "total": 1803.8369860998,
                          "count": 131061,
                          "is_parallel": true,
                          "self": 109.85109069991881,
                          "children": {
                            "UnityEnvironment._generate_step_input": { "total": 79.79832870002542, "count": 131061, "is_parallel": true, "self": 79.79832870002542 },
                            "communicator.exchange": { "total": 1250.495429699954, "count": 131061, "is_parallel": true, "self": 1250.495429699954 },
                            "steps_from_proto": {
                              "total": 363.6921369999018,
                              "count": 262122,
                              "is_parallel": true,
                              "self": 67.27387249990107,
                              "children": {
                                "_process_rank_one_or_two_observation": { "total": 296.4182645000007, "count": 1048488, "is_parallel": true, "self": 296.4182645000007 }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            },
            "trainer_advance": {
              "total": 4869.8090644001995,
              "count": 131061,
              "self": 27.058353900098155,
              "children": {
                "process_trajectory": {
                  "total": 626.0260898001043,
                  "count": 131061,
                  "self": 624.9201259001047,
                  "children": {
                    "RLTrainer._checkpoint": { "total": 1.1059638999995514, "count": 4, "self": 1.1059638999995514 }
                  }
                },
                "_update_policy": {
                  "total": 4216.724620699997,
                  "count": 94,
                  "self": 360.18790889999036,
                  "children": {
                    "TorchPOCAOptimizer.update": { "total": 3856.5367118000067, "count": 2818, "self": 3856.5367118000067 }
                  }
                }
              }
            }
          }
        },
        "trainer_threads": { "total": 3.3999995139311068e-06, "count": 1, "self": 3.3999995139311068e-06 },
        "TrainerController._save_models": {
          "total": 0.22007880000001023,
          "count": 1,
          "self": 0.004899500000647095,
          "children": {
            "RLTrainer._checkpoint": { "total": 0.21517929999936314, "count": 1, "self": 0.21517929999936314 }
          }
        }
      }
    }
  }
}