poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6624467372894287,
"min": 1.5665268898010254,
"max": 3.182439088821411,
"count": 1873
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 38515.56640625,
"min": 17665.7265625,
"max": 132159.90625,
"count": 1873
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 60.15853658536585,
"min": 37.12977099236641,
"max": 999.0,
"count": 1873
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19732.0,
"min": 1376.0,
"max": 32404.0,
"count": 1873
},
"SoccerTwos.Step.mean": {
"value": 24319974.0,
"min": 5009576.0,
"max": 24319974.0,
"count": 1932
},
"SoccerTwos.Step.sum": {
"value": 24319974.0,
"min": 5009576.0,
"max": 24319974.0,
"count": 1932
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.03502928093075752,
"min": -0.13355103135108948,
"max": 0.2099972516298294,
"count": 1932
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -5.709773063659668,
"min": -25.908899307250977,
"max": 24.91553497314453,
"count": 1932
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03238891810178757,
"min": -0.13234162330627441,
"max": 0.21316803991794586,
"count": 1932
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.279393672943115,
"min": -25.674274444580078,
"max": 25.644224166870117,
"count": 1932
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1932
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1932
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.17315337482405588,
"min": -0.726866665813658,
"max": 0.4992333371192217,
"count": 1932
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -28.224000096321106,
"min": -69.48040002584457,
"max": 55.33920019865036,
"count": 1932
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.17315337482405588,
"min": -0.726866665813658,
"max": 0.4992333371192217,
"count": 1932
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -28.224000096321106,
"min": -69.48040002584457,
"max": 55.33920019865036,
"count": 1932
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1932
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1932
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018134012517596904,
"min": 0.010540938661870313,
"max": 0.02639512748767932,
"count": 892
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018134012517596904,
"min": 0.010540938661870313,
"max": 0.02639512748767932,
"count": 892
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10255525782704353,
"min": 3.3001795863975124e-10,
"max": 0.12545725579063097,
"count": 892
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10255525782704353,
"min": 3.3001795863975124e-10,
"max": 0.12545725579063097,
"count": 892
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.1045435314377149,
"min": 4.005146048147168e-10,
"max": 0.12777663220961888,
"count": 892
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.1045435314377149,
"min": 4.005146048147168e-10,
"max": 0.12777663220961888,
"count": 892
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 892
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 892
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 892
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 892
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 892
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 892
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1628.6851696778017,
"min": 1182.587815930331,
"max": 1645.289788445154,
"count": 1597
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 267104.36782715947,
"min": 2366.258953303024,
"max": 409609.75227333,
"count": 1597
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688847145",
"python_version": "3.9.16 (main, May 15 2023, 23:46:34) \n[GCC 11.2.0]",
"command_line_arguments": "/home/roberto/miniconda3/envs/unity_ml/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=/home/roberto/Descargas/ml_agents/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688868106"
},
"total": 20961.528685475998,
"count": 1,
"self": 0.1900593829996069,
"children": {
"run_training.setup": {
"total": 0.006814189000579063,
"count": 1,
"self": 0.006814189000579063
},
"TrainerController.start_learning": {
"total": 20961.331811903998,
"count": 1,
"self": 21.227789693399245,
"children": {
"TrainerController._reset_env": {
"total": 5.115365825982735,
"count": 98,
"self": 5.115365825982735
},
"TrainerController.advance": {
"total": 20934.860443891615,
"count": 1305625,
"self": 18.49079936577982,
"children": {
"env_step": {
"total": 15341.088063058296,
"count": 1305625,
"self": 11572.435440086618,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3755.8978415009387,
"count": 1305625,
"self": 104.1612216884314,
"children": {
"TorchPolicy.evaluate": {
"total": 3651.7366198125073,
"count": 2439558,
"self": 3651.7366198125073
}
}
},
"workers": {
"total": 12.754781470739545,
"count": 1305624,
"self": 0.0,
"children": {
"worker_root": {
"total": 20934.49179555532,
"count": 1305624,
"is_parallel": true,
"self": 11508.359365922377,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001406590001352015,
"count": 2,
"is_parallel": true,
"self": 0.0003288370007794583,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010777530005725566,
"count": 8,
"is_parallel": true,
"self": 0.0010777530005725566
}
}
},
"UnityEnvironment.step": {
"total": 0.01745770300112781,
"count": 1,
"is_parallel": true,
"self": 0.0004177289993094746,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000340651000442449,
"count": 1,
"is_parallel": true,
"self": 0.000340651000442449
},
"communicator.exchange": {
"total": 0.015378976000647526,
"count": 1,
"is_parallel": true,
"self": 0.015378976000647526
},
"steps_from_proto": {
"total": 0.0013203470007283613,
"count": 2,
"is_parallel": true,
"self": 0.000274457001069095,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010458899996592663,
"count": 8,
"is_parallel": true,
"self": 0.0010458899996592663
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.19094867799685744,
"count": 194,
"is_parallel": true,
"self": 0.03693713900975126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.15401153898710618,
"count": 776,
"is_parallel": true,
"self": 0.15401153898710618
}
}
},
"UnityEnvironment.step": {
"total": 9425.941480954947,
"count": 1305623,
"is_parallel": true,
"self": 550.027076479897,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 384.13616320726396,
"count": 1305623,
"is_parallel": true,
"self": 384.13616320726396
},
"communicator.exchange": {
"total": 6805.923204408804,
"count": 1305623,
"is_parallel": true,
"self": 6805.923204408804
},
"steps_from_proto": {
"total": 1685.8550368589822,
"count": 2611246,
"is_parallel": true,
"self": 326.38745934742656,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1359.4675775115556,
"count": 10444984,
"is_parallel": true,
"self": 1359.4675775115556
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5575.28158146754,
"count": 1305624,
"self": 152.71568596267753,
"children": {
"process_trajectory": {
"total": 1869.7837674928687,
"count": 1305624,
"self": 1864.1565624628583,
"children": {
"RLTrainer._checkpoint": {
"total": 5.627205030010373,
"count": 38,
"self": 5.627205030010373
}
}
},
"_update_policy": {
"total": 3552.7821280119933,
"count": 892,
"self": 1969.7334296300814,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1583.048698381912,
"count": 27756,
"self": 1583.048698381912
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.110008249990642e-07,
"count": 1,
"self": 9.110008249990642e-07
},
"TrainerController._save_models": {
"total": 0.12821158199949423,
"count": 1,
"self": 0.0012444009989849292,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1269671810005093,
"count": 1,
"self": 0.1269671810005093
}
}
}
}
}
}
}