{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7826701402664185,
"min": 1.7394626140594482,
"max": 3.295732021331787,
"count": 700
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35482.265625,
"min": 21054.87109375,
"max": 105463.375,
"count": 700
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 48.97979797979798,
"min": 40.032786885245905,
"max": 999.0,
"count": 700
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19396.0,
"min": 15772.0,
"max": 25240.0,
"count": 700
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1553.3871473543213,
"min": 1200.6163275372303,
"max": 1592.0030082153114,
"count": 696
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 307570.6551761556,
"min": 2401.619727385159,
"max": 376258.17845454486,
"count": 696
},
"SoccerTwos.Step.mean": {
"value": 6999987.0,
"min": 9636.0,
"max": 6999987.0,
"count": 700
},
"SoccerTwos.Step.sum": {
"value": 6999987.0,
"min": 9636.0,
"max": 6999987.0,
"count": 700
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.037391383200883865,
"min": -0.09339915215969086,
"max": 0.1686522513628006,
"count": 700
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -7.440885543823242,
"min": -17.465641021728516,
"max": 24.960533142089844,
"count": 700
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.039466168731451035,
"min": -0.09576797485351562,
"max": 0.16788546741008759,
"count": 700
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.853767395019531,
"min": -17.908611297607422,
"max": 24.847049713134766,
"count": 700
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 700
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 700
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.015877385834353653,
"min": -0.5333333333333333,
"max": 0.3762683899171891,
"count": 700
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 3.159599781036377,
"min": -53.84040021896362,
"max": 58.32160043716431,
"count": 700
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.015877385834353653,
"min": -0.5333333333333333,
"max": 0.3762683899171891,
"count": 700
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 3.159599781036377,
"min": -53.84040021896362,
"max": 58.32160043716431,
"count": 700
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 700
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 700
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016260930813991256,
"min": 0.011429149822005356,
"max": 0.022731703917088453,
"count": 272
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016260930813991256,
"min": 0.011429149822005356,
"max": 0.022731703917088453,
"count": 272
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10706143660677804,
"min": 0.0011126294184344797,
"max": 0.12248001620173454,
"count": 272
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10706143660677804,
"min": 0.0011126294184344797,
"max": 0.12248001620173454,
"count": 272
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10893148245910804,
"min": 0.001181832811711595,
"max": 0.12536103340486685,
"count": 272
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10893148245910804,
"min": 0.001181832811711595,
"max": 0.12536103340486685,
"count": 272
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 272
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 272
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000004,
"min": 0.20000000000000004,
"max": 0.20000000000000004,
"count": 272
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000004,
"min": 0.20000000000000004,
"max": 0.20000000000000004,
"count": 272
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 272
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 272
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713786910",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\HP\\anaconda3\\envs\\unity_rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1713823638"
},
"total": 36728.019763799966,
"count": 1,
"self": 3.8332122998544946,
"children": {
"run_training.setup": {
"total": 0.18720070004928857,
"count": 1,
"self": 0.18720070004928857
},
"TrainerController.start_learning": {
"total": 36723.99935080006,
"count": 1,
"self": 17.48002420598641,
"children": {
"TrainerController._reset_env": {
"total": 23.18787780054845,
"count": 35,
"self": 23.18787780054845
},
"TrainerController.advance": {
"total": 36682.99288069352,
"count": 483487,
"self": 18.02699136384763,
"children": {
"env_step": {
"total": 14701.161438461975,
"count": 483487,
"self": 11609.348140965682,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3080.802714015823,
"count": 483487,
"self": 107.50193848169874,
"children": {
"TorchPolicy.evaluate": {
"total": 2973.300775534124,
"count": 880612,
"self": 2973.300775534124
}
}
},
"workers": {
"total": 11.010583480470814,
"count": 483487,
"self": 0.0,
"children": {
"worker_root": {
"total": 36674.639397889725,
"count": 483487,
"is_parallel": true,
"self": 27226.273161285208,
"children": {
"steps_from_proto": {
"total": 0.10766420012805611,
"count": 70,
"is_parallel": true,
"self": 0.02140469872392714,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.08625950140412897,
"count": 280,
"is_parallel": true,
"self": 0.08625950140412897
}
}
},
"UnityEnvironment.step": {
"total": 9448.25857240439,
"count": 483487,
"is_parallel": true,
"self": 553.870610109996,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 527.3979987687198,
"count": 483487,
"is_parallel": true,
"self": 527.3979987687198
},
"communicator.exchange": {
"total": 6613.873633601819,
"count": 483487,
"is_parallel": true,
"self": 6613.873633601819
},
"steps_from_proto": {
"total": 1753.1163299238542,
"count": 966974,
"is_parallel": true,
"self": 336.6632152366219,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1416.4531146872323,
"count": 3867896,
"is_parallel": true,
"self": 1416.4531146872323
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 21963.804450867698,
"count": 483487,
"self": 111.99286475090776,
"children": {
"process_trajectory": {
"total": 5004.217538817902,
"count": 483487,
"self": 5000.8038829177385,
"children": {
"RLTrainer._checkpoint": {
"total": 3.413655900163576,
"count": 14,
"self": 3.413655900163576
}
}
},
"_update_policy": {
"total": 16847.594047298888,
"count": 272,
"self": 1552.4214620952262,
"children": {
"TorchPOCAOptimizer.update": {
"total": 15295.172585203662,
"count": 9837,
"self": 15295.172585203662
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.100023604929447e-06,
"count": 1,
"self": 3.100023604929447e-06
},
"TrainerController._save_models": {
"total": 0.338564999983646,
"count": 1,
"self": 0.08246479998342693,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25610020000021905,
"count": 1,
"self": 0.25610020000021905
}
}
}
}
}
}
}