poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.869733452796936,
"min": 1.8288425207138062,
"max": 3.295680284500122,
"count": 755
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 38351.97265625,
"min": 16491.24609375,
"max": 127750.984375,
"count": 755
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 56.093023255813954,
"min": 41.62931034482759,
"max": 999.0,
"count": 755
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19296.0,
"min": 14640.0,
"max": 25392.0,
"count": 755
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1469.522847192251,
"min": 1185.2975081231273,
"max": 1515.4705660695133,
"count": 731
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 252757.92971706716,
"min": 2370.5950162462545,
"max": 333977.8624059555,
"count": 731
},
"SoccerTwos.Step.mean": {
"value": 7549984.0,
"min": 9990.0,
"max": 7549984.0,
"count": 755
},
"SoccerTwos.Step.sum": {
"value": 7549984.0,
"min": 9990.0,
"max": 7549984.0,
"count": 755
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.044055331498384476,
"min": -0.15102851390838623,
"max": 0.1658662110567093,
"count": 755
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -7.577517032623291,
"min": -28.242332458496094,
"max": 22.6661376953125,
"count": 755
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04948036000132561,
"min": -0.15268553793430328,
"max": 0.16741126775741577,
"count": 755
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -8.510622024536133,
"min": -28.552194595336914,
"max": 22.265403747558594,
"count": 755
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 755
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 755
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.053179069313892095,
"min": -0.6865904785337902,
"max": 0.4144500017166138,
"count": 755
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -9.146799921989441,
"min": -58.91720008850098,
"max": 49.9891996383667,
"count": 755
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.053179069313892095,
"min": -0.6865904785337902,
"max": 0.4144500017166138,
"count": 755
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -9.146799921989441,
"min": -58.91720008850098,
"max": 49.9891996383667,
"count": 755
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 755
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 755
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.013682717209060986,
"min": 0.01074417207079629,
"max": 0.02316420157828058,
"count": 363
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.013682717209060986,
"min": 0.01074417207079629,
"max": 0.02316420157828058,
"count": 363
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1181153250237306,
"min": 3.1416401043316e-06,
"max": 0.12238245805104574,
"count": 363
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1181153250237306,
"min": 3.1416401043316e-06,
"max": 0.12238245805104574,
"count": 363
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.12016588772336642,
"min": 1.2618198570635287e-05,
"max": 0.1245737537741661,
"count": 363
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.12016588772336642,
"min": 1.2618198570635287e-05,
"max": 0.1245737537741661,
"count": 363
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 363
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 363
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 363
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 363
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 363
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 363
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676138001",
"python_version": "3.9.16 (main, Jan 11 2023, 16:05:54) \n[GCC 11.2.0]",
"command_line_arguments": "/home/css/anaconda3/envs/rl_course/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1676180193"
},
"total": 42191.540514424996,
"count": 1,
"self": 0.0036059089907212183,
"children": {
"run_training.setup": {
"total": 0.13660803800030408,
"count": 1,
"self": 0.13660803800030408
},
"TrainerController.start_learning": {
"total": 42191.400300478,
"count": 1,
"self": 7.256411541078705,
"children": {
"TrainerController._reset_env": {
"total": 11.555317614986052,
"count": 38,
"self": 11.555317614986052
},
"TrainerController.advance": {
"total": 42172.302790482936,
"count": 516667,
"self": 6.64878363115713,
"children": {
"env_step": {
"total": 40113.46188635869,
"count": 516667,
"self": 38643.268364980606,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1465.7256713260076,
"count": 516667,
"self": 38.07819719355757,
"children": {
"TorchPolicy.evaluate": {
"total": 1427.64747413245,
"count": 953520,
"self": 1427.64747413245
}
}
},
"workers": {
"total": 4.467850052074937,
"count": 516666,
"self": 0.0,
"children": {
"worker_root": {
"total": 42180.143281419565,
"count": 516666,
"is_parallel": true,
"self": 4400.139482353479,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020667460003096494,
"count": 2,
"is_parallel": true,
"self": 0.00036049200025445316,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017062540000551962,
"count": 8,
"is_parallel": true,
"self": 0.0017062540000551962
}
}
},
"UnityEnvironment.step": {
"total": 0.13621059900015098,
"count": 1,
"is_parallel": true,
"self": 0.00016826499995659105,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.002248804999908316,
"count": 1,
"is_parallel": true,
"self": 0.002248804999908316
},
"communicator.exchange": {
"total": 0.13196066500040615,
"count": 1,
"is_parallel": true,
"self": 0.13196066500040615
},
"steps_from_proto": {
"total": 0.001832863999879919,
"count": 2,
"is_parallel": true,
"self": 0.00026673299998947186,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015661309998904471,
"count": 8,
"is_parallel": true,
"self": 0.0015661309998904471
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 37779.93516791707,
"count": 516665,
"is_parallel": true,
"self": 73.66751974359795,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 658.355059391859,
"count": 516665,
"is_parallel": true,
"self": 658.355059391859
},
"communicator.exchange": {
"total": 36129.33761935332,
"count": 516665,
"is_parallel": true,
"self": 36129.33761935332
},
"steps_from_proto": {
"total": 918.5749694282913,
"count": 1033330,
"is_parallel": true,
"self": 123.89298991918076,
"children": {
"_process_rank_one_or_two_observation": {
"total": 794.6819795091105,
"count": 4133320,
"is_parallel": true,
"self": 794.6819795091105
}
}
}
}
},
"steps_from_proto": {
"total": 0.06863114901443623,
"count": 74,
"is_parallel": true,
"self": 0.009315816037997138,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.059315332976439095,
"count": 296,
"is_parallel": true,
"self": 0.059315332976439095
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2052.1921204930877,
"count": 516666,
"self": 56.59559264291693,
"children": {
"process_trajectory": {
"total": 793.5043975362314,
"count": 516666,
"self": 789.4839403192309,
"children": {
"RLTrainer._checkpoint": {
"total": 4.020457217000512,
"count": 15,
"self": 4.020457217000512
}
}
},
"_update_policy": {
"total": 1202.0921303139394,
"count": 364,
"self": 863.772296696925,
"children": {
"TorchPOCAOptimizer.update": {
"total": 338.3198336170144,
"count": 10920,
"self": 338.3198336170144
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.010013748891652e-07,
"count": 1,
"self": 6.010013748891652e-07
},
"TrainerController._save_models": {
"total": 0.28578023800218944,
"count": 1,
"self": 0.0008242700059781782,
"children": {
"RLTrainer._checkpoint": {
"total": 0.28495596799621126,
"count": 1,
"self": 0.28495596799621126
}
}
}
}
}
}
}