{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8245843648910522,
"min": 1.6784312725067139,
"max": 2.265312433242798,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34389.765625,
"min": 29133.91015625,
"max": 49488.48046875,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 89.12962962962963,
"min": 39.88709677419355,
"max": 112.27272727272727,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19252.0,
"min": 18112.0,
"max": 21080.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1559.1608573381989,
"min": 1433.2700543149463,
"max": 1596.7947189480517,
"count": 500
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 168389.37259252547,
"min": 132379.53545709304,
"max": 368037.8036492519,
"count": 500
},
"SoccerTwos.Step.mean": {
"value": 9999956.0,
"min": 5009949.0,
"max": 9999956.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 9999956.0,
"min": 5009949.0,
"max": 9999956.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.06259040534496307,
"min": -0.149006649851799,
"max": 0.0909595862030983,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 6.759763717651367,
"min": -21.009937286376953,
"max": 14.35448932647705,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.06400468200445175,
"min": -0.15823207795619965,
"max": 0.09040261805057526,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 6.912505626678467,
"min": -22.31072235107422,
"max": 15.123698234558105,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.05145184971668102,
"min": -0.4720098376274109,
"max": 0.3441641035242977,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.55679976940155,
"min": -58.468000054359436,
"max": 40.542799949645996,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.05145184971668102,
"min": -0.4720098376274109,
"max": 0.3441641035242977,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.55679976940155,
"min": -58.468000054359436,
"max": 40.542799949645996,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.013097892559987183,
"min": 0.010496539979552228,
"max": 0.025193315698804022,
"count": 242
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.013097892559987183,
"min": 0.010496539979552228,
"max": 0.025193315698804022,
"count": 242
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.07199831406275431,
"min": 0.06497642882168293,
"max": 0.12120228086908659,
"count": 242
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.07199831406275431,
"min": 0.06497642882168293,
"max": 0.12120228086908659,
"count": 242
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.07323341642816862,
"min": 0.06622683145105838,
"max": 0.12384758914510409,
"count": 242
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.07323341642816862,
"min": 0.06622683145105838,
"max": 0.12384758914510409,
"count": 242
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 242
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 242
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 242
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 242
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 242
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 242
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717661176",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\wte42\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./results/SoccerTwos/configuration.yaml --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1717699155"
},
"total": 37978.191293700016,
"count": 1,
"self": 1.7217363999807276,
"children": {
"run_training.setup": {
"total": 0.1701382999890484,
"count": 1,
"self": 0.1701382999890484
},
"TrainerController.start_learning": {
"total": 37976.299419000046,
"count": 1,
"self": 13.491651406046003,
"children": {
"TrainerController._reset_env": {
"total": 12.97793160006404,
"count": 26,
"self": 12.97793160006404
},
"TrainerController.advance": {
"total": 37949.59294379392,
"count": 343208,
"self": 12.36389017739566,
"children": {
"env_step": {
"total": 12556.275907405827,
"count": 343208,
"self": 9485.117376446258,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3062.482253482158,
"count": 343208,
"self": 79.84183619933901,
"children": {
"TorchPolicy.evaluate": {
"total": 2982.640417282819,
"count": 627882,
"self": 2982.640417282819
}
}
},
"workers": {
"total": 8.676277477410622,
"count": 343208,
"self": 0.0,
"children": {
"worker_root": {
"total": 37948.99914501875,
"count": 343208,
"is_parallel": true,
"self": 30425.611817030294,
"children": {
"steps_from_proto": {
"total": 0.09982249996392056,
"count": 52,
"is_parallel": true,
"self": 0.018828099709935486,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.08099440025398508,
"count": 208,
"is_parallel": true,
"self": 0.08099440025398508
}
}
},
"UnityEnvironment.step": {
"total": 7523.2875054884935,
"count": 343208,
"is_parallel": true,
"self": 362.36901493981713,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 395.00273852143437,
"count": 343208,
"is_parallel": true,
"self": 395.00273852143437
},
"communicator.exchange": {
"total": 5345.40010620479,
"count": 343208,
"is_parallel": true,
"self": 5345.40010620479
},
"steps_from_proto": {
"total": 1420.515645822452,
"count": 686416,
"is_parallel": true,
"self": 243.84451605426148,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1176.6711297681904,
"count": 2745664,
"is_parallel": true,
"self": 1176.6711297681904
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 25380.953146210697,
"count": 343208,
"self": 95.99636962643126,
"children": {
"process_trajectory": {
"total": 3647.4665173835238,
"count": 343208,
"self": 3645.0027465834864,
"children": {
"RLTrainer._checkpoint": {
"total": 2.4637708000373095,
"count": 10,
"self": 2.4637708000373095
}
}
},
"_update_policy": {
"total": 21637.490259200742,
"count": 242,
"self": 1779.7197898029117,
"children": {
"TorchPOCAOptimizer.update": {
"total": 19857.77046939783,
"count": 7260,
"self": 19857.77046939783
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.9000144675374031e-06,
"count": 1,
"self": 1.9000144675374031e-06
},
"TrainerController._save_models": {
"total": 0.23689030000241473,
"count": 1,
"self": 0.01592880004318431,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22096149995923042,
"count": 1,
"self": 0.22096149995923042
}
}
}
}
}
}
}