{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9193317294120789,
"min": 0.9193317294120789,
"max": 2.8702778816223145,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8777.779296875,
"min": 8777.779296875,
"max": 29426.08984375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.949409484863281,
"min": 0.11097344011068344,
"max": 12.949409484863281,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2525.134765625,
"min": 21.528846740722656,
"max": 2612.91259765625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06591802781693724,
"min": 0.06052903096344523,
"max": 0.07133102808569959,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.263672111267749,
"min": 0.2461917031757598,
"max": 0.35665514042849794,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19371654085961043,
"min": 0.11831303979681038,
"max": 0.29205476835954425,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7748661634384417,
"min": 0.4732521591872415,
"max": 1.4152983374455395,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.727272727272727,
"min": 3.1363636363636362,
"max": 25.727272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1132.0,
"min": 138.0,
"max": 1405.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.727272727272727,
"min": 3.1363636363636362,
"max": 25.727272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1132.0,
"min": 138.0,
"max": 1405.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703634378",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703634861"
},
"total": 482.94253622299993,
"count": 1,
"self": 0.44431149799993364,
"children": {
"run_training.setup": {
"total": 0.05442209099999218,
"count": 1,
"self": 0.05442209099999218
},
"TrainerController.start_learning": {
"total": 482.443802634,
"count": 1,
"self": 0.6914505560099542,
"children": {
"TrainerController._reset_env": {
"total": 3.080748420999953,
"count": 1,
"self": 3.080748420999953
},
"TrainerController.advance": {
"total": 478.57740323599,
"count": 18199,
"self": 0.3118793959774848,
"children": {
"env_step": {
"total": 478.2655238400125,
"count": 18199,
"self": 316.3856931640156,
"children": {
"SubprocessEnvManager._take_step": {
"total": 161.55289624900342,
"count": 18199,
"self": 1.605224268988536,
"children": {
"TorchPolicy.evaluate": {
"total": 159.9476719800149,
"count": 18199,
"self": 159.9476719800149
}
}
},
"workers": {
"total": 0.32693442699348907,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 480.9515119330134,
"count": 18199,
"is_parallel": true,
"self": 236.60706734201472,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005116841000017303,
"count": 1,
"is_parallel": true,
"self": 0.0036608379999734098,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014560030000438928,
"count": 10,
"is_parallel": true,
"self": 0.0014560030000438928
}
}
},
"UnityEnvironment.step": {
"total": 0.04348936400003822,
"count": 1,
"is_parallel": true,
"self": 0.0006430930000078661,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004580790000545676,
"count": 1,
"is_parallel": true,
"self": 0.0004580790000545676
},
"communicator.exchange": {
"total": 0.040385776999983136,
"count": 1,
"is_parallel": true,
"self": 0.040385776999983136
},
"steps_from_proto": {
"total": 0.002002414999992652,
"count": 1,
"is_parallel": true,
"self": 0.00039096599982713087,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016114490001655213,
"count": 10,
"is_parallel": true,
"self": 0.0016114490001655213
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 244.3444445909987,
"count": 18198,
"is_parallel": true,
"self": 11.447912406979526,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.964171766006984,
"count": 18198,
"is_parallel": true,
"self": 5.964171766006984
},
"communicator.exchange": {
"total": 189.57908447201066,
"count": 18198,
"is_parallel": true,
"self": 189.57908447201066
},
"steps_from_proto": {
"total": 37.35327594600153,
"count": 18198,
"is_parallel": true,
"self": 7.044188516018039,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.309087429983492,
"count": 181980,
"is_parallel": true,
"self": 30.309087429983492
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00021324500016817183,
"count": 1,
"self": 0.00021324500016817183,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 472.34916667499454,
"count": 721259,
"is_parallel": true,
"self": 15.672548802988558,
"children": {
"process_trajectory": {
"total": 261.9489423300064,
"count": 721259,
"is_parallel": true,
"self": 261.12825342900646,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8206889009999259,
"count": 4,
"is_parallel": true,
"self": 0.8206889009999259
}
}
},
"_update_policy": {
"total": 194.7276755419996,
"count": 90,
"is_parallel": true,
"self": 60.96993833299473,
"children": {
"TorchPPOOptimizer.update": {
"total": 133.75773720900486,
"count": 4584,
"is_parallel": true,
"self": 133.75773720900486
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09398717599992779,
"count": 1,
"self": 0.0009398739998687233,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09304730200005906,
"count": 1,
"self": 0.09304730200005906
}
}
}
}
}
}
}