First training of SnowballTarget (commit 6175d24)
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.6006110906600952,
"min": 1.6006110906600952,
"max": 2.8590080738067627,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 16462.28515625,
"min": 16048.103515625,
"max": 29247.65234375,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 8.503311157226562,
"min": 0.37480053305625916,
"max": 8.503311157226562,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1734.675537109375,
"min": 72.7113037109375,
"max": 1734.675537109375,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07058133363767125,
"min": 0.06533432319074385,
"max": 0.07365583580166477,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.35290666818835625,
"min": 0.2613372927629754,
"max": 0.36049635989326695,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.256859752477384,
"min": 0.11705705442342579,
"max": 0.2931802852773199,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.2842987623869202,
"min": 0.46822821769370315,
"max": 1.461701294370726,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.6464094511999996e-05,
"min": 1.6464094511999996e-05,
"max": 0.000283764005412,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.232047255999999e-05,
"min": 8.232047255999999e-05,
"max": 0.00127032007656,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.105488,
"min": 0.105488,
"max": 0.194588,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.52744,
"min": 0.4615520000000001,
"max": 0.92344,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002838512,
"min": 0.0002838512,
"max": 0.0047299412,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.001419256,
"min": 0.001419256,
"max": 0.021179656,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 18.01818181818182,
"min": 3.4545454545454546,
"max": 18.113636363636363,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 991.0,
"min": 152.0,
"max": 991.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 17.837999937751075,
"min": 3.420000032945113,
"max": 17.932499939745124,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 981.0899965763092,
"min": 150.48000144958496,
"max": 981.0899965763092,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678405569",
"python_version": "3.8.16 (default, Mar 2 2023, 03:21:46) \n[GCC 11.2.0]",
"command_line_arguments": "/home/ikari/miniconda3/envs/unity/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.23.5",
"end_time_seconds": "1678406269"
},
"total": 700.7034086575732,
"count": 1,
"self": 0.2702407883480191,
"children": {
"run_training.setup": {
"total": 0.01100737601518631,
"count": 1,
"self": 0.01100737601518631
},
"TrainerController.start_learning": {
"total": 700.42216049321,
"count": 1,
"self": 0.23413055948913097,
"children": {
"TrainerController._reset_env": {
"total": 2.672250861302018,
"count": 1,
"self": 2.672250861302018
},
"TrainerController.advance": {
"total": 697.4390443023294,
"count": 9133,
"self": 0.11070850305259228,
"children": {
"env_step": {
"total": 697.3283357992768,
"count": 9133,
"self": 659.1052371319383,
"children": {
"SubprocessEnvManager._take_step": {
"total": 38.11382957268506,
"count": 9133,
"self": 0.5173566238954663,
"children": {
"TorchPolicy.evaluate": {
"total": 37.5964729487896,
"count": 9133,
"self": 11.23597602546215,
"children": {
"TorchPolicy.sample_actions": {
"total": 26.360496923327446,
"count": 9133,
"self": 26.360496923327446
}
}
}
}
},
"workers": {
"total": 0.1092690946534276,
"count": 9133,
"self": 0.0,
"children": {
"worker_root": {
"total": 700.063291747123,
"count": 9133,
"is_parallel": true,
"self": 56.8271007174626,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002307421527802944,
"count": 1,
"is_parallel": true,
"self": 0.0006025554612278938,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017048660665750504,
"count": 10,
"is_parallel": true,
"self": 0.0017048660665750504
}
}
},
"UnityEnvironment.step": {
"total": 0.10010497365146875,
"count": 1,
"is_parallel": true,
"self": 0.00017625000327825546,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.001834200695157051,
"count": 1,
"is_parallel": true,
"self": 0.001834200695157051
},
"communicator.exchange": {
"total": 0.09669351670891047,
"count": 1,
"is_parallel": true,
"self": 0.09669351670891047
},
"steps_from_proto": {
"total": 0.001401006244122982,
"count": 1,
"is_parallel": true,
"self": 0.0002723298966884613,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011286763474345207,
"count": 10,
"is_parallel": true,
"self": 0.0011286763474345207
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 643.2361910296604,
"count": 9132,
"is_parallel": true,
"self": 1.4528268156573176,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.059284785762429,
"count": 9132,
"is_parallel": true,
"self": 7.059284785762429
},
"communicator.exchange": {
"total": 619.2320438921452,
"count": 9132,
"is_parallel": true,
"self": 619.2320438921452
},
"steps_from_proto": {
"total": 15.4920355360955,
"count": 9132,
"is_parallel": true,
"self": 2.536272614262998,
"children": {
"_process_rank_one_or_two_observation": {
"total": 12.955762921832502,
"count": 91320,
"is_parallel": true,
"self": 12.955762921832502
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.66367569565773e-05,
"count": 1,
"self": 2.66367569565773e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 689.2931618690491,
"count": 1153995,
"is_parallel": true,
"self": 20.071513460949063,
"children": {
"process_trajectory": {
"total": 500.72718169074506,
"count": 1153995,
"is_parallel": true,
"self": 500.2185857957229,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5085958950221539,
"count": 2,
"is_parallel": true,
"self": 0.5085958950221539
}
}
},
"_update_policy": {
"total": 168.49446671735495,
"count": 45,
"is_parallel": true,
"self": 14.908211816102266,
"children": {
"TorchPPOOptimizer.update": {
"total": 153.5862549012527,
"count": 2292,
"is_parallel": true,
"self": 153.5862549012527
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.07670813333243132,
"count": 1,
"self": 0.0007420452311635017,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07596608810126781,
"count": 1,
"self": 0.07596608810126781
}
}
}
}
}
}
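The JSON above appears to be the timers.json summary that mlagents-learn writes when a run finishes: "gauges" holds each training statistic as a value with its min, max, and update count, "metadata" records the run environment, and the remaining keys form the hierarchical profiler tree. Below is a minimal sketch for loading the file and printing those gauges; the local path run_logs/timers.json is an assumption about where the file sits in the repository, not something stated in the log itself.

import json

# Path is an assumption: ML-Agents pushes typically place this file at
# run_logs/timers.json inside the model repository.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Print every gauge: a value plus its min, max, and update count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: {gauge['value']:.4g} "
          f"(min {gauge['min']:.4g}, max {gauge['max']:.4g}, count {gauge['count']})")

# A couple of headline numbers from this run.
reward = timers["gauges"]["SnowballTarget.Environment.CumulativeReward.mean"]
print(f"Mean cumulative reward: {reward['value']:.2f} (min {reward['min']:.2f} over the run)")
print(f"Total wall-clock time: {timers['total']:.0f} s "
      f"with ML-Agents {timers['metadata']['mlagents_version']}")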