{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.7357916831970215,
"min": 2.7357916831970215,
"max": 2.8492350578308105,
"count": 2
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 28227.8984375,
"min": 28227.8984375,
"max": 29429.75,
"count": 2
},
"SnowballTarget.Step.mean": {
"value": 19992.0,
"min": 9952.0,
"max": 19992.0,
"count": 2
},
"SnowballTarget.Step.sum": {
"value": 19992.0,
"min": 9952.0,
"max": 19992.0,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.4086668491363525,
"min": 0.3880881071090698,
"max": 1.4086668491363525,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 288.7767028808594,
"min": 75.28909301757812,
"max": 288.7767028808594,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 2
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0726243893754002,
"min": 0.06779168705747461,
"max": 0.0726243893754002,
"count": 2
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.363121946877001,
"min": 0.27116674822989845,
"max": 0.363121946877001,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18778798019184786,
"min": 0.1503324258282744,
"max": 0.18778798019184786,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9389399009592393,
"min": 0.6013297033130977,
"max": 0.9389399009592393,
"count": 2
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 9.376007656000002e-05,
"min": 9.376007656000002e-05,
"max": 0.00029176002706000003,
"count": 2
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0004688003828000001,
"min": 0.0004688003828000001,
"max": 0.0011670401082400001,
"count": 2
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.12344000000000002,
"min": 0.12344000000000002,
"max": 0.17294,
"count": 2
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.6172000000000001,
"min": 0.6172000000000001,
"max": 0.69176,
"count": 2
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0011796560000000003,
"min": 0.0011796560000000003,
"max": 0.003649706,
"count": 2
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.005898280000000001,
"min": 0.005898280000000001,
"max": 0.014598824,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 6.236363636363636,
"min": 3.3636363636363638,
"max": 6.236363636363636,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 343.0,
"min": 148.0,
"max": 343.0,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 6.236363636363636,
"min": 3.3636363636363638,
"max": 6.236363636363636,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 343.0,
"min": 148.0,
"max": 343.0,
"count": 2
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689037043",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689037100"
},
"total": 56.40853439699998,
"count": 1,
"self": 0.4486982919999605,
"children": {
"run_training.setup": {
"total": 0.0429481360000068,
"count": 1,
"self": 0.0429481360000068
},
"TrainerController.start_learning": {
"total": 55.916887969000015,
"count": 1,
"self": 0.09420053700074504,
"children": {
"TrainerController._reset_env": {
"total": 4.112518426000008,
"count": 1,
"self": 4.112518426000008
},
"TrainerController.advance": {
"total": 51.3415636959993,
"count": 1889,
"self": 0.03251706099962348,
"children": {
"env_step": {
"total": 51.309046634999675,
"count": 1889,
"self": 36.27657740800083,
"children": {
"SubprocessEnvManager._take_step": {
"total": 15.006151975998023,
"count": 1889,
"self": 0.16561176900057717,
"children": {
"TorchPolicy.evaluate": {
"total": 14.840540206997446,
"count": 1889,
"self": 14.840540206997446
}
}
},
"workers": {
"total": 0.026317251000818942,
"count": 1889,
"self": 0.0,
"children": {
"worker_root": {
"total": 55.32565411799726,
"count": 1889,
"is_parallel": true,
"self": 29.76586097599818,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006152908999979445,
"count": 1,
"is_parallel": true,
"self": 0.004683542000009311,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014693669999701342,
"count": 10,
"is_parallel": true,
"self": 0.0014693669999701342
}
}
},
"UnityEnvironment.step": {
"total": 0.04464788900003214,
"count": 1,
"is_parallel": true,
"self": 0.0004196579999984351,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004304519999891454,
"count": 1,
"is_parallel": true,
"self": 0.0004304519999891454
},
"communicator.exchange": {
"total": 0.042066221000027326,
"count": 1,
"is_parallel": true,
"self": 0.042066221000027326
},
"steps_from_proto": {
"total": 0.0017315580000172304,
"count": 1,
"is_parallel": true,
"self": 0.0003441659998770774,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001387392000140153,
"count": 10,
"is_parallel": true,
"self": 0.001387392000140153
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 25.55979314199908,
"count": 1888,
"is_parallel": true,
"self": 1.1021282649982709,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.5411639280001168,
"count": 1888,
"is_parallel": true,
"self": 0.5411639280001168
},
"communicator.exchange": {
"total": 20.206473971000435,
"count": 1888,
"is_parallel": true,
"self": 20.206473971000435
},
"steps_from_proto": {
"total": 3.7100269780002577,
"count": 1888,
"is_parallel": true,
"self": 0.6820784209991189,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3.027948557001139,
"count": 18880,
"is_parallel": true,
"self": 3.027948557001139
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00026426400000900685,
"count": 1,
"self": 0.00026426400000900685,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 51.080616559999896,
"count": 38720,
"is_parallel": true,
"self": 0.8460359980052203,
"children": {
"process_trajectory": {
"total": 30.809546705994705,
"count": 38720,
"is_parallel": true,
"self": 27.443558514994777,
"children": {
"RLTrainer._checkpoint": {
"total": 3.3659881909999285,
"count": 4,
"is_parallel": true,
"self": 3.3659881909999285
}
}
},
"_update_policy": {
"total": 19.42503385599997,
"count": 9,
"is_parallel": true,
"self": 7.394169554999053,
"children": {
"TorchPPOOptimizer.update": {
"total": 12.030864301000918,
"count": 456,
"is_parallel": true,
"self": 12.030864301000918
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.368341045999955,
"count": 1,
"self": 0.004682722999973521,
"children": {
"RLTrainer._checkpoint": {
"total": 0.36365832299998146,
"count": 1,
"self": 0.36365832299998146
}
}
}
}
}
}
}