{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0560487508773804,
"min": 1.0546391010284424,
"max": 2.878401756286621,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10141.236328125,
"min": 10141.236328125,
"max": 29572.69921875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.21324348449707,
"min": 0.28190872073173523,
"max": 12.21324348449707,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2381.58251953125,
"min": 54.69029235839844,
"max": 2483.07861328125,
"count": 20
},
"SnowballTarget.Policy.CuriosityValueEstimate.mean": {
"value": 3.719325304031372,
"min": 0.8654767274856567,
"max": 5.98353385925293,
"count": 20
},
"SnowballTarget.Policy.CuriosityValueEstimate.sum": {
"value": 725.2684326171875,
"min": 167.90248107910156,
"max": 1205.016845703125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06924930362533904,
"min": 0.06338950567627691,
"max": 0.07393685213504725,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27699721450135617,
"min": 0.25355802270510763,
"max": 0.36968426067523624,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.11405135957779838,
"min": 0.11405135957779838,
"max": 0.17432669158659728,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.45620543831119353,
"min": 0.45620543831119353,
"max": 0.8716334579329864,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Losses.CuriosityForwardLoss.mean": {
"value": 0.033297970324900804,
"min": 0.033297970324900804,
"max": 0.11310116368272871,
"count": 20
},
"SnowballTarget.Losses.CuriosityForwardLoss.sum": {
"value": 0.13319188129960322,
"min": 0.13319188129960322,
"max": 0.45240465473091485,
"count": 20
},
"SnowballTarget.Losses.CuriosityInverseLoss.mean": {
"value": 0.9637458064392501,
"min": 0.9518208898750007,
"max": 2.789861294349619,
"count": 20
},
"SnowballTarget.Losses.CuriosityInverseLoss.sum": {
"value": 3.8549832257570005,
"min": 3.8549832257570005,
"max": 13.06172506014506,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.75,
"min": 2.5681818181818183,
"max": 24.15909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1045.0,
"min": 113.0,
"max": 1324.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.75,
"min": 2.5681818181818183,
"max": 24.15909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1045.0,
"min": 113.0,
"max": 1324.0,
"count": 20
},
"SnowballTarget.Policy.CuriosityReward.mean": {
"value": 6.748560233041644,
"min": 6.748560233041644,
"max": 14.70017956414006,
"count": 20
},
"SnowballTarget.Policy.CuriosityReward.sum": {
"value": 296.93665025383234,
"min": 296.93665025383234,
"max": 808.5098760277033,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690619349",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690619956"
},
"total": 606.944480975,
"count": 1,
"self": 0.4383797349998986,
"children": {
"run_training.setup": {
"total": 0.05684811300011461,
"count": 1,
"self": 0.05684811300011461
},
"TrainerController.start_learning": {
"total": 606.449253127,
"count": 1,
"self": 0.7125750909988255,
"children": {
"TrainerController._reset_env": {
"total": 5.905509917000018,
"count": 1,
"self": 5.905509917000018
},
"TrainerController.advance": {
"total": 599.6454166860012,
"count": 18208,
"self": 0.37150189300677994,
"children": {
"env_step": {
"total": 599.2739147929944,
"count": 18208,
"self": 461.17990884699134,
"children": {
"SubprocessEnvManager._take_step": {
"total": 137.73083868699064,
"count": 18208,
"self": 2.117318586992269,
"children": {
"TorchPolicy.evaluate": {
"total": 135.61352009999837,
"count": 18208,
"self": 135.61352009999837
}
}
},
"workers": {
"total": 0.3631672590124708,
"count": 18208,
"self": 0.0,
"children": {
"worker_root": {
"total": 604.5033127649888,
"count": 18208,
"is_parallel": true,
"self": 312.414665197997,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006989888999896721,
"count": 1,
"is_parallel": true,
"self": 0.004549587000155952,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024403019997407682,
"count": 10,
"is_parallel": true,
"self": 0.0024403019997407682
}
}
},
"UnityEnvironment.step": {
"total": 0.044610140000031606,
"count": 1,
"is_parallel": true,
"self": 0.0005343699997411022,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042016499992314493,
"count": 1,
"is_parallel": true,
"self": 0.00042016499992314493
},
"communicator.exchange": {
"total": 0.04139244000020881,
"count": 1,
"is_parallel": true,
"self": 0.04139244000020881
},
"steps_from_proto": {
"total": 0.0022631650001585513,
"count": 1,
"is_parallel": true,
"self": 0.00043739700004152837,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001825768000117023,
"count": 10,
"is_parallel": true,
"self": 0.001825768000117023
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 292.08864756699177,
"count": 18207,
"is_parallel": true,
"self": 12.171518478975713,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.209938714005148,
"count": 18207,
"is_parallel": true,
"self": 6.209938714005148
},
"communicator.exchange": {
"total": 230.84534465901447,
"count": 18207,
"is_parallel": true,
"self": 230.84534465901447
},
"steps_from_proto": {
"total": 42.861845714996434,
"count": 18207,
"is_parallel": true,
"self": 8.34825417394677,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.513591541049664,
"count": 182070,
"is_parallel": true,
"self": 34.513591541049664
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00019768900006056356,
"count": 1,
"self": 0.00019768900006056356,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 595.750979182008,
"count": 447237,
"is_parallel": true,
"self": 11.112235243015675,
"children": {
"process_trajectory": {
"total": 312.65605925099067,
"count": 447237,
"is_parallel": true,
"self": 311.31322570999055,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3428335410001182,
"count": 4,
"is_parallel": true,
"self": 1.3428335410001182
}
}
},
"_update_policy": {
"total": 271.98268468800165,
"count": 90,
"is_parallel": true,
"self": 170.0724140550151,
"children": {
"TorchPPOOptimizer.update": {
"total": 101.91027063298657,
"count": 4587,
"is_parallel": true,
"self": 101.91027063298657
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.18555374399988978,
"count": 1,
"self": 0.0008983399998214736,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1846554040000683,
"count": 1,
"self": 0.1846554040000683
}
}
}
}
}
}
}