Curiosity reward signal enabled
9e832ad
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7823862433433533,
"min": 0.7754074335098267,
"max": 2.87212872505188,
"count": 40
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6962.455078125,
"min": 6900.3505859375,
"max": 31340.669921875,
"count": 40
},
"SnowballTarget.Step.mean": {
"value": 399928.0,
"min": 9952.0,
"max": 399928.0,
"count": 40
},
"SnowballTarget.Step.sum": {
"value": 399928.0,
"min": 9952.0,
"max": 399928.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.034942626953125,
"min": 0.45372357964515686,
"max": 12.034942626953125,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1733.03173828125,
"min": 65.33619689941406,
"max": 1863.9541015625,
"count": 40
},
"SnowballTarget.Policy.CuriosityValueEstimate.mean": {
"value": 0.705839991569519,
"min": 0.15340012311935425,
"max": 1.2888423204421997,
"count": 40
},
"SnowballTarget.Policy.CuriosityValueEstimate.sum": {
"value": 101.64096069335938,
"min": 22.089616775512695,
"max": 197.5901641845703,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06742327443161604,
"min": 0.0635198512355975,
"max": 0.07569230777824379,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26969309772646416,
"min": 0.25407940494239,
"max": 0.3784615388912189,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.0851801988285254,
"min": 0.05186068983508877,
"max": 0.10859993385041461,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.3407207953141016,
"min": 0.20744275934035508,
"max": 0.5272768591256702,
"count": 40
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.825098724999996e-06,
"min": 3.825098724999996e-06,
"max": 0.000295941001353,
"count": 40
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5300394899999985e-05,
"min": 1.5300394899999985e-05,
"max": 0.00144225001925,
"count": 40
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10191249999999999,
"min": 0.10191249999999999,
"max": 0.2479705,
"count": 40
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40764999999999996,
"min": 0.40764999999999996,
"max": 1.221125,
"count": 40
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.362249999999992e-05,
"min": 7.362249999999992e-05,
"max": 0.0049324853,
"count": 40
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0002944899999999997,
"min": 0.0002944899999999997,
"max": 0.024039425,
"count": 40
},
"SnowballTarget.Losses.CuriosityForwardLoss.mean": {
"value": 0.04188172720810946,
"min": 0.033935607860193534,
"max": 0.08911535578560742,
"count": 40
},
"SnowballTarget.Losses.CuriosityForwardLoss.sum": {
"value": 0.16752690883243784,
"min": 0.13837627082624857,
"max": 0.3564614231424297,
"count": 40
},
"SnowballTarget.Losses.CuriosityInverseLoss.mean": {
"value": 0.6003185132847113,
"min": 0.5939198171391207,
"max": 2.693057048539905,
"count": 40
},
"SnowballTarget.Losses.CuriosityInverseLoss.sum": {
"value": 2.4012740531388452,
"min": 2.4012740531388452,
"max": 11.969808614955229,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.931818181818183,
"min": 3.25,
"max": 26.204545454545453,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1141.0,
"min": 143.0,
"max": 1441.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.338635753501546,
"min": 2.9249998954209415,
"max": 23.584090276197955,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1026.899973154068,
"min": 128.69999539852142,
"max": 1296.8999617099762,
"count": 40
},
"SnowballTarget.Policy.CuriosityReward.mean": {
"value": 1.5325787596997211,
"min": 1.4327925314618783,
"max": 2.6246763768859886,
"count": 40
},
"SnowballTarget.Policy.CuriosityReward.sum": {
"value": 67.43346542678773,
"min": 63.73716417513788,
"max": 144.35720072872937,
"count": 40
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689921905",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ppo_snowballtarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTargetCurious --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689923075"
},
"total": 1170.717425587999,
"count": 1,
"self": 0.42497604499931185,
"children": {
"run_training.setup": {
"total": 0.03128317899972899,
"count": 1,
"self": 0.03128317899972899
},
"TrainerController.start_learning": {
"total": 1170.261166364,
"count": 1,
"self": 1.0667345250103608,
"children": {
"TrainerController._reset_env": {
"total": 4.5801261289998365,
"count": 1,
"self": 4.5801261289998365
},
"TrainerController.advance": {
"total": 1164.4760683689901,
"count": 36412,
"self": 0.5050375901291773,
"children": {
"env_step": {
"total": 1163.971030778861,
"count": 36412,
"self": 873.7409455367024,
"children": {
"SubprocessEnvManager._take_step": {
"total": 289.68929279413896,
"count": 36412,
"self": 3.2346155361719866,
"children": {
"TorchPolicy.evaluate": {
"total": 286.454677257967,
"count": 36412,
"self": 286.454677257967
}
}
},
"workers": {
"total": 0.5407924480196016,
"count": 36412,
"self": 0.0,
"children": {
"worker_root": {
"total": 1167.2430328119744,
"count": 36412,
"is_parallel": true,
"self": 681.4198886719532,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018927660003100755,
"count": 1,
"is_parallel": true,
"self": 0.000562683999305591,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013300820010044845,
"count": 10,
"is_parallel": true,
"self": 0.0013300820010044845
}
}
},
"UnityEnvironment.step": {
"total": 0.06856268100000307,
"count": 1,
"is_parallel": true,
"self": 0.0005964209994999692,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003943410001738812,
"count": 1,
"is_parallel": true,
"self": 0.0003943410001738812
},
"communicator.exchange": {
"total": 0.06546917000014219,
"count": 1,
"is_parallel": true,
"self": 0.06546917000014219
},
"steps_from_proto": {
"total": 0.002102749000187032,
"count": 1,
"is_parallel": true,
"self": 0.0005411560005086358,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015615929996783962,
"count": 10,
"is_parallel": true,
"self": 0.0015615929996783962
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 485.8231441400212,
"count": 36411,
"is_parallel": true,
"self": 20.639413553023587,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 10.078052166051748,
"count": 36411,
"is_parallel": true,
"self": 10.078052166051748
},
"communicator.exchange": {
"total": 387.16746825984774,
"count": 36411,
"is_parallel": true,
"self": 387.16746825984774
},
"steps_from_proto": {
"total": 67.93821016109814,
"count": 36411,
"is_parallel": true,
"self": 12.569071423020432,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.36913873807771,
"count": 364110,
"is_parallel": true,
"self": 55.36913873807771
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00020994999977119733,
"count": 1,
"self": 0.00020994999977119733,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1165.3188830539748,
"count": 22648,
"is_parallel": true,
"self": 0.5316522920466014,
"children": {
"process_trajectory": {
"total": 165.31442269693162,
"count": 22648,
"is_parallel": true,
"self": 155.09521240393315,
"children": {
"RLTrainer._checkpoint": {
"total": 10.219210292998469,
"count": 40,
"is_parallel": true,
"self": 10.219210292998469
}
}
},
"_update_policy": {
"total": 999.4728080649966,
"count": 181,
"is_parallel": true,
"self": 622.892439668005,
"children": {
"TorchPPOOptimizer.update": {
"total": 376.5803683969916,
"count": 15380,
"is_parallel": true,
"self": 376.5803683969916
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13802739099992323,
"count": 1,
"self": 0.0007361719999607885,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13729121899996244,
"count": 1,
"self": 0.13729121899996244
}
}
}
}
}
}
}
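
For reference, a minimal sketch (not part of the run output) of how the gauges and timer tree recorded above could be read back, assuming the file is saved locally at run_logs/timers.json as in the usual ML-Agents output layout:

# Minimal sketch: inspect the ML-Agents gauges and timer tree from this file.
# Assumes the JSON above is saved locally as run_logs/timers.json.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus min/max over the run and a sample count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The root timer node breaks total wall-clock time into nested children.
print("total seconds:", timers["total"])
for child, node in timers["children"].items():
    print(f"  {child}: {node['total']:.1f}s over {node['count']} call(s)")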