{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5671671032905579,
"min": 0.5671671032905579,
"max": 1.4436455965042114,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17033.162109375,
"min": 17033.162109375,
"max": 43794.43359375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989963.0,
"min": 29952.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989963.0,
"min": 29952.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5754985213279724,
"min": -0.10258052498102188,
"max": 0.5824079513549805,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 162.29058837890625,
"min": -24.927066802978516,
"max": 165.4038543701172,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.06394016742706299,
"min": -0.06394016742706299,
"max": 0.35053372383117676,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -18.0311279296875,
"min": -18.0311279296875,
"max": 83.07649230957031,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0702031629250592,
"min": 0.06619880732753138,
"max": 0.07339590215165767,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.053047443875888,
"min": 0.5137713150616037,
"max": 1.053047443875888,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01794834718683786,
"min": 0.0002900829543136433,
"max": 0.019909722254461876,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2692252078025679,
"min": 0.004061161360391006,
"max": 0.27873611156246625,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.419597526833336e-06,
"min": 7.419597526833336e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011129396290250004,
"min": 0.00011129396290250004,
"max": 0.0036331330889556998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247316666666668,
"min": 0.10247316666666668,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5370975000000002,
"min": 1.3886848,
"max": 2.6110442999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025706935,
"min": 0.00025706935,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038560402500000006,
"min": 0.0038560402500000006,
"max": 0.12112332557000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010825072415173054,
"min": 0.01015456486493349,
"max": 0.3324744999408722,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16237609088420868,
"min": 0.14216390252113342,
"max": 2.3273215293884277,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 321.4719101123595,
"min": 309.5,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28611.0,
"min": 15984.0,
"max": 32668.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6560449313079373,
"min": -1.0000000521540642,
"max": 1.6616417439756812,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 147.38799888640642,
"min": -30.995201610028744,
"max": 165.04899854958057,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6560449313079373,
"min": -1.0000000521540642,
"max": 1.6616417439756812,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 147.38799888640642,
"min": -30.995201610028744,
"max": 165.04899854958057,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03634270197432946,
"min": 0.032868223816767565,
"max": 6.83444485347718,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.234500475715322,
"min": 3.131070501056456,
"max": 109.35111765563488,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703517003",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703519231"
},
"total": 2228.0305342889997,
"count": 1,
"self": 0.6534944110007928,
"children": {
"run_training.setup": {
"total": 0.0563965699993787,
"count": 1,
"self": 0.0563965699993787
},
"TrainerController.start_learning": {
"total": 2227.3206433079995,
"count": 1,
"self": 2.1007193750056103,
"children": {
"TrainerController._reset_env": {
"total": 2.205991990000257,
"count": 1,
"self": 2.205991990000257
},
"TrainerController.advance": {
"total": 2222.956130341994,
"count": 63861,
"self": 2.0510763020829472,
"children": {
"env_step": {
"total": 1509.6926524459295,
"count": 63861,
"self": 1382.3562625148434,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.10974500001703,
"count": 63861,
"self": 4.8086407979417345,
"children": {
"TorchPolicy.evaluate": {
"total": 121.3011042020753,
"count": 62554,
"self": 121.3011042020753
}
}
},
"workers": {
"total": 1.2266449310691314,
"count": 63861,
"self": 0.0,
"children": {
"worker_root": {
"total": 2223.068679779074,
"count": 63861,
"is_parallel": true,
"self": 970.4035015570416,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0013806999995722435,
"count": 1,
"is_parallel": true,
"self": 0.00041244999829359585,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009682500012786477,
"count": 8,
"is_parallel": true,
"self": 0.0009682500012786477
}
}
},
"UnityEnvironment.step": {
"total": 0.04531978999966668,
"count": 1,
"is_parallel": true,
"self": 0.0005633099990518531,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00026123000043298816,
"count": 1,
"is_parallel": true,
"self": 0.00026123000043298816
},
"communicator.exchange": {
"total": 0.04308442000001378,
"count": 1,
"is_parallel": true,
"self": 0.04308442000001378
},
"steps_from_proto": {
"total": 0.0014108300001680618,
"count": 1,
"is_parallel": true,
"self": 0.0002990000002682791,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011118299998997827,
"count": 8,
"is_parallel": true,
"self": 0.0011118299998997827
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1252.6651782220324,
"count": 63860,
"is_parallel": true,
"self": 34.92733187217982,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 19.586493822932425,
"count": 63860,
"is_parallel": true,
"self": 19.586493822932425
},
"communicator.exchange": {
"total": 1101.2546276129988,
"count": 63860,
"is_parallel": true,
"self": 1101.2546276129988
},
"steps_from_proto": {
"total": 96.89672491392139,
"count": 63860,
"is_parallel": true,
"self": 20.76002368598529,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.1367012279361,
"count": 510880,
"is_parallel": true,
"self": 76.1367012279361
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 711.2124015939817,
"count": 63861,
"self": 3.68536197679623,
"children": {
"process_trajectory": {
"total": 117.70001922418123,
"count": 63861,
"self": 117.57851627418222,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12150294999901234,
"count": 2,
"self": 0.12150294999901234
}
}
},
"_update_policy": {
"total": 589.8270203930042,
"count": 453,
"self": 240.60977506203744,
"children": {
"TorchPPOOptimizer.update": {
"total": 349.21724533096676,
"count": 22809,
"self": 349.21724533096676
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.599998520570807e-07,
"count": 1,
"self": 9.599998520570807e-07
},
"TrainerController._save_models": {
"total": 0.05780064099963056,
"count": 1,
"self": 0.001257710999198025,
"children": {
"RLTrainer._checkpoint": {
"total": 0.056542930000432534,
"count": 1,
"self": 0.056542930000432534
}
}
}
}
}
}
}