{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.17482934892177582,
"min": 0.17420333623886108,
"max": 1.5982792377471924,
"count": 84
},
"Pyramids.Policy.Entropy.sum": {
"value": 5239.2861328125,
"min": 5189.86572265625,
"max": 38346.4296875,
"count": 84
},
"Pyramids.Step.mean": {
"value": 2999957.0,
"min": 509925.0,
"max": 2999957.0,
"count": 84
},
"Pyramids.Step.sum": {
"value": 2999957.0,
"min": 509925.0,
"max": 2999957.0,
"count": 84
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7689042091369629,
"min": -0.14964228868484497,
"max": 0.8614444136619568,
"count": 84
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 234.51577758789062,
"min": -35.70473861694336,
"max": 264.46343994140625,
"count": 84
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0020513564813882113,
"min": -0.03093297779560089,
"max": 0.41726115345954895,
"count": 84
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.6256637573242188,
"min": -7.856976509094238,
"max": 100.1426773071289,
"count": 84
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06637767469898488,
"min": 0.06235508583162161,
"max": 0.07530194558322742,
"count": 84
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9956651204847732,
"min": 0.1367307250426772,
"max": 1.0809680807481832,
"count": 84
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016830076922714297,
"min": 0.00013697546329992298,
"max": 0.017540914127104252,
"count": 84
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2524511538407144,
"min": 0.0017806810228989988,
"max": 0.25283179609601464,
"count": 84
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4847728384422218e-06,
"min": 1.4847728384422218e-06,
"max": 0.0002493787168737667,
"count": 84
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.227159257663333e-05,
"min": 2.227159257663333e-05,
"max": 0.0030872145709286004,
"count": 84
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10049489111111111,
"min": 0.10049489111111111,
"max": 0.18312623333333333,
"count": 84
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5074233666666665,
"min": 0.36625246666666667,
"max": 2.4374331000000002,
"count": 84
},
"Pyramids.Policy.Beta.mean": {
"value": 5.9439622e-05,
"min": 5.9439622e-05,
"max": 0.008314310710000002,
"count": 84
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00089159433,
"min": 0.00089159433,
"max": 0.10294423286,
"count": 84
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007446684874594212,
"min": 0.007070202846080065,
"max": 0.8641199469566345,
"count": 84
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11170027405023575,
"min": 0.09898284077644348,
"max": 1.9113531112670898,
"count": 84
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 84
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 84
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 233.87218045112783,
"min": 213.12408759124088,
"max": 999.0,
"count": 83
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31105.0,
"min": 18981.0,
"max": 32633.0,
"count": 83
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7215393754004529,
"min": -1.0000000521540642,
"max": 1.786195639034976,
"count": 83
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 227.24319755285978,
"min": -32.000001668930054,
"max": 253.36139796674252,
"count": 83
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7215393754004529,
"min": -1.0000000521540642,
"max": 1.786195639034976,
"count": 83
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 227.24319755285978,
"min": -32.000001668930054,
"max": 253.36139796674252,
"count": 83
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.017973600814112808,
"min": 0.016360701486374255,
"max": 6.292424391023815,
"count": 83
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.3725153074628906,
"min": 2.1935658946749754,
"max": 201.35758051276207,
"count": 83
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739874395",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739880960"
},
"total": 6564.893774228,
"count": 1,
"self": 0.5416670360000353,
"children": {
"run_training.setup": {
"total": 0.021810664000440738,
"count": 1,
"self": 0.021810664000440738
},
"TrainerController.start_learning": {
"total": 6564.330296528,
"count": 1,
"self": 4.346934234961736,
"children": {
"TrainerController._reset_env": {
"total": 2.936605330999555,
"count": 1,
"self": 2.936605330999555
},
"TrainerController.advance": {
"total": 6556.9565665290365,
"count": 161807,
"self": 4.36184257162131,
"children": {
"env_step": {
"total": 4738.850829515155,
"count": 161807,
"self": 4294.386342242735,
"children": {
"SubprocessEnvManager._take_step": {
"total": 441.9649625342163,
"count": 161807,
"self": 13.31593896180948,
"children": {
"TorchPolicy.evaluate": {
"total": 428.6490235724068,
"count": 156299,
"self": 428.6490235724068
}
}
},
"workers": {
"total": 2.499524738203945,
"count": 161807,
"self": 0.0,
"children": {
"worker_root": {
"total": 6549.014671803079,
"count": 161807,
"is_parallel": true,
"self": 2580.480464456886,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002100775999679172,
"count": 1,
"is_parallel": true,
"self": 0.0006744310012436472,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014263449984355248,
"count": 8,
"is_parallel": true,
"self": 0.0014263449984355248
}
}
},
"UnityEnvironment.step": {
"total": 0.05697112200050469,
"count": 1,
"is_parallel": true,
"self": 0.0004941830011375714,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004356379995442694,
"count": 1,
"is_parallel": true,
"self": 0.0004356379995442694
},
"communicator.exchange": {
"total": 0.0546180169994841,
"count": 1,
"is_parallel": true,
"self": 0.0546180169994841
},
"steps_from_proto": {
"total": 0.0014232840003387537,
"count": 1,
"is_parallel": true,
"self": 0.0002906259996962035,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011326580006425502,
"count": 8,
"is_parallel": true,
"self": 0.0011326580006425502
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3968.5342073461934,
"count": 161806,
"is_parallel": true,
"self": 87.96226158397258,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 61.19592845652551,
"count": 161806,
"is_parallel": true,
"self": 61.19592845652551
},
"communicator.exchange": {
"total": 3560.82909994094,
"count": 161806,
"is_parallel": true,
"self": 3560.82909994094
},
"steps_from_proto": {
"total": 258.54691736475525,
"count": 161806,
"is_parallel": true,
"self": 54.16847064481772,
"children": {
"_process_rank_one_or_two_observation": {
"total": 204.37844671993753,
"count": 1294448,
"is_parallel": true,
"self": 204.37844671993753
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1813.7438944422602,
"count": 161807,
"self": 8.371537257095952,
"children": {
"process_trajectory": {
"total": 343.85949047214217,
"count": 161807,
"self": 343.31208792414327,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5474025479988995,
"count": 5,
"self": 0.5474025479988995
}
}
},
"_update_policy": {
"total": 1461.512866713022,
"count": 1153,
"self": 795.8833492420399,
"children": {
"TorchPPOOptimizer.update": {
"total": 665.6295174709821,
"count": 57030,
"self": 665.6295174709821
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.29000782687217e-07,
"count": 1,
"self": 8.29000782687217e-07
},
"TrainerController._save_models": {
"total": 0.09018960400135256,
"count": 1,
"self": 0.002010412999879918,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08817919100147265,
"count": 1,
"self": 0.08817919100147265
}
}
}
}
}
}
}