{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 8,
"global_step": 32,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"grad_norm": 0.5859375,
"learning_rate": 2e-05,
"loss": 3.9295,
"step": 1
},
{
"epoch": 0.03,
"eval_loss": 3.907318115234375,
"eval_runtime": 1.2713,
"eval_samples_per_second": 11.012,
"eval_steps_per_second": 5.506,
"step": 1
},
{
"epoch": 0.06,
"grad_norm": 0.5078125,
"learning_rate": 4e-05,
"loss": 3.805,
"step": 2
},
{
"epoch": 0.09,
"grad_norm": 0.63671875,
"learning_rate": 6e-05,
"loss": 3.8521,
"step": 3
},
{
"epoch": 0.12,
"grad_norm": 0.609375,
"learning_rate": 8e-05,
"loss": 3.8947,
"step": 4
},
{
"epoch": 0.16,
"grad_norm": 0.546875,
"learning_rate": 0.0001,
"loss": 3.6494,
"step": 5
},
{
"epoch": 0.19,
"grad_norm": 0.50390625,
"learning_rate": 0.00012,
"loss": 3.6457,
"step": 6
},
{
"epoch": 0.22,
"grad_norm": 0.640625,
"learning_rate": 0.00014,
"loss": 3.967,
"step": 7
},
{
"epoch": 0.25,
"grad_norm": 0.494140625,
"learning_rate": 0.00016,
"loss": 3.5364,
"step": 8
},
{
"epoch": 0.25,
"eval_loss": 3.6198840141296387,
"eval_runtime": 1.2681,
"eval_samples_per_second": 11.04,
"eval_steps_per_second": 5.52,
"step": 8
},
{
"epoch": 0.28,
"grad_norm": 0.51171875,
"learning_rate": 0.00018,
"loss": 3.5216,
"step": 9
},
{
"epoch": 0.31,
"grad_norm": 0.6171875,
"learning_rate": 0.0002,
"loss": 3.677,
"step": 10
},
{
"epoch": 0.34,
"grad_norm": 0.734375,
"learning_rate": 0.00019996456111234527,
"loss": 3.058,
"step": 11
},
{
"epoch": 0.38,
"grad_norm": 0.66796875,
"learning_rate": 0.0001998582695676762,
"loss": 3.1333,
"step": 12
},
{
"epoch": 0.41,
"grad_norm": 0.8359375,
"learning_rate": 0.000199681200703075,
"loss": 3.38,
"step": 13
},
{
"epoch": 0.44,
"grad_norm": 0.62109375,
"learning_rate": 0.00019943348002101371,
"loss": 3.1371,
"step": 14
},
{
"epoch": 0.47,
"grad_norm": 0.65625,
"learning_rate": 0.00019911528310040074,
"loss": 3.1479,
"step": 15
},
{
"epoch": 0.5,
"grad_norm": 0.6484375,
"learning_rate": 0.00019872683547213446,
"loss": 3.263,
"step": 16
},
{
"epoch": 0.5,
"eval_loss": 3.1820719242095947,
"eval_runtime": 1.2692,
"eval_samples_per_second": 11.031,
"eval_steps_per_second": 5.515,
"step": 16
},
{
"epoch": 0.53,
"grad_norm": 0.53125,
"learning_rate": 0.00019826841245925212,
"loss": 2.9833,
"step": 17
},
{
"epoch": 0.56,
"grad_norm": 0.5703125,
"learning_rate": 0.00019774033898178667,
"loss": 3.0787,
"step": 18
},
{
"epoch": 0.59,
"grad_norm": 0.71484375,
"learning_rate": 0.00019714298932647098,
"loss": 3.4132,
"step": 19
},
{
"epoch": 0.62,
"grad_norm": 0.73046875,
"learning_rate": 0.0001964767868814516,
"loss": 2.7304,
"step": 20
},
{
"epoch": 0.66,
"grad_norm": 0.55078125,
"learning_rate": 0.00019574220383620055,
"loss": 3.0116,
"step": 21
},
{
"epoch": 0.69,
"grad_norm": 0.6171875,
"learning_rate": 0.00019493976084683813,
"loss": 2.9474,
"step": 22
},
{
"epoch": 0.72,
"grad_norm": 0.61328125,
"learning_rate": 0.00019407002666710336,
"loss": 2.9415,
"step": 23
},
{
"epoch": 0.75,
"grad_norm": 0.6328125,
"learning_rate": 0.00019313361774523385,
"loss": 2.798,
"step": 24
},
{
"epoch": 0.75,
"eval_loss": 2.896176815032959,
"eval_runtime": 1.2765,
"eval_samples_per_second": 10.967,
"eval_steps_per_second": 5.484,
"step": 24
},
{
"epoch": 0.78,
"grad_norm": 0.8046875,
"learning_rate": 0.00019213119778704128,
"loss": 3.2157,
"step": 25
},
{
"epoch": 0.81,
"grad_norm": 0.7109375,
"learning_rate": 0.00019106347728549135,
"loss": 3.0666,
"step": 26
},
{
"epoch": 0.84,
"grad_norm": 0.59765625,
"learning_rate": 0.00018993121301712193,
"loss": 2.8219,
"step": 27
},
{
"epoch": 0.88,
"grad_norm": 0.75390625,
"learning_rate": 0.00018873520750565718,
"loss": 3.1164,
"step": 28
},
{
"epoch": 0.91,
"grad_norm": 0.67578125,
"learning_rate": 0.00018747630845319612,
"loss": 2.7154,
"step": 29
},
{
"epoch": 0.94,
"grad_norm": 0.625,
"learning_rate": 0.0001861554081393806,
"loss": 2.7395,
"step": 30
},
{
"epoch": 0.97,
"grad_norm": 0.8125,
"learning_rate": 0.0001847734427889671,
"loss": 2.8282,
"step": 31
},
{
"epoch": 1.0,
"grad_norm": 0.86328125,
"learning_rate": 0.0001833313919082515,
"loss": 2.7787,
"step": 32
},
{
"epoch": 1.0,
"eval_loss": 2.67726731300354,
"eval_runtime": 1.2769,
"eval_samples_per_second": 10.964,
"eval_steps_per_second": 5.482,
"step": 32
}
],
"logging_steps": 1,
"max_steps": 128,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 32,
"total_flos": 6667331136651264.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}