qwen-lora / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.976,
  "eval_steps": 500,
  "global_step": 186,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16,
      "grad_norm": 0.374408483505249,
      "learning_rate": 2.6315789473684212e-05,
      "loss": 1.1454,
      "step": 10
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.311002641916275,
      "learning_rate": 4.999557652060729e-05,
      "loss": 1.0246,
      "step": 20
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.30777308344841003,
      "learning_rate": 4.946665048328287e-05,
      "loss": 0.9632,
      "step": 30
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.4400554895401001,
      "learning_rate": 4.807442755497524e-05,
      "loss": 0.978,
      "step": 40
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.4841279983520508,
      "learning_rate": 4.586803181690609e-05,
      "loss": 0.968,
      "step": 50
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.34245041012763977,
      "learning_rate": 4.292531514268008e-05,
      "loss": 0.9348,
      "step": 60
    },
    {
      "epoch": 1.12,
      "grad_norm": 0.4096768796443939,
      "learning_rate": 3.9350110223152844e-05,
      "loss": 0.7968,
      "step": 70
    },
    {
      "epoch": 1.28,
      "grad_norm": 0.6328812837600708,
      "learning_rate": 3.526856686758269e-05,
      "loss": 0.7295,
      "step": 80
    },
    {
      "epoch": 1.44,
      "grad_norm": 0.47058913111686707,
      "learning_rate": 3.082470085335133e-05,
      "loss": 0.6724,
      "step": 90
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.46030929684638977,
      "learning_rate": 2.6175312381477442e-05,
      "loss": 0.6994,
      "step": 100
    },
    {
      "epoch": 1.76,
      "grad_norm": 0.5056099891662598,
      "learning_rate": 2.148445343837755e-05,
      "loss": 0.7171,
      "step": 110
    },
    {
      "epoch": 1.92,
      "grad_norm": 0.39329010248184204,
      "learning_rate": 1.69176392810087e-05,
      "loss": 0.7364,
      "step": 120
    },
    {
      "epoch": 2.08,
      "grad_norm": 0.5568200349807739,
      "learning_rate": 1.2636008291040618e-05,
      "loss": 0.7352,
      "step": 130
    },
    {
      "epoch": 2.24,
      "grad_norm": 0.714911937713623,
      "learning_rate": 8.790636265485334e-06,
      "loss": 0.4738,
      "step": 140
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.5729328989982605,
      "learning_rate": 5.51720576197794e-06,
      "loss": 0.5162,
      "step": 150
    },
    {
      "epoch": 2.56,
      "grad_norm": 0.6667868494987488,
      "learning_rate": 2.931218588927315e-06,
      "loss": 0.4586,
      "step": 160
    },
    {
      "epoch": 2.7199999999999998,
      "grad_norm": 0.49541157484054565,
      "learning_rate": 1.1239203660860648e-06,
      "loss": 0.5178,
      "step": 170
    },
    {
      "epoch": 2.88,
      "grad_norm": 0.5764051079750061,
      "learning_rate": 1.5908095594207583e-07,
      "loss": 0.4586,
      "step": 180
    },
    {
      "epoch": 2.976,
      "step": 186,
      "total_flos": 1.5945119821725696e+16,
      "train_loss": 0.7450424432754517,
      "train_runtime": 1935.7993,
      "train_samples_per_second": 0.775,
      "train_steps_per_second": 0.096
    }
  ],
  "logging_steps": 10,
  "max_steps": 186,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5945119821725696e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}