{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5,
  "eval_steps": 3,
  "global_step": 6,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08333333333333333,
      "grad_norm": NaN,
      "learning_rate": 0.0,
      "loss": 1.4927,
      "step": 1
    },
    {
      "epoch": 0.08333333333333333,
      "eval_loss": 1.4977792501449585,
      "eval_runtime": 0.453,
      "eval_samples_per_second": 6.622,
      "eval_steps_per_second": 6.622,
      "step": 1
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 4.665465831756592,
      "learning_rate": 2e-05,
      "loss": 1.307,
      "step": 2
    },
    {
      "epoch": 0.25,
      "grad_norm": 5.513491153717041,
      "learning_rate": 4e-05,
      "loss": 1.5735,
      "step": 3
    },
    {
      "epoch": 0.25,
      "eval_loss": 1.4473786354064941,
      "eval_runtime": 0.4679,
      "eval_samples_per_second": 6.412,
      "eval_steps_per_second": 6.412,
      "step": 3
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 4.566956520080566,
      "learning_rate": 6e-05,
      "loss": 1.282,
      "step": 4
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 3.311328649520874,
      "learning_rate": 8e-05,
      "loss": 1.3071,
      "step": 5
    },
    {
      "epoch": 0.5,
      "grad_norm": 2.798964262008667,
      "learning_rate": 0.0001,
      "loss": 1.0892,
      "step": 6
    },
    {
      "epoch": 0.5,
      "eval_loss": 0.8217151761054993,
      "eval_runtime": 0.4475,
      "eval_samples_per_second": 6.705,
      "eval_steps_per_second": 6.705,
      "step": 6
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1109738548887552.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}