{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8333333333333334,
  "eval_steps": 3,
  "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08333333333333333,
      "grad_norm": NaN,
      "learning_rate": 0.0,
      "loss": 1.4927,
      "step": 1
    },
    {
      "epoch": 0.08333333333333333,
      "eval_loss": 1.4977792501449585,
      "eval_runtime": 0.453,
      "eval_samples_per_second": 6.622,
      "eval_steps_per_second": 6.622,
      "step": 1
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 4.665465831756592,
      "learning_rate": 2e-05,
      "loss": 1.307,
      "step": 2
    },
    {
      "epoch": 0.25,
      "grad_norm": 5.513491153717041,
      "learning_rate": 4e-05,
      "loss": 1.5735,
      "step": 3
    },
    {
      "epoch": 0.25,
      "eval_loss": 1.4473786354064941,
      "eval_runtime": 0.4679,
      "eval_samples_per_second": 6.412,
      "eval_steps_per_second": 6.412,
      "step": 3
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 4.566956520080566,
      "learning_rate": 6e-05,
      "loss": 1.282,
      "step": 4
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 3.311328649520874,
      "learning_rate": 8e-05,
      "loss": 1.3071,
      "step": 5
    },
    {
      "epoch": 0.5,
      "grad_norm": 2.798964262008667,
      "learning_rate": 0.0001,
      "loss": 1.0892,
      "step": 6
    },
    {
      "epoch": 0.5,
      "eval_loss": 0.8217151761054993,
      "eval_runtime": 0.4475,
      "eval_samples_per_second": 6.705,
      "eval_steps_per_second": 6.705,
      "step": 6
    },
    {
      "epoch": 0.5833333333333334,
      "grad_norm": 2.373387575149536,
      "learning_rate": 0.00012,
      "loss": 0.8337,
      "step": 7
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 2.598729133605957,
      "learning_rate": 0.00014,
      "loss": 0.5417,
      "step": 8
    },
    {
      "epoch": 0.75,
      "grad_norm": 2.8108651638031006,
      "learning_rate": 0.00016,
      "loss": 0.2385,
      "step": 9
    },
    {
      "epoch": 0.75,
      "eval_loss": 0.05185846984386444,
      "eval_runtime": 0.4321,
      "eval_samples_per_second": 6.943,
      "eval_steps_per_second": 6.943,
      "step": 9
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 2.6426496505737305,
      "learning_rate": 0.00018,
      "loss": 0.1522,
      "step": 10
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1849564248145920.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}