TinyPixel committed on
Commit c407ef4
1 Parent(s): ac0f423

Upload folder using huggingface_hub

adapter_config.json CHANGED
@@ -20,12 +20,12 @@
   "revision": null,
   "target_modules": [
     "up_proj",
-    "down_proj",
-    "o_proj",
+    "q_proj",
     "gate_proj",
-    "k_proj",
+    "o_proj",
+    "down_proj",
     "v_proj",
-    "q_proj"
+    "k_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:82f0584a9fdecad22b26d4b43b9759f406a5219824727c031f1c8d8557e8792f
+oid sha256:cc805562df8d1ecadb0c7ceffd216b6ce1fc1b37b12c12a52bb95c28b1936981
 size 242266152
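
Since adapter_model.safetensors stores only the adapter tensors, a quick way to see what the retrained checkpoint contains is to list its tensor names and shapes. A minimal sketch, assuming the file has been downloaded locally (e.g. via huggingface_hub):

from safetensors import safe_open

# List the LoRA weight matrices stored in the adapter checkpoint; their names
# should reflect the target_modules listed in adapter_config.json.
with safe_open("adapter_model.safetensors", framework="pt") as f:
    for name in f.keys():
        print(name, f.get_slice(name).get_shape())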
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b6cbf7903fa258b793125361ef6b4c1b93c52baafc3b11c52784b5bac1e483d4
+oid sha256:dbc845a86ea49fcb13768bd2e1dffa9c405dc1c7d0f7bd8eff6b9a03d0a4e377
 size 484650042
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:619dbcc06b74b236aa1e4a20235db3ddb6093a74fe192b6d650a68dea86629c7
+oid sha256:97c1c57b943500bdd422828e26395d31b84f5a9794fe4c32c901ecbc4e90724e
 size 14244
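
Each binary file in this commit is tracked with Git LFS, so the repository itself stores only a small pointer recording the object's SHA-256 and size, as shown in the hunks above. A downloaded copy can be checked against its pointer along these lines (a minimal sketch; the expected values below are the post-commit ones for rng_state.pth):

import hashlib
from pathlib import Path

def check_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a local file against the oid/size recorded in its LFS pointer."""
    data = Path(path).read_bytes()  # fine for these sizes; stream and update the hash for larger files
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

print(check_lfs_object(
    "rng_state.pth",
    "97c1c57b943500bdd422828e26395d31b84f5a9794fe4c32c901ecbc4e90724e",
    14244,
))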
trainer_state.json CHANGED
@@ -11,187 +11,187 @@
     {
       "epoch": 0.03,
       "learning_rate": 0.0002,
-      "loss": 1.8763,
+      "loss": 1.6044,
       "step": 2
     },
     {
       "epoch": 0.06,
       "learning_rate": 0.0002,
-      "loss": 1.6551,
+      "loss": 1.5881,
       "step": 4
     },
     {
       "epoch": 0.1,
       "learning_rate": 0.0002,
-      "loss": 1.6117,
+      "loss": 1.5893,
       "step": 6
     },
     {
       "epoch": 0.13,
       "learning_rate": 0.0002,
-      "loss": 1.4364,
+      "loss": 1.6713,
       "step": 8
     },
     {
       "epoch": 0.16,
       "learning_rate": 0.0002,
-      "loss": 1.3856,
+      "loss": 1.6675,
       "step": 10
     },
     {
       "epoch": 0.19,
       "learning_rate": 0.0002,
-      "loss": 1.888,
+      "loss": 1.728,
       "step": 12
     },
     {
       "epoch": 0.22,
       "learning_rate": 0.0002,
-      "loss": 2.7773,
+      "loss": 2.0484,
       "step": 14
     },
     {
       "epoch": 0.26,
       "learning_rate": 0.0002,
-      "loss": 2.7147,
+      "loss": 1.6691,
       "step": 16
     },
     {
       "epoch": 0.29,
       "learning_rate": 0.0002,
-      "loss": 1.6225,
+      "loss": 1.2777,
       "step": 18
     },
     {
       "epoch": 0.32,
       "learning_rate": 0.0002,
-      "loss": 1.4844,
+      "loss": 1.3286,
       "step": 20
     },
     {
       "epoch": 0.35,
       "learning_rate": 0.0002,
-      "loss": 1.2329,
+      "loss": 1.2907,
       "step": 22
     },
     {
       "epoch": 0.38,
       "learning_rate": 0.0002,
-      "loss": 1.0668,
+      "loss": 1.2286,
       "step": 24
     },
     {
       "epoch": 0.42,
       "learning_rate": 0.0002,
-      "loss": 1.0695,
+      "loss": 1.3034,
       "step": 26
     },
     {
       "epoch": 0.45,
       "learning_rate": 0.0002,
-      "loss": 1.8005,
+      "loss": 1.2253,
       "step": 28
     },
     {
       "epoch": 0.48,
       "learning_rate": 0.0002,
-      "loss": 2.2701,
+      "loss": 1.398,
       "step": 30
     },
     {
       "epoch": 0.51,
       "learning_rate": 0.0002,
-      "loss": 1.7467,
+      "loss": 1.1233,
       "step": 32
     },
     {
       "epoch": 0.54,
       "learning_rate": 0.0002,
-      "loss": 1.416,
+      "loss": 1.181,
       "step": 34
     },
     {
       "epoch": 0.58,
       "learning_rate": 0.0002,
-      "loss": 1.3106,
+      "loss": 1.1303,
       "step": 36
     },
     {
       "epoch": 0.61,
       "learning_rate": 0.0002,
-      "loss": 0.8445,
+      "loss": 1.12,
       "step": 38
     },
     {
       "epoch": 0.64,
       "learning_rate": 0.0002,
-      "loss": 1.0193,
+      "loss": 1.191,
       "step": 40
     },
     {
       "epoch": 0.67,
       "learning_rate": 0.0002,
-      "loss": 1.5088,
+      "loss": 1.1562,
       "step": 42
     },
     {
       "epoch": 0.7,
       "learning_rate": 0.0002,
-      "loss": 1.7339,
+      "loss": 1.1221,
       "step": 44
     },
     {
       "epoch": 0.74,
       "learning_rate": 0.0002,
-      "loss": 2.137,
+      "loss": 1.2625,
       "step": 46
     },
     {
       "epoch": 0.77,
       "learning_rate": 0.0002,
-      "loss": 1.3696,
+      "loss": 1.0936,
       "step": 48
     },
     {
       "epoch": 0.8,
       "learning_rate": 0.0002,
-      "loss": 1.338,
+      "loss": 1.048,
       "step": 50
     },
     {
       "epoch": 0.83,
       "learning_rate": 0.0002,
-      "loss": 1.2948,
+      "loss": 0.9645,
       "step": 52
     },
     {
       "epoch": 0.86,
       "learning_rate": 0.0002,
-      "loss": 0.9147,
+      "loss": 1.0299,
       "step": 54
     },
     {
       "epoch": 0.9,
       "learning_rate": 0.0002,
-      "loss": 1.1057,
+      "loss": 1.0774,
       "step": 56
     },
     {
       "epoch": 0.93,
       "learning_rate": 0.0002,
-      "loss": 1.4985,
+      "loss": 1.1457,
       "step": 58
     },
     {
       "epoch": 0.96,
       "learning_rate": 0.0002,
-      "loss": 1.9006,
+      "loss": 1.1011,
       "step": 60
     },
     {
       "epoch": 0.99,
       "learning_rate": 0.0002,
-      "loss": 1.1481,
+      "loss": 1.0221,
       "step": 62
     }
   ],
@@ -200,7 +200,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 500,
-  "total_flos": 3204864602185728.0,
+  "total_flos": 2179055645245440.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0681cd1910c2b08abb29c099e64819599db5cbd9791249e18d756090b563b039
+oid sha256:085df06261844d8e43666229030657ce5dd5cc1aac22d14d759f080cd507fb4e
 size 4728
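
training_args.bin is the serialized TrainingArguments object that the Trainer saves alongside each checkpoint. It can be inspected after download roughly like this (a minimal sketch; it unpickles an arbitrary Python object, so transformers must be installed and the file should come from a trusted source):

import torch

# Load the pickled TrainingArguments; weights_only=False is required on
# recent PyTorch versions because this is not a plain tensor checkpoint.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)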