TinyPixel committed on
Commit
0ee5188
1 Parent(s): 5f73e1a

Upload folder using huggingface_hub

adapter_config.json CHANGED
@@ -19,10 +19,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "query_key_value",
     "dense",
-    "dense_4h_to_h",
-    "dense_h_to_4h"
+    "query_key_value",
+    "dense_h_to_4h",
+    "dense_4h_to_h"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9c6fbc8839039d81518e809e09d04f26d0b62532c6bba65cc594b3d72b56418f
+oid sha256:7bebd68776a4a61cb1f4f81939eaf7f8e14cde232d04f0af2894676460b6433c
 size 134235712
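adapter_model.safetensors, like optimizer.pt, rng_state.pth, and training_args.bin below, is tracked through Git LFS, so the diff only touches the pointer file (version, oid, size). To confirm that a locally downloaded blob matches the new pointer, hashing it is enough; a small sketch follows (the file path is a placeholder for wherever the blob lives locally).

```python
# Sketch: check a downloaded LFS object against its pointer's sha256 oid and size.
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file through sha256 so large blobs need not fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

blob = Path("adapter_model.safetensors")  # placeholder local path
expected_oid = "7bebd68776a4a61cb1f4f81939eaf7f8e14cde232d04f0af2894676460b6433c"
expected_size = 134235712

assert blob.stat().st_size == expected_size, "size does not match the pointer"
assert sha256_of(blob) == expected_oid, "sha256 does not match the pointer"
print("blob matches the LFS pointer")
```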
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1fcb6340b982aa0e8d5ec80efe728b52fbfcffab7aceaed128ea70997a523b77
+oid sha256:a928ccde6a9c2011fa546fdb58512b2cab3585ac2aac2aac229b06dfffabdec4
 size 268514874
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6ea94740a278d2cdc43335e117d4b3125aa15fdbf70cc95cca4937d85340fc2b
+oid sha256:a2409359a63e121485fe24a344ba36e669f278d392404a09304404248de3e122
 size 14244
special_tokens_map.json CHANGED
@@ -1,20 +1,4 @@
 {
-  "additional_special_tokens": [
-    {
-      "content": "<|im_end|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "<|im_start|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    }
-  ],
   "bos_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
tokenizer.json CHANGED
@@ -235,24 +235,6 @@
     },
     {
       "id": 50277,
-      "content": "<|im_end|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 50278,
-      "content": "<|im_start|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 50279,
       "content": "[PAD]",
       "single_word": false,
       "lstrip": false,
tokenizer_config.json CHANGED
@@ -202,22 +202,6 @@
     "special": false
   },
   "50277": {
-    "content": "<|im_end|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false,
-    "special": true
-  },
-  "50278": {
-    "content": "<|im_start|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false,
-    "special": true
-  },
-  "50279": {
     "content": "[PAD]",
     "lstrip": false,
     "normalized": false,
@@ -226,10 +210,6 @@
     "special": true
   }
 },
-  "additional_special_tokens": [
-    "<|im_end|>",
-    "<|im_start|>"
-  ],
   "bos_token": "<|endoftext|>",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|endoftext|>",
trainer_state.json CHANGED
@@ -11,145 +11,145 @@
   {
     "epoch": 0.04,
     "learning_rate": 0.0002,
-    "loss": 2.2783,
+    "loss": 2.3695,
     "step": 2
   },
   {
     "epoch": 0.08,
     "learning_rate": 0.0002,
-    "loss": 2.5108,
+    "loss": 2.3951,
     "step": 4
   },
   {
     "epoch": 0.12,
     "learning_rate": 0.0002,
-    "loss": 2.6053,
+    "loss": 2.5973,
     "step": 6
   },
   {
     "epoch": 0.16,
     "learning_rate": 0.0002,
-    "loss": 2.8587,
+    "loss": 2.5197,
     "step": 8
   },
   {
     "epoch": 0.21,
     "learning_rate": 0.0002,
-    "loss": 2.8002,
+    "loss": 2.5993,
     "step": 10
   },
   {
     "epoch": 0.25,
     "learning_rate": 0.0002,
-    "loss": 3.4071,
+    "loss": 2.7419,
     "step": 12
   },
   {
     "epoch": 0.29,
     "learning_rate": 0.0002,
-    "loss": 2.2816,
+    "loss": 2.2986,
     "step": 14
   },
   {
     "epoch": 0.33,
     "learning_rate": 0.0002,
-    "loss": 2.6214,
+    "loss": 2.3248,
     "step": 16
   },
   {
     "epoch": 0.37,
     "learning_rate": 0.0002,
-    "loss": 2.4466,
+    "loss": 2.5489,
     "step": 18
   },
   {
     "epoch": 0.41,
     "learning_rate": 0.0002,
-    "loss": 2.7186,
+    "loss": 2.407,
     "step": 20
   },
   {
     "epoch": 0.45,
     "learning_rate": 0.0002,
-    "loss": 2.6705,
+    "loss": 2.59,
     "step": 22
   },
   {
     "epoch": 0.49,
     "learning_rate": 0.0002,
-    "loss": 3.3761,
+    "loss": 2.6604,
     "step": 24
   },
   {
     "epoch": 0.53,
     "learning_rate": 0.0002,
-    "loss": 2.2793,
+    "loss": 2.134,
     "step": 26
   },
   {
     "epoch": 0.57,
     "learning_rate": 0.0002,
-    "loss": 2.2849,
+    "loss": 2.2908,
     "step": 28
   },
   {
     "epoch": 0.62,
     "learning_rate": 0.0002,
-    "loss": 2.3829,
+    "loss": 2.4975,
     "step": 30
   },
   {
     "epoch": 0.66,
     "learning_rate": 0.0002,
-    "loss": 2.6203,
+    "loss": 2.4533,
     "step": 32
   },
   {
     "epoch": 0.7,
     "learning_rate": 0.0002,
-    "loss": 2.5967,
+    "loss": 2.4644,
     "step": 34
   },
   {
     "epoch": 0.74,
     "learning_rate": 0.0002,
-    "loss": 2.7552,
+    "loss": 2.6187,
     "step": 36
   },
   {
     "epoch": 0.78,
     "learning_rate": 0.0002,
-    "loss": 2.1381,
+    "loss": 2.137,
     "step": 38
   },
   {
     "epoch": 0.82,
     "learning_rate": 0.0002,
-    "loss": 2.2939,
+    "loss": 2.368,
     "step": 40
   },
   {
     "epoch": 0.86,
     "learning_rate": 0.0002,
-    "loss": 2.5408,
+    "loss": 2.4951,
     "step": 42
   },
   {
     "epoch": 0.9,
     "learning_rate": 0.0002,
-    "loss": 2.4259,
+    "loss": 2.4786,
     "step": 44
   },
   {
     "epoch": 0.94,
     "learning_rate": 0.0002,
-    "loss": 2.5202,
+    "loss": 2.5544,
     "step": 46
   },
   {
     "epoch": 0.98,
     "learning_rate": 0.0002,
-    "loss": 2.5082,
+    "loss": 2.6641,
     "step": 48
   }
 ],
@@ -158,7 +158,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 500,
-  "total_flos": 2050368575963136.0,
+  "total_flos": 2050091535802368.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bbb5d771a6d1a018774cf9649db97eccab93bd3525027d97d1a3b3ef065d42e8
+oid sha256:c192e600688dd01de21da2985b51206f8f07a168a8b5675a50f80ace12a3a17f
 size 4728