emanuelaboros committed on
Commit
fc58e46
1 Parent(s): c381a7f

Upload folder using huggingface_hub

MAR-INF/MANIFEST.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "createdOn": "06/12/2023 17:03:48",
+   "runtime": "python",
+   "model": {
+     "modelName": "nel",
+     "handler": "model_handler_nel.py",
+     "modelVersion": "1.0",
+     "configFile": "model-config.yaml"
+   },
+   "archiverVersion": "0.8.1"
+ }
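
A MANIFEST.json with these fields is what torch-model-archiver (version 0.8.1 here) writes into MAR-INF when the .mar archive for TorchServe is built. A minimal sketch of how such an archive could be reproduced is given below; the --extra-files selection and paths are assumptions based on the files in this commit, not a record of the actual build command.

import subprocess

# Sketch: rebuild a "nel" archive matching MANIFEST.json with torch-model-archiver.
# The --extra-files list and paths are assumptions, not the original command.
extra_files = ",".join([
    "config.json",
    "generation_config.json",
    "pytorch_model.bin",
    "sentencepiece.bpe.model",
    "special_tokens_map.json",
    "tokenizer_config.json",
    "lang_title2wikidataID-normalized_with_redirect.pkl",  # loaded by the handler at import time
])

subprocess.run([
    "torch-model-archiver",
    "--model-name", "nel",                 # -> "modelName" in the manifest
    "--version", "1.0",                    # -> "modelVersion"
    "--handler", "model_handler_nel.py",   # -> "handler"
    "--config-file", "model-config.yaml",  # -> "configFile", read via ctx.model_yaml_config
    "--extra-files", extra_files,
    "--export-path", "model_store",
], check=True)
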
__pycache__/model_handler_nel.cpython-311.pyc ADDED
Binary file (14.8 kB).
 
config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "_name_or_path": "facebook/mgenre-wiki",
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "architectures": [
+     "MBartForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "classifier_dropout": 0.0,
+   "d_model": 1024,
+   "decoder_attention_heads": 16,
+   "decoder_ffn_dim": 4096,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 12,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "encoder_attention_heads": 16,
+   "encoder_ffn_dim": 4096,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 12,
+   "eos_token_id": 2,
+   "forced_eos_token_id": 2,
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "max_position_embeddings": 1024,
+   "model_type": "mbart",
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "scale_embedding": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.31.0",
+   "use_cache": true,
+   "vocab_size": 256001
+ }
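
This is the standard facebook/mgenre-wiki configuration: an MBart-style encoder-decoder (12 encoder and 12 decoder layers, d_model 1024) with a 256001-entry multilingual vocabulary. As a quick sanity check, the fine-tuned weights in this folder load directly with transformers; using "." as the path assumes a local clone of this repository with the LFS files pulled.

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# "." is assumed to be a local clone of this repository with LFS files present.
model = AutoModelForSeq2SeqLM.from_pretrained(".")  # reads config.json + pytorch_model.bin
tokenizer = AutoTokenizer.from_pretrained(".")      # reads tokenizer_config.json + sentencepiece.bpe.model

print(model.config.model_type, model.config.vocab_size)  # expected: mbart 256001
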
generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "bos_token_id": 0,
+   "decoder_start_token_id": 2,
+   "eos_token_id": 2,
+   "forced_eos_token_id": 2,
+   "pad_token_id": 1,
+   "transformers_version": "4.31.0"
+ }
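
generation_config.json only pins the special-token ids used at decoding time; the decoding strategy itself is set by the handler added below, which requests 5 beams and 5 returned sequences. A standalone sketch of that generation step follows; the example sentence is the OCR-noisy one quoted in the handler's preprocess(), and the printed candidates are only indicative.

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/mgenre-wiki")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mgenre-wiki").eval()

sentence = ("THE next MEETLNG of the TRITSTEE, will be held at the "
            "[START] LONDON HOTEL [END] in POOLE, on ldomaT, "
            "the 12th day or MARCH next. at 12 oClock at Noon")

with torch.no_grad():
    outputs = model.generate(
        **tokenizer([sentence], return_tensors="pt"),
        num_beams=5,
        num_return_sequences=5,
        return_dict_in_generate=True,
        output_scores=True,
    )

# mGENRE emits candidates of the form "<page title> >> <language code>";
# the handler turns the beam scores into normalised percentages and maps the
# first resolvable title to a Wikidata QID via lang_title2wikidataID.
for title, score in zip(
        tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True),
        outputs.sequences_scores):
    print(title, float(score))
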
model_handler_nel.py ADDED
@@ -0,0 +1,323 @@
+ from ts.torch_handler.base_handler import BaseHandler
+ from nltk.chunk import conlltags2tree
+ from nltk import pos_tag
+ from nltk.tree import Tree
+ import numpy as np
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+ import json
+ import string
+ # Get the directory of your script
+ import logging
+ import os
+ import sys
+ logger = logging.getLogger(__name__)
+
+ # get the current directory
+ current_directory = os.path.dirname(os.path.realpath(__file__))
+ print(current_directory)
+ # add the current directory to sys.path
+ sys.path.insert(0, current_directory)
+ import pickle
+ def pickle_load(path, verbose=False):
+     if path is None:
+         return None
+     if verbose:
+         print('Loading {}'.format(path))
+     with open(path, "rb") as f:
+         obj = pickle.load(f)
+     return obj
+
+ DEFAULT_MODEL = 'facebook/mgenre-wiki'
+
+ def tokenize(text):
+     # Add a space before and after specified punctuation marks
+     # text = re.sub(r'([,.!?])', r' \1 ', text)
+     # Split the text into tokens
+     tokens = text.split()
+     return tokens
+
+ logger.info(f'Loading title2wikidataID')
+ lang_title2wikidataID_path = "lang_title2wikidataID-normalized_with_redirect.pkl"
+ lang_title2wikidataID = pickle_load(
+     lang_title2wikidataID_path, verbose=True)
+
+ def text_to_id(x):
+     return max(lang_title2wikidataID[tuple(
+         reversed([y.strip() for y in x.split(" >> ")]))], key=lambda y: int(y[1:]))
+
+ """
+ Method for retrieving the Qid
+ """
+
+ def get_wikidata_qid(wikipedia_titles, scores):
+     qid = 'NIL'
+     wikipedia_title = wikipedia_titles[0]
+     score = scores[0]
+     for idx, title in enumerate(
+             wikipedia_titles):
+         try:
+             qid = text_to_id(title)
+             wikipedia_title = wikipedia_titles[idx]
+             score = scores[idx]
+             return qid, wikipedia_title, score
+         except BaseException:
+             qid = 'NIL'
+     return qid, wikipedia_title, score
+
+
+ def get_entities(tokens, preds_list_coarse, preds_list_fine, coarse_confidences, fine_confidences):
+     tags_coarse = [tag.replace('S-', 'B-').replace('E-', 'I-') for tag in preds_list_coarse]
+     tags_fine = [tag.replace('S-', 'B-').replace('E-', 'I-') for tag in preds_list_fine]
+     pos_tags = [pos for token, pos in pos_tag(tokens)]
+
+     conll_coarse_tags = [(token, pos, tg)
+                          for token, pos, tg in zip(tokens, pos_tags, tags_coarse)]
+     conll_fine_tags = [(token, pos, tg)
+                        for token, pos, tg in zip(tokens, pos_tags, tags_fine)]
+
+     ne_tree_coarse = conlltags2tree(conll_coarse_tags)
+     ne_tree_fine = conlltags2tree(conll_fine_tags)
+
+     coarse_entities = get_entities_from_tree(ne_tree_coarse, coarse_confidences)
+     fine_entities = get_entities_from_tree(ne_tree_fine, fine_confidences)
+     return coarse_entities, fine_entities
+
+
+ def logarithmic_scaling(confidence_score):
+     return np.log(confidence_score + 1e-10)  # Adding a small value to avoid log(0)
+
+
+ def classify_confidence(confidence_score):
+     return int(confidence_score * 100.0)
+     # TypeError: Object of type float32 is not JSON serializable
+     # if confidence_score > 0.95:
+     #     return 'high'
+     # elif confidence_score > 0.75:
+     #     return 'medium'
+     # else:
+     #     return 'low'
+
+ def get_entities_from_tree(ne_tree, token_confidences):
+     entities = []
+     idx = 0
+     char_position = 0  # This will hold the current character position
+
+     for subtree in ne_tree:
+         # skipping 'O' tags
+         if isinstance(subtree, Tree):
+             original_label = subtree.label()
+             original_string = " ".join(
+                 [token for token, pos in subtree.leaves()])
+
+             # original_string = reconstruct_text([token for token, pos in subtree.leaves()])
+
+             entity_start_position = char_position
+             entity_end_position = entity_start_position + len(original_string)
+
+             confidences = token_confidences[idx:idx + len(subtree)]
+             # Compute the average confidence
+             avg_confidence = sum(confidences) / len(confidences)
+             print(original_string, '- confidence -', token_confidences[idx:idx + len(subtree)], '- avg -',
+                   avg_confidence, classify_confidence(avg_confidence), '- label -', original_label)
+
+             entities.append(
+                 (original_string,
+                  original_label,
+                  (idx,
+                   idx + len(subtree)),
+                  (entity_start_position,
+                   entity_end_position),
+                  classify_confidence(avg_confidence)))
+
+             idx += len(subtree)
+
+             # Update the current character position
+             # We add the length of the original string + 1 (for the space)
+             char_position += len(original_string) + 1
+         else:
+             token, pos = subtree
+             # If it's not a named entity, we still need to update the character
+             # position
+             char_position += len(token) + 1  # We add 1 for the space
+             idx += 1
+     return entities
+
+
+ def realign(
+         text_sentence,
+         tokens_coarse_result,
+         tokens_fine_result,
+         coarse_confidences,
+         fine_confidences,
+         tokenizer,
+         language,
+         nerc_coarse_label_map,
+         nerc_fine_label_map):
+
+     preds_list_coarse, preds_list_fine, words_list, coarse_confidences_list, fine_confidences_list = [], [], [], [], []
+     word_ids = tokenizer(text_sentence, is_split_into_words=True).word_ids()
+
+     for idx, word in enumerate(text_sentence):
+         try:
+             beginning_index = word_ids.index(idx)
+             preds_list_coarse.append(nerc_coarse_label_map[tokens_coarse_result[beginning_index]])
+             preds_list_fine.append(nerc_fine_label_map[tokens_fine_result[beginning_index]])
+
+             coarse_confidences_list.append(coarse_confidences[beginning_index])
+             fine_confidences_list.append(fine_confidences[beginning_index])
+
+         except Exception as ex:  # the sentence was longer than max_length
+             preds_list_coarse.append('O')
+             preds_list_fine.append('O')
+
+             coarse_confidences_list.append(1.0)
+             fine_confidences_list.append(1.0)
+
+         words_list.append(word)
+
+     return words_list, preds_list_coarse, preds_list_fine, coarse_confidences_list, fine_confidences_list
+
+ import os
+
+
+
+ class NewsAgencyHandler(BaseHandler):
+     def __init__(self):
+         super().__init__()
+         self.model = None
+         self.tokenizer = None
+         self.device = None
+
+     def initialize(self, ctx):
+         # boilerplate
+         properties = ctx.system_properties
+         self.map_location = "cuda" if torch.cuda.is_available() else "cpu"
+
+         self.device = torch.device(self.map_location + ":" + str(
+             properties.get("gpu_id")) if torch.cuda.is_available() else self.map_location)
+
+         # self.manifest = ctx.manifest
+         # model_dir is the inside of your archive!
+         # extra-files are in this dir.
+
+
+         model_name = ctx.model_yaml_config["handler"]["model_name"]
+         logger.info("Model %s loading tokenizer", model_name)
+
+         # serialized_file = self.manifest["model"]["serializedFile"]
+
+         # self.tokenizer = AutoTokenizer.from_pretrained(
+         #     model_dir, local_files_only=True)
+         #
+         # # Loading the model and tokenizer from checkpoint and config files based on the user's choice of mode
+         # # further setup config can be added.
+         logger.error(f'getcwd: {os.getcwd()}')
+         logger.error(f'__file__: {__file__}')
+         logger.error(f'Model: {model_name}')
+         logger.error(f'Device: {self.device}')
+         #
+         # save_mode = "pretrained"
+         #
+         # if save_mode == "torchscript":
+         #     self.model = torch.jit.load(serialized_file)
+         # elif save_mode == "pretrained":
+         #     model_dir = properties.get("model_dir")
+         #     serialized_file = self.manifest["model"]["serializedFile"]
+         #     self.tokenizer = AutoTokenizer.from_pretrained(
+         #         model_dir, local_files_only=True)
+         #
+         #     self.model = torch.jit.load(serialized_file, map_location=self.device)
+         #
+         #     self.model.to(self.device)
+         #     self.model.eval()
+
+         self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+         # self.model = torch.nn.DataParallel(self.model)
+
+         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+         # else:
+         #     logger.warning("Missing the checkpoint or state_dict.")
+         self.model.to(self.map_location)
+         self.model.eval()
+         logger.info("Transformer model from path %s loaded successfully", model_name)
+
+     def preprocess(self, requests):
+         logger.info(f'Preprocessing requests {len(requests)}')
+
+         data = requests[0]
+         text_sentences = []
+
+         # The request should have the text:
+         # THE next MEETLNG of the TRITSTEE, will be held at the [START] LONDON HOTEL [END] in POOLE, on ldomaT,
+         # the 12th day or MARCH next. at 12 oClock at Noon
+
+         for item in data['body']:
+             item = json.loads(item)
+             text = item['text']
+             text_sentences.append(text)
+             language = item['language']
+             # print('Doc id:', item['doc_id'])
+             # print('-----Text', text, type(text))
+             # print('-----Language', language)
+
+         return text_sentences, language
+
+     def inference(self, inputs):
+
+         text_sentences, language = inputs
+
+         tokens_coarse_results, tokens_fine_results = [], []
+         tokens_coarse_confidences, tokens_fine_confidences = [], []
+
+         qids = []
+         with torch.no_grad():
+             for sentence in text_sentences:
+
+                 sentences = [sentence]
+
+                 # logger.error(f'Device: {self.device}')
+
+                 outputs = self.model.generate(
+                     **self.tokenizer(sentences, return_tensors="pt").to(self.device),
+                     num_beams=5,
+                     num_return_sequences=5,
+                     return_dict_in_generate=True,
+                     output_scores=True)
+
+                 token_ids, scores = outputs['sequences'], outputs['sequences_scores']
+                 wikipedia_titles = self.tokenizer.batch_decode(token_ids, skip_special_tokens=True)
+
+                 # Example log-likelihoods (scores)
+                 log_likelihoods = torch.tensor(scores)
+
+                 # Convert log-likelihoods to "probabilities" (not true probabilities)
+                 probabilities = torch.exp(log_likelihoods)
+
+                 # Normalize these probabilities so they sum to 1
+                 normalized_probabilities = probabilities / torch.sum(probabilities)
+
+                 # Convert to percentages
+                 percentages = normalized_probabilities * 100
+
+                 qid, wikipedia_title, score = get_wikidata_qid(wikipedia_titles, percentages)
+                 percentage_score = int(score)
+
+                 # logger.info(f"Model prediction: {wikipedia_titles} {qid}, {wikipedia_title}, {score}, "
+                 #             f"---- {percentage_score}")
+
+                 qids.append({'qid': qid, 'wikipedia_title': wikipedia_title, 'score': percentage_score})
+                 # logger.info('-' * 100)
+
+         return qids, text_sentences, language
+
+     def postprocess(self, outputs):
+         # postprocess the outputs here, for example, convert predictions to labels
+
+         qids, text_sentences, language = outputs
+
+         logger.info(f'Result NEL: {qids}')
+
+         return [[qids]]
+
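
Taken together, preprocess() expects each request body to hold JSON strings carrying 'text' (with the mention wrapped in [START]/[END]) and 'language', and postprocess() wraps the linked entities as [[qids]]. A sketch of how a client might call the deployed endpoint is shown below; the host, port and exact request envelope are assumptions inferred from the handler, not a documented API.

import json
import requests

# Assumed TorchServe inference endpoint for the "nel" model; adjust host/port
# to your deployment. The envelope mirrors what preprocess() expects.
url = "http://localhost:8080/predictions/nel"

payload = [json.dumps({
    "text": ("THE next MEETLNG of the TRITSTEE, will be held at the "
             "[START] LONDON HOTEL [END] in POOLE, on ldomaT, "
             "the 12th day or MARCH next. at 12 oClock at Noon"),
    "language": "en",
})]

response = requests.post(url, json=payload, timeout=60)
# postprocess() returns [[qids]]: a list holding one list of
# {"qid", "wikipedia_title", "score"} dictionaries.
print(response.json())
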
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c82be96aaccda634f8070e3b99cc8f4e74059ceae0562f43b8d88c2151d7050e
+ size 4936064811
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b3a2cc84b5557d70fcb9e55dfda9ab2f94faa27bf1d18cc54aab6e95a2e3200
+ size 2469076765
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b856bbb62b6a458ccdf5042ed253e3a935d7082ea4a9b9dcd51e72facf2510f
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08d8da819fd0b4e7c292d859bdbe164715dec0253457d3967452b528c4c3f3ce
+ size 627
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ee4dc054a17c18fe81f76c0b1cda00e9fc1cfd9e0f1a16cb6d77009e2076653
+ size 4870365
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
trainer_state.json ADDED
@@ -0,0 +1,194 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 7.377049180327869,
+   "global_step": 9000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.41,
+       "learning_rate": 1.918032786885246e-05,
+       "loss": 1.0017,
+       "step": 500
+     },
+     {
+       "epoch": 0.82,
+       "learning_rate": 1.836065573770492e-05,
+       "loss": 0.0813,
+       "step": 1000
+     },
+     {
+       "epoch": 1.0,
+       "eval_bleu": 0.0,
+       "eval_gen_len": 9.8363,
+       "eval_loss": 0.11947569251060486,
+       "eval_runtime": 48.7643,
+       "eval_samples_per_second": 41.957,
+       "eval_steps_per_second": 0.656,
+       "step": 1220
+     },
+     {
+       "epoch": 1.23,
+       "learning_rate": 1.7540983606557377e-05,
+       "loss": 0.0534,
+       "step": 1500
+     },
+     {
+       "epoch": 1.64,
+       "learning_rate": 1.6721311475409837e-05,
+       "loss": 0.038,
+       "step": 2000
+     },
+     {
+       "epoch": 2.0,
+       "eval_bleu": 0.0,
+       "eval_gen_len": 9.8495,
+       "eval_loss": 0.126708522439003,
+       "eval_runtime": 51.3358,
+       "eval_samples_per_second": 39.855,
+       "eval_steps_per_second": 0.623,
+       "step": 2440
+     },
+     {
+       "epoch": 2.05,
+       "learning_rate": 1.5901639344262295e-05,
+       "loss": 0.0347,
+       "step": 2500
+     },
+     {
+       "epoch": 2.46,
+       "learning_rate": 1.5081967213114754e-05,
+       "loss": 0.0187,
+       "step": 3000
+     },
+     {
+       "epoch": 2.87,
+       "learning_rate": 1.4262295081967214e-05,
+       "loss": 0.0198,
+       "step": 3500
+     },
+     {
+       "epoch": 3.0,
+       "eval_bleu": 0.0,
+       "eval_gen_len": 9.8822,
+       "eval_loss": 0.13768209517002106,
+       "eval_runtime": 49.8051,
+       "eval_samples_per_second": 41.08,
+       "eval_steps_per_second": 0.643,
+       "step": 3660
+     },
+     {
+       "epoch": 3.28,
+       "learning_rate": 1.3442622950819673e-05,
+       "loss": 0.0134,
+       "step": 4000
+     },
+     {
+       "epoch": 3.69,
+       "learning_rate": 1.2622950819672132e-05,
+       "loss": 0.0108,
+       "step": 4500
+     },
+     {
+       "epoch": 4.0,
+       "eval_bleu": 0.0,
+       "eval_gen_len": 9.9233,
+       "eval_loss": 0.15018606185913086,
+       "eval_runtime": 50.7382,
+       "eval_samples_per_second": 40.325,
+       "eval_steps_per_second": 0.631,
+       "step": 4880
+     },
+     {
+       "epoch": 4.1,
+       "learning_rate": 1.1803278688524591e-05,
+       "loss": 0.0099,
+       "step": 5000
+     },
+     {
+       "epoch": 4.51,
+       "learning_rate": 1.0983606557377052e-05,
+       "loss": 0.0065,
+       "step": 5500
+     },
+     {
+       "epoch": 4.92,
+       "learning_rate": 1.0163934426229509e-05,
+       "loss": 0.0067,
+       "step": 6000
+     },
+     {
+       "epoch": 5.0,
+       "eval_bleu": 0.0,
+       "eval_gen_len": 9.8421,
+       "eval_loss": 0.1598789393901825,
+       "eval_runtime": 51.4429,
+       "eval_samples_per_second": 39.772,
+       "eval_steps_per_second": 0.622,
+       "step": 6100
+     },
+     {
+       "epoch": 5.33,
+       "learning_rate": 9.344262295081968e-06,
+       "loss": 0.0051,
+       "step": 6500
+     },
+     {
+       "epoch": 5.74,
+       "learning_rate": 8.524590163934427e-06,
+       "loss": 0.0045,
+       "step": 7000
+     },
+     {
+       "epoch": 6.0,
+       "eval_bleu": 0.0,
+       "eval_gen_len": 9.8827,
+       "eval_loss": 0.16899947822093964,
+       "eval_runtime": 50.0655,
+       "eval_samples_per_second": 40.866,
+       "eval_steps_per_second": 0.639,
+       "step": 7320
+     },
+     {
+       "epoch": 6.15,
+       "learning_rate": 7.704918032786886e-06,
+       "loss": 0.0043,
+       "step": 7500
+     },
+     {
+       "epoch": 6.56,
+       "learning_rate": 6.885245901639345e-06,
+       "loss": 0.0035,
+       "step": 8000
+     },
+     {
+       "epoch": 6.97,
+       "learning_rate": 6.065573770491804e-06,
+       "loss": 0.0036,
+       "step": 8500
+     },
+     {
+       "epoch": 7.0,
+       "eval_bleu": 0.0,
+       "eval_gen_len": 9.8832,
+       "eval_loss": 0.17745506763458252,
+       "eval_runtime": 50.7877,
+       "eval_samples_per_second": 40.285,
+       "eval_steps_per_second": 0.63,
+       "step": 8540
+     },
+     {
+       "epoch": 7.38,
+       "learning_rate": 5.245901639344263e-06,
+       "loss": 0.0028,
+       "step": 9000
+     }
+   ],
+   "max_steps": 12200,
+   "num_train_epochs": 10,
+   "total_flos": 5.951675547814134e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
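
The trainer state stops at step 9000 of max_steps 12200 (10 epochs at 1220 optimizer steps per epoch, with one evaluation logged per epoch). The logged learning rates are consistent with the default linear decay to zero, assuming an initial rate of 2e-05, which is not itself recorded here; a quick check:

# Verify that the logged learning rates follow a linear decay to zero over
# max_steps = 12200. The initial rate of 2e-05 is inferred, not logged above.
initial_lr = 2e-05
max_steps = 12200

def linear_lr(step):
    return initial_lr * (max_steps - step) / max_steps

for step, logged in [(500, 1.918032786885246e-05),
                     (6000, 1.0163934426229509e-05),
                     (9000, 5.245901639344263e-06)]:
    assert abs(linear_lr(step) - logged) < 1e-12
print("log_history learning rates match a linear schedule from 2e-05 over 12200 steps")
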
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e2dbcc7f012329fe991ba44783e188cc8f597ec80ff4744a0259bdb6e0c316a
+ size 4155