Ihor committed on
Commit
9f43e8d
1 Parent(s): 1512483

Upload folder using huggingface_hub

added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "<<LABEL>>": 32000,
+ "<<SEP>>": 32001
+ }
config.json ADDED
@@ -0,0 +1,112 @@
+ {
+ "_name_or_path": "models/gliclass/llama/last",
+ "architecture_type": "uni-encoder",
+ "architectures": [
+ "GLiClassModel"
+ ],
+ "class_token_index": 32000,
+ "contrastive_loss_coef": 0.0,
+ "encoder_config": {
+ "_name_or_path": "knowledgator/Sheared-LLaMA-encoder-1.3B",
+ "add_cross_attention": false,
+ "architectures": [
+ "LlamaBiModel"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": 1,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": 2,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 5504,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "max_position_embeddings": 4096,
+ "min_length": 0,
+ "model_type": "llama",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 16,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 16,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 0,
+ "prefix": null,
+ "pretraining_tp": 1,
+ "problem_type": null,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": false,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": "bfloat16",
+ "torchscript": false,
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "use_cache": true,
+ "vocab_size": 32002
+ },
+ "encoder_model_name": "knowledgator/Sheared-LLaMA-encoder-1.3B",
+ "extract_text_features": true,
+ "focal_loss_alpha": -1,
+ "focal_loss_gamma": 2,
+ "hidden_size": 2048,
+ "ignore_index": -100,
+ "initializer_range": 0.03,
+ "logit_scale_init_value": 2.6592,
+ "max_num_classes": 25,
+ "model_type": "GLiClass",
+ "normalize_features": false,
+ "pooling_strategy": "avg",
+ "problem_type": "multi_label_classification",
+ "projector_hidden_act": "gelu",
+ "prompt_first": true,
+ "scorer_type": "simple",
+ "squeeze_layers": false,
+ "text_token_index": 32001,
+ "torch_dtype": "float32",
+ "transformers_version": "4.40.1",
+ "use_lstm": false,
+ "vocab_size": 32002
+ }
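
The config above describes a uni-encoder GLiClass model: a bidirectional Sheared-LLaMA-1.3B encoder (`LlamaBiModel`) whose candidate labels are marked with the added `<<LABEL>>` token (index 32000) and separated from the input text by `<<SEP>>` (index 32001), with average pooling and a multi-label classification head. As a rough orientation only, here is a minimal usage sketch; it assumes the knowledgator `gliclass` package is installed, and the repository path is a placeholder since the repo id is not part of this commit.

```python
# Sketch only: assumes `pip install gliclass`; the repo path below is hypothetical.
from gliclass import GLiClassModel, ZeroShotClassificationPipeline
from transformers import AutoTokenizer

repo = "path/to/this/repo"  # placeholder, not defined in this commit
model = GLiClassModel.from_pretrained(repo)
tokenizer = AutoTokenizer.from_pretrained(repo)

# problem_type is "multi_label_classification", so each label is scored independently
pipeline = ZeroShotClassificationPipeline(
    model, tokenizer, classification_type="multi-label", device="cpu"
)

text = "The new graphics card doubles ray-tracing performance."
labels = ["technology", "sports", "finance"]
for result in pipeline(text, labels, threshold=0.5)[0]:
    print(result["label"], round(result["score"], 3))
```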
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1460ded675dd7d69f0e7448b5d636a776911bc5ea77a8ff1be3f8eec6a42bf7
+ size 4984335468
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab8e8b4ee62428bb1f66b2b1c1763ffe6e5b638ea8e1bb3ff3fa86342bd8faa3
+ size 202401016
model.safetensors.index.json ADDED
@@ -0,0 +1,234 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 5186707460
4
+ },
5
+ "weight_map": {
6
+ "model.classes_projector.linear_1.bias": "model-00001-of-00002.safetensors",
7
+ "model.classes_projector.linear_1.weight": "model-00001-of-00002.safetensors",
8
+ "model.classes_projector.linear_2.bias": "model-00001-of-00002.safetensors",
9
+ "model.classes_projector.linear_2.weight": "model-00001-of-00002.safetensors",
10
+ "model.encoder_model.embed_tokens.weight": "model-00001-of-00002.safetensors",
11
+ "model.encoder_model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
12
+ "model.encoder_model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
13
+ "model.encoder_model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
14
+ "model.encoder_model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
15
+ "model.encoder_model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
16
+ "model.encoder_model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
17
+ "model.encoder_model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
18
+ "model.encoder_model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
19
+ "model.encoder_model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
20
+ "model.encoder_model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
21
+ "model.encoder_model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
22
+ "model.encoder_model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
23
+ "model.encoder_model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
24
+ "model.encoder_model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
25
+ "model.encoder_model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
26
+ "model.encoder_model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
27
+ "model.encoder_model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
28
+ "model.encoder_model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
29
+ "model.encoder_model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
30
+ "model.encoder_model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
31
+ "model.encoder_model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
32
+ "model.encoder_model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
33
+ "model.encoder_model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
34
+ "model.encoder_model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
35
+ "model.encoder_model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
36
+ "model.encoder_model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
37
+ "model.encoder_model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
38
+ "model.encoder_model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
39
+ "model.encoder_model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
40
+ "model.encoder_model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
41
+ "model.encoder_model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
42
+ "model.encoder_model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
43
+ "model.encoder_model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
44
+ "model.encoder_model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
45
+ "model.encoder_model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
46
+ "model.encoder_model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
47
+ "model.encoder_model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
48
+ "model.encoder_model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
49
+ "model.encoder_model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
50
+ "model.encoder_model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
51
+ "model.encoder_model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
52
+ "model.encoder_model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
53
+ "model.encoder_model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
54
+ "model.encoder_model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
55
+ "model.encoder_model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
56
+ "model.encoder_model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
57
+ "model.encoder_model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
58
+ "model.encoder_model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
59
+ "model.encoder_model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
60
+ "model.encoder_model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
61
+ "model.encoder_model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
62
+ "model.encoder_model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
63
+ "model.encoder_model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
64
+ "model.encoder_model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
65
+ "model.encoder_model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
66
+ "model.encoder_model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
67
+ "model.encoder_model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
68
+ "model.encoder_model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
69
+ "model.encoder_model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
70
+ "model.encoder_model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
71
+ "model.encoder_model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
72
+ "model.encoder_model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
73
+ "model.encoder_model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
74
+ "model.encoder_model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
75
+ "model.encoder_model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
76
+ "model.encoder_model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
77
+ "model.encoder_model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
78
+ "model.encoder_model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
79
+ "model.encoder_model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
80
+ "model.encoder_model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
81
+ "model.encoder_model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
82
+ "model.encoder_model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
83
+ "model.encoder_model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
84
+ "model.encoder_model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
85
+ "model.encoder_model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
86
+ "model.encoder_model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
87
+ "model.encoder_model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
88
+ "model.encoder_model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
89
+ "model.encoder_model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
90
+ "model.encoder_model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
91
+ "model.encoder_model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
92
+ "model.encoder_model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
93
+ "model.encoder_model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
94
+ "model.encoder_model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
95
+ "model.encoder_model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
96
+ "model.encoder_model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
97
+ "model.encoder_model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
98
+ "model.encoder_model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
99
+ "model.encoder_model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
100
+ "model.encoder_model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
101
+ "model.encoder_model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
102
+ "model.encoder_model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
103
+ "model.encoder_model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
104
+ "model.encoder_model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
105
+ "model.encoder_model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
106
+ "model.encoder_model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
107
+ "model.encoder_model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
108
+ "model.encoder_model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
109
+ "model.encoder_model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
110
+ "model.encoder_model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
111
+ "model.encoder_model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
112
+ "model.encoder_model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
113
+ "model.encoder_model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
114
+ "model.encoder_model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
115
+ "model.encoder_model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
116
+ "model.encoder_model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
117
+ "model.encoder_model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
118
+ "model.encoder_model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
119
+ "model.encoder_model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
120
+ "model.encoder_model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
121
+ "model.encoder_model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
122
+ "model.encoder_model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
123
+ "model.encoder_model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
124
+ "model.encoder_model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
125
+ "model.encoder_model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
126
+ "model.encoder_model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
127
+ "model.encoder_model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
128
+ "model.encoder_model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
129
+ "model.encoder_model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
130
+ "model.encoder_model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
131
+ "model.encoder_model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
132
+ "model.encoder_model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
133
+ "model.encoder_model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
134
+ "model.encoder_model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
135
+ "model.encoder_model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
136
+ "model.encoder_model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
137
+ "model.encoder_model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
138
+ "model.encoder_model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
139
+ "model.encoder_model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
140
+ "model.encoder_model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
141
+ "model.encoder_model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
142
+ "model.encoder_model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
143
+ "model.encoder_model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
144
+ "model.encoder_model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
145
+ "model.encoder_model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
146
+ "model.encoder_model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
147
+ "model.encoder_model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
148
+ "model.encoder_model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
149
+ "model.encoder_model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
150
+ "model.encoder_model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
151
+ "model.encoder_model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
152
+ "model.encoder_model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
153
+ "model.encoder_model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
154
+ "model.encoder_model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
155
+ "model.encoder_model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
156
+ "model.encoder_model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
157
+ "model.encoder_model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
158
+ "model.encoder_model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
159
+ "model.encoder_model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
160
+ "model.encoder_model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
161
+ "model.encoder_model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
162
+ "model.encoder_model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
163
+ "model.encoder_model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
164
+ "model.encoder_model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
165
+ "model.encoder_model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
166
+ "model.encoder_model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
167
+ "model.encoder_model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
168
+ "model.encoder_model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
169
+ "model.encoder_model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
170
+ "model.encoder_model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
171
+ "model.encoder_model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
172
+ "model.encoder_model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
173
+ "model.encoder_model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
174
+ "model.encoder_model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
175
+ "model.encoder_model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
176
+ "model.encoder_model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
177
+ "model.encoder_model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
178
+ "model.encoder_model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
179
+ "model.encoder_model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
180
+ "model.encoder_model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
181
+ "model.encoder_model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
182
+ "model.encoder_model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
183
+ "model.encoder_model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
184
+ "model.encoder_model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
185
+ "model.encoder_model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
186
+ "model.encoder_model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
187
+ "model.encoder_model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
188
+ "model.encoder_model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
189
+ "model.encoder_model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
190
+ "model.encoder_model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
191
+ "model.encoder_model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
192
+ "model.encoder_model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
193
+ "model.encoder_model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
194
+ "model.encoder_model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
195
+ "model.encoder_model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
196
+ "model.encoder_model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
197
+ "model.encoder_model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
198
+ "model.encoder_model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
199
+ "model.encoder_model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
200
+ "model.encoder_model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
201
+ "model.encoder_model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
202
+ "model.encoder_model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
203
+ "model.encoder_model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
204
+ "model.encoder_model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
205
+ "model.encoder_model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
206
+ "model.encoder_model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
207
+ "model.encoder_model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
208
+ "model.encoder_model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
209
+ "model.encoder_model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
210
+ "model.encoder_model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
211
+ "model.encoder_model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
212
+ "model.encoder_model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
213
+ "model.encoder_model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
214
+ "model.encoder_model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
215
+ "model.encoder_model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
216
+ "model.encoder_model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
217
+ "model.encoder_model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
218
+ "model.encoder_model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
219
+ "model.encoder_model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
220
+ "model.encoder_model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
221
+ "model.encoder_model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
222
+ "model.encoder_model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
223
+ "model.encoder_model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
224
+ "model.encoder_model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
225
+ "model.encoder_model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
226
+ "model.encoder_model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
227
+ "model.encoder_model.norm.weight": "model-00002-of-00002.safetensors",
228
+ "model.logit_scale": "model-00001-of-00002.safetensors",
229
+ "model.text_projector.linear_1.bias": "model-00001-of-00002.safetensors",
230
+ "model.text_projector.linear_1.weight": "model-00001-of-00002.safetensors",
231
+ "model.text_projector.linear_2.bias": "model-00001-of-00002.safetensors",
232
+ "model.text_projector.linear_2.weight": "model-00001-of-00002.safetensors"
233
+ }
234
+ }
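
The checkpoint is split into two safetensors shards, and this index maps every tensor name to its shard: all weights live in `model-00001-of-00002.safetensors` except layer 23 and the final norm, which sit in the second shard, with `total_size` reported as 5186707460 bytes (~5.19 GB). A minimal sketch for inspecting the index after downloading the repository locally (file path assumed):

```python
# Sketch: inspect the shard index with the standard library only.
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # 5186707460 bytes (~5.19 GB)
print(Counter(index["weight_map"].values()))  # number of tensors per shard file
```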
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "_",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,64 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "29918": {
+ "content": "_",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32000": {
+ "content": "<<LABEL>>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32001": {
+ "content": "<<SEP>>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "mask_token": "_",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
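
The tokenizer is a standard LlamaTokenizer with `<<LABEL>>` and `<<SEP>>` registered as added special tokens at ids 32000 and 32001, which is why both the tokenizer and `config.json` report a vocabulary of 32002 entries. A quick sanity check with `transformers` (repository path is a placeholder, since the repo id is not part of this commit):

```python
# Sketch: verify the added special tokens resolve to the ids used in config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path
print(tokenizer.convert_tokens_to_ids("<<LABEL>>"))  # expected: 32000
print(tokenizer.convert_tokens_to_ids("<<SEP>>"))    # expected: 32001
print(len(tokenizer))                                # expected: 32002
```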
trainer_state.json ADDED
@@ -0,0 +1,1293 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.0788133053640996,
5
+ "eval_steps": 500,
6
+ "global_step": 18000,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.005993407252022775,
13
+ "grad_norm": 0.8397009372711182,
14
+ "learning_rate": 5.991611743559017e-07,
15
+ "loss": 0.0366,
16
+ "step": 100
17
+ },
18
+ {
19
+ "epoch": 0.01198681450404555,
20
+ "grad_norm": 0.7360026240348816,
21
+ "learning_rate": 1.1983223487118035e-06,
22
+ "loss": 0.0143,
23
+ "step": 200
24
+ },
25
+ {
26
+ "epoch": 0.017980221756068324,
27
+ "grad_norm": 0.26396483182907104,
28
+ "learning_rate": 1.7974835230677055e-06,
29
+ "loss": 0.0091,
30
+ "step": 300
31
+ },
32
+ {
33
+ "epoch": 0.0239736290080911,
34
+ "grad_norm": 0.08779273182153702,
35
+ "learning_rate": 2.396644697423607e-06,
36
+ "loss": 0.0059,
37
+ "step": 400
38
+ },
39
+ {
40
+ "epoch": 0.029967036260113874,
41
+ "grad_norm": 0.5255675911903381,
42
+ "learning_rate": 2.995805871779509e-06,
43
+ "loss": 0.0059,
44
+ "step": 500
45
+ },
46
+ {
47
+ "epoch": 0.03596044351213665,
48
+ "grad_norm": 0.2772226929664612,
49
+ "learning_rate": 3.594967046135411e-06,
50
+ "loss": 0.005,
51
+ "step": 600
52
+ },
53
+ {
54
+ "epoch": 0.041953850764159424,
55
+ "grad_norm": 0.29560720920562744,
56
+ "learning_rate": 4.194128220491313e-06,
57
+ "loss": 0.0041,
58
+ "step": 700
59
+ },
60
+ {
61
+ "epoch": 0.0479472580161822,
62
+ "grad_norm": 0.4243590235710144,
63
+ "learning_rate": 4.793289394847214e-06,
64
+ "loss": 0.0038,
65
+ "step": 800
66
+ },
67
+ {
68
+ "epoch": 0.05394066526820498,
69
+ "grad_norm": 0.12234604358673096,
70
+ "learning_rate": 5.392450569203116e-06,
71
+ "loss": 0.0033,
72
+ "step": 900
73
+ },
74
+ {
75
+ "epoch": 0.05993407252022775,
76
+ "grad_norm": 0.17332005500793457,
77
+ "learning_rate": 5.991611743559018e-06,
78
+ "loss": 0.0045,
79
+ "step": 1000
80
+ },
81
+ {
82
+ "epoch": 0.06592747977225052,
83
+ "grad_norm": 0.017084548249840736,
84
+ "learning_rate": 6.59077291791492e-06,
85
+ "loss": 0.0031,
86
+ "step": 1100
87
+ },
88
+ {
89
+ "epoch": 0.0719208870242733,
90
+ "grad_norm": 0.04909040033817291,
91
+ "learning_rate": 7.189934092270822e-06,
92
+ "loss": 0.0034,
93
+ "step": 1200
94
+ },
95
+ {
96
+ "epoch": 0.07791429427629608,
97
+ "grad_norm": 0.03835730627179146,
98
+ "learning_rate": 7.789095266626723e-06,
99
+ "loss": 0.0028,
100
+ "step": 1300
101
+ },
102
+ {
103
+ "epoch": 0.08390770152831885,
104
+ "grad_norm": 0.04889771714806557,
105
+ "learning_rate": 8.388256440982625e-06,
106
+ "loss": 0.0028,
107
+ "step": 1400
108
+ },
109
+ {
110
+ "epoch": 0.08990110878034162,
111
+ "grad_norm": 0.1031421571969986,
112
+ "learning_rate": 8.987417615338527e-06,
113
+ "loss": 0.003,
114
+ "step": 1500
115
+ },
116
+ {
117
+ "epoch": 0.0958945160323644,
118
+ "grad_norm": 0.11215908825397491,
119
+ "learning_rate": 9.586578789694428e-06,
120
+ "loss": 0.0027,
121
+ "step": 1600
122
+ },
123
+ {
124
+ "epoch": 0.10188792328438717,
125
+ "grad_norm": 0.1708650439977646,
126
+ "learning_rate": 9.99022112867102e-06,
127
+ "loss": 0.0025,
128
+ "step": 1700
129
+ },
130
+ {
131
+ "epoch": 0.10788133053640996,
132
+ "grad_norm": 0.01850762963294983,
133
+ "learning_rate": 9.958676382448504e-06,
134
+ "loss": 0.0025,
135
+ "step": 1800
136
+ },
137
+ {
138
+ "epoch": 0.11387473778843273,
139
+ "grad_norm": 0.10600468516349792,
140
+ "learning_rate": 9.927131636225988e-06,
141
+ "loss": 0.0025,
142
+ "step": 1900
143
+ },
144
+ {
145
+ "epoch": 0.1198681450404555,
146
+ "grad_norm": 0.16077758371829987,
147
+ "learning_rate": 9.895586890003471e-06,
148
+ "loss": 0.0027,
149
+ "step": 2000
150
+ },
151
+ {
152
+ "epoch": 0.12586155229247828,
153
+ "grad_norm": 0.3142828047275543,
154
+ "learning_rate": 9.864042143780953e-06,
155
+ "loss": 0.0022,
156
+ "step": 2100
157
+ },
158
+ {
159
+ "epoch": 0.13185495954450105,
160
+ "grad_norm": 0.18406708538532257,
161
+ "learning_rate": 9.832497397558437e-06,
162
+ "loss": 0.0025,
163
+ "step": 2200
164
+ },
165
+ {
166
+ "epoch": 0.13784836679652382,
167
+ "grad_norm": 0.10599557310342789,
168
+ "learning_rate": 9.80095265133592e-06,
169
+ "loss": 0.0027,
170
+ "step": 2300
171
+ },
172
+ {
173
+ "epoch": 0.1438417740485466,
174
+ "grad_norm": 0.041681960225105286,
175
+ "learning_rate": 9.769407905113404e-06,
176
+ "loss": 0.0027,
177
+ "step": 2400
178
+ },
179
+ {
180
+ "epoch": 0.1498351813005694,
181
+ "grad_norm": 0.26586103439331055,
182
+ "learning_rate": 9.737863158890888e-06,
183
+ "loss": 0.0031,
184
+ "step": 2500
185
+ },
186
+ {
187
+ "epoch": 0.15582858855259216,
188
+ "grad_norm": 0.1568969488143921,
189
+ "learning_rate": 9.70631841266837e-06,
190
+ "loss": 0.0025,
191
+ "step": 2600
192
+ },
193
+ {
194
+ "epoch": 0.16182199580461493,
195
+ "grad_norm": 0.09259970486164093,
196
+ "learning_rate": 9.674773666445855e-06,
197
+ "loss": 0.0023,
198
+ "step": 2700
199
+ },
200
+ {
201
+ "epoch": 0.1678154030566377,
202
+ "grad_norm": 0.03380216658115387,
203
+ "learning_rate": 9.643228920223337e-06,
204
+ "loss": 0.0022,
205
+ "step": 2800
206
+ },
207
+ {
208
+ "epoch": 0.17380881030866047,
209
+ "grad_norm": 0.18946796655654907,
210
+ "learning_rate": 9.611684174000821e-06,
211
+ "loss": 0.0025,
212
+ "step": 2900
213
+ },
214
+ {
215
+ "epoch": 0.17980221756068324,
216
+ "grad_norm": 0.3344770073890686,
217
+ "learning_rate": 9.580139427778305e-06,
218
+ "loss": 0.0021,
219
+ "step": 3000
220
+ },
221
+ {
222
+ "epoch": 0.18579562481270603,
223
+ "grad_norm": 0.04218849539756775,
224
+ "learning_rate": 9.548594681555787e-06,
225
+ "loss": 0.0024,
226
+ "step": 3100
227
+ },
228
+ {
229
+ "epoch": 0.1917890320647288,
230
+ "grad_norm": 0.0481434129178524,
231
+ "learning_rate": 9.517049935333272e-06,
232
+ "loss": 0.0027,
233
+ "step": 3200
234
+ },
235
+ {
236
+ "epoch": 0.19778243931675157,
237
+ "grad_norm": 0.32030656933784485,
238
+ "learning_rate": 9.485505189110754e-06,
239
+ "loss": 0.0025,
240
+ "step": 3300
241
+ },
242
+ {
243
+ "epoch": 0.20377584656877434,
244
+ "grad_norm": 0.19509385526180267,
245
+ "learning_rate": 9.453960442888238e-06,
246
+ "loss": 0.0022,
247
+ "step": 3400
248
+ },
249
+ {
250
+ "epoch": 0.2097692538207971,
251
+ "grad_norm": 0.08745113760232925,
252
+ "learning_rate": 9.422415696665721e-06,
253
+ "loss": 0.0026,
254
+ "step": 3500
255
+ },
256
+ {
257
+ "epoch": 0.2157626610728199,
258
+ "grad_norm": 0.11743105947971344,
259
+ "learning_rate": 9.390870950443205e-06,
260
+ "loss": 0.0021,
261
+ "step": 3600
262
+ },
263
+ {
264
+ "epoch": 0.22175606832484268,
265
+ "grad_norm": 0.1497587114572525,
266
+ "learning_rate": 9.359326204220689e-06,
267
+ "loss": 0.0026,
268
+ "step": 3700
269
+ },
270
+ {
271
+ "epoch": 0.22774947557686545,
272
+ "grad_norm": 0.07227639853954315,
273
+ "learning_rate": 9.32778145799817e-06,
274
+ "loss": 0.0024,
275
+ "step": 3800
276
+ },
277
+ {
278
+ "epoch": 0.23374288282888822,
279
+ "grad_norm": 0.022099023684859276,
280
+ "learning_rate": 9.296236711775654e-06,
281
+ "loss": 0.0019,
282
+ "step": 3900
283
+ },
284
+ {
285
+ "epoch": 0.239736290080911,
286
+ "grad_norm": 0.09603813290596008,
287
+ "learning_rate": 9.264691965553138e-06,
288
+ "loss": 0.0019,
289
+ "step": 4000
290
+ },
291
+ {
292
+ "epoch": 0.24572969733293376,
293
+ "grad_norm": 0.09311718493700027,
294
+ "learning_rate": 9.233147219330622e-06,
295
+ "loss": 0.002,
296
+ "step": 4100
297
+ },
298
+ {
299
+ "epoch": 0.25172310458495656,
300
+ "grad_norm": 0.06892485171556473,
301
+ "learning_rate": 9.201602473108105e-06,
302
+ "loss": 0.0022,
303
+ "step": 4200
304
+ },
305
+ {
306
+ "epoch": 0.2577165118369793,
307
+ "grad_norm": 0.2696809470653534,
308
+ "learning_rate": 9.170057726885589e-06,
309
+ "loss": 0.0024,
310
+ "step": 4300
311
+ },
312
+ {
313
+ "epoch": 0.2637099190890021,
314
+ "grad_norm": 0.12481023371219635,
315
+ "learning_rate": 9.138512980663071e-06,
316
+ "loss": 0.0021,
317
+ "step": 4400
318
+ },
319
+ {
320
+ "epoch": 0.2697033263410249,
321
+ "grad_norm": 0.029085570946335793,
322
+ "learning_rate": 9.106968234440555e-06,
323
+ "loss": 0.0025,
324
+ "step": 4500
325
+ },
326
+ {
327
+ "epoch": 0.27569673359304764,
328
+ "grad_norm": 0.16772325336933136,
329
+ "learning_rate": 9.075423488218038e-06,
330
+ "loss": 0.0019,
331
+ "step": 4600
332
+ },
333
+ {
334
+ "epoch": 0.28169014084507044,
335
+ "grad_norm": 0.25038984417915344,
336
+ "learning_rate": 9.04387874199552e-06,
337
+ "loss": 0.0022,
338
+ "step": 4700
339
+ },
340
+ {
341
+ "epoch": 0.2876835480970932,
342
+ "grad_norm": 0.009772785007953644,
343
+ "learning_rate": 9.012333995773006e-06,
344
+ "loss": 0.002,
345
+ "step": 4800
346
+ },
347
+ {
348
+ "epoch": 0.293676955349116,
349
+ "grad_norm": 0.10010802745819092,
350
+ "learning_rate": 8.980789249550487e-06,
351
+ "loss": 0.0021,
352
+ "step": 4900
353
+ },
354
+ {
355
+ "epoch": 0.2996703626011388,
356
+ "grad_norm": 0.019169898703694344,
357
+ "learning_rate": 8.949244503327971e-06,
358
+ "loss": 0.0024,
359
+ "step": 5000
360
+ },
361
+ {
362
+ "epoch": 0.3056637698531615,
363
+ "grad_norm": 0.039739012718200684,
364
+ "learning_rate": 8.917699757105455e-06,
365
+ "loss": 0.0022,
366
+ "step": 5100
367
+ },
368
+ {
369
+ "epoch": 0.3116571771051843,
370
+ "grad_norm": 0.20961305499076843,
371
+ "learning_rate": 8.886155010882938e-06,
372
+ "loss": 0.0021,
373
+ "step": 5200
374
+ },
375
+ {
376
+ "epoch": 0.31765058435720706,
377
+ "grad_norm": 0.07605484127998352,
378
+ "learning_rate": 8.854610264660422e-06,
379
+ "loss": 0.002,
380
+ "step": 5300
381
+ },
382
+ {
383
+ "epoch": 0.32364399160922985,
384
+ "grad_norm": 0.01589258573949337,
385
+ "learning_rate": 8.823065518437904e-06,
386
+ "loss": 0.0022,
387
+ "step": 5400
388
+ },
389
+ {
390
+ "epoch": 0.3296373988612526,
391
+ "grad_norm": 0.10248999297618866,
392
+ "learning_rate": 8.791520772215388e-06,
393
+ "loss": 0.0023,
394
+ "step": 5500
395
+ },
396
+ {
397
+ "epoch": 0.3356308061132754,
398
+ "grad_norm": 0.09167122095823288,
399
+ "learning_rate": 8.759976025992871e-06,
400
+ "loss": 0.002,
401
+ "step": 5600
402
+ },
403
+ {
404
+ "epoch": 0.3416242133652982,
405
+ "grad_norm": 0.23392055928707123,
406
+ "learning_rate": 8.728431279770355e-06,
407
+ "loss": 0.0021,
408
+ "step": 5700
409
+ },
410
+ {
411
+ "epoch": 0.34761762061732093,
412
+ "grad_norm": 0.040714360773563385,
413
+ "learning_rate": 8.696886533547839e-06,
414
+ "loss": 0.0025,
415
+ "step": 5800
416
+ },
417
+ {
418
+ "epoch": 0.35361102786934373,
419
+ "grad_norm": 0.184820294380188,
420
+ "learning_rate": 8.665341787325322e-06,
421
+ "loss": 0.0021,
422
+ "step": 5900
423
+ },
424
+ {
425
+ "epoch": 0.3596044351213665,
426
+ "grad_norm": 0.04772236570715904,
427
+ "learning_rate": 8.633797041102804e-06,
428
+ "loss": 0.0022,
429
+ "step": 6000
430
+ },
431
+ {
432
+ "epoch": 0.36559784237338927,
433
+ "grad_norm": 0.12407626956701279,
434
+ "learning_rate": 8.60225229488029e-06,
435
+ "loss": 0.0018,
436
+ "step": 6100
437
+ },
438
+ {
439
+ "epoch": 0.37159124962541207,
440
+ "grad_norm": 0.1552393138408661,
441
+ "learning_rate": 8.570707548657772e-06,
442
+ "loss": 0.0024,
443
+ "step": 6200
444
+ },
445
+ {
446
+ "epoch": 0.3775846568774348,
447
+ "grad_norm": 0.005017109680920839,
448
+ "learning_rate": 8.539162802435255e-06,
449
+ "loss": 0.0022,
450
+ "step": 6300
451
+ },
452
+ {
453
+ "epoch": 0.3835780641294576,
454
+ "grad_norm": 0.00316947465762496,
455
+ "learning_rate": 8.507618056212739e-06,
456
+ "loss": 0.0021,
457
+ "step": 6400
458
+ },
459
+ {
460
+ "epoch": 0.38957147138148035,
461
+ "grad_norm": 0.08644753694534302,
462
+ "learning_rate": 8.476073309990221e-06,
463
+ "loss": 0.0016,
464
+ "step": 6500
465
+ },
466
+ {
467
+ "epoch": 0.39556487863350315,
468
+ "grad_norm": 0.23877011239528656,
469
+ "learning_rate": 8.444528563767705e-06,
470
+ "loss": 0.0023,
471
+ "step": 6600
472
+ },
473
+ {
474
+ "epoch": 0.40155828588552595,
475
+ "grad_norm": 0.12397243827581406,
476
+ "learning_rate": 8.412983817545188e-06,
477
+ "loss": 0.002,
478
+ "step": 6700
479
+ },
480
+ {
481
+ "epoch": 0.4075516931375487,
482
+ "grad_norm": 0.08488207310438156,
483
+ "learning_rate": 8.381439071322672e-06,
484
+ "loss": 0.002,
485
+ "step": 6800
486
+ },
487
+ {
488
+ "epoch": 0.4135451003895715,
489
+ "grad_norm": 0.15658150613307953,
490
+ "learning_rate": 8.349894325100156e-06,
491
+ "loss": 0.0021,
492
+ "step": 6900
493
+ },
494
+ {
495
+ "epoch": 0.4195385076415942,
496
+ "grad_norm": 0.09054456651210785,
497
+ "learning_rate": 8.31834957887764e-06,
498
+ "loss": 0.0022,
499
+ "step": 7000
500
+ },
501
+ {
502
+ "epoch": 0.425531914893617,
503
+ "grad_norm": 0.1383715718984604,
504
+ "learning_rate": 8.286804832655121e-06,
505
+ "loss": 0.0019,
506
+ "step": 7100
507
+ },
508
+ {
509
+ "epoch": 0.4315253221456398,
510
+ "grad_norm": 0.23421403765678406,
511
+ "learning_rate": 8.255260086432605e-06,
512
+ "loss": 0.0021,
513
+ "step": 7200
514
+ },
515
+ {
516
+ "epoch": 0.43751872939766256,
517
+ "grad_norm": 0.07612959295511246,
518
+ "learning_rate": 8.223715340210089e-06,
519
+ "loss": 0.0018,
520
+ "step": 7300
521
+ },
522
+ {
523
+ "epoch": 0.44351213664968536,
524
+ "grad_norm": 0.08813223987817764,
525
+ "learning_rate": 8.192170593987572e-06,
526
+ "loss": 0.0028,
527
+ "step": 7400
528
+ },
529
+ {
530
+ "epoch": 0.4495055439017081,
531
+ "grad_norm": 0.11603320389986038,
532
+ "learning_rate": 8.160625847765056e-06,
533
+ "loss": 0.0021,
534
+ "step": 7500
535
+ },
536
+ {
537
+ "epoch": 0.4554989511537309,
538
+ "grad_norm": 0.06462118774652481,
539
+ "learning_rate": 8.129081101542538e-06,
540
+ "loss": 0.0021,
541
+ "step": 7600
542
+ },
543
+ {
544
+ "epoch": 0.46149235840575364,
545
+ "grad_norm": 0.08253411203622818,
546
+ "learning_rate": 8.097536355320023e-06,
547
+ "loss": 0.0019,
548
+ "step": 7700
549
+ },
550
+ {
551
+ "epoch": 0.46748576565777644,
552
+ "grad_norm": 0.017711922526359558,
553
+ "learning_rate": 8.065991609097505e-06,
554
+ "loss": 0.0018,
555
+ "step": 7800
556
+ },
557
+ {
558
+ "epoch": 0.47347917290979924,
559
+ "grad_norm": 0.16423271596431732,
560
+ "learning_rate": 8.034446862874989e-06,
561
+ "loss": 0.0021,
562
+ "step": 7900
563
+ },
564
+ {
565
+ "epoch": 0.479472580161822,
566
+ "grad_norm": 0.17104622721672058,
567
+ "learning_rate": 8.002902116652473e-06,
568
+ "loss": 0.0022,
569
+ "step": 8000
570
+ },
571
+ {
572
+ "epoch": 0.4854659874138448,
573
+ "grad_norm": 0.11236003786325455,
574
+ "learning_rate": 7.971357370429955e-06,
575
+ "loss": 0.002,
576
+ "step": 8100
577
+ },
578
+ {
579
+ "epoch": 0.4914593946658675,
580
+ "grad_norm": 0.004910625517368317,
581
+ "learning_rate": 7.93981262420744e-06,
582
+ "loss": 0.0017,
583
+ "step": 8200
584
+ },
585
+ {
586
+ "epoch": 0.4974528019178903,
587
+ "grad_norm": 0.015166868455708027,
588
+ "learning_rate": 7.908267877984922e-06,
589
+ "loss": 0.0016,
590
+ "step": 8300
591
+ },
592
+ {
593
+ "epoch": 0.5034462091699131,
594
+ "grad_norm": 0.04219336435198784,
595
+ "learning_rate": 7.876723131762406e-06,
596
+ "loss": 0.0019,
597
+ "step": 8400
598
+ },
599
+ {
600
+ "epoch": 0.5094396164219359,
601
+ "grad_norm": 0.08096965402364731,
602
+ "learning_rate": 7.84517838553989e-06,
603
+ "loss": 0.002,
604
+ "step": 8500
605
+ },
606
+ {
607
+ "epoch": 0.5154330236739586,
608
+ "grad_norm": 0.27304044365882874,
609
+ "learning_rate": 7.813633639317373e-06,
610
+ "loss": 0.002,
611
+ "step": 8600
612
+ },
613
+ {
614
+ "epoch": 0.5214264309259814,
615
+ "grad_norm": 0.023843977600336075,
616
+ "learning_rate": 7.782088893094857e-06,
617
+ "loss": 0.0021,
618
+ "step": 8700
619
+ },
620
+ {
621
+ "epoch": 0.5274198381780042,
622
+ "grad_norm": 0.06996838003396988,
623
+ "learning_rate": 7.750544146872338e-06,
624
+ "loss": 0.0016,
625
+ "step": 8800
626
+ },
627
+ {
628
+ "epoch": 0.533413245430027,
629
+ "grad_norm": 0.09238845109939575,
630
+ "learning_rate": 7.718999400649822e-06,
631
+ "loss": 0.0017,
632
+ "step": 8900
633
+ },
634
+ {
635
+ "epoch": 0.5394066526820498,
636
+ "grad_norm": 0.031245483085513115,
637
+ "learning_rate": 7.687454654427306e-06,
638
+ "loss": 0.0019,
639
+ "step": 9000
640
+ },
641
+ {
642
+ "epoch": 0.5454000599340725,
643
+ "grad_norm": 0.02232646569609642,
644
+ "learning_rate": 7.65590990820479e-06,
645
+ "loss": 0.0022,
646
+ "step": 9100
647
+ },
648
+ {
649
+ "epoch": 0.5513934671860953,
650
+ "grad_norm": 0.18810293078422546,
651
+ "learning_rate": 7.624365161982272e-06,
652
+ "loss": 0.002,
653
+ "step": 9200
654
+ },
655
+ {
656
+ "epoch": 0.5573868744381181,
657
+ "grad_norm": 0.04845254495739937,
658
+ "learning_rate": 7.592820415759756e-06,
659
+ "loss": 0.0021,
660
+ "step": 9300
661
+ },
662
+ {
663
+ "epoch": 0.5633802816901409,
664
+ "grad_norm": 0.12073975801467896,
665
+ "learning_rate": 7.561275669537239e-06,
666
+ "loss": 0.0021,
667
+ "step": 9400
668
+ },
669
+ {
670
+ "epoch": 0.5693736889421637,
671
+ "grad_norm": 0.03330647572875023,
672
+ "learning_rate": 7.529730923314722e-06,
673
+ "loss": 0.002,
674
+ "step": 9500
675
+ },
676
+ {
677
+ "epoch": 0.5753670961941864,
678
+ "grad_norm": 0.23292703926563263,
679
+ "learning_rate": 7.498186177092206e-06,
680
+ "loss": 0.0018,
681
+ "step": 9600
682
+ },
683
+ {
684
+ "epoch": 0.5813605034462092,
685
+ "grad_norm": 0.3227817118167877,
686
+ "learning_rate": 7.466641430869689e-06,
687
+ "loss": 0.0017,
688
+ "step": 9700
689
+ },
690
+ {
691
+ "epoch": 0.587353910698232,
692
+ "grad_norm": 0.03530238941311836,
693
+ "learning_rate": 7.4350966846471726e-06,
694
+ "loss": 0.0023,
695
+ "step": 9800
696
+ },
697
+ {
698
+ "epoch": 0.5933473179502547,
699
+ "grad_norm": 0.1631837785243988,
700
+ "learning_rate": 7.403551938424655e-06,
701
+ "loss": 0.0019,
702
+ "step": 9900
703
+ },
704
+ {
705
+ "epoch": 0.5993407252022775,
706
+ "grad_norm": 0.11341429501771927,
707
+ "learning_rate": 7.37200719220214e-06,
708
+ "loss": 0.0018,
709
+ "step": 10000
710
+ },
711
+ {
712
+ "epoch": 0.6053341324543002,
713
+ "grad_norm": 0.19524067640304565,
714
+ "learning_rate": 7.340462445979623e-06,
715
+ "loss": 0.0021,
716
+ "step": 10100
717
+ },
718
+ {
719
+ "epoch": 0.611327539706323,
720
+ "grad_norm": 0.058198366314172745,
721
+ "learning_rate": 7.308917699757106e-06,
722
+ "loss": 0.0018,
723
+ "step": 10200
724
+ },
725
+ {
726
+ "epoch": 0.6173209469583458,
727
+ "grad_norm": 0.02788078971207142,
728
+ "learning_rate": 7.277372953534589e-06,
729
+ "loss": 0.0014,
730
+ "step": 10300
731
+ },
732
+ {
733
+ "epoch": 0.6233143542103686,
734
+ "grad_norm": 0.07168685644865036,
735
+ "learning_rate": 7.245828207312072e-06,
736
+ "loss": 0.0017,
737
+ "step": 10400
738
+ },
739
+ {
740
+ "epoch": 0.6293077614623914,
741
+ "grad_norm": 0.07542666047811508,
742
+ "learning_rate": 7.2142834610895565e-06,
743
+ "loss": 0.0022,
744
+ "step": 10500
745
+ },
746
+ {
747
+ "epoch": 0.6353011687144141,
748
+ "grad_norm": 0.1050957664847374,
749
+ "learning_rate": 7.182738714867039e-06,
750
+ "loss": 0.0017,
751
+ "step": 10600
752
+ },
753
+ {
754
+ "epoch": 0.6412945759664369,
755
+ "grad_norm": 0.02330237440764904,
756
+ "learning_rate": 7.151193968644523e-06,
757
+ "loss": 0.002,
758
+ "step": 10700
759
+ },
760
+ {
761
+ "epoch": 0.6472879832184597,
762
+ "grad_norm": 0.019814783707261086,
763
+ "learning_rate": 7.119649222422006e-06,
764
+ "loss": 0.0019,
765
+ "step": 10800
766
+ },
767
+ {
768
+ "epoch": 0.6532813904704825,
769
+ "grad_norm": 0.041212160140275955,
770
+ "learning_rate": 7.08810447619949e-06,
771
+ "loss": 0.0022,
772
+ "step": 10900
773
+ },
774
+ {
775
+ "epoch": 0.6592747977225052,
776
+ "grad_norm": 0.104148730635643,
777
+ "learning_rate": 7.056559729976973e-06,
778
+ "loss": 0.0017,
779
+ "step": 11000
780
+ },
781
+ {
782
+ "epoch": 0.665268204974528,
783
+ "grad_norm": 0.060578759759664536,
784
+ "learning_rate": 7.025014983754457e-06,
785
+ "loss": 0.0019,
786
+ "step": 11100
787
+ },
788
+ {
789
+ "epoch": 0.6712616122265508,
790
+ "grad_norm": 0.014108662493526936,
791
+ "learning_rate": 6.99347023753194e-06,
792
+ "loss": 0.002,
793
+ "step": 11200
794
+ },
795
+ {
796
+ "epoch": 0.6772550194785736,
797
+ "grad_norm": 0.06860730797052383,
798
+ "learning_rate": 6.9619254913094224e-06,
799
+ "loss": 0.0018,
800
+ "step": 11300
801
+ },
802
+ {
803
+ "epoch": 0.6832484267305964,
804
+ "grad_norm": 0.2818455696105957,
805
+ "learning_rate": 6.930380745086906e-06,
806
+ "loss": 0.0015,
807
+ "step": 11400
808
+ },
809
+ {
810
+ "epoch": 0.6892418339826191,
811
+ "grad_norm": 0.09976188093423843,
812
+ "learning_rate": 6.89883599886439e-06,
813
+ "loss": 0.0017,
814
+ "step": 11500
815
+ },
816
+ {
817
+ "epoch": 0.6952352412346419,
818
+ "grad_norm": 0.04988027364015579,
819
+ "learning_rate": 6.8672912526418734e-06,
820
+ "loss": 0.0016,
821
+ "step": 11600
822
+ },
823
+ {
824
+ "epoch": 0.7012286484866647,
825
+ "grad_norm": 0.061295535415410995,
826
+ "learning_rate": 6.835746506419356e-06,
827
+ "loss": 0.0016,
828
+ "step": 11700
829
+ },
830
+ {
831
+ "epoch": 0.7072220557386875,
832
+ "grad_norm": 0.04820416495203972,
833
+ "learning_rate": 6.80420176019684e-06,
834
+ "loss": 0.0017,
835
+ "step": 11800
836
+ },
837
+ {
838
+ "epoch": 0.7132154629907103,
839
+ "grad_norm": 0.08933009207248688,
840
+ "learning_rate": 6.772657013974323e-06,
841
+ "loss": 0.002,
842
+ "step": 11900
843
+ },
844
+ {
845
+ "epoch": 0.719208870242733,
846
+ "grad_norm": 0.057753268629312515,
847
+ "learning_rate": 6.7411122677518055e-06,
848
+ "loss": 0.0018,
849
+ "step": 12000
850
+ },
851
+ {
852
+ "epoch": 0.7252022774947557,
853
+ "grad_norm": 0.020321357995271683,
854
+ "learning_rate": 6.70956752152929e-06,
855
+ "loss": 0.0017,
856
+ "step": 12100
857
+ },
858
+ {
859
+ "epoch": 0.7311956847467785,
860
+ "grad_norm": 0.258957177400589,
861
+ "learning_rate": 6.678022775306773e-06,
862
+ "loss": 0.0019,
863
+ "step": 12200
864
+ },
865
+ {
866
+ "epoch": 0.7371890919988013,
867
+ "grad_norm": 0.1562880277633667,
868
+ "learning_rate": 6.6464780290842565e-06,
869
+ "loss": 0.002,
870
+ "step": 12300
871
+ },
872
+ {
873
+ "epoch": 0.7431824992508241,
874
+ "grad_norm": 0.0703672245144844,
875
+ "learning_rate": 6.614933282861739e-06,
876
+ "loss": 0.0018,
877
+ "step": 12400
878
+ },
879
+ {
880
+ "epoch": 0.7491759065028468,
881
+ "grad_norm": 0.015919741243124008,
882
+ "learning_rate": 6.583388536639224e-06,
883
+ "loss": 0.0018,
884
+ "step": 12500
885
+ },
886
+ {
887
+ "epoch": 0.7551693137548696,
888
+ "grad_norm": 0.06606917828321457,
889
+ "learning_rate": 6.551843790416707e-06,
890
+ "loss": 0.0022,
891
+ "step": 12600
892
+ },
893
+ {
894
+ "epoch": 0.7611627210068924,
895
+ "grad_norm": 0.1327201873064041,
896
+ "learning_rate": 6.52029904419419e-06,
897
+ "loss": 0.002,
898
+ "step": 12700
899
+ },
900
+ {
901
+ "epoch": 0.7671561282589152,
902
+ "grad_norm": 0.10167068988084793,
903
+ "learning_rate": 6.488754297971673e-06,
904
+ "loss": 0.0018,
905
+ "step": 12800
906
+ },
907
+ {
908
+ "epoch": 0.773149535510938,
909
+ "grad_norm": 0.20014306902885437,
910
+ "learning_rate": 6.457209551749156e-06,
911
+ "loss": 0.0019,
912
+ "step": 12900
913
+ },
914
+ {
915
+ "epoch": 0.7791429427629607,
916
+ "grad_norm": 0.10611408203840256,
917
+ "learning_rate": 6.4256648055266405e-06,
918
+ "loss": 0.0016,
919
+ "step": 13000
920
+ },
921
+ {
922
+ "epoch": 0.7851363500149835,
923
+ "grad_norm": 0.004227208439260721,
924
+ "learning_rate": 6.394120059304123e-06,
925
+ "loss": 0.0018,
926
+ "step": 13100
927
+ },
928
+ {
929
+ "epoch": 0.7911297572670063,
930
+ "grad_norm": 0.04251255840063095,
931
+ "learning_rate": 6.362575313081607e-06,
932
+ "loss": 0.0022,
933
+ "step": 13200
934
+ },
935
+ {
936
+ "epoch": 0.7971231645190291,
937
+ "grad_norm": 0.09611974656581879,
938
+ "learning_rate": 6.33103056685909e-06,
939
+ "loss": 0.0019,
940
+ "step": 13300
941
+ },
942
+ {
943
+ "epoch": 0.8031165717710519,
944
+ "grad_norm": 0.060009848326444626,
945
+ "learning_rate": 6.299485820636574e-06,
946
+ "loss": 0.0019,
947
+ "step": 13400
948
+ },
949
+ {
950
+ "epoch": 0.8091099790230746,
951
+ "grad_norm": 0.027135098353028297,
952
+ "learning_rate": 6.267941074414057e-06,
953
+ "loss": 0.0016,
954
+ "step": 13500
955
+ },
956
+ {
957
+ "epoch": 0.8151033862750974,
958
+ "grad_norm": 0.09115968644618988,
959
+ "learning_rate": 6.236396328191541e-06,
960
+ "loss": 0.0017,
961
+ "step": 13600
962
+ },
963
+ {
964
+ "epoch": 0.8210967935271202,
965
+ "grad_norm": 0.3819001317024231,
966
+ "learning_rate": 6.204851581969024e-06,
967
+ "loss": 0.0019,
968
+ "step": 13700
969
+ },
970
+ {
971
+ "epoch": 0.827090200779143,
972
+ "grad_norm": 0.07268409430980682,
973
+ "learning_rate": 6.173306835746506e-06,
974
+ "loss": 0.002,
975
+ "step": 13800
976
+ },
977
+ {
978
+ "epoch": 0.8330836080311658,
979
+ "grad_norm": 0.1490897685289383,
980
+ "learning_rate": 6.14176208952399e-06,
981
+ "loss": 0.0015,
982
+ "step": 13900
983
+ },
984
+ {
985
+ "epoch": 0.8390770152831885,
986
+ "grad_norm": 0.07468798011541367,
987
+ "learning_rate": 6.110217343301474e-06,
988
+ "loss": 0.0017,
989
+ "step": 14000
990
+ },
991
+ {
992
+ "epoch": 0.8450704225352113,
993
+ "grad_norm": 0.045000866055488586,
994
+ "learning_rate": 6.078672597078957e-06,
995
+ "loss": 0.0019,
996
+ "step": 14100
997
+ },
998
+ {
999
+ "epoch": 0.851063829787234,
1000
+ "grad_norm": 0.22245222330093384,
1001
+ "learning_rate": 6.04712785085644e-06,
1002
+ "loss": 0.0015,
1003
+ "step": 14200
1004
+ },
1005
+ {
1006
+ "epoch": 0.8570572370392568,
1007
+ "grad_norm": 0.09135129302740097,
1008
+ "learning_rate": 6.015583104633924e-06,
1009
+ "loss": 0.002,
1010
+ "step": 14300
1011
+ },
1012
+ {
1013
+ "epoch": 0.8630506442912796,
1014
+ "grad_norm": 0.043701499700546265,
1015
+ "learning_rate": 5.984038358411407e-06,
1016
+ "loss": 0.0017,
1017
+ "step": 14400
1018
+ },
1019
+ {
1020
+ "epoch": 0.8690440515433023,
1021
+ "grad_norm": 0.1364869773387909,
1022
+ "learning_rate": 5.9524936121888895e-06,
1023
+ "loss": 0.0019,
1024
+ "step": 14500
1025
+ },
1026
+ {
1027
+ "epoch": 0.8750374587953251,
1028
+ "grad_norm": 0.08669265359640121,
1029
+ "learning_rate": 5.920948865966374e-06,
1030
+ "loss": 0.002,
1031
+ "step": 14600
1032
+ },
1033
+ {
1034
+ "epoch": 0.8810308660473479,
1035
+ "grad_norm": 0.00844608899205923,
1036
+ "learning_rate": 5.889404119743857e-06,
1037
+ "loss": 0.0016,
1038
+ "step": 14700
1039
+ },
1040
+ {
1041
+ "epoch": 0.8870242732993707,
1042
+ "grad_norm": 0.027935262769460678,
1043
+ "learning_rate": 5.8578593735213405e-06,
1044
+ "loss": 0.0018,
1045
+ "step": 14800
1046
+ },
1047
+ {
1048
+ "epoch": 0.8930176805513935,
1049
+ "grad_norm": 0.0481196753680706,
1050
+ "learning_rate": 5.826314627298823e-06,
1051
+ "loss": 0.0019,
1052
+ "step": 14900
1053
+ },
1054
+ {
1055
+ "epoch": 0.8990110878034162,
1056
+ "grad_norm": 0.021947329863905907,
1057
+ "learning_rate": 5.794769881076308e-06,
1058
+ "loss": 0.0015,
1059
+ "step": 15000
1060
+ },
1061
+ {
1062
+ "epoch": 0.905004495055439,
1063
+ "grad_norm": 0.08527759462594986,
1064
+ "learning_rate": 5.763225134853791e-06,
1065
+ "loss": 0.0017,
1066
+ "step": 15100
1067
+ },
1068
+ {
1069
+ "epoch": 0.9109979023074618,
1070
+ "grad_norm": 0.021068023517727852,
1071
+ "learning_rate": 5.731680388631274e-06,
1072
+ "loss": 0.0018,
1073
+ "step": 15200
1074
+ },
1075
+ {
1076
+ "epoch": 0.9169913095594846,
1077
+ "grad_norm": 0.08113428950309753,
1078
+ "learning_rate": 5.700135642408757e-06,
1079
+ "loss": 0.0017,
1080
+ "step": 15300
1081
+ },
1082
+ {
1083
+ "epoch": 0.9229847168115073,
1084
+ "grad_norm": 0.10709325969219208,
1085
+ "learning_rate": 5.66859089618624e-06,
1086
+ "loss": 0.0015,
1087
+ "step": 15400
1088
+ },
1089
+ {
1090
+ "epoch": 0.9289781240635301,
1091
+ "grad_norm": 0.08009694516658783,
1092
+ "learning_rate": 5.6370461499637244e-06,
1093
+ "loss": 0.0016,
1094
+ "step": 15500
1095
+ },
1096
+ {
1097
+ "epoch": 0.9349715313155529,
1098
+ "grad_norm": 0.03613545373082161,
1099
+ "learning_rate": 5.605501403741207e-06,
1100
+ "loss": 0.0017,
1101
+ "step": 15600
1102
+ },
1103
+ {
1104
+ "epoch": 0.9409649385675757,
1105
+ "grad_norm": 0.06710252165794373,
1106
+ "learning_rate": 5.573956657518691e-06,
1107
+ "loss": 0.0018,
1108
+ "step": 15700
1109
+ },
1110
+ {
1111
+ "epoch": 0.9469583458195985,
1112
+ "grad_norm": 0.09847810864448547,
1113
+ "learning_rate": 5.542411911296174e-06,
1114
+ "loss": 0.0014,
1115
+ "step": 15800
1116
+ },
1117
+ {
1118
+ "epoch": 0.9529517530716212,
1119
+ "grad_norm": 0.011624569073319435,
1120
+ "learning_rate": 5.510867165073658e-06,
1121
+ "loss": 0.0016,
1122
+ "step": 15900
1123
+ },
1124
+ {
1125
+ "epoch": 0.958945160323644,
1126
+ "grad_norm": 0.06741365045309067,
1127
+ "learning_rate": 5.479322418851141e-06,
1128
+ "loss": 0.0015,
1129
+ "step": 16000
1130
+ },
1131
+ {
1132
+ "epoch": 0.9649385675756668,
1133
+ "grad_norm": 0.021546615287661552,
1134
+ "learning_rate": 5.447777672628625e-06,
1135
+ "loss": 0.0017,
1136
+ "step": 16100
1137
+ },
1138
+ {
1139
+ "epoch": 0.9709319748276896,
1140
+ "grad_norm": 0.1303360015153885,
1141
+ "learning_rate": 5.4162329264061075e-06,
1142
+ "loss": 0.0018,
1143
+ "step": 16200
1144
+ },
1145
+ {
1146
+ "epoch": 0.9769253820797124,
1147
+ "grad_norm": 0.10070718824863434,
1148
+ "learning_rate": 5.38468818018359e-06,
1149
+ "loss": 0.0018,
1150
+ "step": 16300
1151
+ },
1152
+ {
1153
+ "epoch": 0.982918789331735,
1154
+ "grad_norm": 0.08305861055850983,
1155
+ "learning_rate": 5.353143433961074e-06,
1156
+ "loss": 0.0016,
1157
+ "step": 16400
1158
+ },
1159
+ {
1160
+ "epoch": 0.9889121965837578,
1161
+ "grad_norm": 0.007656518369913101,
1162
+ "learning_rate": 5.321598687738557e-06,
1163
+ "loss": 0.0017,
1164
+ "step": 16500
1165
+ },
1166
+ {
1167
+ "epoch": 0.9949056038357806,
1168
+ "grad_norm": 0.0743492990732193,
1169
+ "learning_rate": 5.290053941516041e-06,
1170
+ "loss": 0.0015,
1171
+ "step": 16600
1172
+ },
1173
+ {
1174
+ "epoch": 1.0,
1175
+ "eval_accuracy": 0.8271744263468347,
1176
+ "eval_f1": 0.7498195656860883,
1177
+ "eval_loss": 0.001594877801835537,
1178
+ "eval_precision": 0.6861185445920746,
1179
+ "eval_recall": 0.8271744263468347,
1180
+ "eval_runtime": 1686.0917,
1181
+ "eval_samples_per_second": 8.796,
1182
+ "eval_steps_per_second": 1.1,
1183
+ "step": 16685
1184
+ },
1185
+ {
1186
+ "epoch": 1.0008990110878033,
1187
+ "grad_norm": 0.05216585099697113,
1188
+ "learning_rate": 5.258509195293524e-06,
1189
+ "loss": 0.0015,
1190
+ "step": 16700
1191
+ },
1192
+ {
1193
+ "epoch": 1.0068924183398262,
1194
+ "grad_norm": 0.12606635689735413,
1195
+ "learning_rate": 5.226964449071008e-06,
1196
+ "loss": 0.0011,
1197
+ "step": 16800
1198
+ },
1199
+ {
1200
+ "epoch": 1.012885825591849,
1201
+ "grad_norm": 0.0004606186121236533,
1202
+ "learning_rate": 5.195419702848491e-06,
1203
+ "loss": 0.001,
1204
+ "step": 16900
1205
+ },
1206
+ {
1207
+ "epoch": 1.0188792328438718,
1208
+ "grad_norm": 0.000365409447113052,
1209
+ "learning_rate": 5.1638749566259735e-06,
1210
+ "loss": 0.001,
1211
+ "step": 17000
1212
+ },
1213
+ {
1214
+ "epoch": 1.0248726400958945,
1215
+ "grad_norm": 0.031485725194215775,
1216
+ "learning_rate": 5.132330210403458e-06,
1217
+ "loss": 0.0012,
1218
+ "step": 17100
1219
+ },
1220
+ {
1221
+ "epoch": 1.0308660473479172,
1222
+ "grad_norm": 0.0031660550739616156,
1223
+ "learning_rate": 5.100785464180941e-06,
1224
+ "loss": 0.0011,
1225
+ "step": 17200
1226
+ },
1227
+ {
1228
+ "epoch": 1.0368594545999401,
1229
+ "grad_norm": 0.04788443446159363,
1230
+ "learning_rate": 5.0692407179584244e-06,
1231
+ "loss": 0.001,
1232
+ "step": 17300
1233
+ },
1234
+ {
1235
+ "epoch": 1.0428528618519628,
1236
+ "grad_norm": 0.07966958731412888,
1237
+ "learning_rate": 5.037695971735907e-06,
1238
+ "loss": 0.001,
1239
+ "step": 17400
1240
+ },
1241
+ {
1242
+ "epoch": 1.0488462691039857,
1243
+ "grad_norm": 0.2937103807926178,
1244
+ "learning_rate": 5.006151225513392e-06,
1245
+ "loss": 0.0007,
1246
+ "step": 17500
1247
+ },
1248
+ {
1249
+ "epoch": 1.0548396763560084,
1250
+ "grad_norm": 0.0027551730163395405,
1251
+ "learning_rate": 4.974606479290875e-06,
1252
+ "loss": 0.0007,
1253
+ "step": 17600
1254
+ },
1255
+ {
1256
+ "epoch": 1.060833083608031,
1257
+ "grad_norm": 0.08430271595716476,
1258
+ "learning_rate": 4.943061733068357e-06,
1259
+ "loss": 0.0008,
1260
+ "step": 17700
1261
+ },
1262
+ {
1263
+ "epoch": 1.066826490860054,
1264
+ "grad_norm": 0.24536843597888947,
1265
+ "learning_rate": 4.911516986845841e-06,
1266
+ "loss": 0.0006,
1267
+ "step": 17800
1268
+ },
1269
+ {
1270
+ "epoch": 1.0728198981120767,
1271
+ "grad_norm": 0.040876179933547974,
1272
+ "learning_rate": 4.879972240623325e-06,
1273
+ "loss": 0.0009,
1274
+ "step": 17900
1275
+ },
1276
+ {
1277
+ "epoch": 1.0788133053640996,
1278
+ "grad_norm": 0.0515579879283905,
1279
+ "learning_rate": 4.848427494400808e-06,
1280
+ "loss": 0.0007,
1281
+ "step": 18000
1282
+ }
1283
+ ],
1284
+ "logging_steps": 100,
1285
+ "max_steps": 33370,
1286
+ "num_input_tokens_seen": 0,
1287
+ "num_train_epochs": 2,
1288
+ "save_steps": 1000,
1289
+ "total_flos": 7.597573697465206e+17,
1290
+ "train_batch_size": 8,
1291
+ "trial_name": null,
1292
+ "trial_params": null
1293
+ }
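The entries above are the tail of the training log for this run: one record every 100 optimizer steps (epoch, grad_norm, learning_rate, loss), a single evaluation at epoch 1.0 / step 16685 (accuracy 0.827, F1 0.750, precision 0.686, recall 0.827), and the run settings at the end (33370 max steps over 2 epochs, train batch size 8, logging every 100 steps, a checkpoint every 1000 steps). The logged learning rate drops by roughly 3.15e-10 per step, which is consistent with a linear decay that reaches zero at max_steps = 33370. Below is a minimal sketch for inspecting such a log; it assumes this is the trainer_state.json that transformers.Trainer writes next to each checkpoint, and that filename and path are assumptions rather than something shown in this diff.

import json

# Assumed path: trainer_state.json as saved by transformers.Trainer next to a checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Loss and learning-rate curve, sampled every 10th entry
# (= every 1000 steps, since logging_steps is 100).
for e in train_logs[::10]:
    print(f"step {e['step']:>6}  lr {e['learning_rate']:.3e}  loss {e['loss']:.4f}")

# Per-epoch evaluation metrics (accuracy, F1, precision, recall, runtime).
for e in eval_logs:
    print({k: v for k, v in e.items() if k.startswith("eval_")})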
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d2097cd6fb5dac6d8995986c823c20a197b2b221f4320ad59ee2ca729bd32beb
3
+ size 5048
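training_args.bin is stored with Git LFS, so the commit adds only the pointer file shown above: the spec version, the SHA-256 oid of the real object, and its size (5048 bytes). Once the actual binary has been fetched (for example with git lfs pull, or by downloading it from the repository), the pointer can be checked against it. A minimal sketch follows, assuming the fetched file sits at training_args.bin in the working tree; the path is an assumption.

import hashlib

# Assumed local path of the fetched LFS object.
path = "training_args.bin"

with open(path, "rb") as f:
    data = f.read()

# A Git LFS pointer records the SHA-256 of the object's contents ("oid") and its size in bytes.
expected_oid = "d2097cd6fb5dac6d8995986c823c20a197b2b221f4320ad59ee2ca729bd32beb"
print("size matches:", len(data) == 5048)
print("oid matches:", hashlib.sha256(data).hexdigest() == expected_oid)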