beverlyjfu committed on
Commit c3c7a08
1 Parent(s): 6cb5c05

Training in progress, epoch 1

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
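
The ignored checkpoint-*/ directories are the rolling checkpoints that transformers.Trainer writes under its output_dir at each save point. A minimal sketch of arguments that produce them (the values here are illustrative, not taken from this repo's training run):

from transformers import TrainingArguments

# Trainer saves output_dir/checkpoint-<step>/ at each save event;
# this .gitignore keeps those directories out of the repo.
args = TrainingArguments(output_dir=".", save_strategy="epoch")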
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "wukevin/tcr-bert",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 1536,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 64,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 21,
+   "position_embedding_type": "absolute",
+   "problem_type": "regression",
+   "torch_dtype": "float32",
+   "transformers_version": "4.27.4",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 26
+ }
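
A minimal sketch of loading this checkpoint, assuming the files in this commit are checked out to the current directory (the hub repo id is not shown in this view). With problem_type "regression" and a single label (LABEL_0), the classification head emits one scalar per sequence, which Trainer fits with MSE loss:

import torch
from transformers import BertForSequenceClassification

# Reads config.json and pytorch_model.bin from the checked-out commit.
model = BertForSequenceClassification.from_pretrained(".")
model.eval()

with torch.no_grad():
    # One regression score per input; ids come from the 26-token vocab below
    # (25 = "*" cls, 9 = C, 13 = A, 5 = S, 24 = "|" sep).
    scores = model(input_ids=torch.tensor([[25, 9, 13, 5, 24]])).logits
print(scores.shape)  # (1, 1): a single scalar prediction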
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f18b1ad44520d63444d1ddd465f394c0b42e6cc08d4b17ea5f4fb9f792e2c3b6
+ size 229621365
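
The three lines above are a Git LFS pointer, not the weights themselves: the spec version, the SHA-256 of the payload, and its size (about 230 MB). A minimal sketch of fetching the real file with huggingface_hub; the repo id below is a placeholder, since this commit view does not show it:

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="beverlyjfu/tcr-bert-finetuned",  # hypothetical repo id
    filename="pytorch_model.bin",
    revision="c3c7a08",  # the commit shown above
)
print(path)  # local cache path to the 229,621,365-byte weight file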
runs/Apr07_03-24-21_e79bc52ba879/1680837887.4074097/events.out.tfevents.1680837887.e79bc52ba879.155.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0dfdb0e0ea4ebb54291c47bca7c5b80dd32789b859efe51bf97b5aab490954e1
+ size 5861
runs/Apr07_03-24-21_e79bc52ba879/events.out.tfevents.1680837887.e79bc52ba879.155.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f9a9ed7fd7928242863027a29130f87e8ded19ed0473ced389068210d7e8dbad
+ size 4397
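
The events.out.tfevents files under runs/ are TensorBoard logs written by Trainer during this epoch. A minimal sketch of reading them programmatically, assuming the tensorboard package is installed:

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Apr07_03-24-21_e79bc52ba879")
acc.Reload()
print(acc.Tags())  # scalar tags such as the training loss logged in epoch 1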
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "*",
+   "mask_token": ".",
+   "pad_token": "$",
+   "sep_token": "|",
+   "unk_token": "?"
+ }
tokenizer.json ADDED
@@ -0,0 +1,176 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 21,
+       "content": "$",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 22,
+       "content": ".",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 23,
+       "content": "?",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 24,
+       "content": "|",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 25,
+       "content": "*",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "BertNormalizer",
+     "clean_text": true,
+     "handle_chinese_chars": false,
+     "strip_accents": null,
+     "lowercase": false
+   },
+   "pre_tokenizer": {
+     "type": "BertPreTokenizer"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "*",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "|",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "*",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "|",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "|",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "*": {
+         "id": "*",
+         "ids": [
+           25
+         ],
+         "tokens": [
+           "*"
+         ]
+       },
+       "|": {
+         "id": "|",
+         "ids": [
+           24
+         ],
+         "tokens": [
+           "|"
+         ]
+       }
+     }
+   },
+   "decoder": {
+     "type": "WordPiece",
+     "prefix": "##",
+     "cleanup": true
+   },
+   "model": {
+     "type": "WordPiece",
+     "unk_token": "?",
+     "continuing_subword_prefix": "##",
+     "max_input_chars_per_word": 100,
+     "vocab": {
+       "R": 0,
+       "H": 1,
+       "K": 2,
+       "D": 3,
+       "E": 4,
+       "S": 5,
+       "T": 6,
+       "N": 7,
+       "Q": 8,
+       "C": 9,
+       "U": 10,
+       "G": 11,
+       "P": 12,
+       "A": 13,
+       "V": 14,
+       "I": 15,
+       "L": 16,
+       "M": 17,
+       "F": 18,
+       "Y": 19,
+       "W": 20,
+       "$": 21,
+       ".": 22,
+       "?": 23,
+       "|": 24,
+       "*": 25
+     }
+   }
+ }
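
The model block is a character-level WordPiece over the 20 standard amino acids plus U, and the TemplateProcessing post-processor wraps every sequence as "*" + sequence + "|" (ids 25 and 24). Because each residue is its own token, input must be space-separated before encoding. A minimal sketch, assuming this commit is checked out to the current directory; the CDR3 sequence is illustrative:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # loads tokenizer.json et al.

cdr3 = "CASSLAPGATNEKLFF"  # hypothetical TCR CDR3 sequence
enc = tokenizer(" ".join(cdr3))
print(enc["input_ids"])  # starts with 25 ("*") and ends with 24 ("|")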
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "cls_token": "*",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "mask_token": ".",
+   "model_max_len": 45,
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "$",
+   "padding_side": "right",
+   "sep_token": "|",
+   "special_tokens_map_file": "/root/.cache/huggingface/hub/models--wukevin--tcr-bert/snapshots/ef65ddcb4e549990e584680e27f9ae2618c884ff/special_tokens_map.json",
+   "strip_accents": null,
+   "tokenize_chinese_chars": false,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "?"
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1649313dd16468ef88e2614194b37b71ecbc2890bf75467ca4a56a198e657a5
+ size 3579
vocab.txt ADDED
@@ -0,0 +1,26 @@
+ R
+ H
+ K
+ D
+ E
+ S
+ T
+ N
+ Q
+ C
+ U
+ G
+ P
+ A
+ V
+ I
+ L
+ M
+ F
+ Y
+ W
+ $
+ .
+ ?
+ |
+ *
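
The row order above is the token-id assignment (0-indexed), so it must line up with the vocab in tokenizer.json and with pad_token_id 21 in config.json. A quick sanity check:

# vocab.txt line i (0-indexed) is token id i
with open("vocab.txt") as f:
    vocab = [line.strip() for line in f]

assert len(vocab) == 26
assert vocab[21] == "$"  # pad, matching config.json's pad_token_id
assert vocab[25] == "*"  # cls, prepended to every encoded sequence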