gArthur98 committed
Commit dc6b3d5
Parent: d5ee4ac

Training in progress, step 16
config.json CHANGED
@@ -1,35 +1,35 @@
 {
-  "_name_or_path": "distilbert-base-cased-distilled-squad",
-  "activation": "gelu",
+  "_name_or_path": "microsoft/MiniLM-L12-H384-uncased",
   "architectures": [
-    "DistilBertModel"
+    "BertForSequenceClassification"
   ],
-  "attention_dropout": 0.1,
-  "dim": 768,
-  "dropout": 0.1,
-  "hidden_dim": 3072,
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 384,
   "id2label": {
     "0": "LABEL_0",
     "1": "LABEL_1",
     "2": "LABEL_2"
   },
   "initializer_range": 0.02,
+  "intermediate_size": 1536,
   "label2id": {
     "LABEL_0": 0,
     "LABEL_1": 1,
     "LABEL_2": 2
   },
+  "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
-  "model_type": "distilbert",
-  "n_heads": 12,
-  "n_layers": 6,
-  "output_past": true,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
   "pad_token_id": 0,
-  "qa_dropout": 0.1,
-  "seq_classif_dropout": 0.2,
-  "sinusoidal_pos_embds": true,
-  "tie_weights_": true,
+  "position_embedding_type": "absolute",
   "torch_dtype": "float32",
   "transformers_version": "4.30.2",
-  "vocab_size": 28996
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
 }
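The "+" side above is the stock microsoft/MiniLM-L12-H384-uncased configuration with a 3-label classification head (BertForSequenceClassification, hidden_size 384, 12 layers), replacing the earlier DistilBERT QA config. As a rough sketch only (this code is not part of the commit, and the repo id is not named on this page), a config like this is typically produced and loaded with transformers as follows:

# Sketch, not from this repo: recreate a config like the new one above.
from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained(
    "microsoft/MiniLM-L12-H384-uncased",
    num_labels=3,  # yields the LABEL_0..LABEL_2 id2label/label2id mapping
)
model = AutoModelForSequenceClassification.from_pretrained(
    "microsoft/MiniLM-L12-H384-uncased", config=config
)
print(model.config.model_type, model.config.hidden_size)  # bert 384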
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4ba71baf18dbdeade76ea0ea4bf9918696466a6a3550d86276bdf1fe629b622b
-size 260796829
+oid sha256:1cf3797bcc8039abb4ba0de9950153a94b4eb941cc3520f6cf9d49b09601ac01
+size 133517429
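pytorch_model.bin is stored via Git LFS, so the diff only changes the pointer file: oid is the SHA-256 of the real weights and size their byte count (roughly 134 MB for the new MiniLM checkpoint versus roughly 261 MB for the old DistilBERT one). A hedged sketch for verifying a download against this pointer; the repo id below is a placeholder, since the commit page does not spell it out:

# Sketch only: check a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="gArthur98/<this-repo>", filename="pytorch_model.bin")  # placeholder repo id
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print(digest)  # should equal the oid above, 1cf3797bcc80...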
runs/Jul16_13-26-53_484444fb5cf2/events.out.tfevents.1689514304.484444fb5cf2.209.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01c5b34a383b9bc4e1e07ddc2ea2ff8d40182d45425779668aecc2301965d62b
+size 4117
runs/Jul16_13-36-18_484444fb5cf2/events.out.tfevents.1689514615.484444fb5cf2.209.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b29a6fa952328565e82ab103a666caa036cc6655c60b73a4d1787a47fa877f03
+size 4165
runs/Jul16_13-41-23_484444fb5cf2/events.out.tfevents.1689514901.484444fb5cf2.209.2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd49df1a217540b3a0c8165e9a0985f12a9e6d94137f2874e1b723b3e690f317
+size 4117
runs/Jul16_13-47-26_484444fb5cf2/events.out.tfevents.1689515266.484444fb5cf2.209.3 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13acd25a6081da530f5b4ef31aa91753b718a1346a84194250825133463ed2de
+size 4117
runs/Jul16_13-49-40_484444fb5cf2/events.out.tfevents.1689515396.484444fb5cf2.209.4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19c40d7dbdc0d3fd0b1ef0f3cb5860af3041ced9454ce23f0c4a0e68764d0313
+size 4582
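The five events.out.tfevents files are TensorBoard logs written by successive Trainer runs on the same host (484444fb5cf2); each is only a few kilobytes. A minimal sketch for inspecting one of them, assuming the tensorboard package is installed and the runs/ directory has been pulled locally:

# Sketch: read scalars from one of the event files added above.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Jul16_13-49-40_484444fb5cf2")  # run directory from this commit
ea.Reload()
scalar_tags = ea.Tags().get("scalars", [])
print(scalar_tags)  # the HF Trainer typically logs tags such as train/loss
for tag in scalar_tags:
    for event in ea.Scalars(tag):
        print(tag, event.step, event.value)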
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
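Together, special_tokens_map.json and tokenizer_config.json describe an uncased BertTokenizer with the usual [CLS]/[SEP]/[PAD]/[MASK]/[UNK] tokens; the huge model_max_length means no length limit is recorded in the tokenizer itself, so callers should truncate to the model's max_position_embeddings (512) explicitly. A minimal usage sketch, again with a placeholder repo id:

# Sketch only: load the tokenizer files added in this commit.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gArthur98/<this-repo>")  # placeholder repo id
enc = tok(
    "Hello World",   # lower-cased by the tokenizer (do_lower_case: true)
    truncation=True,
    max_length=512,  # cap explicitly; model_max_length above is effectively unset
    return_tensors="pt",
)
print(tok.cls_token, tok.sep_token)  # [CLS] [SEP]
print(enc["input_ids"].shape)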
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4dd1f40f40a9deb6c572b410633518e1bba679b0ee93d7ccf794cfc1eed9db89
+oid sha256:23f6946b36d6ea68b377f53c7698f6d1519bccad1c0ff1e38426f14f51cbe616
 size 3963
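training_args.bin holds the TrainingArguments object the Trainer serializes next to each checkpoint; its size stays at 3963 bytes and only the hash changes between runs. A cautious sketch for inspecting it locally (it is a pickle, so only load files you trust):

# Sketch: inspect the serialized TrainingArguments.
import torch

args = torch.load("training_args.bin")  # on torch >= 2.6 you may need weights_only=False
print(type(args).__name__)  # TrainingArguments
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)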
vocab.txt ADDED
The diff for this file is too large to render. See raw diff