nikotang committed
Commit
760a5ec
1 Parent(s): b8db141

Add model weights and configuration

conf.yml ADDED
@@ -0,0 +1,93 @@
+ # Path to pretrained model or model identifier from huggingface.co/models
+ model_name_or_path: "bert-large-uncased-whole-word-masking"
+
+
+ train_file: "../contract-nli-bert/data/train.json"
+
+ dev_file: "../contract-nli-bert/data/dev.json"
+
+ # Pretrained config name or path if not the same as model_name
+ config_name: null
+
+ # Pretrained tokenizer name or path if not the same as model_name
+ tokenizer_name: null
+
+ # Directory to save the downloaded pretrained model
+ # Defaults to ~/.cache/huggingface/transformers
+ cache_dir: null
+
+ # The maximum total input sequence length.
+ # Sequences longer than max_seq_length will be split into separate chunks.
+ max_seq_length: 512
+
+ # How many tokens the first span should have in each chunk.
+ # Note that this may not be honored when the span is too long.
+ doc_stride: 128
+
+ # The maximum number of tokens for the hypothesis.
+ # Hypotheses longer than this will be truncated.
+ max_query_length: 256
+
+ # Set this flag if you are using an uncased model.
+ do_lower_case: true
+
+ per_gpu_train_batch_size: 2
+
+ per_gpu_eval_batch_size: 2
+
+ learning_rate: !!float 2e-5
+
+ # Number of update steps to accumulate before performing a backward/update pass.
+ gradient_accumulation_steps: 3
+
+ weight_decay: 0.0
+
+ adam_epsilon: !!float 1e-8
+
+ max_grad_norm: 1.0
+
+ num_epochs: 3.0
+
+ # If set, the total number of training steps to perform. Conflicts with num_epochs.
+ max_steps: null
+
+ # Linear warmup over warmup_steps
+ warmup_steps: 1000
+
+ # Language ID of the input for language-specific XLM models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)
+ lang_id: null
+
+ # Validate every n steps
+ valid_steps: 3000
+
+ early_stopping: true
+
+ # Save the model every n steps
+ save_steps: -1
+
+ seed: 42
+
+ # Whether to use 16-bit (mixed) precision (through NVIDIA Apex) instead of 32-bit
+ fp16: false
+
+ # For fp16: Apex AMP optimization level, one of ['O0', 'O1', 'O2', 'O3'].
+ # See details at https://nvidia.github.io/apex/amp.html
+ fp16_opt_level: "O1"
+
+ # Set to true if you have a GPU but don't want to use it
+ no_cuda: false
+
+ # Overwrite the cached training and evaluation sets
+ overwrite_cache: false
+
+ weight_class_probs_by_span_probs: true
+
+ # The class loss is multiplied by this value
+ class_loss_weight: 0.05
+
+ # Either 'identification_classification' or 'classification'
+ task: "identification_classification"
+
+ # Whether to treat hypothesis (query) texts as a symbol instead of feeding in the
+ # hypothesis descriptions
+ symbol_based_hypothesis: false
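
For reference, a minimal sketch of how conf.yml could be consumed from Python, assuming PyYAML (the contract-nli-bert training script may load it differently). The !!float tags matter: a YAML 1.1 parser like PyYAML resolves a bare 2e-5 to a string, because its float pattern requires a decimal point.

import yaml  # PyYAML

# Load the training configuration. safe_load resolves the standard
# tag:yaml.org,2002:float tag, so "!!float 2e-5" arrives as a Python float.
with open("conf.yml") as f:
    conf = yaml.safe_load(f)

assert isinstance(conf["learning_rate"], float)    # 2e-05, thanks to !!float
assert conf["max_steps"] is None                   # YAML null -> None
print(conf["model_name_or_path"])                  # bert-large-uncased-whole-word-masking
print(conf["max_seq_length"], conf["doc_stride"])  # 512 128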
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "_name_or_path": "bert-large-uncased-whole-word-masking",
+ "architectures": [
+ "BertForQuestionAnswering"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "transformers_version": "4.5.1",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 30522
+ }
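
config.json declares the stock BertForQuestionAnswering architecture, so the checkpoint can be instantiated with vanilla transformers. A hedged sketch only: the contract-nli-bert training code builds a task-specific identification/classification head, so any saved weights that do not match the stock class are simply skipped with a warning.

from transformers import AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer

repo_dir = "."  # the directory holding config.json, pytorch_model.bin, vocab.txt

config = AutoConfig.from_pretrained(repo_dir)        # BERT-large: 24 layers, hidden_size 1024
tokenizer = AutoTokenizer.from_pretrained(repo_dir)
model = AutoModelForQuestionAnswering.from_pretrained(repo_dir, config=config)
model.eval()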
nbest_predictions_.json ADDED
Binary file (77.7 MB).
 
null_odds_.json ADDED
The diff for this file is too large to render.
 
predictions_.json ADDED
The diff for this file is too large to render.
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53f11ee0778e6ad51be4fbd055da1ca56e5123ffb9a6f9a2e61e40f7e19f7cd7
+ size 1336543060
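
pytorch_model.bin is stored as a Git LFS pointer: the repository tracks only the blob's SHA-256 ("oid") and byte size, and a command such as git lfs pull fetches the ~1.3 GB weight file itself. A small sketch for verifying a downloaded copy against the pointer above:

import hashlib
from pathlib import Path

# Check the fetched blob against the oid and size recorded in the LFS pointer.
path = Path("pytorch_model.bin")
expected_oid = "53f11ee0778e6ad51be4fbd055da1ca56e5123ffb9a6f9a2e61e40f7e19f7cd7"
expected_size = 1336543060

assert path.stat().st_size == expected_size, "size mismatch"

digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")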
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "tokenizer_file": "/home/yiukei/.cache/huggingface/transformers/33ee0bae279476c742373ecfd5a127d27372fbb9e2f5a84ccb38bbd72775f296.7f2721073f19841be16f41b0a70b600ca6b880c8f3df6f3535cbc704371bdfa4", "name_or_path": "bert-large-uncased-whole-word-masking"}
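
Note that tokenizer_config.json records do_lower_case: false even though conf.yml sets do_lower_case: true for preprocessing; from_pretrained takes its init arguments from this file, so the saved tokenizer will not lower-case input on its own. A quick sketch:

from transformers import BertTokenizerFast

# from_pretrained reads init arguments from tokenizer_config.json, so this
# tokenizer does NOT lower-case by itself (do_lower_case: false above).
tokenizer = BertTokenizerFast.from_pretrained(".")
print(tokenizer.model_max_length)  # 512, matching max_seq_length in conf.yml

# Hypothesis (query) and contract span form the two segments of each input,
# mirroring max_query_length / max_seq_length in conf.yml.
enc = tokenizer("The hypothesis text", "The contract span",
                truncation="only_second", max_length=512)
print(enc.input_ids[:6])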
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f9958be1e763c01cb9053fb20d9452e453745a4d51b0c6d2a443be589f739d6
+ size 1711
vocab.txt ADDED
The diff for this file is too large to render.