ddemszky committed on
Commit e03df0d · 1 Parent(s): 0f8efcf

Upload 9 files

command_args.json ADDED
@@ -0,0 +1 @@
+ {"dataset_path": "data/switchboard_question_detection.json", "model_name": "bert-base-uncased", "checkpoint": "bert-base-uncased", "max_history": 0, "train_batch_size": 32, "valid_batch_size": 16, "gradient_accumulation_steps": 1, "max_length": 300, "lr": 6.25e-05, "max_norm": 1.0, "n_epochs": 3, "eval_before_start": true, "min_prev_length": 0, "final_speaker": -1, "fp16": "O2", "local_rank": -1, "head_attr": {"is_question": {"size": 2}}, "distributed": false}
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "bert-base-uncased",
+   "architectures": [
+     "MultiHeadModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "type_vocab_size": 2,
+   "vocab_size": 30522
+ }
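The model config is a stock BERT-base config (12 layers, 12 heads, hidden size 768, 30522-token vocab); only the architectures entry, MultiHeadModel, points to the custom multi-head wrapper named in command_args.json. A sketch of inspecting it, assuming a local copy of the file:

```python
from transformers import BertConfig

# model_type is "bert", so the config loads as a plain BertConfig;
# the declared "MultiHeadModel" architecture is a custom class that
# AutoModel cannot instantiate from this file alone.
config = BertConfig.from_json_file("config.json")  # local path assumed
print(config.hidden_size, config.num_hidden_layers)  # 768 12
```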
events.out.tfevents.1615609899.jagupard15.stanford.edu ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a36559c9a456174397728edafe722a014bb3b5849fa6bd4327ab1c4b1bfbc9cb
+ size 1041987
model_training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce47d22031b2427148e0a9be0d6a67f64f758f4517c931ab4f2a83bb882ab6f6
+ size 703
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:057f3bb15adf44a4383c6fb4b010c10d8ede269773be8e33d4ae017904d3126c
+ size 437989727
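The three binary files above (the TensorBoard event log, model_training_args.bin, and pytorch_model.bin) are stored as Git LFS pointers: each diff shows only the spec version, a sha256 object id, and the payload size, with pytorch_model.bin resolving to roughly 438 MB of weights. A small sketch of reading such a pointer, assuming the local checkout still holds the pointer text rather than the fetched binary:

```python
# Split each "key value" line of a Git LFS pointer file into a dict.
# This assumes the checkout contains the pointer text, not the binary.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

info = parse_lfs_pointer("pytorch_model.bin")
print(info["oid"])   # sha256:057f3bb1...
print(info["size"])  # 437989727 bytes, ~438 MB of BERT-base weights
```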
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "name_or_path": "bert-base-uncased"}
val_results.txt ADDED
@@ -0,0 +1,12 @@
+ {'average_accuracy': 0.6614378054548321,
+ 'is_question_accuracy': 0.6614378054548321,
+ 'is_question_loss': 0.6052431167722518}
+ {'average_accuracy': 0.9674444269273215,
+ 'is_question_accuracy': 0.9674444269273215,
+ 'is_question_loss': 0.09393756604431444}
+ {'average_accuracy': 0.9680750433548794,
+ 'is_question_accuracy': 0.9680750433548794,
+ 'is_question_loss': 0.09026440343758332}
+ {'average_accuracy': 0.9682326974617689,
+ 'is_question_accuracy': 0.9682326974617689,
+ 'is_question_loss': 0.10944933036552813}
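val_results.txt holds four evaluation records: the first (66.1% accuracy) comes from the pass before any training (eval_before_start is true in command_args.json), and the next three correspond to n_epochs = 3, with accuracy plateauing near 96.8% while the loss ticks up slightly in the last epoch. Each record is a Python dict repr split across three lines, so it is not valid JSON; a parsing sketch, assuming that exact layout:

```python
import ast

# Each record is a dict repr split over three lines, so regroup the
# non-empty lines in threes before parsing (local path assumed).
with open("val_results.txt") as f:
    lines = [ln.strip() for ln in f if ln.strip()]

records = [ast.literal_eval(" ".join(lines[i:i + 3]))
           for i in range(0, len(lines), 3)]
for i, rec in enumerate(records):
    print(i, rec["is_question_accuracy"])
```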
vocab.txt ADDED
The diff for this file is too large to render. See raw diff