silviacamplani committed
Commit a1efed4 · 1 Parent(s): 2e32ff0

Training in progress epoch 0

Files changed (7)
  1. README.md +53 -0
  2. config.json +85 -0
  3. special_tokens_map.json +1 -0
  4. tf_model.h5 +3 -0
  5. tokenizer.json +0 -0
  6. tokenizer_config.json +1 -0
  7. vocab.txt +0 -0
README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_keras_callback
+ model-index:
+ - name: silviacamplani/distilbert-uncase-finetuned-ai-ner
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information Keras had access to. You should
+ probably proofread and complete it, then remove this comment. -->
+
+ # silviacamplani/distilbert-uncase-finetuned-ai-ner
+
+ This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Train Loss: 3.2660
+ - Validation Loss: 3.0039
+ - Epoch: 0
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - optimizer: {'inner_optimizer': {'class_name': 'AdamWeightDecay', 'config': {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 18, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}}, 'dynamic': True, 'initial_scale': 32768.0, 'dynamic_growth_steps': 2000}
+ - training_precision: mixed_float16
+
+ ### Training results
+
+ | Train Loss | Validation Loss | Epoch |
+ |:----------:|:---------------:|:-----:|
+ | 3.2660     | 3.0039          | 0     |
+
+
+ ### Framework versions
+
+ - Transformers 4.18.0
+ - TensorFlow 2.6.4
+ - Datasets 2.1.0
+ - Tokenizers 0.12.1
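
For reference, the serialized optimizer above can be rebuilt in a few lines. A minimal sketch, assuming the stock `transformers.create_optimizer` helper was used (the training script itself is not part of this commit):

```python
import tensorflow as tf
from transformers import create_optimizer

# training_precision: mixed_float16 -- under this policy Keras wraps the
# optimizer in a dynamic LossScaleOptimizer at compile time, which is where
# the 'dynamic': True / 'initial_scale': 32768.0 fields in the config come from.
tf.keras.mixed_precision.set_global_policy("mixed_float16")

# AdamWeightDecay with a linear PolynomialDecay (power=1.0) from 2e-05 to 0.0
# over 18 steps and weight_decay_rate=0.01, matching the hyperparameters above.
optimizer, lr_schedule = create_optimizer(
    init_lr=2e-5,
    num_train_steps=18,
    num_warmup_steps=0,
    weight_decay_rate=0.01,
)
```

The beta/epsilon values left at their defaults here (0.9, 0.999, 1e-08) match the serialized config; the loss-scale wrapper only appears once the model is compiled with this optimizer.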
config.json ADDED
@@ -0,0 +1,85 @@
+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForTokenClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "O",
+     "1": "B-product",
+     "10": "B-programlang",
+     "11": "B-algorithm",
+     "12": "I-algorithm",
+     "13": "B-misc",
+     "14": "I-misc",
+     "15": "I-university",
+     "16": "B-metrics",
+     "17": "B-organisation",
+     "18": "I-organisation",
+     "19": "I-metrics",
+     "2": "I-product",
+     "20": "B-conference",
+     "21": "I-conference",
+     "22": "B-country",
+     "23": "I-programlang",
+     "24": "B-location",
+     "25": "B-person",
+     "26": "I-person",
+     "27": "I-country",
+     "28": "I-location",
+     "3": "B-field",
+     "4": "I-field",
+     "5": "B-task",
+     "6": "I-task",
+     "7": "B-researcher",
+     "8": "I-researcher",
+     "9": "B-university"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "B-algorithm": "11",
+     "B-conference": "20",
+     "B-country": "22",
+     "B-field": "3",
+     "B-location": "24",
+     "B-metrics": "16",
+     "B-misc": "13",
+     "B-organisation": "17",
+     "B-person": "25",
+     "B-product": "1",
+     "B-programlang": "10",
+     "B-researcher": "7",
+     "B-task": "5",
+     "B-university": "9",
+     "I-algorithm": "12",
+     "I-conference": "21",
+     "I-country": "27",
+     "I-field": "4",
+     "I-location": "28",
+     "I-metrics": "19",
+     "I-misc": "14",
+     "I-organisation": "18",
+     "I-person": "26",
+     "I-product": "2",
+     "I-programlang": "23",
+     "I-researcher": "8",
+     "I-task": "6",
+     "I-university": "15",
+     "O": "0"
+   },
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "transformers_version": "4.18.0",
+   "vocab_size": 30522
+ }
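
The `id2label` / `label2id` maps above define a 29-tag IOB scheme (14 entity types plus `O`) for NER over AI-related text. A minimal inference sketch, assuming the checkpoint is pulled from the Hub under the name in the model card; with only `tf_model.h5` committed, the TensorFlow weights are the ones available:

```python
from transformers import pipeline

# Token-classification pipeline; aggregation_strategy="simple" merges
# B-/I- word pieces back into whole entity spans.
ner = pipeline(
    "token-classification",
    model="silviacamplani/distilbert-uncase-finetuned-ai-ner",
    framework="tf",
    aggregation_strategy="simple",
)

print(ner("BERT was developed by researchers at Google."))
```

Note that at this commit the model has only trained for a single epoch ("Training in progress epoch 0"), so predictions will still be rough.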
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6390c08454a3eaef46ba61bea4b188e91b1bc2a5f5b516fde7182cb832700e9
+ size 265666976
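
`tf_model.h5` is committed as a Git LFS pointer (spec v1): only the sha256 oid and the byte size live in the git history, while the ~266 MB weight file sits in LFS storage. A sketch for fetching the resolved file, assuming the `huggingface_hub` client:

```python
from huggingface_hub import hf_hub_download

# Downloads the actual 265,666,976-byte weights behind the LFS pointer.
path = hf_hub_download(
    repo_id="silviacamplani/distilbert-uncase-finetuned-ai-ner",
    filename="tf_model.h5",
    revision="a1efed4",  # the commit above; use the full sha if a short hash does not resolve
)
print(path)
```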
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilbert-base-uncased", "tokenizer_class": "DistilBertTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff