imtiaz114 committed
Commit 5291866
Parent: 7682072

Training in progress epoch 0
Files changed (7)
  1. README.md +7 -11
  2. config.json +14 -16
  3. special_tokens_map.json +3 -11
  4. tf_model.h5 +2 -2
  5. tokenizer.json +2 -2
  6. tokenizer_config.json +7 -17
  7. vocab.txt +0 -0
README.md CHANGED
@@ -1,6 +1,5 @@
 ---
-license: mit
-base_model: ai4bharat/indic-bert
+base_model: Aleksandar/electra-srb-ner
 tags:
 - generated_from_keras_callback
 model-index:
@@ -13,11 +12,11 @@ probably proofread and complete it, then remove this comment. -->
 
 # imtiaz114/mbert-ner-baseline-1
 
-This model is a fine-tuned version of [ai4bharat/indic-bert](https://huggingface.co/ai4bharat/indic-bert) on an unknown dataset.
+This model is a fine-tuned version of [Aleksandar/electra-srb-ner](https://huggingface.co/Aleksandar/electra-srb-ner) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Train Loss: 0.0636
-- Validation Loss: 0.0832
-- Epoch: 3
+- Train Loss: 0.1767
+- Validation Loss: 0.1226
+- Epoch: 0
 
 ## Model description
 
@@ -36,17 +35,14 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- optimizer: {'inner_optimizer': {'class_name': 'AdamWeightDecay', 'config': {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 3184, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}}, 'dynamic': True, 'initial_scale': 32768.0, 'dynamic_growth_steps': 2000}
+- optimizer: {'inner_optimizer': {'class_name': 'AdamWeightDecay', 'config': {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 1592, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}}, 'dynamic': True, 'initial_scale': 32768.0, 'dynamic_growth_steps': 2000}
 - training_precision: mixed_float16
 
 ### Training results
 
 | Train Loss | Validation Loss | Epoch |
 |:----------:|:---------------:|:-----:|
-| 0.2187 | 0.1320 | 0 |
-| 0.1144 | 0.0944 | 1 |
-| 0.0817 | 0.0866 | 2 |
-| 0.0636 | 0.0832 | 3 |
+| 0.1767 | 0.1226 | 0 |
 
 
 ### Framework versions
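The serialized optimizer above is transformers' TF `AdamWeightDecay` inside a dynamic loss-scale wrapper, with a linear `PolynomialDecay` from 2e-05 to 0 over 1592 steps. Below is a minimal sketch of how such a setup is typically built with `transformers.create_optimizer`, assuming 1592 total training steps and no warmup (values read off the config above; the author's actual training script is not part of this commit):

```python
import tensorflow as tf
from transformers import create_optimizer

# Matches "training_precision: mixed_float16"; under this policy Keras wraps
# the optimizer in a dynamic LossScaleOptimizer, which is where the
# 'dynamic': True / 'initial_scale': 32768.0 entries come from.
tf.keras.mixed_precision.set_global_policy("mixed_float16")

# AdamWeightDecay with a linear PolynomialDecay from 2e-05 to 0.0 over
# 1592 steps and weight_decay_rate 0.01, matching the serialized config.
optimizer, lr_schedule = create_optimizer(
    init_lr=2e-5,
    num_train_steps=1592,
    num_warmup_steps=0,
    weight_decay_rate=0.01,
)
```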
config.json CHANGED
@@ -1,17 +1,13 @@
 {
-  "_name_or_path": "ai4bharat/indic-bert",
+  "_name_or_path": "Aleksandar/electra-srb-ner",
   "architectures": [
-    "AlbertForTokenClassification"
+    "ElectraForTokenClassification"
   ],
-  "attention_probs_dropout_prob": 0,
-  "bos_token_id": 2,
-  "classifier_dropout_prob": 0.1,
-  "down_scale_factor": 1,
-  "embedding_size": 128,
-  "eos_token_id": 3,
-  "gap_size": 0,
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "embedding_size": 768,
   "hidden_act": "gelu",
-  "hidden_dropout_prob": 0,
+  "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
   "id2label": {
     "0": "O",
@@ -23,7 +19,6 @@
     "6": "I-LOC"
   },
   "initializer_range": 0.02,
-  "inner_group_num": 1,
   "intermediate_size": 3072,
   "label2id": {
     "B-LOC": 5,
@@ -36,15 +31,18 @@
   },
   "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
-  "model_type": "albert",
-  "net_structure_type": 0,
+  "model_type": "electra",
   "num_attention_heads": 12,
-  "num_hidden_groups": 1,
   "num_hidden_layers": 12,
-  "num_memory_blocks": 0,
   "pad_token_id": 0,
   "position_embedding_type": "absolute",
+  "summary_activation": "gelu",
+  "summary_last_dropout": 0.1,
+  "summary_type": "first",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
   "transformers_version": "4.31.0.dev0",
   "type_vocab_size": 2,
-  "vocab_size": 200000
+  "use_cache": true,
+  "vocab_size": 30522
 }
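The new config is a BERT-sized ELECTRA encoder with a 7-way token-classification head; only `"O"`, `"B-LOC"`, and `"I-LOC"` are visible in the diff context, while the full label map lives in config.json. A minimal sketch of loading the checkpoint, assuming the Hub id from the README heading:

```python
from transformers import AutoConfig, TFAutoModelForTokenClassification

# Repo id assumed from the README title of this commit.
repo_id = "imtiaz114/mbert-ner-baseline-1"

config = AutoConfig.from_pretrained(repo_id)
print(config.model_type)  # "electra" after this commit (previously "albert")
print(config.id2label)    # the full 7-label NER scheme from config.json

# tf_model.h5 in this repo holds the matching TF weights.
model = TFAutoModelForTokenClassification.from_pretrained(repo_id)
```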
special_tokens_map.json CHANGED
@@ -1,15 +1,7 @@
 {
-  "bos_token": "[CLS]",
   "cls_token": "[CLS]",
-  "eos_token": "[SEP]",
-  "mask_token": {
-    "content": "[MASK]",
-    "lstrip": true,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": "<pad>",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "unk_token": "<unk>"
+  "unk_token": "[UNK]"
 }
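The special-token switch above follows the backbone change: WordPiece-style `[PAD]`/`[UNK]`/`[MASK]` replace the SentencePiece `<pad>`/`<unk>` of the old ALBERT tokenizer. A quick sanity check, again assuming the repo id from the README:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("imtiaz114/mbert-ner-baseline-1")

# Expected to mirror the new special_tokens_map.json:
# {'cls_token': '[CLS]', 'mask_token': '[MASK]', 'pad_token': '[PAD]',
#  'sep_token': '[SEP]', 'unk_token': '[UNK]'}
print(tokenizer.special_tokens_map)
```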
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:efa595d50fd8161039fc600e5d410925f970223195f98d7b0ee20d164cdd1b5e
-size 131473068
+oid sha256:f43194a5239c23b38310828324013de45ce38bbf2df00287b74ea6ab69d94903
+size 435876320
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:431ec72a485a382d7e3a6063af5f54249b36e4870e0c8d079918e79cd0a4f8f1
-size 15285606
+oid sha256:e4a3974847fda36dc728132432b910a0f67598ca1cc7f0686901cf3ef82d4f03
+size 772540
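Both tf_model.h5 and tokenizer.json are Git LFS pointer files, so the diff shows only the new `oid`/`size` lines; the weight blob grows from about 131 MB to about 436 MB, consistent with moving from ALBERT's shared-weight checkpoint to a roughly 110M-parameter ELECTRA-base one. A sketch of resolving the pointers with `huggingface_hub` (repo id assumed as above):

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointers and downloads the actual blobs into the local cache.
weights_path = hf_hub_download("imtiaz114/mbert-ner-baseline-1", "tf_model.h5")
tokenizer_path = hf_hub_download("imtiaz114/mbert-ner-baseline-1", "tokenizer.json")
```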
tokenizer_config.json CHANGED
@@ -1,23 +1,13 @@
 {
-  "bos_token": "[CLS]",
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_lower_case": true,
-  "eos_token": "[SEP]",
-  "keep_accents": false,
-  "mask_token": {
-    "__type": "AddedToken",
-    "content": "[MASK]",
-    "lstrip": true,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "<pad>",
-  "remove_space": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "sp_model_kwargs": {},
-  "tokenizer_class": "AlbertTokenizer",
-  "unk_token": "<unk>"
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "ElectraTokenizer",
+  "unk_token": "[UNK]"
 }
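The load-bearing line here is `model_max_length`: the old value is the ~1e30 sentinel transformers uses when no maximum length is set, while the new tokenizer advertises 512, matching `max_position_embeddings` in config.json. A minimal sketch of what that changes in practice (repo id assumed as above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("imtiaz114/mbert-ner-baseline-1")
print(tokenizer.model_max_length)  # 512 after this commit

# truncation=True now clips at a real limit instead of the ~1e30 sentinel.
enc = tokenizer("some potentially very long input ...", truncation=True)
```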
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff