FatemehYp committed
Commit: 3372a4b
Parent: 00ed75b

Training in progress epoch 0

Files changed (5):
  1. README.md +5 -8
  2. config.json +1 -1
  3. tf_model.h5 +1 -1
  4. tokenizer.json +2 -16
  5. tokenizer_config.json +7 -0
README.md CHANGED
@@ -1,5 +1,4 @@
 ---
-base_model: medicalai/ClinicalBERT
 tags:
 - generated_from_keras_callback
 model-index:
@@ -12,10 +11,10 @@ probably proofread and complete it, then remove this comment. -->
 
 # FatemehYp/clinicalbert-complete_Diagnosis_in_responses_finetuned-squad
 
-This model is a fine-tuned version of [medicalai/ClinicalBERT](https://huggingface.co/medicalai/ClinicalBERT) on an unknown dataset.
+This model was trained from scratch on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Train Loss: 1.3473
-- Epoch: 2
+- Train Loss: 1.2579
+- Epoch: 0
 
 ## Model description
 
@@ -35,15 +34,13 @@ More information needed
 
 The following hyperparameters were used during training:
 - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 444, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}
-- training_precision: float32
+- training_precision: mixed_float16
 
 ### Training results
 
 | Train Loss | Epoch |
 |:----------:|:-----:|
-| 2.5166     | 0     |
-| 1.5944     | 1     |
-| 1.3473     | 2     |
+| 1.2579     | 0     |
 
 
 ### Framework versions
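For context: the serialized optimizer dict above is the shape that `transformers.create_optimizer` emits for `AdamWeightDecay` driving a linear `PolynomialDecay` schedule. A minimal sketch that would reproduce these settings, assuming `create_optimizer` was in fact used; only the numeric values and the new `mixed_float16` policy come from the diff:

```python
# Sketch of the training setup implied by the README hyperparameters.
# 2e-5 initial LR, 444 decay steps, 0.01 weight decay, and mixed_float16
# are taken from the diff; the use of create_optimizer is an assumption
# based on the AdamWeightDecay/PolynomialDecay names in the config dict.
import tensorflow as tf
from transformers import create_optimizer

# Matches the new "training_precision: mixed_float16" entry.
tf.keras.mixed_precision.set_global_policy("mixed_float16")

optimizer, lr_schedule = create_optimizer(
    init_lr=2e-5,           # initial_learning_rate in the PolynomialDecay config
    num_train_steps=444,    # decay_steps; end_learning_rate decays to 0.0
    num_warmup_steps=0,     # assumption: no warmup appears in the serialized dict
    weight_decay_rate=0.01, # weight_decay_rate in the AdamWeightDecay config
)
```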
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "medicalai/ClinicalBERT",
+  "_name_or_path": "clinicalbert-complete_Diagnosis_in_responses_finetuned-squad",
   "activation": "gelu",
   "architectures": [
     "DistilBertForQuestionAnswering"
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:12259eb56970ca0425fb21e5cd97d3b088b4d522e5f5293b16801b8240416a9f
+oid sha256:faddd8df1d114e9a55d7f804e65657a826d0dec357879a7ac16f4a3502d39e82
 size 539068392
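`tf_model.h5` is stored through Git LFS, so the diff only touches the pointer file: `oid` is the SHA-256 of the actual weights blob and `size` is its byte length (unchanged here, since the architecture is the same). A small sketch of checking a fetched blob against the new pointer:

```python
# Sketch: verifying the downloaded weights against the LFS pointer above.
# The hash and byte size are copied from the new pointer in the diff.
import hashlib
import pathlib

blob = pathlib.Path("tf_model.h5").read_bytes()
assert len(blob) == 539068392
assert hashlib.sha256(blob).hexdigest() == (
    "faddd8df1d114e9a55d7f804e65657a826d0dec357879a7ac16f4a3502d39e82"
)
```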
tokenizer.json CHANGED
@@ -1,21 +1,7 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 384,
-    "strategy": "OnlySecond",
-    "stride": 128
-  },
-  "padding": {
-    "strategy": {
-      "Fixed": 384
-    },
-    "direction": "Right",
-    "pad_to_multiple_of": null,
-    "pad_id": 0,
-    "pad_type_id": 0,
-    "pad_token": "[PAD]"
-  },
+  "truncation": null,
+  "padding": null,
   "added_tokens": [
     {
       "id": 0,
tokenizer_config.json CHANGED
@@ -5,12 +5,19 @@
   "do_lower_case": true,
   "full_tokenizer_file": null,
   "mask_token": "[MASK]",
+  "max_length": 384,
   "model_max_length": 1000000000000000019884624838656,
   "never_split": null,
+  "pad_to_multiple_of": null,
   "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
   "sep_token": "[SEP]",
+  "stride": 128,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "DistilBertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "only_second",
   "unk_token": "[UNK]"
 }
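Together with the `tokenizer.json` change above, this moves the SQuAD-style windowing parameters (384-token windows, 128-token stride, truncate only the context) out of the serialized fast-tokenizer state and into `tokenizer_config.json`. The call-time equivalent, sketched with hypothetical question/context strings:

```python
# Sketch: the call-time equivalent of the settings recorded above, as used
# in SQuAD-style preprocessing (question first, context second). The values
# 384 / 128 / "only_second" / right padding come from the diff; the example
# strings and return_overflowing_tokens are illustrative assumptions.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "FatemehYp/clinicalbert-complete_Diagnosis_in_responses_finetuned-squad"
)
encoded = tokenizer(
    "What was the diagnosis?",        # hypothetical question
    "The patient was diagnosed ...",  # hypothetical clinical note (context)
    truncation="only_second",         # only the context is truncated
    max_length=384,
    stride=128,                       # overlap between overflowing windows
    padding="max_length",             # fixed-length right padding to 384
    return_overflowing_tokens=True,
)
```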