Training in progress epoch 0

Files changed:
- README.md (+10 -9)
- config.json (+2 -2)
- tf_model.h5 (+2 -2)
- tokenizer.json (+16 -2)
- tokenizer_config.json (+1 -1)
README.md CHANGED

@@ -1,5 +1,4 @@
 ---
-license: apache-2.0
 tags:
 - generated_from_keras_callback
 model-index:
@@ -12,10 +11,12 @@ probably proofread and complete it, then remove this comment. -->

 # veb/twitch-bert-base-cased-finetuned

-This model
+This model was trained from scratch on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Train Loss:
--
+- Train Loss: 0.2929
+- Train Sparse Categorical Accuracy: 0.8768
+- Validation Loss: 0.1927
+- Validation Sparse Categorical Accuracy: 0.9483
 - Epoch: 0

 ## Model description
@@ -35,14 +36,14 @@ More information needed
 ### Training hyperparameters

 The following hyperparameters were used during training:
-- optimizer: {'name': '
-- training_precision:
+- optimizer: {'name': 'Adam', 'learning_rate': 5e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False}
+- training_precision: float32

 ### Training results

-| Train Loss | Validation Loss | Epoch |
-
-
+| Train Loss | Train Sparse Categorical Accuracy | Validation Loss | Validation Sparse Categorical Accuracy | Epoch |
+|:----------:|:---------------------------------:|:---------------:|:--------------------------------------:|:-----:|
+| 0.2929 | 0.8768 | 0.1927 | 0.9483 | 0 |


 ### Framework versions
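For context, a minimal sketch of a Keras fine-tuning setup that would produce a card like the one above, assuming the run used transformers' TF classes and the PushToHubCallback (which writes the `generated_from_keras_callback` card and commits "Training in progress epoch N"). The dataset, number of labels, and the training call itself are placeholders, not part of this commit.

```python
import tensorflow as tf
from transformers import BertTokenizer, TFBertForSequenceClassification
from transformers.keras_callbacks import PushToHubCallback

tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
# num_labels=2 is an assumption; the card does not state the label set.
model = TFBertForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)

# Optimizer matching the hyperparameters listed in the card (float32 is the Keras default precision).
optimizer = tf.keras.optimizers.Adam(
    learning_rate=5e-05, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False
)
model.compile(
    optimizer=optimizer,
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["sparse_categorical_accuracy"],
)

# PushToHubCallback commits "Training in progress epoch N" after each epoch
# and generates the model card tagged generated_from_keras_callback.
push_to_hub = PushToHubCallback(
    output_dir="twitch-bert-base-cased-finetuned",
    tokenizer=tokenizer,
    hub_model_id="veb/twitch-bert-base-cased-finetuned",
)

# train_ds / val_ds would be tf.data.Dataset objects of (tokenized inputs, integer labels);
# they are not part of this commit, so the training call is left commented out.
# model.fit(train_ds, validation_data=val_ds, epochs=1, callbacks=[push_to_hub])
```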
config.json CHANGED

@@ -1,7 +1,7 @@
 {
-  "_name_or_path": "bert-base-cased",
+  "_name_or_path": "veb/twitch-bert-base-cased-finetuned",
   "architectures": [
-    "
+    "BertForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "classifier_dropout": null,
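Since config.json now records the hub repo in `_name_or_path` and lists `BertForSequenceClassification`, the checkpoint can be loaded directly by name. A short usage sketch; label names are not defined in this commit, so it only prints a raw class index.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("veb/twitch-bert-base-cased-finetuned")
model = TFAutoModelForSequenceClassification.from_pretrained("veb/twitch-bert-base-cased-finetuned")

inputs = tokenizer("example twitch chat message", return_tensors="tf", truncation=True)
logits = model(**inputs).logits
print(int(tf.argmax(logits, axis=-1)[0]))  # predicted class index
```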
tf_model.h5 CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b2f508293dc744f769dc04c1f63cda14895450cf69850a06dc85ff12f489c232
+size 433518320
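tf_model.h5 is tracked with Git LFS, so the file in the repo is only a pointer recording the blob's SHA-256 and size; the 433,518,320-byte weights live in LFS storage. A small sketch for checking a locally downloaded copy against the pointer (the local path is a placeholder):

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in chunks so the 400+ MB weights are never held in memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "b2f508293dc744f769dc04c1f63cda14895450cf69850a06dc85ff12f489c232"
print(sha256_of("tf_model.h5") == expected)  # True if the downloaded file matches the pointer
```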
tokenizer.json CHANGED

@@ -1,7 +1,21 @@
 {
   "version": "1.0",
-  "truncation":
-
+  "truncation": {
+    "direction": "Right",
+    "max_length": 512,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
+  "padding": {
+    "strategy": {
+      "Fixed": 512
+    },
+    "direction": "Right",
+    "pad_to_multiple_of": null,
+    "pad_id": 0,
+    "pad_type_id": 0,
+    "pad_token": "[PAD]"
+  },
   "added_tokens": [
     {
       "id": 0,
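The new tokenizer.json bakes truncation (LongestFirst at 512 tokens) and fixed padding to 512 into the serialized fast tokenizer. Roughly the same state can be reproduced with the tokenizers API; this is a sketch, not the code that produced this commit.

```python
from tokenizers import Tokenizer

tok = Tokenizer.from_pretrained("bert-base-cased")
# Matches the serialized block: longest-first truncation at 512, fixed right-padding to 512 with [PAD] (id 0).
tok.enable_truncation(max_length=512, strategy="longest_first", stride=0)
tok.enable_padding(length=512, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]")

enc = tok.encode("hello twitch chat")
print(len(enc.ids))  # 512; every encoding is padded out to the fixed length
```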
tokenizer_config.json CHANGED

@@ -1 +1 @@
-{"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-base-cased", "tokenizer_class": "BertTokenizer"}
+{"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "veb/twitch-bert-base-cased-finetuned", "tokenizer_class": "BertTokenizer"}
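tokenizer_config.json drives the slow (Python) BertTokenizer, and the only change here is `name_or_path` now pointing at the fine-tuned repo. A quick check that the settings round-trip, assuming the repo is public and reachable:

```python
from transformers import AutoTokenizer

slow_tok = AutoTokenizer.from_pretrained("veb/twitch-bert-base-cased-finetuned", use_fast=False)
print(slow_tok.do_lower_case, slow_tok.model_max_length)  # False 512 (cased tokenizer, 512-token limit)
```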