Commit 2d03a46 by a2ran · 1 Parent(s): 6e0ae7a

End of training

README.md CHANGED
@@ -1,6 +1,5 @@
 ---
-license: apache-2.0
-base_model: t5-small
+base_model: eenzeenee/t5-small-korean-summarization
 tags:
 - generated_from_trainer
 model-index:
@@ -13,9 +12,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # FingerFriend-t5-small
 
-This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset.
+This model is a fine-tuned version of [eenzeenee/t5-small-korean-summarization](https://huggingface.co/eenzeenee/t5-small-korean-summarization) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.7932
+- Loss: 0.6002
 
 ## Model description
 
@@ -35,8 +34,8 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 2e-05
-- train_batch_size: 16
-- eval_batch_size: 16
+- train_batch_size: 8
+- eval_batch_size: 8
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
@@ -46,16 +45,12 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 1.6439        | 1.0   | 152  | 1.1984          |
-| 1.2309        | 2.0   | 304  | 1.0458          |
-| 1.109         | 3.0   | 456  | 0.9631          |
-| 1.0371        | 4.0   | 608  | 0.9101          |
-| 0.989         | 5.0   | 760  | 0.8840          |
-| 0.9521        | 6.0   | 912  | 0.8545          |
-| 0.921         | 7.0   | 1064 | 0.8384          |
-| 0.9018        | 8.0   | 1216 | 0.8176          |
-| 0.8801        | 9.0   | 1368 | 0.8030          |
-| 0.8567        | 10.0  | 1520 | 0.7932          |
+| 1.4509        | 1.0   | 342  | 0.7916          |
+| 0.8917        | 2.0   | 684  | 0.7062          |
+| 0.7846        | 3.0   | 1026 | 0.6640          |
+| 0.72          | 4.0   | 1368 | 0.6334          |
+| 0.6804        | 5.0   | 1710 | 0.6100          |
+| 0.6387        | 6.0   | 2052 | 0.6002          |
 
 
 ### Framework versions
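For anyone pulling this checkpoint after the commit, a minimal inference sketch follows. It assumes the repo id `a2ran/FingerFriend-t5-small` (inferred from the committer and model name) and a `summarize: ` task prefix carried over from the Korean-summarization base model; neither detail is stated in the diff itself, so treat this as a sketch rather than the author's documented usage.

```python
# Minimal inference sketch for the retrained checkpoint.
# Repo id and task prefix are assumptions, not stated in this commit.
from transformers import AutoTokenizer, T5ForConditionalGeneration

repo_id = "a2ran/FingerFriend-t5-small"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = T5ForConditionalGeneration.from_pretrained(repo_id)

text = "summarize: " + "요약할 한국어 텍스트를 여기에 넣습니다."
inputs = tokenizer(text, return_tensors="pt", truncation=True)
# With no explicit max_length, generate() falls back to the repo's
# generation_config (128 after this commit).
outputs = model.generate(**inputs)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```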
config.json CHANGED
@@ -1,61 +1,33 @@
 {
-  "_name_or_path": "t5-small",
+  "_name_or_path": "eenzeenee/t5-small-korean-summarization",
   "architectures": [
     "T5ForConditionalGeneration"
   ],
   "classifier_dropout": 0.0,
-  "d_ff": 2048,
+  "d_ff": 1024,
   "d_kv": 64,
   "d_model": 512,
   "decoder_start_token_id": 0,
-  "dense_act_fn": "relu",
+  "dense_act_fn": "gelu_new",
   "dropout_rate": 0.1,
   "eos_token_id": 1,
-  "feed_forward_proj": "relu",
+  "feed_forward_proj": "gated-gelu",
   "initializer_factor": 1.0,
   "is_encoder_decoder": true,
-  "is_gated_act": false,
+  "is_gated_act": true,
   "layer_norm_epsilon": 1e-06,
+  "max_length": 128,
   "model_type": "t5",
-  "n_positions": 512,
-  "num_decoder_layers": 6,
-  "num_heads": 8,
-  "num_layers": 6,
+  "num_decoder_layers": 8,
+  "num_heads": 6,
+  "num_layers": 8,
   "output_past": true,
   "pad_token_id": 0,
   "relative_attention_max_distance": 128,
   "relative_attention_num_buckets": 32,
-  "task_specific_params": {
-    "summarization": {
-      "early_stopping": true,
-      "length_penalty": 2.0,
-      "max_length": 200,
-      "min_length": 30,
-      "no_repeat_ngram_size": 3,
-      "num_beams": 4,
-      "prefix": "summarize: "
-    },
-    "translation_en_to_de": {
-      "early_stopping": true,
-      "max_length": 300,
-      "num_beams": 4,
-      "prefix": "translate English to German: "
-    },
-    "translation_en_to_fr": {
-      "early_stopping": true,
-      "max_length": 300,
-      "num_beams": 4,
-      "prefix": "translate English to French: "
-    },
-    "translation_en_to_ro": {
-      "early_stopping": true,
-      "max_length": 300,
-      "num_beams": 4,
-      "prefix": "translate English to Romanian: "
-    }
-  },
+  "tie_word_embeddings": false,
   "torch_dtype": "float32",
   "transformers_version": "4.33.2",
   "use_cache": true,
-  "vocab_size": 32128
+  "vocab_size": 50358
 }
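Although the name still says "small", the new config describes a differently shaped T5: eight encoder and decoder layers instead of six, a gated-GELU feed-forward, a roughly 50k-token vocabulary, and untied word embeddings. A back-of-the-envelope sketch from the values above shows why the checkpoint grows from ~242 MB to ~383 MB in the `pytorch_model.bin` diff below (a rough estimate; it ignores layer norms and relative-attention bias tables):

```python
# Parameter estimate from the new config values above.
d_model, d_ff, d_kv, n_heads = 512, 1024, 64, 6
n_enc, n_dec, vocab = 8, 8, 50358

inner = n_heads * d_kv          # 384: attention inner dimension
attn = 4 * d_model * inner      # q, k, v, o projections
ffn = 3 * d_model * d_ff        # gated-gelu uses wi_0, wi_1, wo
enc = n_enc * (attn + ffn)
dec = n_dec * (2 * attn + ffn)  # self-attention plus cross-attention
emb = 2 * vocab * d_model       # untied: input embedding plus lm_head

total = emb + enc + dec
print(f"{total / 1e6:.1f}M params, {total * 4 / 1e6:.0f} MB in float32")
# ~95.6M params, ~382 MB: consistent with the new pytorch_model.bin size below
```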
generation_config.json CHANGED
@@ -2,6 +2,7 @@
   "_from_model_config": true,
   "decoder_start_token_id": 0,
   "eos_token_id": 1,
+  "max_length": 128,
   "pad_token_id": 0,
   "transformers_version": "4.33.2"
 }
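The added `max_length: 128` becomes the default length budget for `generate()` whenever no explicit value is passed. A quick way to confirm it without loading the model, again assuming the hypothetical repo id from above:

```python
from transformers import GenerationConfig

# Hypothetical repo id; the commit itself does not name the repo.
gen_cfg = GenerationConfig.from_pretrained("a2ran/FingerFriend-t5-small")
print(gen_cfg.max_length)  # 128 after this commit
```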
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f853b8cf45d94d2efb9558aed7a7972cccfcf2fd74398427724f9812260a97d1
-size 242071641
+oid sha256:408db3f7ac52178898e790a6cd185b6a9e21de5a085d64c1af7778130ccf1a26
+size 382580229
special_tokens_map.json CHANGED
@@ -103,5 +103,5 @@
   ],
   "eos_token": "</s>",
   "pad_token": "<pad>",
-  "unk_token": "<unk>"
+  "unk_token": "<pad>"
 }
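Mapping `unk_token` to `<pad>` is unusual; unknown pieces will now be encoded as the pad token. A quick sanity check that the two special tokens resolve to the same id (hypothetical repo id again):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("a2ran/FingerFriend-t5-small")  # hypothetical repo id
print(tok.unk_token)                         # "<pad>"
print(tok.unk_token_id == tok.pad_token_id)  # True: both resolve to the same id
```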
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -104,8 +104,12 @@
   "clean_up_tokenization_spaces": true,
   "eos_token": "</s>",
   "extra_ids": 100,
-  "model_max_length": 512,
+  "max_length": 128,
+  "model_max_length": 128,
   "pad_token": "<pad>",
+  "stride": 128,
   "tokenizer_class": "T5Tokenizer",
-  "unk_token": "<unk>"
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "<pad>"
 }
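With `model_max_length` dropped from 512 to 128, the tokenizer now clips inputs at 128 tokens whenever truncation is enabled, matching the new generation budget. A minimal sketch, assuming the same hypothetical repo id:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("a2ran/FingerFriend-t5-small")  # hypothetical repo id
long_text = "긴 입력 문장 " * 200
# truncation=True with no max_length falls back to model_max_length.
ids = tok(long_text, truncation=True).input_ids
print(len(ids))  # 128
```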
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a1f97e5d8e3d00a4d95907b6bdd0ce1012c9c9e534357a764cdca168a1265e55
+oid sha256:c0383c3fd8f425030cfa2c0c23ae01db1ffb7eed00a42904bef85495ec164cff
 size 4155