Cheng98 committed
Commit bcab7db
1 Parent(s): d8d73a1

Upload folder using huggingface_hub

README.md CHANGED
@@ -1,13 +1,28 @@
 ---
+language:
+- en
 license: other
 base_model: facebook/opt-350m
 tags:
 - generated_from_trainer
+datasets:
+- glue
 metrics:
 - accuracy
 model-index:
 - name: opt-350m-rte
-  results: []
+  results:
+  - task:
+      name: Text Classification
+      type: text-classification
+    dataset:
+      name: GLUE RTE
+      type: glue
+      args: rte
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.7111913357400722
 ---

 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -15,10 +30,10 @@ should probably proofread and complete it, then remove this comment. -->

 # opt-350m-rte

-This model is a fine-tuned version of [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) on an unknown dataset.
+This model is a fine-tuned version of [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) on the GLUE RTE dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.7144
-- Accuracy: 0.6859
+- Loss: 2.1042
+- Accuracy: 0.7112

 ## Model description

@@ -37,24 +52,16 @@ More information needed
 ### Training hyperparameters

 The following hyperparameters were used during training:
-- learning_rate: 5e-05
-- train_batch_size: 16
-- eval_batch_size: 32
+- learning_rate: 2e-05
+- train_batch_size: 32
+- eval_batch_size: 8
 - seed: 42
-- gradient_accumulation_steps: 8
-- total_train_batch_size: 128
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 4
+- num_epochs: 5.0

 ### Training results

-| Training Loss | Epoch | Step | Validation Loss | Accuracy |
-|:-------------:|:-----:|:----:|:---------------:|:--------:|
-| No log        | 0.97  | 19   | 0.9919          | 0.4729   |
-| No log        | 2.0   | 39   | 0.6613          | 0.5848   |
-| No log        | 2.97  | 58   | 0.6972          | 0.6318   |
-| No log        | 3.9   | 76   | 0.7144          | 0.6859   |


 ### Framework versions
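After this update the card describes a GLUE RTE text-classification fine-tune. A minimal inference sketch, assuming the repository id is `Cheng98/opt-350m-rte` (inferred from the commit author and model name, not stated in the diff) and a recent `transformers` release:

```python
from transformers import pipeline

# Repo id is an assumption; substitute the actual repository name if it differs.
classifier = pipeline("text-classification", model="Cheng98/opt-350m-rte")

# RTE is a premise/hypothesis pair task; the pipeline accepts text/text_pair dicts.
pred = classifier({"text": "The cat sat on the mat.",
                   "text_pair": "A cat is on a mat."})
print(pred)  # e.g. [{'label': 'entailment', 'score': ...}]
```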
all_results.json CHANGED
@@ -1,14 +1,14 @@
 {
-    "epoch": 3.9,
-    "eval_accuracy": 0.6859205776173285,
-    "eval_loss": 0.714410662651062,
-    "eval_runtime": 1.9726,
+    "epoch": 5.0,
+    "eval_accuracy": 0.7111913357400722,
+    "eval_loss": 2.1042087078094482,
+    "eval_runtime": 1.5849,
     "eval_samples": 277,
-    "eval_samples_per_second": 140.423,
-    "eval_steps_per_second": 4.562,
-    "train_loss": 0.8060256556460732,
-    "train_runtime": 205.3233,
+    "eval_samples_per_second": 174.772,
+    "eval_steps_per_second": 22.083,
+    "train_loss": 0.30048399705153245,
+    "train_runtime": 200.6393,
     "train_samples": 2490,
-    "train_samples_per_second": 48.509,
-    "train_steps_per_second": 0.37
+    "train_samples_per_second": 62.052,
+    "train_steps_per_second": 1.944
 }
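The new throughput numbers are internally consistent with the sample counts, runtimes, and the batch sizes listed in the card; a quick arithmetic check using values copied from the updated `all_results.json` and README:

```python
# Training throughput: 2490 samples x 5 epochs over the reported runtime.
print(round(2490 * 5.0 / 200.6393, 3))  # 62.052 == train_samples_per_second

# Evaluation steps per second: 277 samples at eval_batch_size 8 -> 35 batches.
eval_steps = -(-277 // 8)               # ceiling division, 35
print(round(eval_steps / 1.5849, 3))    # 22.083 == eval_steps_per_second
```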
config.json CHANGED
@@ -13,16 +13,16 @@
   "enable_bias": true,
   "eos_token_id": 2,
   "ffn_dim": 4096,
-  "finetuning_task": "text-classification",
+  "finetuning_task": "rte",
   "hidden_size": 1024,
   "id2label": {
-    "0": "0",
-    "1": "1"
+    "0": "entailment",
+    "1": "not_entailment"
   },
   "init_std": 0.02,
   "label2id": {
-    "0": 0,
-    "1": 1
+    "entailment": 0,
+    "not_entailment": 1
   },
   "layer_norm_elementwise_affine": true,
   "layerdrop": 0.0,
eval_results.json CHANGED
@@ -1,9 +1,9 @@
 {
-    "epoch": 3.9,
-    "eval_accuracy": 0.6859205776173285,
-    "eval_loss": 0.714410662651062,
-    "eval_runtime": 1.9726,
+    "epoch": 5.0,
+    "eval_accuracy": 0.7111913357400722,
+    "eval_loss": 2.1042087078094482,
+    "eval_runtime": 1.5849,
     "eval_samples": 277,
-    "eval_samples_per_second": 140.423,
-    "eval_steps_per_second": 4.562
+    "eval_samples_per_second": 174.772,
+    "eval_steps_per_second": 22.083
 }
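The updated accuracy is an exact fraction of the 277-example RTE validation set, which makes it easy to sanity-check:

```python
eval_samples = 277
eval_accuracy = 0.7111913357400722

correct = round(eval_accuracy * eval_samples)
print(correct)                 # 197 correct predictions
print(correct / eval_samples)  # 0.7111913357400722, matching eval_accuracy
```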
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f0cbae03772046ad4f9f14f0c4f5ad4b6d6c2f6673e61f11d538cbb736445cc
+oid sha256:b358536980e2f2c0deac46a206a0542b52e165e42423a83c7316fe8553ef30c7
 size 1324921569
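The weight file itself is stored through Git LFS, so only the pointer (hash and size) changes in the diff. A small sketch for verifying a locally downloaded `pytorch_model.bin` against the new pointer (the local path is assumed):

```python
import hashlib
import os

EXPECTED_SHA256 = "b358536980e2f2c0deac46a206a0542b52e165e42423a83c7316fe8553ef30c7"
EXPECTED_SIZE = 1324921569
path = "pytorch_model.bin"  # assumed local path

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

print(os.path.getsize(path) == EXPECTED_SIZE)   # True if the download is complete
print(h.hexdigest() == EXPECTED_SHA256)         # True if it matches the LFS pointer
```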
tokenizer.json CHANGED
@@ -2,13 +2,13 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length": 512,
+    "max_length": 128,
     "strategy": "LongestFirst",
     "stride": 0
   },
   "padding": {
     "strategy": {
-      "Fixed": 512
+      "Fixed": 128
     },
     "direction": "Right",
     "pad_to_multiple_of": null,
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 3.9,
-    "train_loss": 0.8060256556460732,
-    "train_runtime": 205.3233,
+    "epoch": 5.0,
+    "train_loss": 0.30048399705153245,
+    "train_runtime": 200.6393,
     "train_samples": 2490,
-    "train_samples_per_second": 48.509,
-    "train_steps_per_second": 0.37
+    "train_samples_per_second": 62.052,
+    "train_steps_per_second": 1.944
 }
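The step-level numbers also line up with the hyperparameters in the card: with 2490 training examples, a batch size of 32, and (per the card) no gradient accumulation, five epochs come out to the 390 optimizer steps recorded in `trainer_state.json` below.

```python
import math

steps_per_epoch = math.ceil(2490 / 32)                # 78 steps per epoch
total_steps = steps_per_epoch * 5                     # 390 steps over 5 epochs
print(total_steps, round(total_steps / 200.6393, 3))  # 390, ~1.944 steps per second
```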
trainer_state.json CHANGED
@@ -1,61 +1,25 @@
 {
     "best_metric": null,
     "best_model_checkpoint": null,
-    "epoch": 3.8974358974358974,
-    "global_step": 76,
+    "epoch": 5.0,
+    "global_step": 390,
     "is_hyper_param_search": false,
     "is_local_process_zero": true,
     "is_world_process_zero": true,
     "log_history": [
         {
-            "epoch": 0.97,
-            "eval_accuracy": 0.4729241877256318,
-            "eval_loss": 0.9918849468231201,
-            "eval_runtime": 1.9203,
-            "eval_samples_per_second": 144.25,
-            "eval_steps_per_second": 4.687,
-            "step": 19
-        },
-        {
-            "epoch": 2.0,
-            "eval_accuracy": 0.5848375451263538,
-            "eval_loss": 0.6613093018531799,
-            "eval_runtime": 1.9293,
-            "eval_samples_per_second": 143.576,
-            "eval_steps_per_second": 4.665,
-            "step": 39
-        },
-        {
-            "epoch": 2.97,
-            "eval_accuracy": 0.631768953068592,
-            "eval_loss": 0.6972018480300903,
-            "eval_runtime": 1.9423,
-            "eval_samples_per_second": 142.614,
-            "eval_steps_per_second": 4.634,
-            "step": 58
-        },
-        {
-            "epoch": 3.9,
-            "eval_accuracy": 0.6859205776173285,
-            "eval_loss": 0.714410662651062,
-            "eval_runtime": 1.9428,
-            "eval_samples_per_second": 142.575,
-            "eval_steps_per_second": 4.632,
-            "step": 76
-        },
-        {
-            "epoch": 3.9,
-            "step": 76,
-            "total_flos": 9048931298181120.0,
-            "train_loss": 0.8060256556460732,
-            "train_runtime": 205.3233,
-            "train_samples_per_second": 48.509,
-            "train_steps_per_second": 0.37
+            "epoch": 5.0,
+            "step": 390,
+            "total_flos": 2900597184921600.0,
+            "train_loss": 0.30048399705153245,
+            "train_runtime": 200.6393,
+            "train_samples_per_second": 62.052,
+            "train_steps_per_second": 1.944
         }
     ],
-    "max_steps": 76,
-    "num_train_epochs": 4,
-    "total_flos": 9048931298181120.0,
+    "max_steps": 390,
+    "num_train_epochs": 5,
+    "total_flos": 2900597184921600.0,
     "trial_name": null,
     "trial_params": null
 }
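Because no intermediate evaluations were logged in this run, `log_history` now holds only the final training summary. A small sketch for inspecting the file locally (the path is assumed):

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["num_train_epochs"])  # 390 5
for entry in state["log_history"]:
    # Only the final summary entry is present in this version of the file.
    print(entry.get("step"), entry.get("train_loss"))
```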
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:95dff2048bf9a7a9cb3f3d470d5ca72f7a996d9a67bd7db6b0f00ffb636f00b4
-size 4091
+oid sha256:3b3b22874f2da3cca2923e809d12e9c08b60ee574321286e71bb5196c3407c8e
+size 3963
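The updated `training_args.bin` is the pickled `TrainingArguments` object saved by the `Trainer`. A sketch for inspecting it locally, assuming a compatible `transformers` version is installed:

```python
import torch

# weights_only=False is needed on newer PyTorch releases because the file is a
# full pickled TrainingArguments object, not a plain tensor checkpoint.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
# Expected to match the card: 2e-05, 32, 5.0
```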