gokuls committed
Commit c3cc819
1 Parent(s): 4d9c8e2

End of training

README.md CHANGED
@@ -1,4 +1,6 @@
 ---
+language:
+- en
 tags:
 - generated_from_trainer
 datasets:
@@ -12,7 +14,7 @@ model-index:
       name: Text Classification
       type: text-classification
     dataset:
-      name: glue
+      name: GLUE COLA
       type: glue
       config: cola
       split: validation
@@ -28,9 +30,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # hBERTv1_cola
 
-This model is a fine-tuned version of [gokuls/bert_12_layer_model_v1](https://huggingface.co/gokuls/bert_12_layer_model_v1) on the glue dataset.
+This model is a fine-tuned version of [gokuls/bert_12_layer_model_v1](https://huggingface.co/gokuls/bert_12_layer_model_v1) on the GLUE COLA dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6212
+- Loss: 0.6180
 - Matthews Correlation: 0.0
 
 ## Model description
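
For reference, a minimal usage sketch (not part of this commit). It assumes the fine-tuned checkpoint is published as `gokuls/hBERTv1_cola`, inferred from the card title; since the base model is a custom 12-layer BERT variant, the stock Auto classes are only expected to resolve it if the repo ships a compatible config.

```python
# Sketch: CoLA-style acceptability classification with the fine-tuned checkpoint.
# "gokuls/hBERTv1_cola" is an assumed repo id, not confirmed by this commit.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "gokuls/hBERTv1_cola"  # assumption: repo id matches the card title
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("The book was read by the student.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # GLUE CoLA convention: 0 = unacceptable, 1 = acceptable
```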
all_results.json CHANGED
@@ -2,13 +2,13 @@
     "epoch": 6.0,
     "eval_loss": 0.6179953217506409,
     "eval_matthews_correlation": 0.0,
-    "eval_runtime": 1.322,
+    "eval_runtime": 1.313,
     "eval_samples": 1043,
-    "eval_samples_per_second": 788.959,
-    "eval_steps_per_second": 3.782,
-    "train_loss": 0.6118226051330566,
-    "train_runtime": 236.3939,
+    "eval_samples_per_second": 794.374,
+    "eval_steps_per_second": 3.808,
+    "train_loss": 0.5082857468548942,
+    "train_runtime": 207.6378,
     "train_samples": 8551,
-    "train_samples_per_second": 1808.634,
-    "train_steps_per_second": 7.191
+    "train_samples_per_second": 2059.114,
+    "train_steps_per_second": 8.187
 }
eval_results.json CHANGED
@@ -2,8 +2,8 @@
     "epoch": 6.0,
     "eval_loss": 0.6179953217506409,
     "eval_matthews_correlation": 0.0,
-    "eval_runtime": 1.322,
+    "eval_runtime": 1.313,
     "eval_samples": 1043,
-    "eval_samples_per_second": 788.959,
-    "eval_steps_per_second": 3.782
+    "eval_samples_per_second": 794.374,
+    "eval_steps_per_second": 3.808
 }
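
Note that `eval_matthews_correlation` is 0.0 here and at every logged epoch. MCC comes out as exactly zero when the classifier assigns the same label to the entire validation split: the correlation's denominator vanishes, and the implementation behind the GLUE metric returns 0.0 by convention in that degenerate case. So a flat 0.0 on CoLA usually indicates the model has collapsed to predicting a single class. A small scikit-learn illustration (not part of this commit):

```python
# Sketch: why a constant predictor yields a Matthews correlation of 0.0.
# With all predictions in one class the MCC denominator is zero, and
# scikit-learn returns 0.0 by convention (the correlation is otherwise undefined).
from sklearn.metrics import matthews_corrcoef

y_true = [1, 0, 1, 1, 0, 1]   # mixed gold labels, as in the CoLA validation set
y_pred = [1, 1, 1, 1, 1, 1]   # every example predicted "acceptable"
print(matthews_corrcoef(y_true, y_pred))  # 0.0
```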
logs/events.out.tfevents.1677708978.serv-9225.3038038.2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffe2d80605e183b9b6e03ed2c0a20587cc90484c500f22ea0b0925644aa91f66
+size 375
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 6.0,
-    "train_loss": 0.6118226051330566,
-    "train_runtime": 236.3939,
+    "train_loss": 0.5082857468548942,
+    "train_runtime": 207.6378,
     "train_samples": 8551,
-    "train_samples_per_second": 1808.634,
-    "train_steps_per_second": 7.191
+    "train_samples_per_second": 2059.114,
+    "train_steps_per_second": 8.187
 }
trainer_state.json CHANGED
@@ -32,9 +32,9 @@
       "epoch": 2.0,
       "eval_loss": 0.6210665702819824,
       "eval_matthews_correlation": 0.0,
-      "eval_runtime": 1.3016,
-      "eval_samples_per_second": 801.321,
-      "eval_steps_per_second": 3.841,
+      "eval_runtime": 1.3002,
+      "eval_samples_per_second": 802.193,
+      "eval_steps_per_second": 3.846,
       "step": 68
     },
     {
@@ -47,9 +47,9 @@
       "epoch": 3.0,
       "eval_loss": 0.6251837611198425,
       "eval_matthews_correlation": 0.0,
-      "eval_runtime": 1.3047,
-      "eval_samples_per_second": 799.413,
-      "eval_steps_per_second": 3.832,
+      "eval_runtime": 1.3073,
+      "eval_samples_per_second": 797.82,
+      "eval_steps_per_second": 3.825,
       "step": 102
     },
     {
@@ -62,9 +62,9 @@
       "epoch": 4.0,
       "eval_loss": 0.6181675791740417,
       "eval_matthews_correlation": 0.0,
-      "eval_runtime": 1.3088,
-      "eval_samples_per_second": 796.936,
-      "eval_steps_per_second": 3.82,
+      "eval_runtime": 1.307,
+      "eval_samples_per_second": 798.007,
+      "eval_steps_per_second": 3.826,
       "step": 136
     },
     {
@@ -77,9 +77,9 @@
       "epoch": 5.0,
       "eval_loss": 0.6210399270057678,
       "eval_matthews_correlation": 0.0,
-      "eval_runtime": 1.3101,
-      "eval_samples_per_second": 796.146,
-      "eval_steps_per_second": 3.817,
+      "eval_runtime": 1.3053,
+      "eval_samples_per_second": 799.078,
+      "eval_steps_per_second": 3.831,
       "step": 170
     },
     {
@@ -92,19 +92,19 @@
       "epoch": 6.0,
       "eval_loss": 0.6212285757064819,
       "eval_matthews_correlation": 0.0,
-      "eval_runtime": 1.3069,
-      "eval_samples_per_second": 798.045,
-      "eval_steps_per_second": 3.826,
+      "eval_runtime": 1.3083,
+      "eval_samples_per_second": 797.202,
+      "eval_steps_per_second": 3.822,
       "step": 204
     },
     {
       "epoch": 6.0,
       "step": 204,
       "total_flos": 6480513749483520.0,
-      "train_loss": 0.6118226051330566,
-      "train_runtime": 236.3939,
-      "train_samples_per_second": 1808.634,
-      "train_steps_per_second": 7.191
+      "train_loss": 0.5082857468548942,
+      "train_runtime": 207.6378,
+      "train_samples_per_second": 2059.114,
+      "train_steps_per_second": 8.187
     }
   ],
   "max_steps": 1700,