asoria committed
Commit a9569f7
1 Parent(s): 4ac6cfb

gpt2-tweet_sentiment_extraction
README.md ADDED
@@ -0,0 +1,60 @@
+ ---
+ library_name: transformers
+ license: mit
+ base_model: gpt2
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: test_trainer
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # test_trainer
+
+ This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.8872
+ - Accuracy: 0.72
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 4
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.44.2
+ - Pytorch 2.4.0+cu121
+ - Datasets 3.0.0
+ - Tokenizers 0.19.1
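The hyperparameters listed in the card map directly onto `transformers.TrainingArguments`. A minimal sketch of an equivalent configuration, with values copied from the "Training hyperparameters" section above; the output directory name is illustrative, the listed betas and epsilon match the library defaults, and the dataset/model wiring of the actual run is not part of this commit:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="test_trainer",        # illustrative; matches the model name in the card
    learning_rate=5e-5,
    per_device_train_batch_size=1,    # train_batch_size: 1
    per_device_eval_batch_size=1,     # eval_batch_size: 1
    gradient_accumulation_steps=4,    # effective train batch size 1 * 4 = 4
    num_train_epochs=3.0,
    lr_scheduler_type="linear",
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```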
config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2ForSequenceClassification"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "problem_type": "single_label_classification",
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
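config.json declares a `GPT2ForSequenceClassification` head with three generic labels (`LABEL_0`–`LABEL_2`) and no `pad_token_id`. A minimal inference sketch, assuming the repository id is `asoria/gpt2-tweet_sentiment_extraction` (inferred from the commit message, not stated in the files) and reusing the EOS token for padding, since GPT-2 ships without a pad token:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo_id = "asoria/gpt2-tweet_sentiment_extraction"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

# GPT-2 has no pad token; reuse EOS so batched inputs can be padded.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
    model.config.pad_token_id = tokenizer.eos_token_id

inputs = tokenizer(["what a great day!"], return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()
print(model.config.id2label[pred])  # prints LABEL_0 / LABEL_1 / LABEL_2
```

Which sentiment each `LABEL_*` corresponds to is not recorded in the config, so that mapping has to come from the training data.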
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:898adc4cb1ca1f9cef2a240f987a330c0d62153aa8930ec529351f689f11a18d
+ size 497783504
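These three lines are a Git LFS pointer (spec version, SHA-256 of the payload, size in bytes, roughly 498 MB), not the weights themselves. A sketch of fetching and inspecting the resolved file with `huggingface_hub`, again assuming the repo id used above:

```python
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# Downloads (and caches) the resolved LFS object, not the pointer file.
path = hf_hub_download("asoria/gpt2-tweet_sentiment_extraction", "model.safetensors")
state_dict = load_file(path)
# "score.weight" is the classification head in GPT2ForSequenceClassification.
print(len(state_dict), "tensors; head shape:", tuple(state_dict["score.weight"].shape))
```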
runs/Sep17_10-55-12_714d69f1d953/events.out.tfevents.1726570517.714d69f1d953.606.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88dc014b3dbbb3a4c108ce21ebd31aa882f56613dc8a02f7a7a79c739b2acce2
+ size 5842
runs/Sep17_10-55-12_714d69f1d953/events.out.tfevents.1726571372.714d69f1d953.606.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ceff9a1a482ca7fd7c4d6fbd08decb4cd01b42fc89bf079b691152d08a343534
+ size 411
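The two `events.out.tfevents.*` files are TensorBoard logs written during the run; given the sizes and timestamps, the first likely holds the training scalars and the small second file the final evaluation. A sketch of reading them programmatically; the scalar tag names are the usual Trainer defaults and are assumed here:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Sep17_10-55-12_714d69f1d953")  # pass the run directory
ea.Reload()
print(ea.Tags()["scalars"])               # lists the scalar tags actually logged
for event in ea.Scalars("train/loss"):    # assumed tag name
    print(event.step, event.value)
```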
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efd68022c2a215eae6347ad18b0226ddd9e18dc3eba0a454ae149769e0d83650
+ size 5176
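`training_args.bin` is the pickled `TrainingArguments` object that `Trainer` saves alongside the model; loading it should reproduce the hyperparameters listed in the README. A sketch (transformers must be installed so the pickle can be deserialized):

```python
import torch

# training_args.bin is a pickled transformers.TrainingArguments instance.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size,
      args.gradient_accumulation_steps, args.num_train_epochs)
```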