ShushantLLM committed on
Commit
ca5b355
1 Parent(s): 1e50ef1

Training complete

Browse files
Files changed (2) hide show
  1. README.md +7 -6
  2. generation_config.json +1 -1
README.md CHANGED
@@ -1,11 +1,12 @@
1
  ---
 
 
2
  base_model: meta-llama/Llama-2-7b-hf
3
  tags:
4
  - trl
5
  - sft
6
- - missing lyric Llama2
7
- - generated_from_trainer
8
  - missing lyric Llama2 1
 
9
  datasets:
10
  - generator
11
  model-index:
@@ -46,7 +47,7 @@ The following hyperparameters were used during training:
46
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
47
  - lr_scheduler_type: constant_with_warmup
48
  - lr_scheduler_warmup_ratio: 0.04
49
- - num_epochs: 3
50
  - mixed_precision_training: Native AMP
51
 
52
  ### Training results
@@ -55,7 +56,7 @@ The following hyperparameters were used during training:
55
 
56
  ### Framework versions
57
 
58
- - Transformers 4.39.3
59
- - Pytorch 2.1.2
60
  - Datasets 2.18.0
61
- - Tokenizers 0.15.2
 
1
  ---
2
+ library_name: transformers
3
+ license: llama2
4
  base_model: meta-llama/Llama-2-7b-hf
5
  tags:
6
  - trl
7
  - sft
 
 
8
  - missing lyric Llama2 1
9
+ - generated_from_trainer
10
  datasets:
11
  - generator
12
  model-index:
 
47
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
48
  - lr_scheduler_type: constant_with_warmup
49
  - lr_scheduler_warmup_ratio: 0.04
50
+ - num_epochs: 1
51
  - mixed_precision_training: Native AMP
52
 
53
  ### Training results
 
56
 
57
  ### Framework versions
58
 
59
+ - Transformers 4.44.2
60
+ - Pytorch 2.2.2+cu121
61
  - Datasets 2.18.0
62
+ - Tokenizers 0.19.1
generation_config.json CHANGED
@@ -6,5 +6,5 @@
6
  "pad_token_id": 0,
7
  "temperature": 0.6,
8
  "top_p": 0.9,
9
- "transformers_version": "4.39.3"
10
  }
 
6
  "pad_token_id": 0,
7
  "temperature": 0.6,
8
  "top_p": 0.9,
9
+ "transformers_version": "4.44.2"
10
  }