tuanna08go committed
Commit 5bfbe56 · verified · 1 Parent(s): c137eb3

End of training

Files changed (2):
  1. README.md +8 -8
  2. adapter_model.bin +1 -1
README.md CHANGED
@@ -46,7 +46,7 @@ flash_attention: false
 fp16: null
 fsdp: null
 fsdp_config: null
-gradient_accumulation_steps: 16
+gradient_accumulation_steps: 4
 gradient_checkpointing: false
 group_by_length: false
 hub_model_id: tuanna08go/ade6d149-5430-4614-9b8f-0bebe06ee174
@@ -57,7 +57,7 @@ learning_rate: 0.0001
 load_in_4bit: false
 load_in_8bit: false
 local_rank: null
-logging_steps: 10
+logging_steps: 5
 lora_alpha: 16
 lora_dropout: 0.05
 lora_fan_in_fan_out: null
@@ -66,7 +66,7 @@ lora_r: 8
 lora_target_linear: true
 lr_scheduler: cosine
 max_steps: 1
-micro_batch_size: 8
+micro_batch_size: 2
 mlflow_experiment_name: /tmp/b3eb0292cb32b6ac_train_data.json
 model_type: AutoModelForCausalLM
 num_epochs: 1
@@ -120,11 +120,11 @@ More information needed

 The following hyperparameters were used during training:
 - learning_rate: 0.0001
-- train_batch_size: 8
-- eval_batch_size: 8
+- train_batch_size: 2
+- eval_batch_size: 2
 - seed: 42
-- gradient_accumulation_steps: 16
-- total_train_batch_size: 128
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 8
 - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_steps: 2
@@ -134,7 +134,7 @@ The following hyperparameters were used during training:

 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| No log | 0.0349 | 1 | 7.9264 |
+| No log | 0.0022 | 1 | 7.9420 |


 ### Framework versions
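The batch-size fields in this diff are not independent: in axolotl-style configs, the effective batch per optimizer step is micro_batch_size × gradient_accumulation_steps (times the number of GPUs when training is distributed). A minimal sketch of that arithmetic, assuming single-GPU training, which is what the reported total_train_batch_size: 8 implies:

```python
# Effective batch size for the before/after configs in this diff.
# world_size = 1 is an assumption; the README does not report a GPU count.

def total_train_batch_size(micro_batch_size: int,
                           gradient_accumulation_steps: int,
                           world_size: int = 1) -> int:
    """Samples consumed per optimizer step."""
    return micro_batch_size * gradient_accumulation_steps * world_size

before = total_train_batch_size(micro_batch_size=8, gradient_accumulation_steps=16)
after = total_train_batch_size(micro_batch_size=2, gradient_accumulation_steps=4)
print(before, after)  # 128 8 -- matches the old and new README values
```

The epoch column in the loss table moves consistently with this change: at step 1 the epoch fraction is roughly total_train_batch_size divided by the number of training samples, and both 128 / 0.0349 and 8 / 0.0022 point to a dataset of roughly 3.6-3.7k examples.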
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fa2ab8b537a6231fa3e2cfe939219a4e5882aa339f3090025c6f06e7b4a37880
+oid sha256:29e1774b592bd0e3ee0f02c3712168b8a8735acbbe19ed67f5746900b52a2214
 size 100149034
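The adapter_model.bin entries are Git LFS pointer files, so only the sha256 oid changes while the size stays 100149034 bytes, which is expected when retraining rewrites the adapter weights without changing their shape. A minimal sketch of checking a downloaded blob against the new pointer (the local path is illustrative):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so large checkpoints need not fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid from the new LFS pointer in this commit.
expected_oid = "29e1774b592bd0e3ee0f02c3712168b8a8735acbbe19ed67f5746900b52a2214"
assert sha256_of("adapter_model.bin") == expected_oid
```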