nielsbantilan committed
Commit 9185e3f
1 Parent(s): df37322

Upload folder using huggingface_hub
README.md CHANGED
@@ -1,16 +1,21 @@
 ---
-language:
-- en
-license: apache-2.0
-tags:
-- pytorch
-- causal-lm
-- llama2
-- code llama
-- fine-tuning
-- flyte llama
-- flyte repo dataset
-
+library_name: peft
 ---
-
-# FlyteLlama-v0-7b-hf fine-tuned on Flyte repos
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: True
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
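
For context, a minimal sketch (not part of this commit) of how the quantization settings recorded in the new README map onto `transformers`' `BitsAndBytesConfig` when loading the `codellama/CodeLlama-7b-hf` base model; the `device_map` value is an assumption, not recorded here:

```python
# Sketch only: reconstructs the bitsandbytes config listed above.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                       # load_in_4bit: True
    bnb_4bit_quant_type="nf4",               # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=True,          # bnb_4bit_use_double_quant: True
    bnb_4bit_compute_dtype=torch.bfloat16,   # bnb_4bit_compute_dtype: bfloat16
    llm_int8_threshold=6.0,                  # llm_int8_threshold: 6.0
    llm_int8_enable_fp32_cpu_offload=True,   # llm_int8_enable_fp32_cpu_offload: True
    llm_int8_has_fp16_weight=False,          # llm_int8_has_fp16_weight: False
)

# device_map="auto" is an assumption; pick a placement that fits your hardware.
base_model = AutoModelForCausalLM.from_pretrained(
    "codellama/CodeLlama-7b-hf",
    quantization_config=bnb_config,
    device_map="auto",
)
```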
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c78cb3c709570c1035287079d51dc7cceaba72c4849f3ea215d8e2115b2f7010
+oid sha256:7ab78716caf1e4202e5116db608efb1b610c3b05fc7dd24c311b8fdd4f3b30c2
 size 16822989
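
The `.bin` entries in this commit are Git LFS pointer files: three lines recording the spec version, the sha256 object id, and the byte size of the real artifact. A small illustrative helper (not part of this repo) for checking a downloaded file against its pointer:

```python
import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file at `path` matches the pointer's oid and size."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# e.g. against the new adapter_model.bin pointer above:
# verify_lfs_object(
#     "adapter_model.bin",
#     "7ab78716caf1e4202e5116db608efb1b610c3b05fc7dd24c311b8fdd4f3b30c2",
#     16822989,
# )
```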
checkpoint-4000/README.md ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: True
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
checkpoint-4000/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "codellama/CodeLlama-7b-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
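
For reference, a minimal sketch (not part of this commit) of the equivalent `peft` `LoraConfig` and of loading the saved adapter on top of a quantized base model; `base_model` is the model from the earlier sketch, and the local checkpoint path mirrors the directory added here:

```python
from peft import LoraConfig, PeftModel

# Mirrors checkpoint-4000/adapter_config.json: rank-8 LoRA on the
# attention query/value projections, for causal language modeling.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)

# Attach the trained adapter weights to the (quantized) base model.
model = PeftModel.from_pretrained(base_model, "checkpoint-4000")
model.eval()
```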
checkpoint-4000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d0a83f4f2fc62ae48117094d70e0ad646aca909419defa1db5bdd97c66f4caa
+size 16822989
checkpoint-4000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9368f44d3cb9367b33939b2abc9069be2b54d2c5d2ca24eeddb83b5f209ed1f
+size 8574597
checkpoint-4000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bac79b6e9153bbef96c74123b36c0dabdb4f856aeeac01c3d2e1fe506691d7c8
+size 14575
checkpoint-4000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4d150e4abf74595e7d6caefc93e41253f33b6a8cbb0d381a0ca72b530020438
+size 627
checkpoint-4000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96e2b1b1ebc290c407aaea372c88594148df65a6451edabadf2e7a423d8c00a0
+size 4027
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ad3cdc5a771d5b1133ae9b85b673b204bccf1b2b47f7ac4b50da7e1c99e554c0
+oid sha256:96e2b1b1ebc290c407aaea372c88594148df65a6451edabadf2e7a423d8c00a0
 size 4027