dasanindya15 commited on
Commit
e712e55
1 Parent(s): d9c36df

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +55 -3
README.md CHANGED
@@ -1,3 +1,55 @@
1
- ---
2
- license: mit
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ datasets:
4
+ - dasanindya15/Cladder_v1
5
+ ---
6
+
7
+
8
+ ### Loading Model and Tokenizer:
9
+
10
+ ```python
11
+
12
+ !pip install -U bitsandbytes
13
+ !pip install -U transformers
14
+ !pip install -U accelerate
15
+ !pip install -U peft
16
+
17
+ base_model_id = "NousResearch/Meta-Llama-3-8B"
18
+ new_model_id = "dasanindya15/llama3-8b_qlora_Cladder_v1"
19
+
20
+ import torch
21
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
22
+ from peft import PeftModel
23
+ from transformers import BitsAndBytesConfig
24
+
25
+ # Load the entire model on GPU 0
26
+ device_map = {"": 0}
27
+
28
+ # Reload the base model with 4-bit quantization and attach the LoRA adapter weights
29
+ # specify the quantization configuration for the model
30
+ quantization_config = BitsAndBytesConfig(
31
+ load_in_4bit=True,
32
+ bnb_4bit_use_double_quant=True,
33
+ bnb_4bit_quant_type="nf4",
34
+ bnb_4bit_compute_dtype=torch.bfloat16,
35
+ )
36
+ base_model = AutoModelForCausalLM.from_pretrained(base_model_id,
37
+ quantization_config=quantization_config,
38
+ device_map=device_map)
39
+ model = PeftModel.from_pretrained(base_model, new_model_id)
40
+
41
+ # Reload the tokenizer and configure its padding settings
42
+ tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)
43
+ tokenizer.pad_token = tokenizer.eos_token
44
+ tokenizer.padding_side = "right"
45
+
46
+
47
+ ```
48
+
49
+
50
+ ---
51
+ license: mit
52
+ datasets:
53
+ - dasanindya15/Cladder_v1
54
+ pipeline_tag: text-generation
55
+ ---