dasanindya15 commited on
Commit
d8bd4b8
·
verified ·
1 Parent(s): 93d2950

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +51 -1
README.md CHANGED
@@ -1,6 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: mit
3
  datasets:
4
  - dasanindya15/Cladder_v1
5
  pipeline_tag: text-generation
6
- ---
 
 
 
1
+
2
+ ### Loading Model and Tokenizer:
3
+
4
+ ```python
5
+
6
+ import os
7
+ import pandas as pd
8
+ import torch
9
+ from datasets import load_dataset, Dataset
10
+ from transformers import (
11
+ AutoModelForCausalLM,
12
+ AutoTokenizer,
13
+ BitsAndBytesConfig,
14
+ HfArgumentParser,
15
+ )
16
+ from peft import LoraConfig, PeftModel
17
+
18
+ base_model_name = "NousResearch/Llama-2-7b-chat-hf"
19
+ finetuned_model = "dasanindya15/llama2-7b_qlora_Cladder_v1"
20
+
21
+ # Load the entire model on GPU 0
22
+ device_map = {"": 0}
23
+
24
+ # Reload model in FP16 and merge it with LoRA weights
25
+ base_model = AutoModelForCausalLM.from_pretrained(
26
+ base_model_name,
27
+ low_cpu_mem_usage=True,
28
+ return_dict=True,
29
+ torch_dtype=torch.float16,
30
+ device_map=device_map,
31
+ )
32
+ model = PeftModel.from_pretrained(base_model, finetuned_model)
33
+ model = model.merge_and_unload()
34
+
35
+ # Reload the tokenizer
36
+ tokenizer = AutoTokenizer.from_pretrained(base_model_name, trust_remote_code=True)
37
+ tokenizer.add_special_tokens({'pad_token': '[PAD]'})
38
+ tokenizer.pad_token = tokenizer.eos_token
39
+ tokenizer.padding_side = "right"
40
+
41
+ ```
42
+
43
+
44
+
45
+
46
+
47
+
48
+
49
  ---
50
  license: mit
51
  datasets:
52
  - dasanindya15/Cladder_v1
53
  pipeline_tag: text-generation
54
+ ---
55
+
56
+