ameerazam08 committed • 9425e9c • Parent(s): b97726d
Update README.md
README.md CHANGED
@@ -9,4 +9,76 @@ library_name: peft
pipeline_tag: text-generation
tags:
- joke
---

#### Fine-tuning examples

You can find fine-tuning notebooks under the [`examples/` directory](https://huggingface.co/google/gemma-7b/tree/main/examples). We provide:
* A notebook that you can run on a free-tier Google Colab instance to perform SFT on an English quotes dataset (a rough sketch of such a run follows below)
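
As a rough illustration, here is a minimal QLoRA-style SFT sketch. The dataset id `Abirate/english_quotes`, the LoRA and quantization hyperparameters, and the exact `SFTTrainer` keyword arguments are assumptions (the `trl` API has shifted between versions), not details pinned down by this card:

```python
import torch
from datasets import load_dataset
from peft import LoraConfig
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          BitsAndBytesConfig, TrainingArguments)
from trl import SFTTrainer

model_id = "google/gemma-7b"

# 4-bit quantization keeps memory low enough for a single small GPU
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, quantization_config=bnb_config, device_map="auto"
)

# English quotes dataset (assumed id); each row has a "quote" column
data = load_dataset("Abirate/english_quotes", split="train")

lora_config = LoraConfig(
    r=8,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
)

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=data,
    peft_config=lora_config,
    dataset_text_field="quote",  # version-dependent: newer trl moves this into SFTConfig
    max_seq_length=512,
    args=TrainingArguments(
        output_dir="./sft-gemma-quotes",  # illustrative path
        per_device_train_batch_size=1,
        gradient_accumulation_steps=4,
        max_steps=100,
        learning_rate=2e-4,
        bf16=True,
    ),
)
trainer.train()
```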
#### Running the model on a CPU
```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-7b")

input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt")

outputs = model.generate(**input_ids)
print(tokenizer.decode(outputs[0]))
```
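
One caveat: without explicit length arguments, `generate` falls back to the model's generation config, which typically caps output at a short default. A sketch with illustrative (assumed) settings:

```python
# Longer, sampled output; the exact values here are illustrative assumptions
outputs = model.generate(**input_ids, max_new_tokens=64, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```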
#### Running the fine-tuned model on a GPU
```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

base_model_id = "google/gemma-2b"
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Load the quantized Gemma base model
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)

eval_tokenizer = AutoTokenizer.from_pretrained(base_model_id, add_bos_token=True, trust_remote_code=True)

# Attach the fine-tuned LoRA adapter to the base model
ft_model = PeftModel.from_pretrained(base_model, "./gemma-jokes-gemma/checkpoint-150")

eval_prompt = "why can't Barbie get pregnant"
# eval_prompt = "You know... When someone says to you Jesus loves you It's always comforting. Unless you are in a Mexican jail."
model_input = eval_tokenizer(eval_prompt, return_tensors="pt").to("cuda:0")

ft_model.eval()
with torch.no_grad():
    print(eval_tokenizer.decode(
        ft_model.generate(**model_input, max_new_tokens=100, repetition_penalty=1.15)[0],
        skip_special_tokens=True,
    ))

# Result
# why can't Barbie get pregnant? Because she has no eggs.
# Why did the chicken cross the road? To get to the other side of the egg.
# Why do chickens lay eggs in their sleep? Because they don't want to wake up and find out they're dead.
# Why do chickens wear glasses? Because they have a hard time seeing the yolk.
# Why do chickens eat so much? Because they are always hungry.
# Why do chickens like to go to the beach? Because they love laying eggs
```
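
To serve the fine-tuned model without PEFT at inference time, the LoRA adapter can be folded into the base weights with `merge_and_unload()`. A minimal sketch, reusing the checkpoint path above; the output directory is illustrative, and because merging into 4-bit quantized weights is lossy or unsupported in many peft versions, it reloads the base model in bfloat16 first:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Reload the base model in full precision before merging
base_fp = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2b", torch_dtype=torch.bfloat16, device_map="auto"
)
merged = PeftModel.from_pretrained(
    base_fp, "./gemma-jokes-gemma/checkpoint-150"
).merge_and_unload()

# Save a standalone checkpoint (illustrative path)
merged.save_pretrained("./gemma-jokes-merged")
AutoTokenizer.from_pretrained("google/gemma-2b").save_pretrained("./gemma-jokes-merged")
```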