booktrawler committed on
Commit e15cb5f
1 Parent(s): 3b4e686
Files changed (1)
  1. app.py +36 -0
app.py ADDED
@@ -0,0 +1,36 @@
+ import torch
+ from peft import PeftModel
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+
+ model_name = "huggyllama/llama-7b"
+ adapters_name = "timdettmers/guanaco-7b"
+
+ # Load the LLaMA-7B base model in 4-bit NF4 with bfloat16 compute,
+ # sharding it across all visible GPUs (up to 24 GB each).
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+     max_memory={i: "24000MB" for i in range(torch.cuda.device_count())},
+     quantization_config=BitsAndBytesConfig(
+         load_in_4bit=True,
+         bnb_4bit_compute_dtype=torch.bfloat16,
+         bnb_4bit_use_double_quant=True,
+         bnb_4bit_quant_type="nf4",
+     ),
+ )
+ # Attach the Guanaco QLoRA adapter on top of the quantized base model.
+ model = PeftModel.from_pretrained(model, adapters_name)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ # Guanaco expects a Vicuna-style system prompt with
+ # "### Human:" / "### Assistant:" turn markers.
+ prompt = "Introduce yourself"
+ formatted_prompt = (
+     "A chat between a curious human and an artificial intelligence assistant. "
+     "The assistant gives helpful, detailed, and polite answers to the user's questions.\n"
+     f"### Human: {prompt} ### Assistant:"
+ )
+ inputs = tokenizer(formatted_prompt, return_tensors="pt").to("cuda:0")
+ outputs = model.generate(inputs=inputs.input_ids, max_new_tokens=20)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
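
A possible extension, not part of this commit: for interactive use, the completion can be streamed token by token instead of printed all at once. The sketch below assumes the model, tokenizer, and inputs objects defined in app.py above and uses transformers' TextStreamer:

from transformers import TextStreamer

# Print tokens to stdout as they are generated; skip_prompt suppresses
# echoing the input prompt, and extra kwargs are forwarded to tokenizer.decode.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
model.generate(inputs=inputs.input_ids, max_new_tokens=20, streamer=streamer)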