---
license: apache-2.0
---
Note: use the original OpenLLaMA tokenizer (`openlm-research/open_llama_7b`) with this model, as in the example below.

```python
import torch
from transformers import LlamaTokenizer, LlamaForCausalLM

# model_path = 'openlm-research/open_llama_3b'
model_path = 'ruwan/open-llama-sharded-1GB-7B-alpaca-vmware'
# model_path = 'openlm-research/open_llama_13b_600bt'

# Use the original OpenLLaMA tokenizer rather than loading one from model_path
tokenizer = LlamaTokenizer.from_pretrained("openlm-research/open_llama_7b")

# load_in_8bit=True requires the bitsandbytes package
model = LlamaForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.float16, device_map='auto', load_in_8bit=True
)
```
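
Since this checkpoint is sharded into ~1 GB files, it can alternatively be loaded on hardware with limited GPU memory by capping per-device usage and offloading the remainder. A minimal sketch, assuming the `accelerate` package is installed; the memory limits and offload folder below are placeholder values to adjust for your machine:

```python
# Alternative load: cap GPU/CPU memory and spill the rest to disk.
# The "6GiB"/"12GiB" limits and "offload" folder are placeholder values.
model = LlamaForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    device_map='auto',
    max_memory={0: "6GiB", "cpu": "12GiB"},
    offload_folder="offload",
)
```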
 
 
```python
prompt_template = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:"

prompt = 'Explain in simple terms how the attention mechanism of a transformer model works'

input_text = prompt_template.format(instruction=prompt)
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")

output = model.generate(input_ids, max_length=512)

# Strip the prompt tokens so only the generated response is decoded
input_length = input_ids.shape[1]
response = tokenizer.decode(output[0, input_length:], skip_special_tokens=True)
print(response)
```
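
For less deterministic output, `generate` can be switched to sampling. A minimal sketch; the sampling parameters below are illustrative, not tuned for this model:

```python
# Sampling-based generation; temperature/top_p values are illustrative
output = model.generate(
    input_ids,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
)
response = tokenizer.decode(output[0, input_ids.shape[1]:], skip_special_tokens=True)
print(response)
```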