jaty54 committed on
Commit
722a6ef
1 Parent(s): 6cd4b9e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -3
app.py CHANGED
@@ -1,3 +1,31 @@
1
- git clone -b v2.5 https://github.com/camenduru/text-generation-webui
2
- cd text-generation-webui
3
- pip install -q -r requirements.txt
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Generate Python code from a natural-language task using a 4-bit
quantized Llama-2-7B model fine-tuned for Python code generation."""

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face Hub repo id of the fine-tuned, int4-quantized model.
model_id = "edumunozsala/llama-2-7b-int4-python-code-20k"

# BUGFIX: the original passed an undefined name `hf_model_repo` here
# (NameError at runtime); the repo id variable is `model_id`.
tokenizer = AutoTokenizer.from_pretrained(model_id)

# BUGFIX: `device_map` was also undefined; "auto" lets accelerate place
# the quantized layers on the available device(s).
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    load_in_4bit=True,
    torch_dtype=torch.float16,
    device_map="auto",
)

instruction = "Write a Python function to display the first and last elements of a list."
# Renamed from `input` so the builtin is not shadowed; the rendered
# prompt is unchanged (the value is the same empty string).
task_input = ""

# Alpaca-style prompt: task + optional input, model completes the Response.
prompt = f"""### Instruction:
Use the Task below and the Input given to write the Response, which is a programming code that can solve the Task.

### Task:
{instruction}

### Input:
{task_input}

### Response:
"""

input_ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids.cuda()
# inference_mode() was commented out in the original; enabling it skips
# autograd bookkeeping during generation (no behavior change in output).
with torch.inference_mode():
    outputs = model.generate(
        input_ids=input_ids,
        max_new_tokens=100,
        do_sample=True,
        top_p=0.9,
        temperature=0.5,
    )

print(f"Prompt:\n{prompt}\n")
# Decode the full sequence and strip the echoed prompt prefix, leaving
# only the newly generated code.
print(f"Generated instruction:\n{tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0][len(prompt):]}")