lakshyaag committed on
Commit
f40f065
·
verified ·
1 Parent(s): 0150dd4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -47,7 +47,7 @@ model = PeftModel.from_pretrained(base_model, "lakshyaag/llama38binstruct_summar
47
  if torch.cuda.is_available():
48
  model = model.to("cuda")
49
 
50
- tokenizer = AutoTokenizer.from_pretrained("lakshyaag/llama38binstruct_summarize")
51
 
52
  tokenizer.pad_token = tokenizer.eos_token
53
  tokenizer.padding_side = "right"
@@ -100,7 +100,7 @@ async def main(message: cl.Message):
100
  print(decoded_output)
101
 
102
  # return only the generated response (not the prompt) as output
103
- response = decoded_output[0].split("<|end_of_text|>")[-1]
104
 
105
  msg = cl.Message(content=response)
106
 
 
47
  if torch.cuda.is_available():
48
  model = model.to("cuda")
49
 
50
+ tokenizer = AutoTokenizer.from_pretrained("NousResearch/Meta-Llama-3-8B-Instruct")
51
 
52
  tokenizer.pad_token = tokenizer.eos_token
53
  tokenizer.padding_side = "right"
 
100
  print(decoded_output)
101
 
102
  # return only the generated response (not the prompt) as output
103
+ response = decoded_output[0].split("<|end_header_id|>")[-1]
104
 
105
  msg = cl.Message(content=response)
106