akhil-vaidya committed on
Commit
1729495
1 Parent(s): d96edb1

reduced tokens

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -98,7 +98,7 @@ def get_llama_op(image_file, model, processor):
98
  input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
99
  inputs = processor(images=image, text=input_text, return_tensors="pt").to(model.device)
100
 
101
- output = model.generate(**inputs, max_new_tokens=128)
102
  return processor.decode(output[0])
103
 
104
  def get_text(image_file, model, tokenizer):
 
98
  input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
99
  inputs = processor(images=image, text=input_text, return_tensors="pt").to(model.device)
100
 
101
+ output = model.generate(**inputs, max_new_tokens=20)
102
  return processor.decode(output[0])
103
 
104
  def get_text(image_file, model, tokenizer):