akhil-vaidya committed
Commit 7316288
Parent: 880d93b

some improvements

Files changed (1):
  app.py  +4 -2
app.py CHANGED
@@ -87,6 +87,8 @@ def get_llama_op(image_file, model, processor):
 
     with open(image_file, "rb") as f:
         image = base64.b64encode(f.read()).decode('utf-8')
+
+    image = Image.open(image_file)
     messages = [
         {"role": "user", "content": [
             {"type": "image"},
@@ -94,9 +96,9 @@ def get_llama_op(image_file, model, processor):
         ]}
     ]
     input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
-    inputs = processor(image, input_text, return_tensors="pt").to(model.device)
+    inputs = processor(images=image, text=input_text, return_tensors="pt").to(model.device)
 
-    output = model.generate(**inputs, max_new_tokens=30)
+    output = model.generate(**inputs, max_new_tokens=128)
     return processor.decode(output[0])
 
 def get_text(image_file, model, tokenizer):
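
For context, a minimal sketch of how get_llama_op reads after this commit. It assumes Image comes from Pillow and that model/processor are a vision-language checkpoint and its transformers processor loaded elsewhere in app.py (assumptions; the imports and the user prompt text sit outside this diff). Note that the retained base64 encoding is immediately overwritten by Image.open, so it is now effectively dead code.

# Sketch only; imports and prompt text are assumptions not shown in the diff.
from PIL import Image

def get_llama_op(image_file, model, processor):
    # Load the image as a PIL Image; the processor expects image objects
    # rather than the base64 string the old code passed positionally.
    image = Image.open(image_file)

    messages = [
        {"role": "user", "content": [
            {"type": "image"},
            {"type": "text", "text": "..."},  # prompt text elided in the diff
        ]}
    ]
    input_text = processor.apply_chat_template(messages, add_generation_prompt=True)

    # Keyword arguments make the image/text routing explicit, which is the
    # substance of this commit's fix to the processor call.
    inputs = processor(images=image, text=input_text, return_tensors="pt").to(model.device)

    # max_new_tokens raised from 30 to 128 so responses are not truncated.
    output = model.generate(**inputs, max_new_tokens=128)
    return processor.decode(output[0])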