AdrienB134 committed on
Commit
207c30a
1 Parent(s): 79aabbe
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -61,8 +61,8 @@ def model_inference(
61
  text = f"{assistant_prefix} {text}"
62
 
63
 
64
- prompt = processor.apply_chat_template(resulting_messages, add_generation_prompt=True)
65
- inputs = processor(text=prompt, images=[images], return_tensors="pt")
66
  inputs = {k: v.to("cuda") for k, v in inputs.items()}
67
 
68
  generation_args = {
@@ -86,9 +86,9 @@ def model_inference(
86
  generation_args.update(inputs)
87
 
88
  # Generate
89
- generated_ids = model.generate(**generation_args)
90
 
91
- generated_texts = processor.batch_decode(generated_ids[:, generation_args["input_ids"].size(1):], skip_special_tokens=True)
92
  return generated_texts[0]
93
 
94
  # Load model
 
61
  text = f"{assistant_prefix} {text}"
62
 
63
 
64
+ prompt = id_processor.apply_chat_template(resulting_messages, add_generation_prompt=True)
65
+ inputs = id_processor(text=prompt, images=[images], return_tensors="pt")
66
  inputs = {k: v.to("cuda") for k, v in inputs.items()}
67
 
68
  generation_args = {
 
86
  generation_args.update(inputs)
87
 
88
  # Generate
89
+ generated_ids = id_model.generate(**generation_args)
90
 
91
+ generated_texts = id_processor.batch_decode(generated_ids[:, generation_args["input_ids"].size(1):], skip_special_tokens=True)
92
  return generated_texts[0]
93
 
94
  # Load model