MaziyarPanahi committed on
Commit 61f4d5b
1 Parent(s): c845a4c

Update app.py

Files changed (1)
app.py +13 -4
app.py CHANGED
@@ -28,8 +28,7 @@ processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
 
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
-    torch_dtype=torch.float16,
-    low_cpu_mem_usage=True,
+    torch_dtype="auto",
     trust_remote_code=True,
 )
 
@@ -59,8 +58,18 @@ def bot_streaming(message, history):
         # Handle the case where 'image' is not defined at all
         gr.Error("You need to upload an image for Phi-3-vision to work.")
 
-    prompt = f"{message['text']}<|image_1|>\nCan you convert the table to markdown format?{prompt_suffix}{assistant_prompt}"
-    print(f"prompt: {prompt}")
+    # prompt = f"{message['text']}<|image_1|>\nCan you convert the table to markdown format?{prompt_suffix}{assistant_prompt}"
+    chat = [
+        {"role": "user", "content": f"<|image_1|>\n{message['text']}"},
+    ]
+    prompt = processor.tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+
+    # Remove the last <|endoftext|> if it is there; it is used for training, not inference. For training, make sure to add <|endoftext|> at the end.
+    if prompt.endswith("<|endoftext|>"):
+        prompt = prompt.rstrip("<|endoftext|>")
+
+    print(f">>> Prompt\n{prompt}")
+
     image = Image.open(image)
     inputs = processor(prompt, [image], return_tensors='pt').to("cuda:0")
 
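
For reference, below is a minimal standalone sketch of the prompt path this commit introduces. The checkpoint id ("microsoft/Phi-3-vision-128k-instruct") and the image path ("table.png") are illustrative assumptions, not taken from the diff. One caveat the sketch works around: str.rstrip("<|endoftext|>") strips any trailing run of those characters rather than the literal token, so slicing off the suffix is the safer variant.

# Minimal sketch of the commit's prompt construction, assuming the Space
# wraps microsoft/Phi-3-vision-128k-instruct (illustrative checkpoint id)
# and a local image at "table.png" (illustrative path).
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

model_id = "microsoft/Phi-3-vision-128k-instruct"  # assumed, not from the diff
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",  # use the dtype recorded in the checkpoint config
    trust_remote_code=True,
).to("cuda:0")

chat = [
    {"role": "user", "content": "<|image_1|>\nCan you convert the table to markdown format?"},
]
prompt = processor.tokenizer.apply_chat_template(
    chat, tokenize=False, add_generation_prompt=True
)

# str.rstrip would remove a trailing run of the characters < | e n d o f t x,
# not the literal token, so drop the training-only suffix by slicing instead.
suffix = "<|endoftext|>"
if prompt.endswith(suffix):
    prompt = prompt[: -len(suffix)]

image = Image.open("table.png")
inputs = processor(prompt, [image], return_tensors="pt").to("cuda:0")
output_ids = model.generate(**inputs, max_new_tokens=256)
# Decode only the newly generated tokens, skipping the echoed prompt.
response = processor.decode(
    output_ids[0, inputs["input_ids"].shape[1]:], skip_special_tokens=True
)
print(response)

The commit's endswith guard means the rstrip call only fires on a prompt that really ends with the token, but rstrip can still eat characters from the text just before it (for example a trailing > or |), which is why the sketch slices instead.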