Ketengan-Diffusion-Lab committed
Commit a6663c1
1 Parent(s): fd57ab6

Update app.py

Files changed (1): app.py (+6, -10)
app.py CHANGED

@@ -15,15 +15,11 @@ tokenizer = AutoTokenizer.from_pretrained('internlm/internlm-xcomposer2d5-7b',
 model.tokenizer = tokenizer
 
 # Define the function to process input and generate a response
-def analyze_image(query, image):
-    image = Image.open(image)
-    # Convert image to required format
-    image_path = './input_image.png'
-    image.save(image_path)
-    image_list = [image_path]
-
+def analyze_image(query, image_path):
+    image = Image.open(image_path)
+    # Convert image to required format and save temporarily if needed
     with torch.autocast(device_type='cuda', dtype=torch.float16):
-        response, _ = model.chat(tokenizer, query, image_list, do_sample=False, num_beams=3, use_meta=True)
+        response, _ = model.chat(tokenizer, query, [image_path], do_sample=False, num_beams=3, use_meta=True)
 
     return response
 
@@ -35,7 +31,7 @@ with gr.Blocks() as demo:
     query_input = gr.Textbox(label="Enter your query", placeholder="Analyze the given image in a detailed manner")
 
     with gr.Row():
-        image_input = gr.Image(label="Upload an Image", type="file")
+        image_input = gr.Image(label="Upload an Image", type="filepath")
 
     with gr.Row():
         result_output = gr.Textbox(label="Result", placeholder="Model response will appear here", interactive=False)
@@ -46,4 +42,4 @@ with gr.Blocks() as demo:
     submit_button.click(fn=analyze_image, inputs=[query_input, image_input], outputs=result_output)
 
 # Launch the Gradio interface
-demo.launch()
+demo.launch()
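
For context, a minimal sketch of the pattern the updated code relies on, assuming a stock Gradio install; the model call is stubbed out and the describe function is a hypothetical stand-in for analyze_image. With type="filepath", the Image component saves the upload to a temporary file and hands the callback its path as a plain string, which can be opened with PIL or forwarded directly to an API that expects file paths, so the old detour through './input_image.png' is no longer needed.

import gradio as gr
from PIL import Image

def describe(query, image_path):
    # image_path is a str pointing at Gradio's temporary copy of the upload
    image = Image.open(image_path)
    return f"Query: {query!r} | image size: {image.size}"

with gr.Blocks() as demo:
    query_input = gr.Textbox(label="Enter your query")
    with gr.Row():
        image_input = gr.Image(label="Upload an Image", type="filepath")
    with gr.Row():
        result_output = gr.Textbox(label="Result", interactive=False)
    submit_button = gr.Button("Submit")
    submit_button.click(fn=describe, inputs=[query_input, image_input], outputs=result_output)

demo.launch()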