Files changed (1)
  1. app.py +13 -14
app.py CHANGED
@@ -149,34 +149,34 @@ examples_path = os.path.dirname(__file__)
 EXAMPLES = [
     [
         {
-            "text": "Hi, who are you",
+            "text": "Hi, who are you?",
         }
     ],
     [
         {
-            "text": "Create a Photorealistic image of Eiffel Tower",
+            "text": "Create a Photorealistic image of the Eiffel Tower.",
         }
     ],
     [
         {
-            "text": "Read what's written on the paper",
+            "text": "Read what's written on the paper.",
             "files": [f"{examples_path}/example_images/paper_with_text.png"],
         }
     ],
     [
         {
-            "text": "Identify 2 famous persons of modern world",
+            "text": "Identify two famous people in the modern world.",
             "files": [f"{examples_path}/example_images/elon_smoking.jpg", f"{examples_path}/example_images/steve_jobs.jpg",]
         }
     ],
     [
         {
-            "text": "Create 5 images of super cars, all cars must in different color",
+            "text": "Create five images of supercars, each in a different color.",
         }
     ],
     [
         {
-            "text": "What is 900*900",
+            "text": "What is 900 multiplied by 900?",
         }
     ],
     [
@@ -187,13 +187,13 @@ EXAMPLES = [
     ],
     [
         {
-            "text": "Write an online ad for that product.",
+            "text": "Create an online ad for this product.",
             "files": [f"{examples_path}/example_images/shampoo.jpg"],
         }
     ],
     [
         {
-            "text": "What is formed by the deposition of either the weathered remains of other rocks?",
+            "text": "What is formed by the deposition of the weathered remains of other rocks?",
             "files": [f"{examples_path}/example_images/ai2d_example.jpeg"],
         }
     ],
@@ -234,8 +234,7 @@ def format_user_prompt_with_im_history_and_system_conditioning(
     user_prompt, chat_history
 ) -> List[Dict[str, Union[List, str]]]:
     """
-    Produces the resulting list that needs to go inside the processor.
-    It handles the potential image(s), the history and the system conditionning.
+    Produce the resulting list that needs to go inside the processor. It handles the potential image(s), the history, and the system conditioning.
     """
     resulting_messages = copy.deepcopy(SYSTEM_PROMPT)
     resulting_images = []
@@ -316,10 +315,10 @@ def model_inference(
     top_p,
 ):
     if user_prompt["text"].strip() == "" and not user_prompt["files"]:
-        gr.Error("Please input a query and optionally image(s).")
+        gr.Error("Please input a query and optionally an image(s).")

     if user_prompt["text"].strip() == "" and user_prompt["files"]:
-        gr.Error("Please input a text query along the image(s).")
+        gr.Error("Please input a text query along with the image(s).")

     streamer = TextIteratorStreamer(
         PROCESSOR.tokenizer,
@@ -417,7 +416,7 @@ decoding_strategy = gr.Radio(
     value="Top P Sampling",
     label="Decoding strategy",
     interactive=True,
-    info="Higher values is equivalent to sampling more low-probability tokens.",
+    info="Higher values are equivalent to sampling more low-probability tokens.",
 )
 temperature = gr.Slider(
     minimum=0.0,
@@ -437,7 +436,7 @@ top_p = gr.Slider(
     visible=True,
     interactive=True,
     label="Top P",
-    info="Higher values is equivalent to sampling more low-probability tokens.",
+    info="Higher values are equivalent to sampling more low-probability tokens.",
 )

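For context, every string touched by this diff is user-facing UI text in the Gradio demo: the EXAMPLES entries become clickable example prompts, the docstring documents the prompt formatter, the gr.Error messages are input-validation feedback in model_inference, and the info= strings sit under the decoding controls. The sketch below is a minimal, hypothetical illustration of how such pieces are typically wired into a multimodal gr.ChatInterface; it is not the actual app.py (the real file has more sliders and a real model behind model_inference), and the stub function here only echoes the input.

    # Hypothetical wiring sketch (assumption, not the PR's code): shows where
    # EXAMPLES, the decoding controls, and model_inference plug into Gradio.
    import gradio as gr

    EXAMPLES = [
        [{"text": "Hi, who are you?"}],
        [{"text": "What is 900 multiplied by 900?"}],
    ]

    def model_inference(user_prompt, chat_history, decoding_strategy, top_p):
        # Stub: the real function validates input and streams model tokens.
        if user_prompt["text"].strip() == "" and not user_prompt["files"]:
            raise gr.Error("Please input a query and optionally an image(s).")
        yield f"(echo) {user_prompt['text']}"

    decoding_strategy = gr.Radio(
        ["Greedy", "Top P Sampling"],
        value="Top P Sampling",
        label="Decoding strategy",
        interactive=True,
    )
    top_p = gr.Slider(
        minimum=0.01, maximum=0.99, value=0.8,
        visible=True, interactive=True, label="Top P",
        info="Higher values are equivalent to sampling more low-probability tokens.",
    )

    demo = gr.ChatInterface(
        fn=model_inference,
        multimodal=True,  # example messages may carry "files" alongside "text"
        examples=EXAMPLES,
        additional_inputs=[decoding_strategy, top_p],
    )

    if __name__ == "__main__":
        demo.launch()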