Sakalti committed on
Commit
4ac2dfd
1 Parent(s): c6304a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -7
app.py CHANGED
@@ -1,14 +1,23 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
 
3
 
4
  """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
- client = InferenceClient("Sakalti/hamachi-1.7B")
8
 
 
 
 
 
 
9
 
10
  def respond(
11
  message,
 
12
  history: list[tuple[str, str]],
13
  system_message,
14
  max_tokens,
@@ -23,7 +32,11 @@ def respond(
23
  if val[1]:
24
  messages.append({"role": "assistant", "content": val[1]})
25
 
26
- messages.append({"role": "user", "content": message})
 
 
 
 
27
 
28
  response = ""
29
 
@@ -39,13 +52,13 @@ def respond(
39
  response += token
40
  yield response
41
 
42
-
43
  """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
  """
46
  demo = gr.ChatInterface(
47
  respond,
48
  additional_inputs=[
 
49
  gr.Textbox(value="あなたは親切なチャットボットです。", label="システムメッセージ"),
50
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
@@ -59,6 +72,5 @@ demo = gr.ChatInterface(
59
  ],
60
  )
61
 
62
-
63
  if __name__ == "__main__":
64
- demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import base64
4
+ from io import BytesIO
5
+ from PIL import Image
6
 
7
  """
8
+ Hugging Face Hubの推論APIについての詳細は、以下のドキュメントを参照してください: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
9
  """
10
+ client = InferenceClient("Sakalti/SabaVL1-2B") # モデル名を更新
11
 
12
+ def encode_image(image):
13
+ buffered = BytesIO()
14
+ image.save(buffered, format="JPEG")
15
+ img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
16
+ return f"data:image/jpeg;base64,{img_str}"
17
 
18
  def respond(
19
  message,
20
+ image,
21
  history: list[tuple[str, str]],
22
  system_message,
23
  max_tokens,
 
32
  if val[1]:
33
  messages.append({"role": "assistant", "content": val[1]})
34
 
35
+ if image is not None:
36
+ image_url = encode_image(image)
37
+ messages.append({"role": "user", "content": [{"type": "image_url", "image_url": {"url": image_url}}]})
38
+
39
+ messages.append({"role": "user", "content": [{"type": "text", "text": message}]})
40
 
41
  response = ""
42
 
 
52
  response += token
53
  yield response
54
 
 
55
  """
56
+ gradioのChatInterfaceのカスタマイズについては、以下のドキュメントを参照してください: https://www.gradio.app/docs/chatinterface
57
  """
58
  demo = gr.ChatInterface(
59
  respond,
60
  additional_inputs=[
61
+ gr.Image(type="pil", label="画像をアップロード"),
62
  gr.Textbox(value="あなたは親切なチャットボットです。", label="システムメッセージ"),
63
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
64
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
 
72
  ],
73
  )
74
 
 
75
  if __name__ == "__main__":
76
+ demo.launch()