SMeyersMrOvkill committed on
Commit
7e08481
1 Parent(s): 54225d7

Added chat w/ Llama 3 70b

Files changed (3)
  1. app.py +50 -2
  2. image.jpeg +0 -0
  3. image2.jpeg +0 -0
app.py CHANGED
@@ -9,6 +9,8 @@ from PIL import Image
from typing import Union
import os
import base64
+ from together import Together
+ import pathlib

device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device}" if device != "cpu" else "Using CPU")
@@ -35,6 +37,8 @@ class MoonDream():

md = MoonDream()

+ SYSTEM_PROMPT = "You are Llama 3 70b. You have been given access to Moondream 2 for VQA when given images. When you have a question about an image, simple start your response with the text, '@question\\nMy question?'. When you do this, the request will be sent to Moondream 2. User can see this happening if they turn debug on, so be professional and stay on topic. Any chat from anyone starting with @answer is the answer to last question asked. If something appears out of sync, ask User to clear the chat."
+
def _respond_one(question, img):
    txt = ""
    yield (txt := txt + MoonDream()(question, [img]))
@@ -144,5 +148,49 @@

demo.launch(debug=True, share=True)
"""
- with gr.TabbedInterface([ifc_imgprompt2text], ["Prompt & Image 2 Text"]) as ifc:
-     ifc.launch(share=False)
+
+ def chat(inpt, mess):
+     from together import Together
+     print(inpt, mess)
+     if mess is None:
+         mess = []
+
+     tog = Together(api_key=os.getenv("TOGETHER_KEY"))
+     messages = [
+         {
+             'role': 'system',
+             'content': SYSTEM_PROMPT
+         },
+         {
+             'role': 'user',
+             'content': inpt
+         }
+     ]
+     for cht in mess:
+         print(cht)
+     res = tog.chat.completions.create(
+         messages=messages,
+         model="meta-llama/Llama-3-70b-chat-hf", stop=["<|eot_id|>"], stream=True)
+     txt = ""
+     for pk in res:
+         print(pk)
+         txt += pk.choices[0].delta.content
+         #mess[-1][-2] += pk.choices[0].delta.content
+         yield txt #, json.dumps(messages)#mess#, json.dumps(messages)
+
+ chatbot = gr.Chatbot(
+     [],
+     elem_id="chatbot",
+     bubble_full_width=False,
+     sanitize_html=False,
+     show_copy_button=True,
+     avatar_images=[
+         pathlib.Path("image.jpeg"),
+         pathlib.Path("image2.jpeg")
+     ])
+
+ jsn = None
+ chat_input = None
+ with gr.TabbedInterface([ifc_imgprompt2text, gr.ChatInterface(chat, chatbot=chatbot, submit_btn=gr.Button(scale=1)), gr.Interface(lambda _: "", inputs=[jsn := gr.JSON(label="conversation")], outputs=[jsn])], ["Prompt & Image 2 Text", "Chat w/ Llama 3 70b & Moondream 2", "data_ignoreme"]) as ifc:
+     jsn = gr.JSON(label="conversation", visible=True)
+     ifc.launch(share=False, debug=True)
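
The chat() function added here streams tokens from Together's chat completions endpoint and yields the accumulated text back to Gradio. For reference, a condensed standalone sketch of that streaming pattern follows; it is not part of the commit, it assumes the TOGETHER_KEY environment variable is set, and it adds a guard for chunks whose delta carries no content, which the committed code omits.

import os
from together import Together

# Sketch of the streaming pattern used by chat() in app.py (assumes TOGETHER_KEY is set).
client = Together(api_key=os.getenv("TOGETHER_KEY"))
stream = client.chat.completions.create(
    model="meta-llama/Llama-3-70b-chat-hf",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    stop=["<|eot_id|>"],
    stream=True,
)
text = ""
for chunk in stream:
    piece = chunk.choices[0].delta.content or ""  # final chunk may carry no content
    text += piece
    print(piece, end="", flush=True)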
image.jpeg ADDED
image2.jpeg ADDED
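
The SYSTEM_PROMPT above describes a small text protocol: when Llama 3 70b wants to ask about an image it starts its reply with '@question' followed by the question on the next line, the host forwards that question to Moondream 2, and the answer comes back as a message starting with '@answer'. The dispatch side of that protocol is not included in this commit; the sketch below is only one way it could be wired up, and the helper name route_turn and its img/md arguments are hypothetical.

# Hypothetical helper (not in this commit): route '@question' replies to MoonDream.
def route_turn(reply, img, md):
    if reply.startswith("@question"):
        # Text after the first newline is treated as the VQA question.
        question = reply.split("\n", 1)[1] if "\n" in reply else ""
        answer = md(question, [img])        # md is the MoonDream wrapper from app.py
        return "@answer\n" + answer         # fed back into the chat as the next message
    return None                             # ordinary reply, no VQA round-trip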