Arigadam committed
Commit 1aedba1
1 Parent(s): 0d90500

Update app.py

Files changed (1):
  app.py +12 -16
app.py CHANGED
@@ -1,43 +1,40 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
+#from huggingface_hub import InferenceClient
+import openai
+from markdown import markdown
+from markdownify import markdownify
+
+openai.base_url = "https://text.pollinations.ai/openai"
+openai.api_key = "aaa"
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+#client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 def respond(
     message,
     history: list[tuple[str, str]],
-    system_message,
     max_tokens,
     temperature,
     top_p,
 ):
+    system_message = """Hello! You draw and edit pictures at the user's request. To generate one, write an English prompt for the AI that will generate the image. To display a picture in your message, write ![](https://image.pollinations.ai/prompt/{prompt}) at the spot where the picture should appear, where {prompt} is your prompt. SPEAK ENGLISH!"""
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
         if val[0]:
             messages.append({"role": "user", "content": val[0]})
         if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
+            messages.append({"role": "assistant", "content": markdownify(val[1])})
 
     messages.append({"role": "user", "content": message})
 
     response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+    response = openai.chat.completions.create(messages=messages, model="openai").choices[0].message.content
+    return markdown(response)
 
 
 """
@@ -46,7 +43,6 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
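
For reference, here is a minimal standalone sketch of the request flow this commit introduces, run outside Gradio. It assumes, as the committed code implies, that text.pollinations.ai exposes an OpenAI-compatible chat-completions endpoint that accepts a placeholder API key. The base URL, key, and model alias "openai" are taken from the diff; the shortened system prompt and the sample user message are illustrative only.

# Minimal sketch of the commit's new request flow (assumptions noted above).
# Requires: pip install openai markdown
import openai
from markdown import markdown

openai.base_url = "https://text.pollinations.ai/openai"  # from the diff
openai.api_key = "aaa"  # placeholder key, as committed; not validated here

messages = [
    # Illustrative, shortened version of the committed system prompt.
    {"role": "system", "content": (
        "You draw pictures on request. Embed each image as "
        "![](https://image.pollinations.ai/prompt/{prompt}), "
        "where {prompt} is an English image prompt."
    )},
    {"role": "user", "content": "Draw a lighthouse at sunset."},  # sample turn
]

# One blocking request, mirroring the commit (token streaming was dropped);
# "openai" is the model alias used in the diff.
completion = openai.chat.completions.create(messages=messages, model="openai")
print(markdown(completion.choices[0].message.content))  # Markdown -> HTML, as respond() does

Two things worth noting about the committed version: respond() returns HTML via markdown(...) so Gradio renders the embedded image link, and on the next turn markdownify(...) converts that stored HTML reply back to Markdown before re-sending it to the model; meanwhile max_tokens, temperature, and top_p remain as ChatInterface sliders but are no longer passed to the completion call.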