Greff3 committed (verified)
Commit 64be49e · Parent(s): 2b5fc9a

Update app.py

Files changed (1): app.py (+23 -67)
app.py CHANGED
@@ -23,54 +23,26 @@ client = OpenAI(
 
 # Create supported models
 model_links = {
-    "Mistral-Nemo-Instruct-2407": "mistralai/Mistral-Nemo-Instruct-2407",
-    "Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
-    "Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
-    "Meta-Llama-3.1-405B-Instruct-FP8": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
-    "Meta-Llama-3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
-    "Meta-Llama-3-70B-Instruct": "meta-llama/Meta-Llama-3-70B-Instruct",
+    "GPT-4o": "mistralai/Mistral-Nemo-Instruct-2407",
+    "GPT-4": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+    "GPT-3.5": "meta-llama/Meta-Llama-3.1-8B-Instruct",
     "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
-    "C4ai-command-r-plus": "CohereForAI/c4ai-command-r-plus",
-    "Aya-23-35B": "CohereForAI/aya-23-35B",
-    "Zephyr-orpo-141b-A35b-v0.1": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
-    "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-    "Codestral-22B-v0.1": "mistralai/Codestral-22B-v0.1",
-    "Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-    "Yi-1.5-34B-Chat": "01-ai/Yi-1.5-34B-Chat",
-    "Gemma-2-27b-it": "google/gemma-2-27b-it",
-    "Meta-Llama-2-70B-Chat-HF": "meta-llama/Llama-2-70b-chat-hf",
     "Meta-Llama-2-7B-Chat-HF": "meta-llama/Llama-2-7b-chat-hf",
     "Meta-Llama-2-13B-Chat-HF": "meta-llama/Llama-2-13b-chat-hf",
+    "Gemma-1.1-7b-it": "google/gemma-1.1-7b-it",
+    "Gemma-1.1-2b-it": "google/gemma-1.1-2b-it",
+    "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.1",
     "Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
     "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
-    "Falcon-7b-Instruct": "tiiuae/falcon-7b-instruct",
+    "Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+    "Phi-3-mini-4k-instruct": "microsoft/Phi-3-mini-4k-instruct",
+    "Yi-1.5-34B-Chat": "01-ai/Yi-1.5-34B-Chat",
     "Starchat2-15b-v0.1": "HuggingFaceH4/starchat2-15b-v0.1",
-    "Gemma-1.1-7b-it": "google/gemma-1.1-7b-it",
-    "Gemma-1.1-2b-it": "google/gemma-1.1-2b-it",
     "Zephyr-7B-Beta": "HuggingFaceH4/zephyr-7b-beta",
     "Zephyr-7B-Alpha": "HuggingFaceH4/zephyr-7b-alpha",
-    "Phi-3-mini-128k-instruct": "microsoft/Phi-3-mini-128k-instruct",
-    "Phi-3-mini-4k-instruct": "microsoft/Phi-3-mini-4k-instruct",
 }
 
-#Random dog images for error message
-random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
-              "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
-              "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
-              "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
-              "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
-              "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
-              "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
-              "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
-              "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
-              "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
-              "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
-              "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
-              "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
-
-
-
 def reset_conversation():
     '''
     Resets Conversation
@@ -79,29 +51,24 @@ def reset_conversation():
     st.session_state.messages = []
     return None
 
-
-
-
 # Define the available models
 models =[key for key in model_links.keys()]
 
 # Create the sidebar with the dropdown for model selection
-selected_model = st.sidebar.selectbox("Select Model", models)
-
-# Create a temperature slider
-temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
-
+selected_model = st.sidebar.selectbox("Выбрать модель GPT", models)
 
 #Add reset button to clear conversation
-st.sidebar.button('Reset Chat', on_click=reset_conversation) #Reset button
-
+st.sidebar.button('Новый чат', on_click=reset_conversation) #Reset button
 
-# Create model description
-st.sidebar.write(f"You're now chatting with **{selected_model}**")
-st.sidebar.markdown("*Generated content may be inaccurate or false.*")
-st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")
+# Create a temperature slider
+temp_values = st.sidebar.slider('Температура ChatGPT', 0.0, 1.0, (0.5))
+st.sidebar.markdown("Температура в ChatGPT влияет на качество и связность генерируемого текста.")
+st.sidebar.markdown("**Для оптимального результата рекомендуем выбирать температуру в диапазоне от 0,5 до 0,7**.")
 
 
+# Create model description
+st.sidebar.markdown("*Созданный контент может быть неточным.*")
+st.sidebar.markdown("\n Наш сайт: [GPT-ChatBot.ru](https://gpt-chatbot.ru/).")
 
 
 if "prev_option" not in st.session_state:
@@ -119,8 +86,8 @@ if st.session_state.prev_option != selected_model:
 repo_id = model_links[selected_model]
 
 
-st.subheader(f'TypeGPT.net - {selected_model}')
-# st.title(f'ChatBot Using {selected_model}')
+st.subheader(f'[GPT-ChatBot.ru](https://gpt-chatbot.ru/) с моделью {selected_model}')
+# st.title(f'GPT-ChatBot сейчас использует {selected_model}')
 
 # Set a default model
 if selected_model not in st.session_state:
@@ -139,7 +106,7 @@ for message in st.session_state.messages:
 
 
 # Accept user input
-if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
+if prompt := st.chat_input(f"Привет. Я {selected_model}. Как я могу вам помочь сегодня?"):
     # Display user message in chat message container
     with st.chat_message("user"):
         st.markdown(prompt)
@@ -165,20 +132,9 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
 
         except Exception as e:
             # st.empty()
-            response = "😵‍💫 Looks like someone unplugged something!\
-            \n Either the model space is being updated or something is down.\
-            \n\
-            \n Try again later. \
-            \n\
-            \n Here's a random pic of a 🐶:"
+            response = "Похоже, чат перегружен!\
+            \n Повторите свой запрос позже:( "
             st.write(response)
-            random_dog_pick = 'https://random.dog/'+ random_dog[np.random.randint(len(random_dog))]
-            st.image(random_dog_pick)
-            st.write("This was the error message:")
-            st.write(e)
-
-
-
 
 
 
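For orientation, here is a minimal, self-contained sketch of how the pieces touched by this commit fit together. The full app.py is not shown in the diff, so the client setup (base_url, the HF_TOKEN secret) and the streaming loop below are assumptions inferred from the context lines (`client = OpenAI(`, `repo_id = model_links[selected_model]`), not the repository's exact code:

```python
# Minimal sketch, NOT the repository's exact app.py: the endpoint URL,
# HF_TOKEN secret, and streaming loop are assumptions based on the
# context lines visible in this diff.
import os

import streamlit as st
from openai import OpenAI

# Assumed: the Hugging Face Inference API exposed through an
# OpenAI-compatible endpoint.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=os.environ.get("HF_TOKEN", ""),
)

# After this commit the display names are ChatGPT-style labels, but each
# label still resolves to an open model repo id on the Hub.
model_links = {
    "GPT-4o": "mistralai/Mistral-Nemo-Instruct-2407",
    "GPT-4": "meta-llama/Meta-Llama-3.1-70B-Instruct",
    "GPT-3.5": "meta-llama/Meta-Llama-3.1-8B-Instruct",
}

# Sidebar controls ("Выбрать модель GPT" = "Select GPT model",
# "Температура ChatGPT" = "ChatGPT temperature").
selected_model = st.sidebar.selectbox("Выбрать модель GPT", list(model_links))
temp_values = st.sidebar.slider("Температура ChatGPT", 0.0, 1.0, 0.5)
repo_id = model_links[selected_model]

if "messages" not in st.session_state:
    st.session_state.messages = []

# Chat input placeholder ("Привет. Я ..." = "Hi, I'm ..., how can I help you today?").
if prompt := st.chat_input(f"Привет. Я {selected_model}. Как я могу вам помочь сегодня?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        try:
            # Stream the completion from the selected backend model.
            stream = client.chat.completions.create(
                model=repo_id,
                messages=st.session_state.messages,
                temperature=temp_values,
                stream=True,
            )
            response = st.write_stream(stream)
        except Exception:
            # The simplified error message introduced by this commit
            # ("Похоже, чат перегружен!" = "Looks like the chat is overloaded!").
            response = "Похоже, чат перегружен!\n Повторите свой запрос позже:( "
            st.write(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
```

In short, the commit only swaps display labels, sidebar copy, and the error text; requests still go to the open models listed in `model_links`.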