""" TypeGPT
@author: NiansuhAI
@email: niansuhtech@gmail.com
"""
import os

import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()
# Initialize the OpenAI-compatible client pointed at the Hugging Face Inference API
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("API_KEY"),  # Hugging Face token, loaded from .env
)
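# A minimal .env file for this app might look like the following sketch (the
# variable name API_KEY matches the lookup above; the token value is a placeholder):
#
#   API_KEY=hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx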
# Map the display names shown in the UI to Hugging Face model repo IDs
model_links = {
    "GPT-4o": "mistralai/Mistral-Nemo-Instruct-2407",
    "GPT-4": "meta-llama/Meta-Llama-3.1-70B-Instruct",
    "GPT-3.5": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
    "Meta-Llama-2-13B-Chat-HF": "meta-llama/Llama-2-13b-chat-hf",
    "Meta-Llama-2-7B-Chat-HF": "meta-llama/Llama-2-7b-chat-hf",
    "Gemini-1.3-2b-it": "google/gemma-1.1-2b-it",
    "Gemini-1.3-7b-it": "google/gemma-1.1-7b-it",
    "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.1",
    "Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
    "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
    "Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    "Starchat2-15b-v0.1": "HuggingFaceH4/starchat2-15b-v0.1",
}
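# Note: several display names on the left (e.g. "GPT-4o", "Gemini-1.3-2b-it")
# are UI aliases only; requests are actually served by the open models given
# by the repo IDs on the right-hand side of this mapping.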
def reset_conversation():
    '''
    Resets the conversation and chat history.
    '''
    st.session_state.conversation = []
    st.session_state.messages = []
    return None
# Define the available models
models = list(model_links.keys())
# Create the sidebar dropdown for model selection
selected_model = st.sidebar.selectbox("Select a GPT model", models)

# Add a reset button to clear the conversation
st.sidebar.button('New chat', on_click=reset_conversation)
# Create a temperature slider
temp_values = st.sidebar.slider('GPT-ChatBot temperature', 0.0, 1.0, 0.5)
st.sidebar.markdown("The temperature setting affects the quality and coherence of the generated text.")
st.sidebar.markdown("**For best results, we recommend a temperature between 0.5 and 0.7.**")

# Model description and site link
st.sidebar.markdown("*Generated content may be inaccurate.*")
st.sidebar.markdown("\n Our site: [GPT-ChatBot.ru](https://gpt-chatbot.ru/).")
if "prev_option" not in st.session_state:
st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
st.session_state.messages = []
# st.write(f"Changed to {selected_model}")
st.session_state.prev_option = selected_model
reset_conversation()
# Pull in the model we want to use
repo_id = model_links[selected_model]

st.subheader(f'[GPT-ChatBot.ru](https://gpt-chatbot.ru/) with the {selected_model} model')
# Remember the selected model in session state
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}. How can I help you today?"):

    # Display the user message in a chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add the user message to the chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display the assistant response in a chat message container
    with st.chat_message("assistant"):
        try:
            # Stream the completion from the Hugging Face Inference API
            stream = client.chat.completions.create(
                model=repo_id,
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temp_values,
                stream=True,
                max_tokens=3000,
            )
            response = st.write_stream(stream)
        except Exception:
            response = ("It looks like the chat is overloaded!"
                        "\n Please retry your request later :( ")
            st.write(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
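# To run this app locally with the standard Streamlit CLI (the filename app.py
# below is an assumption -- substitute whatever this file is actually named):
#
#   pip install streamlit openai python-dotenv
#   streamlit run app.py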