Spaces:
Running
Running
File size: 5,499 Bytes
143876a 6267abf 143876a cf2d6b2 aae122c 143876a 0a52d1d e515e58 c413a53 e515e58 cf2d6b2 144ef01 8eb2972 144ef01 7e04ee5 3c7180f 144ef01 e515e58 6f52ead 63b723e 8eb2972 63b723e 8eb2972 63b723e adbbf1a 3c7180f 98b2a1b 3c7180f adbbf1a 1528b5f 156644b adbbf1a 3c7180f adbbf1a 3c7180f 4292e9c adbbf1a 143876a cf2d6b2 8eb2972 143876a cf2d6b2 143876a cf2d6b2 143876a 4292e9c cf2d6b2 143876a cf2d6b2 f99d7c3 143876a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 |
import gradio as gr
from huggingface_hub import InferenceClient
from langchain_community.chat_models import ChatOpenAI
from langchain.chains.retrieval_qa.base import RetrievalQA
from langchain_community.embeddings import OpenAIEmbeddings
from langchain.schema import HumanMessage, SystemMessage
from langchain_community.document_loaders import DirectoryLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
import requests
from langchain_core.prompts import PromptTemplate
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
import gradio as gr
from openai import OpenAI
import os
TOKEN = os.getenv("HF_TOKEN")
def load_embedding_mode():
# embedding_model_dict = {"m3e-base": "/home/xiongwen/m3e-base"}
encode_kwargs = {"normalize_embeddings": False}
model_kwargs = {"device": 'cpu'}
return HuggingFaceEmbeddings(model_name="BAAI/bge-m3",
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs)
client = OpenAI(
base_url="https://api-inference.huggingface.co/v1/",
api_key=TOKEN,
)
def qwen_api(user_message, top_p=0.9,temperature=0.7, system_message='', max_tokens=1024, gradio_history=[]):
history = []
if gradio_history:
for message in history:
if message:
history.append({"role": "user", "content": message[0]})
history.append({"role": "assistant", "content": message[1]})
if system_message!='':
history.append({'role': 'system', 'content': system_message})
history.append({"role": "user", "content": user_message})
response = ""
for message in client.chat.completions.create(
model="meta-llama/Meta-Llama-3-8B-Instruct",
# model="Qwen/Qwen1.5-4B-Chat",
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
messages=history,
):
token = message.choices[0].delta.content
response += token
return response
os.environ["OPENAI_API_BASE"] = "https://api-inference.huggingface.co/v1/"
os.environ["OPENAI_API_KEY"] = TOKEN
embedding = load_embedding_mode()
db = Chroma(persist_directory='./VecterStore2_512_txt/VecterStore2_512_txt', embedding_function=embedding)
prompt_template = """
{context}
The above content is a form of biological background knowledge. Please answer the questions according to the above content.
Question: {question}
Please be sure to answer the questions according to the background knowledge and attach the doi number of the information source when answering.
Answer in English:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}
retriever = db.as_retriever()
def langchain_chat(message, temperature, top_p, max_tokens):
llm = ChatOpenAI(
model="meta-llama/Meta-Llama-3-8B-Instruct",
# model="Qwen/Qwen1.5-4B-Chat",
temperature=temperature,
top_p=top_p,
max_tokens=max_tokens)
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
chain_type_kwargs=chain_type_kwargs,
return_source_documents=True
)
response = qa.invoke(message)['result']
return response
def chat(
message,
history: list[tuple[str, str]],
system_message,
max_tokens,
temperature,
top_p,
):
if len(history) == 0:
response = langchain_chat(message, temperature, top_p, max_tokens)
else:
response = qwen_api(message, gradio_history=history, max_tokens=max_tokens, top_p=top_p, temperature=temperature)
print(response)
yield response
return response
def respond(
message,
history: list[tuple[str, str]],
system_message,
max_tokens,
temperature,
top_p,
):
messages = [{"role": "system", "content": system_message}]
for val in history:
if val[0]:
messages.append({"role": "user", "content": val[0]})
if val[1]:
messages.append({"role": "assistant", "content": val[1]})
messages.append({"role": "user", "content": message})
response = ""
for message in client.chat.completions.create(
model="meta-llama/Meta-Llama-3-8B-Instruct",
# model="Qwen/Qwen1.5-4B-Chat",
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
messages=messages,
):
token = message.choices[0].delta.content
response += token
yield response
chatbot = gr.Chatbot(height=600)
demo = gr.ChatInterface(
fn=chat,
fill_height=True,
chatbot=chatbot,
additional_inputs=[
gr.Textbox(label="System message"),
gr.Slider(minimum=1, maximum=1024, value=512, step=1, label="Max new tokens"),
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.95,
step=0.05,
label="Top-p (nucleus sampling)",
),
],
)
if __name__ == "__main__":
demo.launch() |