Commit 84e25a6
root committed

Parent: 010e6f1

add app

Files changed:
- app.py +117 -0
- backend/__pycache__/query_llm.cpython-310.pyc +0 -0
- backend/__pycache__/semantic_search.cpython-310.pyc +0 -0
- backend/query_llm.py +128 -0
- backend/semantic_search.py +39 -0
- requirements.txt +7 -0
- templates/template.j2 +8 -0
- templates/template_html.j2 +102 -0
app.py
ADDED
@@ -0,0 +1,117 @@
"""
Credit to Derek Thomas, derek@huggingface.co
"""
import os
import logging
from pathlib import Path
from time import perf_counter

import gradio as gr
from jinja2 import Environment, FileSystemLoader

from backend.query_llm import generate_hf, generate_openai
from backend.semantic_search import retrieve, rerank


TOP_N = int(os.getenv("TOP_N", 12))
TOP_K = int(os.getenv("TOP_K", 4))

proj_dir = Path(__file__).parent

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Set up the template environment with the templates directory
env = Environment(loader=FileSystemLoader(proj_dir / 'templates'))

# Load the templates directly from the environment
template = env.get_template('template.j2')
template_html = env.get_template('template_html.j2')


def add_text(history, text):
    history = [] if history is None else history
    history = history + [(text, None)]
    return history, gr.Textbox(value="", interactive=False)


def bot(history, api_kind):
    query = history[-1][0]

    if not query:
        raise gr.Error("Please submit a non-empty string as a prompt")

    logger.info('Retrieving documents...')
    # Retrieve documents relevant to the query
    retrieval_start = perf_counter()

    documents = retrieve(query, TOP_N)

    retrieval_time = perf_counter() - retrieval_start
    logger.info(f'Finished retrieving documents in {round(retrieval_time, 2)} seconds...')

    logger.info('Reranking documents...')
    # Rerank the retrieved documents
    rerank_start = perf_counter()

    documents = rerank(query, documents, TOP_K)

    rerank_time = perf_counter() - rerank_start
    logger.info(f'Finished reranking documents in {round(rerank_time, 2)} seconds...')

    # Create the prompt (plain text for the model, HTML for display)
    prompt = template.render(documents=documents, query=query)
    prompt_html = template_html.render(documents=documents, query=query)

    if api_kind == "HuggingFace":
        generate_fn = generate_hf
    elif api_kind == "OpenAI":
        generate_fn = generate_openai
    else:
        raise gr.Error(f"API {api_kind} is not supported")

    # Stream the response: each value from generate_fn is the accumulated text so far
    history[-1][1] = ""
    for character in generate_fn(prompt, history[:-1]):
        history[-1][1] = character
        yield history, prompt_html


with gr.Blocks() as demo:
    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        avatar_images=('https://aui.atlassian.com/aui/8.8/docs/images/avatar-person.svg',
                       'https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg'),
        bubble_full_width=False,
        show_copy_button=True,
        show_share_button=True,
    )

    with gr.Row():
        txt = gr.Textbox(
            scale=3,
            show_label=False,
            placeholder="Enter text and press enter",
            container=False,
        )
        txt_btn = gr.Button(value="Submit text", scale=1)

    api_kind = gr.Radio(choices=["HuggingFace", "OpenAI"], value="HuggingFace")

    prompt_html = gr.HTML()

    # Turn off interactivity while generating if you click
    txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, [chatbot, api_kind], [chatbot, prompt_html])

    # Turn it back on
    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)

    # Turn off interactivity while generating if you hit enter
    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, [chatbot, api_kind], [chatbot, prompt_html])

    # Turn it back on
    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)

demo.queue()
demo.launch(debug=True)
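
Note: both generate_hf and generate_openai (defined below in backend/query_llm.py) yield the accumulated output so far rather than per-token deltas, which is why bot() can simply overwrite history[-1][1] on every step. A minimal stand-in that honors the same contract, e.g. for exercising the UI without API keys (hypothetical, not part of this commit):

def generate_stub(prompt, history):
    # Yields the accumulated text so far, matching generate_hf/generate_openai.
    output = ""
    for token in ["This ", "is ", "a ", "stub ", "answer."]:
        output += token
        yield output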
backend/__pycache__/query_llm.cpython-310.pyc
ADDED
Binary file (4.03 kB)
backend/__pycache__/semantic_search.cpython-310.pyc
ADDED
Binary file (1.18 kB)
backend/query_llm.py
ADDED
@@ -0,0 +1,128 @@
import openai
import gradio as gr
import os

from typing import Any, Dict, Generator, List

from huggingface_hub import InferenceClient
from transformers import AutoTokenizer


OPENAI_KEY = os.getenv("OPENAI_API_KEY")
HF_TOKEN = os.getenv("HF_TOKEN")
TOKENIZER = AutoTokenizer.from_pretrained(os.getenv("HF_MODEL"))

HF_CLIENT = InferenceClient(
    os.getenv("HF_MODEL"),
    token=HF_TOKEN
)
OAI_CLIENT = openai.Client(api_key=OPENAI_KEY)

HF_GENERATE_KWARGS = {
    'temperature': max(float(os.getenv("TEMPERATURE", 0.9)), 1e-2),
    'max_new_tokens': int(os.getenv("MAX_NEW_TOKENS", 256)),
    'top_p': float(os.getenv("TOP_P", 0.6)),
    'repetition_penalty': float(os.getenv("REP_PENALTY", 1.2)),
    # bool() on a raw env string is True for any non-empty value, so parse it explicitly
    'do_sample': os.getenv("DO_SAMPLE", "true").lower() in ("1", "true", "yes")
}

OAI_GENERATE_KWARGS = {
    'temperature': max(float(os.getenv("TEMPERATURE", 0.9)), 1e-2),
    'max_tokens': int(os.getenv("MAX_NEW_TOKENS", 256)),
    'top_p': float(os.getenv("TOP_P", 0.6)),
    'frequency_penalty': max(-2, min(float(os.getenv("FREQ_PENALTY", 0)), 2))
}


def format_prompt(message: str, api_kind: str):
    """
    Formats the given message using a chat template.

    Args:
        message (str): The user message to be formatted.
        api_kind (str): LLM API provider, either "openai" or "hf".
    Returns:
        A list of message dicts for OpenAI, or a chat-templated string for Hugging Face.
    """

    # Create a list of message dictionaries with role and content
    messages: List[Dict[str, Any]] = [{'role': 'user', 'content': message}]

    if api_kind == "openai":
        return messages
    elif api_kind == "hf":
        return TOKENIZER.apply_chat_template(messages, tokenize=False)
    else:
        raise ValueError(f"API {api_kind} is not supported")


def generate_hf(prompt: str, history: list) -> Generator[str, None, None]:
    """
    Generate a sequence of tokens based on a given prompt and history using the
    Hugging Face Inference client.

    Args:
        prompt (str): The prompt for the text generation.
        history (list): Context or history for the text generation.
    Returns:
        Generator[str, None, None]: A generator yielding the accumulated generated text.
    """

    formatted_prompt = format_prompt(prompt, "hf")
    formatted_prompt = formatted_prompt.encode("utf-8").decode("utf-8")

    try:
        stream = HF_CLIENT.text_generation(
            formatted_prompt,
            **HF_GENERATE_KWARGS,
            stream=True,
            details=True,
            return_full_text=False
        )
        output = ""
        for response in stream:
            output += response.token.text
            yield output

    except Exception as e:
        if "Too Many Requests" in str(e):
            raise gr.Error(f"Too many requests: {str(e)}")
        elif "Authorization header is invalid" in str(e):
            raise gr.Error("Authentication error: HF token was either not provided or incorrect")
        else:
            raise gr.Error(f"Unhandled Exception: {str(e)}")


def generate_openai(prompt: str, history: list) -> Generator[str, None, None]:
    """
    Generate a sequence of tokens based on a given prompt and history using the
    OpenAI client.

    Args:
        prompt (str): The initial prompt for the text generation.
        history (list): Context or history for the text generation.
    Returns:
        Generator[str, None, None]: A generator yielding the accumulated generated text.
    """
    formatted_prompt = format_prompt(prompt, "openai")

    try:
        stream = OAI_CLIENT.chat.completions.create(
            model=os.getenv("OPENAI_MODEL"),
            messages=formatted_prompt,
            **OAI_GENERATE_KWARGS,
            stream=True
        )
        output = ""
        for chunk in stream:
            if chunk.choices[0].delta.content:
                output += chunk.choices[0].delta.content
                yield output

    except Exception as e:
        if "Too Many Requests" in str(e):
            raise gr.Error("ERROR: Too many requests on OpenAI client")
        elif "You didn't provide an API key" in str(e):
            raise gr.Error("Authentication error: OpenAI key was either not provided or incorrect")
        else:
            raise gr.Error(f"Unhandled Exception: {str(e)}")
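
A quick smoke test for this module might look like the following (hypothetical, not part of the commit; it assumes HF_MODEL and HF_TOKEN are already set in the environment, since both are read at import time):

from backend.query_llm import generate_hf

for partial in generate_hf("What does this Space do?", history=[]):
    print(partial)  # each iteration prints the accumulated answer so far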
backend/semantic_search.py
ADDED
@@ -0,0 +1,39 @@
import lancedb
import os
import gradio as gr
import numpy as np
from sentence_transformers import SentenceTransformer, CrossEncoder


db = lancedb.connect(".lancedb")

TABLE = db.open_table(os.getenv("TABLE_NAME"))
VECTOR_COLUMN = os.getenv("VECTOR_COLUMN", "vector")
TEXT_COLUMN = os.getenv("TEXT_COLUMN", "text")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", 32))

retriever = SentenceTransformer(os.getenv("EMB_MODEL"))
reranker = CrossEncoder(os.getenv("RERANK_MODEL"), max_length=512)


def retrieve(query, n):
    query_vec = retriever.encode(query)
    try:
        documents = TABLE.search(query_vec, vector_column_name=VECTOR_COLUMN).limit(n).to_list()
        documents = [doc[TEXT_COLUMN] for doc in documents]

        return documents

    except Exception as e:
        raise gr.Error(str(e))


def rerank(query, documents, k):
    query_doc_pairs = [[query, doc] for doc in documents]
    similarity_scores = reranker.predict(query_doc_pairs)
    # np.argsort is ascending; reverse with [::-1] so the highest scores come first.
    # (reversed() returns an iterator, which cannot be sliced below.)
    sim_scores_argsort = np.argsort(similarity_scores)[::-1]

    rerank_documents = []

    for idx in sim_scores_argsort[:k]:
        rerank_documents.append(documents[idx])
    return rerank_documents
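
Hypothetical usage mirroring the calls in app.py (not part of the commit; assumes TABLE_NAME points at a populated LanceDB table and EMB_MODEL/RERANK_MODEL name valid sentence-transformers checkpoints):

from backend.semantic_search import retrieve, rerank

query = "example question"                # placeholder query
candidates = retrieve(query, 12)          # TOP_N candidates by vector similarity
top_docs = rerank(query, candidates, 4)   # TOP_K kept after cross-encoder scoring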
requirements.txt
ADDED
@@ -0,0 +1,7 @@
lancedb==0.5.3
openai==1.11.1
sentence-transformers==2.3.1
tqdm==4.66.1
torch==2.1.1
transformers==4.37.2
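
Runtime configuration comes from environment variables rather than from requirements.txt. For reference, the names read across this commit are HF_MODEL, OPENAI_MODEL, TABLE_NAME, EMB_MODEL, RERANK_MODEL, the HF_TOKEN and OPENAI_API_KEY secrets, and the optional knobs TOP_N, TOP_K, TEMPERATURE, MAX_NEW_TOKENS, TOP_P, REP_PENALTY, FREQ_PENALTY, DO_SAMPLE, VECTOR_COLUMN, TEXT_COLUMN, and BATCH_SIZE. A sketch with placeholder values (the real ones live in the Space's settings and secrets, not in the source):

import os

# Placeholder values only; set real ones via the Space's settings/secrets.
os.environ.setdefault("HF_MODEL", "<hf-model-id>")           # read by backend/query_llm.py
os.environ.setdefault("OPENAI_MODEL", "<openai-model>")      # read by backend/query_llm.py
os.environ.setdefault("TABLE_NAME", "<lancedb-table>")       # read by backend/semantic_search.py
os.environ.setdefault("EMB_MODEL", "<embedding-model-id>")   # read by backend/semantic_search.py
os.environ.setdefault("RERANK_MODEL", "<cross-encoder-id>")  # read by backend/semantic_search.py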
templates/template.j2
ADDED
@@ -0,0 +1,8 @@
Instructions: Use the following unique documents in the Context section to answer the Query at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
Context:
{% for doc in documents %}
---
{{ doc }}
{% endfor %}
---
Query: {{ query }}
templates/template_html.j2
ADDED
@@ -0,0 +1,102 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Information Page</title>
    <link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap">
    <link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap">
    <style>
        * {
            font-family: "Source Sans Pro";
        }

        .instructions > * {
            color: #111 !important;
        }

        details.doc-box * {
            color: #111 !important;
        }

        .dark {
            background: #111;
            color: white;
        }

        .doc-box {
            padding: 10px;
            margin-top: 10px;
            background-color: #baecc2;
            border-radius: 6px;
            color: #111 !important;
            max-width: 700px;
            box-shadow: rgba(0, 0, 0, 0.2) 0px 1px 2px 0px;
        }

        .doc-full {
            margin: 10px 14px;
            line-height: 1.6rem;
        }

        .instructions {
            color: #111 !important;
            background: #b7bdfd;
            display: block;
            border-radius: 6px;
            padding: 6px 10px;
            line-height: 1.6rem;
            max-width: 700px;
            box-shadow: rgba(0, 0, 0, 0.2) 0px 1px 2px 0px;
        }

        .query {
            color: #111 !important;
            background: #ffbcbc;
            display: block;
            border-radius: 6px;
            padding: 6px 10px;
            line-height: 1.6rem;
            max-width: 700px;
            box-shadow: rgba(0, 0, 0, 0.2) 0px 1px 2px 0px;
        }
    </style>
</head>
<body>
<div class="prose svelte-1ybaih5" id="component-6">
    <h2>Prompt</h2>
    Below is the prompt that is given to the model. <hr>
    <h2>Instructions</h2>
    <span class="instructions">Use the following pieces of context to answer the question at the end.<br>If you don't know the answer, just say that you don't know, <span style="font-weight: bold;">don't try to make up an answer.</span></span><br>
    <h2>Context</h2>
    {% for doc in documents %}
    <details class="doc-box">
        <summary>
            <b>Doc {{ loop.index }}:</b> <span class="doc-short">{{ doc[:100] }}...</span>
        </summary>
        <div class="doc-full">{{ doc }}</div>
    </details>
    {% endfor %}

    <h2>Query</h2>
    <span class="query">{{ query }}</span>
</div>

<script>
    document.addEventListener("DOMContentLoaded", function() {
        const detailsElements = document.querySelectorAll('.doc-box');

        detailsElements.forEach(detail => {
            detail.addEventListener('toggle', function() {
                const docShort = this.querySelector('.doc-short');
                if (this.open) {
                    docShort.style.display = 'none';
                } else {
                    docShort.style.display = 'inline';
                }
            });
        });
    });
</script>
</body>
</html>