timeki committed
Commit
26ed9d3
1 Parent(s): 8298c5b
Files changed (8)
  1. .gitignore +5 -0
  2. README.md +1 -1
  3. app.py +198 -56
  4. config.py +0 -0
  5. prompt.py +53 -0
  6. requirements.txt +13 -1
  7. style.css +598 -0
  8. utils.py +81 -0
.gitignore ADDED
@@ -0,0 +1,5 @@
+ __pycache__/
+ .env
+ .gradio
+ data/
+ faiss_index/
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 💬
  colorFrom: yellow
  colorTo: purple
  sdk: gradio
- sdk_version: 5.0.1
+ sdk_version: 5.0.2
  app_file: app.py
  pinned: false
  ---
app.py CHANGED
@@ -1,64 +1,206 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
- if __name__ == "__main__":
-     demo.launch()
+ import os
+
+ import pandas as pd
  import gradio as gr
+
+ from langchain.memory import ConversationBufferMemory
+ from langchain.embeddings import OpenAIEmbeddings
+ from langchain.vectorstores import FAISS
+ from langchain.schema import Document
+
+ from utils import make_html_source, make_pairs, get_llm, reset_textbox
+ from prompt import PROMPT_INTERPRATE_INTENTION, ANSWER_PROMPT
+
+ try:
+     from dotenv import load_dotenv
+     load_dotenv()
+ except Exception:
+     pass
+
+ # Load the OpenAI API key
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+ assert OPENAI_API_KEY, "Please set your OpenAI API key"
+
+ embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
+
+ new_vector_store = FAISS.load_local(
+     "faiss_index", embeddings, allow_dangerous_deserialization=True
+ )
+ retriever = new_vector_store.as_retriever()
+
+ llm = get_llm()
+
+ memory = ConversationBufferMemory(
+     return_messages=True, output_key="answer", input_key="question"
+ )
+
+
+ def make_qa_chain():
+     final_inputs = {
+         "context": lambda x: x["context"],
+         "question": lambda x: x["question"],
+     }
+     return final_inputs | ANSWER_PROMPT | llm
+
+
+ def load_documents_meeting(meeting_number):
+     # Load the meeting data from the Excel export
+     data_file_path = "../data/mfls.xlsx"
+     df = pd.read_excel(data_file_path)
+
+     df["meeting_number"] = df["Meeting"].apply(lambda x: x.split(" ")[0][:-2])
+     df_meeting = df[df["meeting_number"] == meeting_number]
+
+     def combine_title_and_content(row):
+         return f"{row['Meeting']} {row['Issues']} {row['Content']}"
+
+     df_meeting["combined"] = df_meeting.apply(combine_title_and_content, axis=1)
+
+     # Wrap each row in a Document so it can be passed as retrieval context
+     documents = [
+         Document(
+             page_content=row["combined"],
+             metadata={
+                 "Issues": row["Issues"],
+                 "Title": row["Title"],
+                 "meeting_number": row["Meeting"].split(" ")[0][:-2],
+                 "Agencies": row["Agencies"],
+                 "project": row["Projects"],
+             },
+         )
+         for _, row in df_meeting.iterrows()
+     ]
+     return documents
+
+
+ async def chat(
+     query: str,
+     history: list = [],
+ ):
+     """Take a query and a message history and, through a pipeline
+     (intent interpretation, retrieval, answering), yield tuples of
+     (messages in Gradio format, messages in LangChain format, source documents as HTML)."""
+     source_string = ""
+     gradio_format = make_pairs([a.content for a in history]) + [(query, "")]
+     qa_chain = make_qa_chain()
+
+     # Reset memory and replay the history into it
+     memory.clear()
+     for message in history:
+         memory.chat_memory.add_message(message)
+
+     inputs = {"question": query}
+
+     ## INTENT
+     intent = await llm.abatch([PROMPT_INTERPRATE_INTENTION.format_prompt(query=query)])
+     intent = intent[0].content
+     print("intent", intent)
+
+     ## RETRIEVER
+     if intent.split(" ")[0] == "meeting":
+         meeting_number = intent.split(" ")[-1]
+         sources = load_documents_meeting(meeting_number)
+     else:
+         sources = new_vector_store.search(query, search_type="similarity", k=5)
+
+     source_string = "\n\n".join([make_html_source(doc, i) for i, doc in enumerate(sources, 1)])
+
+     ## RAG
+     inputs_rag = {"question": query, "context": sources}
+
+     result = qa_chain.astream_log(inputs_rag)
+
+     reformulated_question_path_id = "/logs/ChatOpenAI/streamed_output_str/-"
+     retriever_path_id = "/logs/VectorStoreRetriever/final_output"
+     final_answer_path_id = "/streamed_output/-"
+
+     async for op in result:
+         op = op.ops[0]
+         # print(op["path"])
+         if op["path"] == reformulated_question_path_id:  # reformulated question
+             new_token = op["value"]  # str
+
+         elif op["path"] == retriever_path_id:  # documents
+             sources = op["value"]["documents"]  # List[Document]
+             source_string = "\n\n".join([make_html_source(doc, i) for i, doc in enumerate(sources, 1)])
+
+         elif op["path"] == final_answer_path_id:  # final answer
+             new_token = op["value"].content  # str
+             answer_yet = gradio_format[-1][1]
+             gradio_format[-1] = (query, answer_yet + new_token)
+
+         yield gradio_format, history, source_string
+
+     memory.save_context(inputs, {"answer": gradio_format[-1][1]})
+     yield gradio_format, memory.load_memory_variables({})["history"], source_string
+
+
+ ### GRADIO UI
+
+ theme = gr.themes.Soft(
+     primary_hue="sky",
+     font=[gr.themes.GoogleFont("Poppins"), "ui-sans-serif", "system-ui", "sans-serif"],
+ )
+
+ demo_name = "UNEP Q&A"
+
+ with gr.Blocks(title=f"{demo_name}", theme=theme, css_paths=os.getcwd() + "/style.css") as demo:
+
+     gr.Markdown(f"<h1><center>{demo_name}</center></h1>")
+
+     with gr.Row():
+         with gr.Column(scale=2):
+             chatbot = gr.Chatbot(
+                 value=[("", "Hello! How can I help you today?")],
+                 elem_id="chatbot",
+                 label=f"{demo_name} chatbot",
+                 show_label=False,
+             )
+             state = gr.State([])
+
+             with gr.Row():
+                 ask = gr.Textbox(
+                     show_label=False,
+                     placeholder="Input your question then press enter",
+                 )
+
+         with gr.Column(scale=1, variant="panel"):
+             gr.Markdown("### Sources")
+             sources_textbox = gr.HTML(show_label=False)
+
+     ask.submit(
+         fn=chat,
+         inputs=[
+             ask,
+             state,
+         ],
+         outputs=[chatbot, state, sources_textbox],
+     )
+
+     ask.submit(reset_textbox, [], [ask])
+
+ demo.queue()
+ demo.launch(
+     share=True,
+     debug=True,
+ )
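
For a quick sanity check of the answering chain assembled in `make_qa_chain()`, the sketch below invokes the same `inputs | ANSWER_PROMPT | llm` composition synchronously with `invoke` instead of `astream_log`. It assumes `OPENAI_API_KEY` is set and that `prompt.py` and `utils.py` from this commit are importable; the sample Document text is made up for illustration and is not part of the commit.

# Minimal, non-streaming sketch of the RAG step used in chat() above.
# Assumes OPENAI_API_KEY is set; the Document content is illustrative only.
from langchain.schema import Document
from prompt import ANSWER_PROMPT
from utils import get_llm

llm = get_llm(streaming=False)

docs = [Document(page_content="Decision 94/12: the report was deferred to the next meeting.",
                 metadata={"meeting_number": "94"})]

# Same composition as make_qa_chain(): a dict of selectors piped into the prompt and the LLM.
chain = {
    "context": lambda x: x["context"],
    "question": lambda x: x["question"],
} | ANSWER_PROMPT | llm

answer = chain.invoke({"question": "What happened to the report?", "context": docs})
print(answer.content)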
config.py ADDED
File without changes
prompt.py ADDED
@@ -0,0 +1,53 @@
+ """
+ Prompt configuration.
+ """
+ from datetime import datetime
+
+ from langchain_core.prompts import ChatPromptTemplate
+
+ interprate_question_sharepoint_template = """
+ whatever is asked, just answer only {{}}"""
+
+ PROMPT_INTERPRATE_INTENTION_SHAREPOINT = ChatPromptTemplate.from_template(
+     interprate_question_sharepoint_template
+ )
+
+ interprate_question_template = (
+     """You are an assistant that has to identify the subject of a question.
+     A user asks a question about meeting decisions.
+     If the question is about a particular meeting, identified by a meeting number, answer only 'meeting <meeting number>'.
+     Otherwise answer only 'other'.
+
+     Example:
+     Q: What decision was taken at the 123rd meeting?
+     R: meeting 123
+     Q: Give me an example of a decision that applied a penalty to a country?
+     R: other
+
+     """
+     "The question is the following: {query}."
+ )
+
+ PROMPT_INTERPRATE_INTENTION = ChatPromptTemplate.from_template(
+     interprate_question_template
+ )
+
+ current_date = datetime.now().strftime('%d/%m/%Y')
+ company_name = "UNEP"  # to change
+
+ answering_template = (
+     f"You are an AI Assistant by Ekimetrics for {company_name}. "
+     f"Your task is to help {company_name} employees. "
+     "You will be given a question and extracted parts of documents. "
+     "Provide a clear and structured answer based on the context provided. "
+     "When relevant, use bullet points and lists to structure your answers. "
+     "Whenever you use information from a document, reference it at the end of the sentence (ex: [doc 2]). "
+     "You don't have to use all documents, only if it makes sense in the conversation. "
+     "If no relevant information to answer the question is present in the documents, "
+     "just say you don't have enough information to answer.\n\n"
+     "{context}\n\n"
+     "Question: {question}"
+ )
+ ANSWER_PROMPT = ChatPromptTemplate.from_template(answering_template)
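
As a rough illustration of how PROMPT_INTERPRATE_INTENTION is meant to route queries (mirroring the `llm.abatch` call in app.py), the sketch below formats the prompt for two queries and prints the replies. It assumes `OPENAI_API_KEY` is set; the expected outputs ("meeting 123" and "other") depend on the model actually following the instructions above.

# Sketch: route a query with the intent prompt, as app.py does.
# Assumes OPENAI_API_KEY is set; sample queries are illustrative only.
from prompt import PROMPT_INTERPRATE_INTENTION
from utils import get_llm

llm = get_llm(streaming=False)

for query in [
    "What decision was taken at the 123rd meeting?",
    "Give me an example of a decision that applied a penalty to a country.",
]:
    reply = llm.invoke(PROMPT_INTERPRATE_INTENTION.format_prompt(query=query))
    print(query, "->", reply.content)  # e.g. "meeting 123" or "other"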
requirements.txt CHANGED
@@ -1 +1,13 @@
- huggingface_hub==0.25.2
+ huggingface_hub==0.25.2
+ # gradio==5.0.2
+ # python-dotenv==1.0.0
+ # langchain==0.2.1
+ # langchain-community==0.2
+ # langchain_openai==0.1.7
+ # faiss-cpu==1.9.0
+ python-dotenv==1.0.1
+ gradio==5.0.2
+ langchain==0.3.3
+ langchain-community==0.3.2
+ langchain-openai==0.2.2
+ faiss-cpu==1.9.0
style.css ADDED
@@ -0,0 +1,598 @@
+
+ /* :root {
+     --user-image: url('https://ih1.redbubble.net/image.4776899543.6215/st,small,507x507-pad,600x600,f8f8f8.jpg');
+ } */
+ .avatar-container.svelte-1x5p6hu:not(.thumbnail-item) img {
+     width: 100%;
+     height: 100%;
+     object-fit: cover;
+     border-radius: 50%;
+     padding: 0px;
+     margin: 0px;
+ }
+
+ .gradio-container {
+     width: 100% !important;
+     max-width: 100% !important;
+ }
+
+ /* fix for huggingface infinite growth*/
+ main.flex.flex-1.flex-col {
+     max-height: 95vh !important;
+ }
+
+ button#show-figures{
+     /* Base styles */
+     background-color: #f5f5f5;
+     border: 1px solid #e0e0e0;
+     border-radius: 4px;
+     color: #333333;
+     cursor: pointer;
+     width: 100%;
+     text-align: center;
+ }
+
+ .warning-box {
+     background-color: #fff3cd;
+     border: 1px solid #ffeeba;
+     border-radius: 4px;
+     padding: 15px 20px;
+     font-size: 14px;
+     color: #856404;
+     display: inline-block;
+     margin-bottom: 15px;
+ }
+
+ .tip-box {
+     background-color: #f0f9ff;
+     border: 1px solid #80d4fa;
+     border-radius: 4px;
+     margin-top:20px;
+     padding: 15px 20px;
+     font-size: 14px;
+     display: inline-block;
+     margin-bottom: 15px;
+     width: auto;
+     color:black !important;
+ }
+
+ body.dark .warning-box * {
+     color:black !important;
+ }
+
+ body.dark .tip-box * {
+     color:black !important;
+ }
+
+ .tip-box-title {
+     font-weight: bold;
+     font-size: 14px;
+     margin-bottom: 5px;
+ }
+
+ .light-bulb {
+     display: inline;
+     margin-right: 5px;
+ }
+
+ .gr-box {border-color: #d6c37c}
+
+ #hidden-message{
+     display:none;
+ }
+
+ .message{
+     font-size:14px !important;
+ }
+
+ .card-content img {
+     display: block;
+     margin: auto;
+     max-width: 100%; /* Ensures the image is responsive */
+     height: auto;
+ }
+
+ a {
+     text-decoration: none;
+     color: inherit;
+ }
+
+ .doc-ref sup{
+     color:#dc2626!important;
+     /* margin-right:1px; */
+ }
+
+ .card {
+     background-color: white;
+     border-radius: 10px;
+     box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+     overflow: hidden;
+     display: flex;
+     flex-direction: column;
+     margin:20px;
+ }
+
+ .card-content {
+     padding: 20px;
+ }
+
+ .card-content h2 {
+     font-size: 14px !important;
+     font-weight: bold;
+     margin-bottom: 10px;
+     margin-top:0px !important;
+     color:#dc2626!important;
+ }
+
+ .card-content p {
+     font-size: 12px;
+     margin-bottom: 0;
+ }
+
+ .card-footer {
+     background-color: #f4f4f4;
+     font-size: 10px;
+     padding: 10px;
+     display: flex;
+     justify-content: space-between;
+     align-items: center;
+ }
+
+ .card-footer span {
+     flex-grow: 1;
+     text-align: left;
+     color: #999 !important;
+ }
+
+ .pdf-link {
+     display: inline-flex;
+     align-items: center;
+     margin-left: auto;
+     text-decoration: none!important;
+     font-size: 14px;
+ }
+
+ .message.user{
+     /* background-color:#7494b0 !important; */
+     border:none;
+     /* color:white!important; */
+ }
+
+ .message.bot{
+     /* background-color:#f2f2f7 !important; */
+     border:none;
+ }
+
+ /* .gallery-item > div:hover{
+     background-color:#7494b0 !important;
+     color:white!important;
+ }
+
+ .gallery-item:hover{
+     border:#7494b0 !important;
+ }
+
+ .gallery-item > div{
+     background-color:white !important;
+     color:#577b9b!important;
+ }
+
+ .label{
+     color:#577b9b!important;
+ } */
+
+ /* .paginate{
+     color:#577b9b!important;
+ } */
+
+ /* span[data-testid="block-info"]{
+     background:none !important;
+     color:#577b9b;
+ } */
+
+ /* Pseudo-element for the circularly cropped picture */
+ /* .message.bot::before {
+     content: '';
+     position: absolute;
+     top: -10px;
+     left: -10px;
+     width: 30px;
+     height: 30px;
+     background-image: var(--user-image);
+     background-size: cover;
+     background-position: center;
+     border-radius: 50%;
+     z-index: 10;
+ }
+ */
+
+ label.selected{
+     background:none !important;
+ }
+
+ #submit-button{
+     padding:0px !important;
+ }
+
+ @media screen and (min-width: 1024px) {
+     .gradio-container {
+         max-height: calc(100vh - 190px) !important;
+         overflow: hidden;
+     }
+     /* div#chatbot{
+         height:calc(100vh - 170px) !important;
+         max-height:calc(100vh - 170px) !important;
+     } */
+
+     div#tab-examples{
+         height:calc(100vh - 190px) !important;
+         overflow-y: scroll !important;
+         /* overflow-y: auto; */
+     }
+
+     div#sources-textbox{
+         height:calc(100vh - 190px) !important;
+         overflow-y: scroll !important;
+         /* overflow-y: auto !important; */
+     }
+
+     div#sources-figures{
+         height:calc(100vh - 300px) !important;
+         max-height: 90vh !important;
+         overflow-y: scroll !important;
+     }
+
+     div#tab-config{
+         height:calc(100vh - 190px) !important;
+         overflow-y: scroll !important;
+         /* overflow-y: auto !important; */
+     }
+
+     /* Force container to respect height limits */
+     .main-component{
+         contain: size layout;
+         overflow: hidden;
+     }
+
+     div#chatbot-row{
+         max-height:calc(100vh - 90px) !important;
+     }
+     /*
+     .max-height{
+         height:calc(100vh - 90px) !important;
+         max-height:calc(100vh - 90px) !important;
+         overflow-y: auto;
+     }
+     */
+ }
+
+ footer {
+     visibility: hidden;
+     display:none !important;
+ }
+
+ @media screen and (max-width: 767px) {
+     /* Your mobile-specific styles go here */
+
+     div#chatbot{
+         height:500px !important;
+     }
+
+     #submit-button{
+         padding:0px !important;
+         min-width: 80px;
+     }
+
+     /* This will hide all list items */
+     div.tab-nav button {
+         display: none !important;
+     }
+
+     /* This will show only the first list item */
+     div.tab-nav button:first-child {
+         display: block !important;
+     }
+
+     /* This will show only the second list item */
+     div.tab-nav button:nth-child(2) {
+         display: block !important;
+     }
+
+     #right-panel button{
+         display: block !important;
+     }
+
+     /* ... add other mobile-specific styles ... */
+ }
+
+ @media (prefers-color-scheme: dark) {
+     .card{
+         background-color: #374151;
+     }
+     .card-image > .card-content{
+         background-color: rgb(55, 65, 81) !important;
+     }
+
+     .card-footer {
+         background-color: #404652;
+     }
+
+     .container > .wrap{
+         background-color: #374151 !important;
+         color:white !important;
+     }
+     .card-content h2{
+         color:#e7754f !important;
+     }
+     .doc-ref sup{
+         color:rgb(235 109 35)!important;
+         /* margin-right:1px; */
+     }
+     .card-footer span {
+         color:white !important;
+     }
+ }
+
+ .doc-ref{
+     color:#dc2626!important;
+     margin-right:1px;
+ }
+
+ .tabitem{
+     border:none !important;
+ }
+
+ .other-tabs > div{
+     padding-left:40px;
+     padding-right:40px;
+     padding-top:10px;
+ }
+
+ .gallery-item > div{
+     white-space: normal !important; /* Allow the text to wrap */
+     word-break: break-word !important; /* Break words to prevent overflow */
+     overflow-wrap: break-word !important; /* Break long words if necessary */
+ }
+
+ span.chatbot > p > img{
+     margin-top:40px !important;
+     max-height: none !important;
+     max-width: 80% !important;
+     border-radius:0px !important;
+ }
+
+ .chatbot-caption{
+     font-size:11px;
+     font-style:italic;
+     color:#508094;
+ }
+
+ .ai-generated{
+     font-size:11px!important;
+     font-style:italic;
+     color:#73b8d4 !important;
+ }
+
+ .card-image > .card-content{
+     background-color:#f1f7fa;
+ }
+
+ .tab-nav > button.selected{
+     color:#4b8ec3;
+     font-weight:bold;
+     border:none;
+ }
+
+ .tab-nav{
+     border:none !important;
+ }
+
+ #input-textbox > label > textarea{
+     border-radius:40px;
+     padding-left:30px;
+     resize:none;
+ }
+
+ #input-message > div{
+     border:none;
+ }
+
+ #dropdown-samples{
+     /*! border:none !important; */
+     /*! border-width:0px !important; */
+     background:none !important;
+ }
+
+ #dropdown-samples > .container > .wrap{
+     background-color:white;
+ }
+
+ #tab-examples > div > .form{
+     border:none;
+     background:none !important;
+ }
+
+ .a-doc-ref{
+     text-decoration: none !important;
+ }
+
+ .dropdown {
+     position: relative;
+     display:inline-block;
+     margin-bottom: 10px;
+ }
+
+ .dropdown-toggle {
+     background-color: #f2f2f2;
+     color: black;
+     padding: 10px;
+     font-size: 16px;
+     cursor: pointer;
+     display: block;
+     width: 400px; /* Adjust width as needed */
+     position: relative;
+     display: flex;
+     align-items: center; /* Vertically center the contents */
+     justify-content: left;
+ }
+
+ .dropdown-toggle .caret {
+     content: "";
+     position: absolute;
+     right: 10px;
+     top: 50%;
+     border-left: 5px solid transparent;
+     border-right: 5px solid transparent;
+     border-top: 5px solid black;
+     transform: translateY(-50%);
+ }
+
+ input[type="checkbox"] {
+     display: none !important;
+ }
+
+ input[type="checkbox"]:checked + .dropdown-content {
+     display: block;
+ }
+
+ .dropdown-content {
+     display: none;
+     position: absolute;
+     background-color: #f9f9f9;
+     min-width: 300px;
+     box-shadow: 0 8px 16px 0 rgba(0,0,0,0.2);
+     z-index: 1;
+     padding: 12px;
+     border: 1px solid #ccc;
+ }
+
+ input[type="checkbox"]:checked + .dropdown-toggle + .dropdown-content {
+     display: block;
+ }
+
+ input[type="checkbox"]:checked + .dropdown-toggle .caret {
+     border-top: 0;
+     border-bottom: 5px solid black;
+ }
+
+ .loader {
+     border: 1px solid #d0d0d0 !important; /* Light grey background */
+     border-top: 1px solid #db3434 !important; /* Red color */
+     border-right: 1px solid #3498db !important; /* Blue color */
+     border-radius: 50%;
+     width: 20px;
+     height: 20px;
+     animation: spin 2s linear infinite;
+     display:inline-block;
+     margin-right:10px !important;
+ }
+
+ .checkmark{
+     color:green !important;
+     font-size:18px;
+     margin-right:10px !important;
+ }
+
+ @keyframes spin {
+     0% { transform: rotate(0deg); }
+     100% { transform: rotate(360deg); }
+ }
+
+ .relevancy-score{
+     margin-top:10px !important;
+     font-size:10px !important;
+     font-style:italic;
+ }
+
+ .score-green{
+     color:green !important;
+ }
+
+ .score-orange{
+     color:orange !important;
+ }
+
+ .score-red{
+     color:red !important;
+ }
+
+ /* Additional style for scrollable tab content */
+ div#tab-recommended_content {
+     overflow-y: auto; /* Enable vertical scrolling */
+     max-height: 80vh; /* Adjust height as needed */
+ }
+
+ /* Mobile specific adjustments */
+ @media screen and (max-width: 767px) {
+     div#tab-recommended_content {
+         max-height: 50vh; /* Reduce height for smaller screens */
+         overflow-y: auto;
+     }
+ }
+
+ /* Additional style for scrollable tab content */
+ div#tab-saved-graphs {
+     overflow-y: auto; /* Enable vertical scrolling */
+     max-height: 80vh; /* Adjust height as needed */
+ }
+
+ /* Mobile specific adjustments */
+ @media screen and (max-width: 767px) {
+     div#tab-saved-graphs {
+         max-height: 50vh; /* Reduce height for smaller screens */
+         overflow-y: auto;
+     }
+ }
+
+ .message-buttons-left.panel.message-buttons.with-avatar {
+     display: none;
+ }
+
+ /* Specific fixes for Hugging Face Space iframe */
+ .h-full {
+     height: auto !important;
+     min-height: 0 !important;
+ }
+
+ .space-content {
+     height: auto !important;
+     max-height: 100vh !important;
+     overflow: hidden;
+ }
utils.py ADDED
@@ -0,0 +1,81 @@
+ import os
+ from typing import List
+
+ import gradio as gr
+ from langchain.prompts.prompt import PromptTemplate
+ from langchain.schema import format_document
+ from langchain_openai import ChatOpenAI
+
+ DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
+
+
+ def make_pairs(lst):
+     """From a list of even length, make tuple pairs."""
+     return [(lst[i], lst[i + 1]) for i in range(0, len(lst), 2)]
+
+
+ def reset_textbox():
+     return gr.update(value="")
+
+
+ def _combine_documents(
+     docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
+ ):
+     doc_strings = [
+         f"Document {i}: \n'''\n{format_document(doc, document_prompt)}\n'''"
+         for i, doc in enumerate(docs, 1)
+     ]
+     return document_separator.join(doc_strings)
+
+
+ def _format_chat_history(chat_history: List) -> str:
+     # Alternate "Human:" / "Assistant:" prefixes over a flat list of messages.
+     turn = 1
+     buffer = []
+     for dialogue in chat_history:
+         buffer.append(("Human: " if turn else "Assistant: ") + dialogue.content)
+         turn ^= 1
+     return "\n".join(buffer) + "\n"
+
+
+ def get_llm(model="gpt-4o-mini", max_tokens=1024, temperature=0.0, streaming=True, timeout=30, **kwargs):
+     llm = ChatOpenAI(
+         model=model,
+         api_key=os.environ.get("OPENAI_API_KEY", None),
+         max_tokens=max_tokens,
+         streaming=streaming,
+         temperature=temperature,
+         timeout=timeout,
+         **kwargs,
+     )
+     return llm
+
+
+ def make_html_source(source, i):
+     meta = source.metadata
+     # content = source.page_content.split(":", 1)[1].strip()
+     content = source.page_content.strip()
+
+     card = f"""
+     <div class="card" id="doc{i}">
+         <div class="card-content">
+             <h2>Document {i} - Meeting {meta['meeting_number']} - title {meta['Title']} - Issues {meta['Issues']}</h2>
+             <p>{content}</p>
+         </div>
+     </div>
+     """
+     return card
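
A small, self-contained check of the helpers above that needs no API key; the Document metadata mirrors the keys make_html_source expects, with made-up values for illustration.

# Quick offline check of make_pairs and make_html_source; metadata values are invented.
from langchain.schema import Document
from utils import make_pairs, make_html_source

print(make_pairs(["hi", "hello", "thanks", "you're welcome"]))
# [('hi', 'hello'), ('thanks', "you're welcome")]

doc = Document(
    page_content="Decision text goes here.",
    metadata={"meeting_number": "94", "Title": "Funding", "Issues": "Reporting"},
)
print(make_html_source(doc, 1))  # renders the HTML "card" shown in the Sources panel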