Rahatara committed on
Commit 7279d6d
1 Parent(s): 6062c05

Update app.py

Files changed (1)
  1. app.py +136 -371
app.py CHANGED
@@ -1,388 +1,153 @@
 
 
 import gradio as gr
-import os
-from langchain_community.document_loaders import PyPDFLoader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import Chroma
-from langchain.chains import ConversationalRetrievalChain
-from langchain_community.embeddings import HuggingFaceEmbeddings
-from langchain_community.llms import HuggingFacePipeline
-from langchain.chains import ConversationChain
-from langchain.memory import ConversationBufferMemory
-from langchain_community.llms import HuggingFaceEndpoint
-from pathlib import Path
-import chromadb
-from unidecode import unidecode
-from transformers import AutoTokenizer
-import transformers
-import torch
-import tqdm
-import accelerate
-import re
-from huggingface_hub import InferenceClient
-from huggingface_hub import login, HfApi
-
-
-
-# Authenticate using the Hugging Face token stored as a secret
-def authenticate_hf():
-    token = os.getenv("HF")
-    if not token:
-        raise ValueError("Hugging Face token not found in environment variables.")
-
-    try:
-        login(token=token)
-        api = HfApi()
-        user_info = api.whoami(token=token)
-        print(f"Login successful. User info: {user_info}")
-    except Exception as e:
-        raise ValueError(f"Error during login: {e}")
-
-# Authenticate at the start of the script
-authenticate_hf()
-
-
-

-# default_persist_directory = './chroma_HF/'
-list_llm = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.1", \
-    "google/gemma-7b-it","google/gemma-2b-it", \
-    "HuggingFaceH4/zephyr-7b-beta", "HuggingFaceH4/zephyr-7b-gemma-v0.1", \
-    "meta-llama/Llama-2-7b-chat-hf", "microsoft/phi-2", \
-    "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "mosaicml/mpt-7b-instruct", "tiiuae/falcon-7b-instruct", \
-    "google/flan-t5-xxl"
-]
-list_llm_simple = [os.path.basename(llm) for llm in list_llm]
-
-# Load PDF document and create doc splits
-def load_doc(list_file_path, chunk_size, chunk_overlap):
-    # Processing for one document only
-    # loader = PyPDFLoader(file_path)
-    # pages = loader.load()
-    loaders = [PyPDFLoader(x) for x in list_file_path]
-    pages = []
-    for loader in loaders:
-        pages.extend(loader.load())
-    # text_splitter = RecursiveCharacterTextSplitter(chunk_size = 600, chunk_overlap = 50)
-    text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size = chunk_size,
-        chunk_overlap = chunk_overlap)
-    doc_splits = text_splitter.split_documents(pages)
-    return doc_splits
-
-
-# Create vector database
-def create_db(splits, collection_name):
-    embedding = HuggingFaceEmbeddings()
-    new_client = chromadb.EphemeralClient()
-    vectordb = Chroma.from_documents(
-        documents=splits,
-        embedding=embedding,
-        client=new_client,
-        collection_name=collection_name,
-        # persist_directory=default_persist_directory
-    )
-    return vectordb
-
-
-# Load vector database
-def load_db():
-    embedding = HuggingFaceEmbeddings()
-    vectordb = Chroma(
-        # persist_directory=default_persist_directory,
-        embedding_function=embedding)
-    return vectordb


-# Initialize langchain LLM chain
-def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
-    progress(0.1, desc="Initializing HF tokenizer...")
-    # HuggingFacePipeline uses local model
-    # Note: it will download model locally...
-    # tokenizer=AutoTokenizer.from_pretrained(llm_model)
-    # progress(0.5, desc="Initializing HF pipeline...")
-    # pipeline=transformers.pipeline(
-    #     "text-generation",
-    #     model=llm_model,
-    #     tokenizer=tokenizer,
-    #     torch_dtype=torch.bfloat16,
-    #     trust_remote_code=True,
-    #     device_map="auto",
-    #     # max_length=1024,
-    #     max_new_tokens=max_tokens,
-    #     do_sample=True,
-    #     top_k=top_k,
-    #     num_return_sequences=1,
-    #     eos_token_id=tokenizer.eos_token_id
-    # )
-    # llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': temperature})
-
-    # HuggingFaceHub uses HF inference endpoints
-    progress(0.5, desc="Initializing HF Hub...")
-    # Use of trust_remote_code as model_kwargs
-    # Warning: langchain issue
-    # URL: https://github.com/langchain-ai/langchain/issues/6080
-    if llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True}
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-            load_in_8bit = True,
-        )
-    elif llm_model in ["HuggingFaceH4/zephyr-7b-gemma-v0.1","mosaicml/mpt-7b-instruct"]:
-        raise gr.Error("LLM model is too large to be loaded automatically on free inference endpoint")
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-        )
-    elif llm_model == "microsoft/phi-2":
-        # raise gr.Error("phi-2 model requires 'trust_remote_code=True', currently not supported by langchain HuggingFaceHub...")
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-            trust_remote_code = True,
-            torch_dtype = "auto",
-        )
-    elif llm_model == "TinyLlama/TinyLlama-1.1B-Chat-v1.0":
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": 250, "top_k": top_k}
-            temperature = temperature,
-            max_new_tokens = 250,
-            top_k = top_k,
-        )
-    elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
-        raise gr.Error("Llama-2-7b-chat-hf model requires a Pro subscription...")
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
         )
-    else:
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
-            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
         )
-
-    progress(0.75, desc="Defining buffer memory...")
-    memory = ConversationBufferMemory(
-        memory_key="chat_history",
-        output_key='answer',
-        return_messages=True
-    )
-    # retriever=vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
-    retriever=vector_db.as_retriever()
-    progress(0.8, desc="Defining retrieval chain...")
-    qa_chain = ConversationalRetrievalChain.from_llm(
-        llm,
-        retriever=retriever,
-        chain_type="stuff",
-        memory=memory,
-        # combine_docs_chain_kwargs={"prompt": your_prompt})
-        return_source_documents=True,
-        #return_generated_question=False,
-        verbose=False,
-    )
-    progress(0.9, desc="Done!")
-    return qa_chain
-
-
-# Generate collection name for vector database
-#  - Use filepath as input, ensuring unicode text
-def create_collection_name(filepath):
-    # Extract filename without extension
-    collection_name = Path(filepath).stem
-    # Fix potential issues from naming convention
-    ## Remove space
-    collection_name = collection_name.replace(" ","-")
-    ## ASCII transliterations of Unicode text
-    collection_name = unidecode(collection_name)
-    ## Remove special characters
-    #collection_name = re.findall("[\dA-Za-z]*", collection_name)[0]
-    collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
-    ## Limit length to 50 characters
-    collection_name = collection_name[:50]
-    ## Minimum length of 3 characters
-    if len(collection_name) < 3:
-        collection_name = collection_name + 'xyz'
-    ## Enforce start and end as alphanumeric character
-    if not collection_name[0].isalnum():
-        collection_name = 'A' + collection_name[1:]
-    if not collection_name[-1].isalnum():
-        collection_name = collection_name[:-1] + 'Z'
-    print('Filepath: ', filepath)
-    print('Collection name: ', collection_name)
-    return collection_name
-
-
-# Initialize database
-def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
-    # Create list of documents (when valid)
-    list_file_path = [x.name for x in list_file_obj if x is not None]
-    # Create collection_name for vector database
-    progress(0.1, desc="Creating collection name...")
-    collection_name = create_collection_name(list_file_path[0])
-    progress(0.25, desc="Loading document...")
-    # Load document and create splits
-    doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
-    # Create or load vector database
-    progress(0.5, desc="Generating vector database...")
-    # global vector_db
-    vector_db = create_db(doc_splits, collection_name)
-    progress(0.9, desc="Done!")
-    return vector_db, collection_name, "Complete!"
-
-
-def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
-    # print("llm_option",llm_option)
-    llm_name = list_llm[llm_option]
-    print("llm_name: ",llm_name)
-    qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
-    return qa_chain, "Complete!"
-

-def format_chat_history(message, chat_history):
-    formatted_chat_history = []
-    for user_message, bot_message in chat_history:
-        formatted_chat_history.append(f"User: {user_message}")
-        formatted_chat_history.append(f"Assistant: {bot_message}")
-    return formatted_chat_history
-

-def conversation(qa_chain, message, history):
-    formatted_chat_history = format_chat_history(message, history)
-    #print("formatted_chat_history",formatted_chat_history)
-
-    # Generate response using QA chain
-    response = qa_chain({"question": message, "chat_history": formatted_chat_history})
-    response_answer = response["answer"]
-    if response_answer.find("Helpful Answer:") != -1:
-        response_answer = response_answer.split("Helpful Answer:")[-1]
-    response_sources = response["source_documents"]
-    response_source1 = response_sources[0].page_content.strip()
-    response_source2 = response_sources[1].page_content.strip()
-    response_source3 = response_sources[2].page_content.strip()
-    # Langchain sources are zero-based
-    response_source1_page = response_sources[0].metadata["page"] + 1
-    response_source2_page = response_sources[1].metadata["page"] + 1
-    response_source3_page = response_sources[2].metadata["page"] + 1
-    # print ('chat response: ', response_answer)
-    # print('DB source', response_sources)
-
-    # Append user message and response to chat history
-    new_history = history + [(message, response_answer)]
-    # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
-    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
-
-
-def upload_file(file_obj):
-    list_file_path = []
-    for idx, file in enumerate(file_obj):
-        file_path = file_obj.name
-        list_file_path.append(file_path)
-    # print(file_path)
-    # initialize_database(file_path, progress)
-    return list_file_path
-
-
-def demo():
-    with gr.Blocks(theme="base") as demo:
-        vector_db = gr.State()
-        qa_chain = gr.State()
-        collection_name = gr.State()
-
-
-        with gr.Tab("Step 1 - Upload PDF"):
-            with gr.Row():
-                document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
-            # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
-
-        with gr.Tab("Step 2 - Process document"):
-            with gr.Row():
-                db_btn = gr.Radio(["ChromaDB"], label="Vector database type", value = "ChromaDB", type="index", info="Choose your vector database")
-            with gr.Accordion("Advanced options - Document text splitter", open=False):
-                with gr.Row():
-                    slider_chunk_size = gr.Slider(minimum = 100, maximum = 1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
                 with gr.Row():
-                    slider_chunk_overlap = gr.Slider(minimum = 10, maximum = 200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
-            with gr.Row():
-                db_progress = gr.Textbox(label="Vector database initialization", value="None")
-            with gr.Row():
-                db_btn = gr.Button("Generate vector database")
-
-        with gr.Tab("Step 3 - Initialize QA chain"):
-            with gr.Row():
-                llm_btn = gr.Radio(list_llm_simple, \
-                    label="LLM models", value = list_llm_simple[0], type="index", info="Choose your LLM model")
-            with gr.Accordion("Advanced options - LLM model", open=False):
                 with gr.Row():
-                    slider_temperature = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
                 with gr.Row():
-                    slider_maxtokens = gr.Slider(minimum = 224, maximum = 4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
                 with gr.Row():
-                    slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
-            with gr.Row():
-                llm_progress = gr.Textbox(value="None",label="QA chain initialization")
-            with gr.Row():
-                qachain_btn = gr.Button("Initialize Question Answering chain")

-        with gr.Tab("Step 4 - Chatbot"):
-            chatbot = gr.Chatbot(height=300)
-            with gr.Accordion("Advanced - Document references", open=False):
-                with gr.Row():
-                    doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
-                    source1_page = gr.Number(label="Page", scale=1)
-                with gr.Row():
-                    doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
-                    source2_page = gr.Number(label="Page", scale=1)
-                with gr.Row():
-                    doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
-                    source3_page = gr.Number(label="Page", scale=1)
-            with gr.Row():
-                msg = gr.Textbox(placeholder="Type message (e.g. 'What is this document about?')", container=True)
-            with gr.Row():
-                submit_btn = gr.Button("Submit message")
-                clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")
-
-        # Preprocessing events
-        #upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
-        db_btn.click(initialize_database, \
-            inputs=[document, slider_chunk_size, slider_chunk_overlap], \
-            outputs=[vector_db, collection_name, db_progress])
-        qachain_btn.click(initialize_LLM, \
-            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], \
-            outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0,"",0], \
-            inputs=None, \
-            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
-            queue=False)

-        # Chatbot events
-        msg.submit(conversation, \
-            inputs=[qa_chain, msg, chatbot], \
-            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
-            queue=False)
-        submit_btn.click(conversation, \
-            inputs=[qa_chain, msg, chatbot], \
-            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
-            queue=False)
-        clear_btn.click(lambda:[None,"",0,"",0,"",0], \
-            inputs=None, \
-            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
-            queue=False)
-    demo.queue().launch(debug=True)


-if __name__ == "__main__":
-    demo()
 
+
+from typing import Any
 import gradio as gr
+from langchain_openai import OpenAIEmbeddings
 from langchain_community.vectorstores import Chroma

+from langchain.chains import ConversationalRetrievalChain
+from langchain_openai import ChatOpenAI

+from langchain_community.document_loaders import PyMuPDFLoader

+import fitz
+from PIL import Image
+import os
+import re
+import openai
+
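+# The OpenAI key is read from the OPENAI_API_KEY environment variable (assumed to be
+# set, e.g. as a Space secret) rather than hardcoded in the source.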
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+
+def add_text(history, text: str):
+    if not text:
+        raise gr.Error("Enter text")
+    history = history + [(text, "")]
+    return history
+
+
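+# MyApp holds the per-session state: the retrieval chain, the running chat history,
+# the page index (N) of the last cited source, and a counter so the chain is built
+# only once per uploaded PDF.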
+class MyApp:
+    def __init__(self) -> None:
+        self.OPENAI_API_KEY: str = openai.api_key
+        self.chain = None
+        self.chat_history: list = []
+        self.N: int = 0
+        self.count: int = 0
+
+    def __call__(self, file: str) -> Any:
+        if self.count == 0:
+            self.chain = self.build_chain(file)
+            self.count += 1
+        return self.chain
+
+    def process_file(self, file: str):
+        loader = PyMuPDFLoader(file.name)
+        documents = loader.load()
+        pattern = r"/([^/]+)$"
+        match = re.search(pattern, file.name)
+        try:
+            file_name = match.group(1)
+        except AttributeError:
+            # no match in the path; fall back to the file's basename
+            file_name = os.path.basename(file.name)
+
+        return documents, file_name
+
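+    # build_chain embeds the document into an in-memory Chroma collection named after
+    # the file and wraps it in a ConversationalRetrievalChain that retrieves a single
+    # chunk (k=1) and returns it with the answer.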
+    def build_chain(self, file: str):
+        documents, file_name = self.process_file(file)
+        # Load embeddings model
+        embeddings = OpenAIEmbeddings(openai_api_key=self.OPENAI_API_KEY)
+        pdfsearch = Chroma.from_documents(
+            documents,
+            embeddings,
+            collection_name=file_name,
         )
+        chain = ConversationalRetrievalChain.from_llm(
+            ChatOpenAI(temperature=0.0, openai_api_key=self.OPENAI_API_KEY),
+            retriever=pdfsearch.as_retriever(search_kwargs={"k": 1}),
+            return_source_documents=True,
         )
+        return chain
 
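+# get_response queries the chain, records the page of the top retrieved chunk in
+# app.N, and streams the answer into the chat window character by character.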
+def get_response(history, query, file):
+    if not file:
+        raise gr.Error(message="Upload a PDF")
+    chain = app(file)
+    result = chain(
+        {"question": query, "chat_history": app.chat_history}, return_only_outputs=True
+    )
+    app.chat_history += [(query, result["answer"])]
+    app.N = list(result["source_documents"][0])[1][1]["page"]
+    for char in result["answer"]:
+        history[-1][-1] += char
+        yield history, ""
+
+
+def render_file(file):
+    doc = fitz.open(file.name)
+    page = doc[app.N]
+    # Render the page as a PNG image with a resolution of 150 DPI
+    pix = page.get_pixmap(dpi=150)
+    image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+    return image
+
+
+def purge_chat_and_render_first(file):
+    print("purge_chat_and_render_first")
+    # Purges the previous chat session so that the bot has no concept of previous documents
+    app.chat_history = []
+    app.count = 0
+
+    # Use PyMuPDF to render the first page of the uploaded document
+    doc = fitz.open(file.name)
+    page = doc[0]
+    # Render the page as a PNG image with a resolution of 150 DPI
+    pix = page.get_pixmap(dpi=150)
+    image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+    return image, []
+
+
+app = MyApp()
+
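+# Gradio layout: chat column (chatbot, textbox, submit button) on the left and the
+# rendered PDF page with an upload button on the right; callbacks run
+# add_text -> get_response -> render_file in sequence.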
+with gr.Blocks() as demo:
+    with gr.Column():
+        with gr.Row():
+            with gr.Column(scale=2):
                 with gr.Row():
+                    chatbot = gr.Chatbot(value=[], elem_id="chatbot")
                 with gr.Row():
+                    txt = gr.Textbox(
+                        show_label=False,
+                        placeholder="Enter text and press submit",
+                        scale=2
+                    )
+                    submit_btn = gr.Button("Submit", scale=1)
+
+            with gr.Column(scale=1):
                 with gr.Row():
+                    show_img = gr.Image(label="Upload PDF")
                 with gr.Row():
+                    btn = gr.UploadButton("📁 Upload a PDF", file_types=[".pdf"])

+    btn.upload(
+        fn=purge_chat_and_render_first,
+        inputs=[btn],
+        outputs=[show_img, chatbot],
+    )
+
+    submit_btn.click(
+        fn=add_text,
+        inputs=[chatbot, txt],
+        outputs=[
+            chatbot,
+        ],
+        queue=False,
+    ).success(
+        fn=get_response, inputs=[chatbot, txt, btn], outputs=[chatbot, txt]
+    ).success(
+        fn=render_file, inputs=[btn], outputs=[show_img]
+    )

+demo.queue()
+demo.launch()