Spaces: Sleeping
arabellastrange committed
Commit 39a05d7 · 1 Parent(s): 2417a86
more logs to prints
app.py
CHANGED
@@ -75,9 +75,9 @@ def google_search_chat(message, history):
             # f'**Is response in source documents?**: {faithfulness_str}'
             # f'\n **Sources used:** \n {sources}')

-
+        print(f'Assistant Response: {string_output}')
     else:
-
+        print(
             f'Assistant Response: Sorry, no search results found.')
         yield "Sorry, no search results found."

@@ -98,22 +98,15 @@ if __name__ == '__main__':
     logger.addHandler(filehandler)  # set the new handler
     logger.setLevel(logging.INFO)

-    logging.getLogger('selenium.webdriver.common').setLevel(logging.DEBUG)
-
     api_key = os.getenv('gpt_api_key')

     # GPT - 4 Turbo. The latest GPT - 4 model intended to reduce cases of “laziness” where the model doesn’t complete
     # a task. Returns a maximum of 4,096 output tokens. Link:
     # https://openai.com/blog/new-embedding-models-and-api-updates
     set_llm(key=api_key, model="gpt-4-0125-preview", temperature=0)
-
-
-
-    # libfontconfig1=2.11.0-6.7+b1
-    # check_call(['apt-get', 'install', '-y', 'libglib2.0-0 libnss3 libgconf-2-4 libfontconfig1'],
-    #            stdout=open(os.devnull, 'wb'), stderr=STDOUT)
-
-    logger.info("Launching Gradio ChatInterface for searchbot...")
+
+
+    print("Launching Gradio ChatInterface for searchbot...")

     demo = gr.ChatInterface(fn=google_search_chat,
                             title="Search Assistant", retry_btn=None, undo_btn=None, clear_btn=None,