Update app.py
app.py CHANGED
@@ -147,6 +147,42 @@ def generate_filename(prompt, file_type):
     return f"{safe_date_time}_{safe_prompt}.{file_type}"


+def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
+    """
+    Combines file name generation and file creation into one function.
+    If the file is a markdown file, extracts the title from the content (if available) and uses it for the filename.
+    """
+    if not should_save:
+        return None
+
+    # Step 1: Generate filename
+    filename = generate_filename(prompt if prompt else content, file_type)
+
+    # Step 2: If it's a markdown file, check if it has a title (e.g., # Heading in markdown)
+    if file_type == "md":
+        title_from_content = extract_markdown_title(content)
+        if title_from_content:
+            filename = generate_filename(title_from_content, file_type)
+
+    # Step 3: Save file
+    with open(filename, "w", encoding="utf-8") as f:
+        if is_image:
+            f.write(content)
+        else:
+            f.write(prompt + "\n\n" + content)
+
+    return filename
+
+
+def extract_markdown_title(content):
+    """
+    Extract the first markdown title (line starting with '#') from the content.
+    """
+    # Use regex to find the first line that starts with '#'
+    title_match = re.search(r'^\s*#\s*(.+)', content, re.MULTILINE)
+    if title_match:
+        return title_match.group(1).strip()
+    return None

 def process_text(text_input):
     if text_input:
@@ -168,7 +204,9 @@ def process_text(text_input):
         return_text = completion.choices[0].message.content
         st.write("Assistant: " + return_text)
         filename = generate_filename(text_input, "md")
-
+
+        create_and_save_file(return_text, file_type="md", prompt=text_input, is_image=False, should_save=True)
+        #create_file(filename, text_input, return_text, should_save)
         st.session_state.messages.append({"role": "assistant", "content": return_text})

         #st.write("Assistant: " + completion.choices[0].message.content)
@@ -816,7 +854,10 @@ def process_text2(MODEL='gpt-4o-2024-05-13', text_input='What is 2+2 and what is
     return_text = completion.choices[0].message.content
     st.write("Assistant: " + return_text)
     filename = generate_filename(text_input, "md")
-
+
+    create_and_save_file(return_text, file_type="md", prompt=text_input, is_image=False, should_save=True)  # the new
+
+    #create_file(filename, text_input, return_text, should_save)
     return return_text

 @st.cache_resource
@@ -1599,7 +1640,10 @@ def process_user_input(user_question):
     filename = generate_filename(user_question, 'txt')
     response = message.content
     user_prompt = user_question
-
+
+    create_and_save_file(response, file_type="md", prompt=user_prompt, is_image=False, should_save=True)  # the new
+
+    #create_file(filename, user_prompt, response, should_save)

 def divide_prompt(prompt, max_length):
     words = prompt.split()
@@ -1794,13 +1838,6 @@ if example_input:
     for example_input in session_state["search_queries"]:
         st.write(example_input)

-    #if st.button("Run Prompt", help="Click to run."):
-    #  try:
-    #    response=StreamLLMChatResponse(example_input)
-    #    create_file(filename, example_input, response, should_save)
-    #  except:
-    #    st.write('model is asleep. Starting now on A10 GPU. Please wait one minute then retry. KEDA triggered.')
-
 openai.api_key = os.getenv('OPENAI_API_KEY')
 if openai.api_key == None: openai.api_key = st.secrets['OPENAI_API_KEY']
 menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
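
For reference, a minimal, self-contained sketch of the title-extraction step the new create_and_save_file helper relies on. It assumes only the standard-library re module (which app.py must already import elsewhere for the added re.search call to work) and reuses the regex from the diff; the example strings are illustrative and not part of the commit.

import re

def extract_markdown_title(content):
    # First markdown heading (a line starting with '#') becomes the title; otherwise None.
    match = re.search(r'^\s*#\s*(.+)', content, re.MULTILINE)
    return match.group(1).strip() if match else None

print(extract_markdown_title("# Quadratic Formula\n\nx = (-b + sqrt(b^2 - 4ac)) / (2a)"))  # Quadratic Formula
print(extract_markdown_title("no heading here"))                                           # None

One caveat: for non-image files create_and_save_file writes prompt + "\n\n" + content, so it expects a string prompt. All three call sites added in this commit pass one, but calling it with prompt=None and is_image=False would raise a TypeError.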