ffreemt committed
Commit • d431ce1 • 1 Parent(s): 23fc95a
Update predict_str: if bot is None: bot = []

Files changed:
- .flake8 → .flake8- (renamed, +0 -0)
- .ruff.toml (+13 -0)
- app.py (+8 -4)
.flake8 → .flake8- (renamed)
File renamed without changes.
.ruff.toml (changed)

@@ -2,3 +2,16 @@
 target-version = "py310"
 # Decrease the maximum line length to 79 characters.
 line-length = 300
+
+# pyflakes, pycodestyle, isort
+# flake8 YTT, pydocstyle D, pylint PLC
+select = ["F", "E", "W", "I001", "YTT", "D", "PLC"]
+# select = ["ALL"]
+
+# D103 Missing docstring in public function
+# D101 Missing docstring in public class
+# `multi-line-summary-first-line` (D212)
+# `one-blank-line-before-class` (D203)
+extend-ignore = ["D103", "D101", "D212", "D203"]
+
+exclude = [".venv"]
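Together with the .flake8 → .flake8- rename above, this commit appears to consolidate linting under ruff. As a rough illustration of what the new rule set does and does not flag, consider a purely hypothetical file (not part of the commit):

# demo.py - hypothetical file to illustrate the new rule selection
"""Module docstring: keeps pydocstyle's D100 (missing module docstring) quiet."""
import os


def basename_of(path):  # D103 (missing docstring) fires, but it is in extend-ignore
    # os is used below, so pyflakes' F401 (unused import) is not triggered
    return os.path.basename(path)

Under this configuration, `ruff check demo.py` should come back clean; deleting the module docstring would surface D100, which, unlike D103/D101, is not in extend-ignore.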
app.py (changed)

@@ -1,4 +1,4 @@
-"""Run codes"""
+"""Run codes."""
 # pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring
 # ruff: noqa: E501
 import os
@@ -35,6 +35,10 @@ assistant_prefix = "[assistant]: "
 
 def predict_str(prompt, bot):  # bot is in fact bot_history
     # logger.debug(f"{prompt=}, {bot=}, {timeout=}")
+
+    if bot is None:
+        bot = []
+
     logger.debug(f"{prompt=}, {bot=}")
 
     try:
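This is the substantive fix named in the commit message: Gradio can hand the handler None rather than an empty list for the Chatbot history (for instance when the endpoint exposed as api_name="predict" is called without prior state), and any list operation on bot would then fail. A minimal sketch of the failure mode the guard prevents; the append line is illustrative, not taken from app.py:

def predict_str(prompt, bot):  # bot is the chat history, per the comment above
    if bot is None:  # the guard this commit adds: normalize missing history
        bot = []
    bot.append([prompt, None])  # on None this would raise AttributeError
    return "", bot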
@@ -150,7 +154,7 @@ def predict_api(prompt):
         stop=["<|im_end|>", "|<"],
     )
 
-    # TODO stream does not make sense in api?
+    # TODO: stream does not make sense in api?
     generator = generate(
         LLM, _, system_prompt=default_system_prompt, user_prompt=prompt.strip()
     )
@@ -215,7 +219,7 @@ def generate(
     system_prompt: str = default_system_prompt,
     user_prompt: str = "",
 ):
-    """Run model inference, will return a Generator if streaming is true"""
+    """Run model inference, will return a Generator if streaming is true."""
     # if not user_prompt.strip():
     return llm(
         format_prompt(
@@ -443,7 +447,7 @@ with gr.Blocks(
         fn=predict_str,
         inputs=[msg, chatbot],
         outputs=[msg, chatbot],
-
+        queue=True,
         show_progress="full",
         api_name="predict",
     ).then(bot_str, chatbot, chatbot)
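The final hunk replaces a blank line with queue=True on the event listener that drives predict_str. Gradio event listeners accept a per-event queue flag, and queuing is typically what allows progress display (show_progress="full") and incremental output updates to reach the client. A sketch of how the wiring presumably ends up — msg.submit is an assumption, since the event name sits outside the visible hunk:

msg.submit(  # hypothetical event name; only the arguments below appear in the diff
    fn=predict_str,
    inputs=[msg, chatbot],
    outputs=[msg, chatbot],
    queue=True,  # added in this commit: route the event through Gradio's queue
    show_progress="full",
    api_name="predict",
).then(bot_str, chatbot, chatbot)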