Commit b47e010 by IliaLarchenko
1 Parent(s): 403487b

Fixed empty reply bug

Files changed:
- api/llm.py +2 -0
- tests/candidate.py +2 -2
- tests/test_e2e.py +1 -1
api/llm.py
CHANGED
@@ -141,6 +141,8 @@ class LLMManager:
         for m in split_messages:
             if m.strip():
                 chat_display.append([None, m])
+        if len(chat_display) == original_len:
+            chat_display.append([None, ""])
 
         yield chat_history, chat_display, code
 
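The two added lines handle the case where every chunk of the model's reply is blank: without them, nothing is appended to chat_display and the interface shows no reply at all. A minimal sketch of the guard, assuming original_len is the length of chat_display captured earlier in the method (the surrounding function here is paraphrased, not the repo's exact code):

def append_reply_chunks(chat_display, split_messages):
    # Remember how many rows the chat display had before this reply.
    original_len = len(chat_display)
    for m in split_messages:
        if m.strip():
            chat_display.append([None, m])
    # Empty-reply fix: if no non-blank chunk was appended, add a placeholder
    # row so the UI still renders an (empty) assistant turn.
    if len(chat_display) == original_len:
        chat_display.append([None, ""])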
tests/candidate.py
CHANGED
@@ -95,9 +95,9 @@ def complete_interview(
         response_content = "".join(random.choices(string.ascii_letters + string.digits, k=50))
     elif mode == "repeat":
         response_content = chat_display[-1][1]
-    else:
+    else:
         response = client.chat.completions.create(
-            model=model, messages=messages_candidate, temperature=1, response_format={"type": "json_object"}
+            model=model, messages=messages_candidate, temperature=1, response_format={"type": "json_object"}, stream=False
         )
         response_json = json.loads(response.choices[0].message.content)
         response_content = response_json.get("message", "")
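Adding stream=False makes the simulated candidate request one complete, non-streamed completion, so the JSON body can be parsed in a single step. A rough sketch of that call pattern with the OpenAI Python client, assuming an API key in the environment (the model name and message below are illustrative, not the test's actual values):

import json

from openai import OpenAI

client = OpenAI()

messages_candidate = [
    {"role": "user", "content": "Answer the interviewer. Reply as a JSON object with a 'message' field."},
]

response = client.chat.completions.create(
    model="gpt-4o-mini",  # illustrative model name
    messages=messages_candidate,
    temperature=1,
    response_format={"type": "json_object"},
    stream=False,  # request the full response at once instead of a stream
)
response_json = json.loads(response.choices[0].message.content)
response_content = response_json.get("message", "")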
tests/test_e2e.py
CHANGED
@@ -26,7 +26,7 @@ def test_complete_interview() -> None:
     interview_types = ["ml_design", "math", "ml_theory", "system_design", "sql", "coding"]
     scores: List[float] = []
 
-    with ThreadPoolExecutor(max_workers=
+    with ThreadPoolExecutor(max_workers=2) as executor:
         # Test normal interviews
         futures = [executor.submit(complete_and_grade_interview, it) for it in interview_types]
 
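The fix completes the previously truncated line and caps the end-to-end test at two concurrent interviews. A simplified sketch of the executor pattern, with a stand-in for complete_and_grade_interview (the real helper is defined elsewhere in the test suite):

from concurrent.futures import ThreadPoolExecutor
from typing import List

interview_types = ["ml_design", "math", "ml_theory", "system_design", "sql", "coding"]
scores: List[float] = []

def complete_and_grade_interview(interview_type: str) -> float:
    # Stand-in: run one simulated interview of the given type and return its grade.
    return 1.0

with ThreadPoolExecutor(max_workers=2) as executor:
    # Run up to two interviews at a time and collect their scores.
    futures = [executor.submit(complete_and_grade_interview, it) for it in interview_types]
    for future in futures:
        scores.append(future.result())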