Update utils.py
utils.py
CHANGED
@@ -222,99 +222,99 @@ def update_notebook_display(notebook_data):
def run_interactive_notebook(client, model, tokenizer, messages, sbx, max_new_tokens=512):
    notebook_data, code_cell_counter = create_base_notebook(messages)
    turns = 0

    #code_cell_counter = 0
    while turns <= MAX_TURNS:
        turns += 1
        input_tokens = tokenizer.apply_chat_template(
            messages,
            chat_template=llama_template,
            builtin_tools=["code_interpreter"],
            add_generation_prompt=True
        )
        model_input = tokenizer.decode(input_tokens)

        print(f"Model input:\n{model_input}\n{'='*80}")

        response_stream = client.text_generation(
            model=model,
            prompt=model_input,
            details=True,
            stream=True,
            do_sample=True,
            repetition_penalty=1.1,
            temperature=0.8,
            max_new_tokens=max_new_tokens,
        )

        assistant_response = ""
        tokens = []

        code_cell = False
        for i, chunk in enumerate(response_stream):
            if not chunk.token.special:
                content = chunk.token.text
            else:
                content = ""
            tokens.append(chunk.token.text)
            assistant_response += content

            if len(tokens)==1:
                create_cell=True
                code_cell = "<|python_tag|>" in tokens[0]
                if code_cell:
                    code_cell_counter +=1
            else:
                create_cell = False

            # Update notebook in real-time
            if create_cell:
                if "<|python_tag|>" in tokens[0]:
                    notebook_data["cells"].append({
                        "cell_type": "code",
                        "execution_count": None,
                        "metadata": {},
                        "source": assistant_response,
                        "outputs": []
                    })
                else:
                    notebook_data["cells"].append({
                        "cell_type": "markdown",
                        "metadata": {},
                        "source": assistant_response
                    })
            else:
                notebook_data["cells"][-1]["source"] = assistant_response
            if i%16 == 0:
                yield update_notebook_display(notebook_data), messages
        yield update_notebook_display(notebook_data), messages

        # Handle code execution
        if code_cell:
            notebook_data["cells"][-1]["execution_count"] = code_cell_counter

            exec_result, execution = execute_code(sbx, assistant_response)
            messages.append({
                "role": "assistant",
                "content": assistant_response,
                "tool_calls": [{
                    "type": "function",
                    "function": {
                        "name": "code_interpreter",
                        "arguments": {"code": assistant_response}
                    }
                }]
            })
            messages.append({"role": "ipython", "content": parse_exec_result_llm(execution), "nbformat": parse_exec_result_nb(execution)})

            # Update the last code cell with execution results
            notebook_data["cells"][-1]["outputs"] = parse_exec_result_nb(execution)
            update_notebook_display(notebook_data)
        else:
            messages.append({"role": "assistant", "content": assistant_response})
        if tokens[-1] == "<|eot_id|>":
            break

    yield update_notebook_display(notebook_data), messages
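For reference, `run_interactive_notebook` is a generator: every `yield` hands back the notebook HTML from `update_notebook_display` together with the running `messages` list, so a caller can stream partial notebook updates while tokens arrive and code executes. Below is a minimal sketch of one way such a caller might look; the `InferenceClient` setup, the model id, the example prompt, and the `sbx` placeholder are illustrative assumptions, not part of this commit.

# Hypothetical driver for run_interactive_notebook (illustration only, not in this commit).
# Assumes `client` is a huggingface_hub.InferenceClient, `tokenizer` matches the model,
# and `sbx` is whatever sandbox handle execute_code() in utils.py expects.
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer

from utils import run_interactive_notebook

model_id = "meta-llama/Llama-3.1-8B-Instruct"   # assumed model id
client = InferenceClient()                       # endpoint/token taken from the environment
tokenizer = AutoTokenizer.from_pretrained(model_id)

messages = [
    {"role": "user", "content": "Load the dataset and plot the first column."}
]
sbx = ...  # placeholder: the sandbox is created elsewhere in the Space

# Each yielded pair is (notebook_html, messages); a UI callback can re-render
# the notebook on every update and keep the final message history at the end.
for notebook_html, messages in run_interactive_notebook(
    client, model_id, tokenizer, messages, sbx, max_new_tokens=512
):
    pass  # e.g. push notebook_html to the front end

print(messages[-1]["content"])

In the Space itself this loop would presumably live inside the UI callback that renders the notebook, with each `notebook_html` pushed to the page as it is yielded.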