lvwerra HF staff committed on
Commit
7120f23
·
verified ·
1 Parent(s): 28de828

Update utils.py

Browse files
Files changed (1) hide show
  1. utils.py +16 -9
utils.py CHANGED
@@ -206,30 +206,37 @@ def update_notebook_display(notebook_data):
206
  notebook_body, _ = html_exporter.from_notebook_node(notebook)
207
  return notebook_body
208
 
209
- def run_interactive_notebook(client, model, messages, sbx, max_new_tokens=512):
210
  notebook_data, code_cell_counter = create_base_notebook(messages)
211
  try:
 
 
 
 
 
 
 
212
  #code_cell_counter = 0
213
  while True:
214
  response_stream = client.chat.completions.create(
215
  model=model,
216
- messages=messages,
217
- logprobs=True,
218
  stream=True,
219
  max_tokens=max_new_tokens,
220
  )
221
 
222
  assistant_response = ""
223
  tokens = []
224
- current_cell_content = []
225
 
226
  code_cell = False
227
  for i, chunk in enumerate(response_stream):
228
-
229
- content = chunk.choices[0].delta.content
230
- tokens.append(chunk.choices[0].logprobs.content[0].token)
 
 
231
  assistant_response += content
232
- current_cell_content.append(content)
233
 
234
  if len(tokens)==1:
235
  create_cell=True
@@ -257,7 +264,7 @@ def run_interactive_notebook(client, model, messages, sbx, max_new_tokens=512):
257
  })
258
  else:
259
  notebook_data["cells"][-1]["source"] = assistant_response
260
- if i%8 == 0:
261
  yield update_notebook_display(notebook_data), messages
262
  yield update_notebook_display(notebook_data), messages
263
 
 
206
  notebook_body, _ = html_exporter.from_notebook_node(notebook)
207
  return notebook_body
208
 
209
+ def run_interactive_notebook(client, model, tokenizer, messages, sbx, max_new_tokens=512):
210
  notebook_data, code_cell_counter = create_base_notebook(messages)
211
  try:
212
+ input_tokens = tokenizer.apply_chat_template(
213
+ messages,
214
+ builtin_tools=["code_interpreter"],
215
+ add_generation_prompt=True
216
+ )
217
+ model_input = tokenizer.decode(input_tokens)
218
+
219
  #code_cell_counter = 0
220
  while True:
221
  response_stream = client.chat.completions.create(
222
  model=model,
223
+ messages=model_input,
224
+ details=True,
225
  stream=True,
226
  max_tokens=max_new_tokens,
227
  )
228
 
229
  assistant_response = ""
230
  tokens = []
 
231
 
232
  code_cell = False
233
  for i, chunk in enumerate(response_stream):
234
+ if not chunk.token.special:
235
+ content = chunk.token.text
236
+ else:
237
+ content = ""
238
+ tokens.append(chunk.token.text)
239
  assistant_response += content
 
240
 
241
  if len(tokens)==1:
242
  create_cell=True
 
264
  })
265
  else:
266
  notebook_data["cells"][-1]["source"] = assistant_response
267
+ if i%16 == 0:
268
  yield update_notebook_display(notebook_data), messages
269
  yield update_notebook_display(notebook_data), messages
270