Tomoniai committed on
Commit
616f923
1 Parent(s): d631036

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -4
app.py CHANGED
@@ -26,9 +26,7 @@ def generate(prompt, history, max_new_tokens = 128, temperature = 0.6):
26
  formatted_prompt = format_prompt(prompt, history)
27
  response = ""
28
 
29
- # Count the number of tokens in the prompt
30
  num_prompt_tokens = len(tokenizer(formatted_prompt)['input_ids'])
31
- # Calculate the maximum length for the generation
32
  max_length = num_prompt_tokens + max_new_tokens
33
 
34
  textgen = pipeline('text-generation', model=model, tokenizer=tokenizer, max_length=max_length, temperature=temperature)
@@ -36,11 +34,9 @@ def generate(prompt, history, max_new_tokens = 128, temperature = 0.6):
36
  response = output[0]['generated_text'].replace(formatted_prompt, '')
37
  return response
38
 
39
-
40
  mychatbot = gr.Chatbot(
41
  avatar_images=["user.png", "botp.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
42
 
43
-
44
  demo = gr.ChatInterface(fn=generate,
45
  chatbot=mychatbot,
46
  title="Phi-3 Mini Chat Demo",
 
26
  formatted_prompt = format_prompt(prompt, history)
27
  response = ""
28
 
 
29
  num_prompt_tokens = len(tokenizer(formatted_prompt)['input_ids'])
 
30
  max_length = num_prompt_tokens + max_new_tokens
31
 
32
  textgen = pipeline('text-generation', model=model, tokenizer=tokenizer, max_length=max_length, temperature=temperature)
 
34
  response = output[0]['generated_text'].replace(formatted_prompt, '')
35
  return response
36
 
 
37
  mychatbot = gr.Chatbot(
38
  avatar_images=["user.png", "botp.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
39
 
 
40
  demo = gr.ChatInterface(fn=generate,
41
  chatbot=mychatbot,
42
  title="Phi-3 Mini Chat Demo",