wq2012 committed on
Commit
93baeba
1 Parent(s): f380bda

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -11
app.py CHANGED
@@ -25,7 +25,7 @@ model = GPT4All(model_name=model_name,
25
  device="cpu")
26
  print("Finish the model init process")
27
 
28
- def generater(message, history):
29
  prompt = message + prompt_suffix
30
  max_new_tokens = round(len(prompt) / 3.0 * 1.2)
31
  outputs = []
@@ -43,24 +43,17 @@ def generater(message, history):
43
  transferred_completion = utils.transfer_llm_completion(completion, message)
44
  yield transferred_completion
45
 
46
- print("Create chatbot")
47
- chatbot = gr.Chatbot()
48
- print("Created chatbot")
49
 
50
- iface = gr.ChatInterface(
51
  fn = generater,
52
  title=title,
53
  description = description,
54
- chatbot=chatbot,
55
- additional_inputs=[],
56
  examples=[
57
  ["<speaker:1> Hello, my name is Tom. May I speak to Laura <speaker:2> please? Hello, this is Laura. <speaker:1> Hi Laura, how are you? This is <speaker:2> Tom. Hi Tom, I haven't seen you for a <speaker:1> while."],
58
  ]
59
  )
60
 
61
- with gr.Blocks() as demo:
62
- iface.render()
63
-
64
-
65
  if __name__ == "__main__":
66
  demo.queue(max_size=3).launch()
 
25
  device="cpu")
26
  print("Finish the model init process")
27
 
28
+ def generater(message):
29
  prompt = message + prompt_suffix
30
  max_new_tokens = round(len(prompt) / 3.0 * 1.2)
31
  outputs = []
 
43
  transferred_completion = utils.transfer_llm_completion(completion, message)
44
  yield transferred_completion
45
 
 
 
 
46
 
47
+ demo = gr.Interface(
48
  fn = generater,
49
  title=title,
50
  description = description,
51
+ inputs=["text"],
52
+ outputs=["text"],
53
  examples=[
54
  ["<speaker:1> Hello, my name is Tom. May I speak to Laura <speaker:2> please? Hello, this is Laura. <speaker:1> Hi Laura, how are you? This is <speaker:2> Tom. Hi Tom, I haven't seen you for a <speaker:1> while."],
55
  ]
56
  )
57
 
 
 
 
 
58
  if __name__ == "__main__":
59
  demo.queue(max_size=3).launch()