Commit a16c99a
explorewithai committed
1 Parent(s): 8c663db

Update app.py
app.py CHANGED
@@ -4,13 +4,13 @@ import torch
 
 device = 0 if torch.cuda.is_available() else -1
 
-def generate_response(user_input, history, temperature=0.75, do_sample=True):
+def generate_response(user_input, history, temperature=0.75, do_sample=True, repetition_penalty=1.2):
     pipe = pipeline("text-generation", model="frameai/ChatFrame", device=device)
 
     messages = [
         {"role": "user", "content": user_input},
     ]
-    response = pipe(messages, max_length=8000, temperature=temperature, do_sample=do_sample, top_p=0.95)
+    response = pipe(messages, max_length=8000, temperature=temperature, do_sample=do_sample, top_p=0.95, repetition_penalty=repetition_penalty)
     return response[0]['generated_text'][1]["content"]
 
 iface = gr.ChatInterface(
@@ -19,4 +19,4 @@ iface = gr.ChatInterface(
     description="Enter your text and get a generated response from the model."
 )
 
-iface.launch()
+iface.launch()
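For context on the change above: repetition_penalty is a standard transformers generation setting; values above 1.0 down-weight tokens that have already been generated, discouraging the model from looping on the same phrases. Below is a minimal sketch of exercising the updated call directly, assuming transformers and torch are installed; the prompt string is illustrative and not part of the commit.

```python
import torch
from transformers import pipeline

# Same device selection as app.py: GPU 0 if available, else CPU.
device = 0 if torch.cuda.is_available() else -1

# Same model as the Space; the weights are downloaded on first use.
pipe = pipeline("text-generation", model="frameai/ChatFrame", device=device)

messages = [
    {"role": "user", "content": "Tell me a short story."},  # illustrative prompt
]

# repetition_penalty > 1.0 penalizes already-emitted tokens; 1.2 (the new
# default in this commit) is a common mild setting.
response = pipe(
    messages,
    max_length=8000,
    temperature=0.75,
    do_sample=True,
    top_p=0.95,
    repetition_penalty=1.2,
)

# The chat-style pipeline returns the message list with the assistant
# reply appended; index 1 is that reply.
print(response[0]["generated_text"][1]["content"])
```

One design note visible in the diff: the pipeline is constructed inside generate_response, so the model is reloaded on every chat turn; hoisting the pipeline(...) call to module scope, as in the sketch, would load it once.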