imperialwool committed
Commit · 536efdb
1 Parent(s): 6522af3
Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ tokenizer = AutoTokenizer.from_pretrained("OpenBuddy/openbuddy-openllama-3b-v10-
 model = AutoModelForCausalLM.from_pretrained("OpenBuddy/openbuddy-openllama-3b-v10-bf16")
 model.eval()
 
-with open('
+with open('system.prompt', 'r', encoding='utf-8') as f:
     prompt = f.read()
 
 @app.post("/request")
@@ -16,7 +16,7 @@ async def echo():
     data = await request.get_json()
     if data.get("max_tokens") != None and data.get("max_tokens") > 500: data['max_tokens'] = 500
     userPrompt = prompt + "\n\nUser: " + data['request'] + "\nAssistant: "
-    input_ids = tokenizer.encode(
+    input_ids = tokenizer.encode(userPrompt, return_tensors='pt')
     with torch.no_grad():
         output_ids = model.generate(
             input_ids=input_ids,
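
For reference, a minimal sketch of how a client might call the updated endpoint. Only the /request route and the "request"/"max_tokens" JSON fields come from this diff; the host, port, example prompt, and the shape of the response are assumptions.

import requests

# Hypothetical client call; the URL/port are assumptions (7860 is the usual local Spaces port).
resp = requests.post(
    "http://localhost:7860/request",
    json={
        "request": "Hello, who are you?",  # appended after the system prompt as the user turn
        "max_tokens": 200,                 # the handler clamps values above 500 down to 500
    },
)
print(resp.json())  # the response body is not shown in this commit, so its shape is assumed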