Update app.py
app.py
CHANGED
@@ -96,6 +96,46 @@ async def complete(
             status_code=500, content={"message": "Internal Server Error"}
         )
 
+# Chat Completion API
+@app.post("/generate")
+async def complete(
+    question: str,
+    system: str = "You are a story writing assistant.",
+    temperature: float = 0.7,
+    seed: int = 42,
+) -> dict:
+    try:
+        st = time()  # start of the request timer
+        output = llama.create_chat_completion(
+            messages=[
+                {"role": "system", "content": system},
+                {"role": "user", "content": question},
+            ],
+            temperature=temperature,
+            seed=seed,
+            stream=False,  # stream=True returns a chunk iterator, which would break the dict access below
+        )
+        """
+        for chunk in output:
+
+            delta = chunk['choices'][0]['delta']
+            if 'role' in delta:
+                print(delta['role'], end=': ')
+            elif 'content' in delta:
+                print(delta['content'], end='')
+
+            print(chunk)
+        """
+        et = time()
+        output["time"] = et - st  # attach server-side latency to the response
+        return output
+    except Exception as e:
+        logger.error(f"Error in /generate endpoint: {e}")
+        return JSONResponse(
+            status_code=500, content={"message": "Internal Server Error"}
+        )
+
+
 
 if __name__ == "__main__":
     import uvicorn
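
For reference, a minimal client sketch for the new endpoint. It assumes the app is served locally on uvicorn's default port 8000 (the URL and prompt text are illustrative, not from the commit); since question, system, temperature, and seed are plain scalar parameters, FastAPI expects them as query parameters even on a POST:

import requests  # third-party HTTP client, used here for brevity

resp = requests.post(
    "http://localhost:8000/generate",  # assumed host/port (uvicorn default)
    params={
        "question": "Write a two-sentence story about a lighthouse.",
        "temperature": 0.7,
        "seed": 42,
    },
)
resp.raise_for_status()
data = resp.json()
# llama-cpp-python returns an OpenAI-style completion dict
print(data["choices"][0]["message"]["content"])  # generated text
print(data["time"])  # latency added by the endpoint

The commented-out loop in the diff consumes the streaming variant of create_chat_completion. One way such streaming could be exposed over HTTP, sketched here only as an illustration (this is not part of the commit, and the route name is hypothetical), is FastAPI's StreamingResponse:

from fastapi.responses import StreamingResponse

@app.post("/generate_stream")  # hypothetical route, not in the commit
async def complete_stream(question: str) -> StreamingResponse:
    chunks = llama.create_chat_completion(
        messages=[{"role": "user", "content": question}],
        stream=True,  # yields incremental delta chunks
    )

    def token_text():
        # Only 'content' deltas carry generated text; the first chunk
        # typically carries the assistant 'role' instead.
        for chunk in chunks:
            delta = chunk["choices"][0]["delta"]
            if "content" in delta:
                yield delta["content"]

    return StreamingResponse(token_text(), media_type="text/plain")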