import fastapi
import markdown
import uvicorn
from ctransformers import AutoModelForCausalLM
from fastapi.responses import HTMLResponse
from fastapi.middleware.cors import CORSMiddleware
from sse_starlette.sse import EventSourceResponse
from pydantic import BaseModel
# Load the quantized StarChat Alpha GGML model from the Hugging Face Hub.
llm = AutoModelForCausalLM.from_pretrained(
    "NeoDim/starchat-alpha-GGML",
    model_file="starchat-alpha-ggml-q4_0.bin",
    model_type="starcoder",
)
app = fastapi.FastAPI(title="Starchat Alpha")

# Allow cross-origin requests so the SSE demo page can reach the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Serve the project README as the landing page.
@app.get("/")
async def index():
    with open("README.md", "r", encoding="utf-8") as readme_file:
        md_template_string = readme_file.read()
    html_content = markdown.markdown(md_template_string)
    return HTMLResponse(content=html_content, status_code=200)
class ChatCompletionRequest(BaseModel):
    prompt: str
# Minimal HTML page that subscribes to the /stream endpoint and appends tokens
# as they arrive ("/demo" is an assumed route name).
@app.get("/demo")
async def demo():
    html_content = """
    <!DOCTYPE html>
    <html>
    <body>
        <style>
        pre {
            padding: 1em;
            border: 1px solid black;
        }
        #content {
            font-family: "SFMono-Regular",Consolas,"Liberation Mono",Menlo,Courier,monospace !important;
            box-sizing: border-box;
            min-width: 200px;
            max-width: 980px;
            margin: 0 auto;
            padding: 45px;
            font-size: 16px;
        }
        @media (max-width: 767px) {
            #content {
                padding: 15px;
            }
        }
        </style>
        <pre><code id="content"></code></pre>
        <script>
        var source = new EventSource("https://matthoffner-starchat-alpha.hf.space/stream");
        source.onmessage = function(event) {
            document.getElementById("content").innerHTML += event.data
        };
        </script>
    </body>
    </html>
    """
    return HTMLResponse(content=html_content, status_code=200)
# Stream a completion for a query-supplied prompt over server-sent events; this
# is the endpoint the demo page above subscribes to.
@app.get("/stream")
async def chat(prompt: str = "Write a simple express server"):
    tokens = llm.tokenize(prompt)

    async def server_sent_events(chat_chunks, llm):
        yield prompt
        for chat_chunk in llm.generate(chat_chunks):
            yield llm.detokenize(chat_chunk)
        yield ""

    return EventSourceResponse(server_sent_events(tokens, llm))
# POST variant that takes the prompt from a JSON body. The route path is an
# assumption (OpenAI-style, matching the ChatCompletionRequest model), and the
# function is renamed so it no longer shadows the GET handler above.
@app.post("/v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest, response_mode=None):
    tokens = llm.tokenize(request.prompt)

    async def server_sent_events(chat_chunks, llm):
        for token in llm.generate(chat_chunks):
            yield llm.detokenize(token)
        yield ""

    return EventSourceResponse(server_sent_events(tokens, llm))
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
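# Quick client sketch (run from a separate process once the server is up).
# httpx is an assumption here; any HTTP client that can stream works, e.g.
#   curl -N http://localhost:8000/stream
#
#   import httpx
#
#   # GET /stream: print tokens as the model generates them.
#   with httpx.stream("GET", "http://localhost:8000/stream") as response:
#       for line in response.iter_lines():
#           print(line)
#
#   # POST endpoint (path assumed above): send the prompt in the JSON body.
#   with httpx.stream(
#       "POST",
#       "http://localhost:8000/v1/chat/completions",
#       json={"prompt": "Write a hello world in Python"},
#   ) as response:
#       for line in response.iter_lines():
#           print(line)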