import fastapi
import markdown
import uvicorn
from ctransformers import AutoModelForCausalLM
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse
from pydantic import BaseModel
from sse_starlette.sse import EventSourceResponse

# Load the 4-bit quantized StarChat Alpha GGML model via ctransformers.
llm = AutoModelForCausalLM.from_pretrained(
    "NeoDim/starchat-alpha-GGML",
    model_file="starchat-alpha-ggml-q4_0.bin",
    model_type="starcoder",
)

app = fastapi.FastAPI(title="Starchat Alpha")

# Allow cross-origin requests so browser clients served from other origins
# can call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.get("/")
async def index():
    # Serve the project README, rendered to HTML, as the landing page.
    with open("README.md", "r", encoding="utf-8") as readme_file:
        md_template_string = readme_file.read()
    html_content = markdown.markdown(md_template_string)
    return HTMLResponse(content=html_content, status_code=200)


class ChatCompletionRequest(BaseModel):
    prompt: str


@app.get("/demo")
async def demo():
    html_content = """
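<!-- Minimal demo page sketch: the original markup was elided, so this assumes
     a bare page that streams tokens from the /stream endpoint below via the
     browser's built-in EventSource API. -->
<!DOCTYPE html>
<html>
  <head>
    <title>Starchat Alpha Demo</title>
  </head>
  <body>
    <pre id="output"></pre>
    <script>
      const output = document.getElementById("output");
      const source = new EventSource("/stream");
      // Append each server-sent token to the page as it arrives.
      source.onmessage = (event) => {
        output.textContent += event.data;
      };
      // Stop EventSource from reconnecting (and re-triggering generation)
      // once the server closes the stream.
      source.onerror = () => source.close();
    </script>
  </body>
</html>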
"""
    return HTMLResponse(content=html_content, status_code=200)
@app.get("/stream")
async def chat(prompt = "Write a simple expres server"):
tokens = llm.tokenize(prompt)
async def server_sent_events(chat_chunks, llm):
yield prompt
for chat_chunk in llm.generate(chat_chunks):
yield llm.detokenize(chat_chunk)
yield ""
return EventSourceResponse(server_sent_events(tokens, llm))
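
# Minimal client sketch for the /stream endpoint (assumes the server runs on
# localhost:8000 and that the `httpx` package is installed; each SSE line
# arrives as "data: <token>"):
#
# import httpx
#
# with httpx.stream("GET", "http://localhost:8000/stream",
#                   params={"prompt": "Write a quicksort in Python"},
#                   timeout=None) as response:
#     for line in response.iter_lines():
#         if line.startswith("data: "):
#             print(line[len("data: "):], end="", flush=True)
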
@app.post("/v1/chat/completions")
async def chat(request: ChatCompletionRequest, response_mode=None):
tokens = llm.tokenize(request.prompt)
async def server_sent_events(chat_chunks, llm):
for token in llm.generate(chat_chunks):
yield llm.detokenize(token)
yield ""
return EventSourceResponse(server_sent_events(tokens, llm))
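
# Example client for the completions endpoint (a sketch; assumes localhost:8000
# and the `httpx` package; the response body is a server-sent event stream):
#
# import httpx
#
# with httpx.stream("POST", "http://localhost:8000/v1/chat/completions",
#                   json={"prompt": "Write a hello world program in C"},
#                   timeout=None) as response:
#     for line in response.iter_lines():
#         if line.startswith("data: "):
#             print(line[len("data: "):], end="", flush=True)
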
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)