# app.py — FastAPI infographic-generator service.
# Source: Hugging Face Space by yasserrmd (commit 0537a74, 1.8 kB).
import os
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import uvicorn
# Initialize the FastAPI app: turns a free-text description into infographic
# HTML via a hosted Hugging Face chat model (see /generate below).
app = FastAPI()
# Serve static assets from ./static under the /static URL path.
# NOTE(review): StaticFiles requires the "static" directory to exist at
# startup — confirm it is present in the deployment image.
app.mount("/static", StaticFiles(directory="static"), name="static")
# Inference client for the Hugging Face serverless API. No token is passed
# here; presumably credentials come from the environment (e.g. HF_TOKEN) —
# verify in deployment.
client = InferenceClient()
# Pydantic model for API input: the request body schema for POST /generate.
class InfographicRequest(BaseModel):
    # Free-text description of the infographic the user wants generated.
    description: str
# Load prompt template from environment variable; /generate fills it via
# str.format, so it is expected to contain a "{description}" placeholder.
# NOTE(review): os.getenv returns None when the variable is unset, which
# would make PROMPT_TEMPLATE.format(...) raise AttributeError — confirm the
# variable is always provided in deployment.
PROMPT_TEMPLATE = os.getenv("PROMPT_TEMPLATE")
# Route to serve the HTML template
@app.get("/", response_class=HTMLResponse)
async def serve_frontend():
    """Serve the single-page frontend.

    Reads infographic_gen.html from the working directory on every request
    and returns its contents as an HTML response.
    """
    # Explicit UTF-8: the default open() encoding is platform/locale
    # dependent and can corrupt non-ASCII characters in the page.
    with open("infographic_gen.html", "r", encoding="utf-8") as file:
        return HTMLResponse(content=file.read())
# Route to handle infographic generation
@app.post("/generate")
async def generate_infographic(request: InfographicRequest):
    """Generate infographic HTML for the given description.

    Formats the configured prompt template with the user's description,
    streams a chat completion from the Qwen coder model, and returns the
    concatenated output as ``{"html": ...}``. Failures are reported as a
    JSON ``{"error": ...}`` body with a 500 status.
    """
    # Fail fast with a clear message when the deployment is missing its
    # prompt configuration (os.getenv above may have returned None, which
    # would otherwise surface as an opaque AttributeError).
    if PROMPT_TEMPLATE is None:
        return JSONResponse(
            content={"error": "PROMPT_TEMPLATE environment variable is not set"},
            status_code=500,
        )
    prompt = PROMPT_TEMPLATE.format(description=request.description)
    try:
        # Query Hugging Face model; stream=True yields incremental deltas.
        messages = [{"role": "user", "content": prompt}]
        stream = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7,
            stream=True,
        )
        # Collect the HTML content from the stream. delta.content can be
        # None on role/finish chunks, so filter before joining — the
        # original `+=` concatenation would raise TypeError on those.
        parts = []
        for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content is not None:
                parts.append(chunk.choices[0].delta.content)
        generated_html = "".join(parts)
        # Return the generated HTML content
        return JSONResponse(content={"html": generated_html})
    except Exception as e:
        # Boundary handler: surface upstream/inference failures as a 500
        # rather than crashing the request.
        return JSONResponse(content={"error": str(e)}, status_code=500)