import os
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import uvicorn
# Initialize FastAPI app
app = FastAPI()
# Serve static files for assets
app.mount("/static", StaticFiles(directory="static"), name="static")
# Initialize Hugging Face Inference Client
client = InferenceClient()
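# Note: with no arguments, InferenceClient resolves its token from the
# HF_TOKEN environment variable (e.g. a secret in the Space settings)
# or from a cached `huggingface-cli login`.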
# Pydantic model for API input
class InfographicRequest(BaseModel):
    description: str

# Load prompt template from environment variable
PROMPT_TEMPLATE = os.getenv("PROMPT_TEMPLATE")
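# PROMPT_TEMPLATE must be set in the Space's environment (os.getenv returns
# None otherwise) and is expected to contain a "{description}" placeholder.
# A hypothetical example value, not the Space's actual template:
# PROMPT_TEMPLATE="Generate a single self-contained HTML infographic page for: {description}"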
# Route to serve the HTML template
@app.get("/", response_class=HTMLResponse)
async def serve_frontend():
    with open("infographic_gen.html", "r") as file:
        return HTMLResponse(content=file.read())

# Route to handle infographic generation
@app.post("/generate")
async def generate_infographic(request: InfographicRequest):
    description = request.description
    prompt = PROMPT_TEMPLATE.format(description=description)
    try:
        # Query Hugging Face model
        messages = [{"role": "user", "content": prompt}]
        stream = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7,
            stream=True,
        )

        # Collect the HTML content from the stream; delta.content can be
        # None on some chunks (e.g. the final one), so guard against it
        generated_html = ""
        for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                generated_html += chunk.choices[0].delta.content

        # Return the generated HTML content
        return JSONResponse(content={"html": generated_html})
    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)
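
# A quick way to exercise the /generate endpoint once the app is running;
# a sketch using the `requests` library (route name and payload shape are
# taken from the code above, the URL/port and description are assumptions):
#
#     import requests
#
#     resp = requests.post(
#         "http://localhost:7860/generate",
#         json={"description": "Renewable energy adoption by region, 2020-2024"},
#         timeout=120,
#     )
#     resp.raise_for_status()
#     print(resp.json()["html"])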