yasserrmd committed on
Commit
777550c
1 Parent(s): ce99034

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -42
app.py CHANGED
@@ -27,6 +27,8 @@ SYSTEM_INSTRUCT = os.getenv("SYSTEM_INSTRUCTOR")
27
  PROMPT_TEMPLATE = os.getenv("PROMPT_TEMPLATE")
28
 
29
 
 
 
30
  async def extract_code_blocks(markdown_text):
31
  """
32
  Extracts code blocks from the given Markdown text.
@@ -45,23 +47,13 @@ async def extract_code_blocks(markdown_text):
45
 
46
  return code_blocks
47
 
48
-
49
-
50
- # Route to serve the HTML template
51
- @app.get("/", response_class=HTMLResponse)
52
- async def serve_frontend():
53
- return HTMLResponse(open("static/infographic_gen.html").read())
54
-
55
- # Route to handle infographic generation
56
- @app.post("/generate")
57
- async def generate_infographic(request: InfographicRequest):
58
  description = request.description
59
- prompt = PROMPT_TEMPLATE.format(description=description)
60
  generated_completion = client.chat.completions.create(
61
  model="llama-3.1-70b-versatile",
62
  messages=[
63
  {"role": "system", "content": SYSTEM_INSTRUCT},
64
- {"role": "user", "content": prompt}
65
  ],
66
  temperature=0.5,
67
  max_tokens=5000,
@@ -70,35 +62,41 @@ async def generate_infographic(request: InfographicRequest):
70
  stop=None
71
  )
72
  generated_text = generated_completion.choices[0].message.content
73
- code_blocks=await extract_code_blocks(generated_text)
74
- if code_blocks:
75
- return JSONResponse(content={"html": code_blocks[0]})
76
- else:
77
- return JSONResponse(content={"error": "No generation"},status_code=500)
78
-
79
- # try:
80
- # messages = [{"role": "user", "content": prompt}]
81
- # stream = client.chat.completions.create(
82
- # model="Qwen/Qwen2.5-Coder-32B-Instruct",
83
- # messages=messages,
84
- # temperature=0.4,
85
- # max_tokens=6000,
86
- # top_p=0.7,
87
- # stream=True,
88
- # )
 
 
 
 
 
 
89
 
90
 
91
- # generated_text = ""
92
- # for chunk in stream:
93
- # generated_text += chunk.choices[0].delta.content
94
-
95
- # print(generated_text)
96
- #code_blocks= await extract_code_blocks(generated_text)
97
- # code_blocks= await generate_infographic(description)
98
- # if code_blocks:
99
- # return JSONResponse(content={"html": code_blocks[0]})
100
- # else:
101
- # return JSONResponse(content={"error": "No generation"},status_code=500)
102
-
103
- # except Exception as e:
104
- # return JSONResponse(content={"error": str(e)}, status_code=500)
 
27
  PROMPT_TEMPLATE = os.getenv("PROMPT_TEMPLATE")
28
 
29
 
30
+
31
+
32
  async def extract_code_blocks(markdown_text):
33
  """
34
  Extracts code blocks from the given Markdown text.
 
47
 
48
  return code_blocks
49
 
50
+ async def generate_infographic_details(request: InfographicRequest):
 
 
 
 
 
 
 
 
 
51
  description = request.description
 
52
  generated_completion = client.chat.completions.create(
53
  model="llama-3.1-70b-versatile",
54
  messages=[
55
  {"role": "system", "content": SYSTEM_INSTRUCT},
56
+ {"role": "user", "content": description}
57
  ],
58
  temperature=0.5,
59
  max_tokens=5000,
 
62
  stop=None
63
  )
64
  generated_text = generated_completion.choices[0].message.content
65
+
66
+ # Route to serve the HTML template
67
+ @app.get("/", response_class=HTMLResponse)
68
+ async def serve_frontend():
69
+ return HTMLResponse(open("static/infographic_gen.html").read())
70
+
71
+ # Route to handle infographic generation
72
+ @app.post("/generate")
73
+ async def generate_infographic(request: InfographicRequest):
74
+ description =await generate_infographic_details(request)
75
+ prompt = PROMPT_TEMPLATE.format(description=description)
76
+
77
+ try:
78
+ messages = [{"role": "user", "content": prompt}]
79
+ stream = client.chat.completions.create(
80
+ model="Qwen/Qwen2.5-Coder-32B-Instruct",
81
+ messages=messages,
82
+ temperature=0.4,
83
+ max_tokens=6000,
84
+ top_p=0.7,
85
+ stream=True,
86
+ )
87
 
88
 
89
+ generated_text = ""
90
+ for chunk in stream:
91
+ generated_text += chunk.choices[0].delta.content
92
+
93
+ print(generated_text)
94
+ code_blocks= await extract_code_blocks(generated_text)
95
+ code_blocks= await generate_infographic(description)
96
+ if code_blocks:
97
+ return JSONResponse(content={"html": code_blocks[0]})
98
+ else:
99
+ return JSONResponse(content={"error": "No generation"},status_code=500)
100
+
101
+ except Exception as e:
102
+ return JSONResponse(content={"error": str(e)}, status_code=500)