Spaces:
Paused
Paused
matthoffner
committed on
Commit
·
b7ec1ef
1
Parent(s):
3151c18
Update main.py
Browse files
main.py
CHANGED
@@ -40,11 +40,14 @@ async def demo():
|
|
40 |
html_content = """
|
41 |
<!DOCTYPE html>
|
42 |
<html>
|
|
|
|
|
|
|
43 |
<body>
|
44 |
<style>
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
}
|
49 |
#content {
|
50 |
font-family: "SFMono-Regular",Consolas,"Liberation Mono",Menlo,Courier,monospace !important;
|
@@ -62,11 +65,12 @@ async def demo():
|
|
62 |
}
|
63 |
}
|
64 |
</style>
|
65 |
-
<
|
|
|
66 |
<script>
|
67 |
var source = new EventSource("https://matthoffner-starchat-alpha.hf.space/stream");
|
68 |
source.onmessage = function(event) {
|
69 |
-
document.getElementById("content").innerHTML += event.data
|
70 |
};
|
71 |
</script>
|
72 |
|
@@ -76,7 +80,7 @@ async def demo():
|
|
76 |
return HTMLResponse(content=html_content, status_code=200)
|
77 |
|
78 |
@app.get("/stream")
|
79 |
-
async def chat(prompt = "Write a simple
|
80 |
tokens = llm.tokenize(prompt)
|
81 |
async def server_sent_events(chat_chunks, llm):
|
82 |
yield prompt
|
|
|
40 |
html_content = """
|
41 |
<!DOCTYPE html>
|
42 |
<html>
|
43 |
+
<head>
|
44 |
+
<script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
|
45 |
+
</head>
|
46 |
<body>
|
47 |
<style>
|
48 |
+
code {
|
49 |
+
display: inline-block;
|
50 |
+
background-color: lightgray;
|
51 |
}
|
52 |
#content {
|
53 |
font-family: "SFMono-Regular",Consolas,"Liberation Mono",Menlo,Courier,monospace !important;
|
|
|
65 |
}
|
66 |
}
|
67 |
</style>
|
68 |
+
<article id="content"></article>
|
69 |
+
|
70 |
<script>
|
71 |
var source = new EventSource("https://matthoffner-starchat-alpha.hf.space/stream");
|
72 |
source.onmessage = function(event) {
|
73 |
+
document.getElementById("content").innerHTML += marked.parse(event.data)
|
74 |
};
|
75 |
</script>
|
76 |
|
|
|
80 |
return HTMLResponse(content=html_content, status_code=200)
|
81 |
|
82 |
@app.get("/stream")
|
83 |
+
async def chat(prompt = "Write a simple express server in rust"):
|
84 |
tokens = llm.tokenize(prompt)
|
85 |
async def server_sent_events(chat_chunks, llm):
|
86 |
yield prompt
|