freddyaboulton committed
Commit aca1d5b
1 Parent(s): 9cd3f82

Fix errors

Files changed (3):
  1. README.md +3 -3
  2. app.py +50 -26
  3. something_happened.html +37 -0
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
-title: Llama Code Editor
-emoji: 🐢
+title: Hello Llama Code Editor
+emoji: 🦙
 colorFrom: indigo
 colorTo: pink
 sdk: gradio
@@ -8,7 +8,7 @@ sdk_version: 5.6.0
 app_file: app.py
 pinned: false
 license: mit
-short_description: Create interactive HTML web pages with your voice
+short_description: Say "Hello Llama" and start coding with your voice!
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,19 +1,29 @@
 import gradio as gr
-from gradio_webrtc import WebRTC, ReplyOnStopWords, AdditionalOutputs, audio_to_bytes, get_twilio_turn_credentials
+from gradio_webrtc import (
+    WebRTC,
+    ReplyOnStopWords,
+    AdditionalOutputs,
+    audio_to_bytes,
+    get_twilio_turn_credentials,
+)
 import numpy as np
 import base64
 import re
 from groq import Groq
 
 from dotenv import load_dotenv
+
 load_dotenv()
 
 spinner_html = open("spinner.html").read()
+sandbox_html = open("sandbox.html").read()
+something_happened_html = open("something_happened.html").read()
 
 rtc_configuration = get_twilio_turn_credentials()
 
 
 import logging
+
 # Configure the root logger to WARNING to suppress debug messages from other libraries
 logging.basicConfig(level=logging.WARNING)
 
@@ -36,23 +46,22 @@ groq_client = Groq()
 system_prompt = "You are an AI coding assistant. Your task is to write single-file HTML applications based on a user's request. Only return the necessary code. Include all necessary imports and styles. You may also be asked to edit your original response."
 user_prompt = "Please write a single-file HTML application to fulfill the following request.\nThe message:{user_message}\nCurrent code you have written:{code}"
 
+
 def extract_html_content(text):
     """
     Extract content including HTML tags.
     """
-    match = re.search(r'<!DOCTYPE html>.*?</html>', text, re.DOTALL)
+    match = re.search(r"<!DOCTYPE html>.*?</html>", text, re.DOTALL)
     return match.group(0) if match else None
 
 
 def display_in_sandbox(code):
-    encoded_html = base64.b64encode(code.encode('utf-8')).decode('utf-8')
+    encoded_html = base64.b64encode(code.encode("utf-8")).decode("utf-8")
     data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
-    return f"<iframe src=\"{data_uri}\" width=\"100%\" height=\"600px\"></iframe>"
+    return f'<iframe src="{data_uri}" width="100%" height="600px"></iframe>'
 
 
-def generate(user_message: tuple[int, np.ndarray],
-             history: list[dict],
-             code: str):
+def generate(user_message: tuple[int, np.ndarray], history: list[dict], code: str):
     yield AdditionalOutputs(history, spinner_html)
 
     sr, audio = user_message
@@ -66,20 +75,24 @@ def generate(user_message: tuple[int, np.ndarray],
 
     user_msg_formatted = user_prompt.format(user_message=text, code=code)
     history.append({"role": "user", "content": user_msg_formatted})
-
+
     print("generating response")
     response = groq_client.chat.completions.create(
         model="llama-3.3-70b-versatile",
         messages=history,
         temperature=1,
-        max_tokens=1024,
+        max_tokens=2048,
         top_p=1,
         stream=False,
     )
     print("finished generating response")
-
+
     output = response.choices[0].message.content
-    html_code = extract_html_content(output)
+    try:
+        html_code = extract_html_content(output)
+    except Exception as e:
+        html_code = something_happened_html
+        print(e)
     history.append({"role": "assistant", "content": output})
     yield AdditionalOutputs(history, html_code)
 
@@ -100,28 +113,39 @@ with gr.Blocks(css=".code-component {max-height: 500px !important}") as demo:
             Each conversation is limited to 90 seconds. Once the time limit is up you can rejoin the conversation.
             </p>
             """
-            )
-            webrtc = WebRTC(rtc_configuration=rtc_configuration,
-                            mode="send", modality="audio")
+            )
+            webrtc = WebRTC(
+                rtc_configuration=rtc_configuration, mode="send", modality="audio"
+            )
         with gr.Column(scale=10):
             with gr.Tabs():
                 with gr.Tab("Sandbox"):
-                    sandbox = gr.HTML(value=open("sandbox.html").read())
+                    sandbox = gr.HTML(value=sandbox_html)
                 with gr.Tab("Code"):
-                    code = gr.Code(language="html", max_lines=50, interactive=False, elem_classes="code-component")
+                    code = gr.Code(
+                        language="html",
+                        max_lines=50,
+                        interactive=False,
+                        elem_classes="code-component",
+                    )
                 with gr.Tab("Chat"):
                     cb = gr.Chatbot(type="messages")
-
-    webrtc.stream(ReplyOnStopWords(generate,
-                                   input_sample_rate=16000,
-                                   stop_words=["hello llama", "hello lama", "hello lamma", "hello llamma"]),
-                  inputs=[webrtc, history, code],
-                  outputs=[webrtc], time_limit=90,
-                  concurrency_limit=10)
-    webrtc.on_additional_outputs(lambda history, code: (history, code, history),
-                                 outputs=[history, code, cb])
+
+    webrtc.stream(
+        ReplyOnStopWords(
+            generate,
+            input_sample_rate=16000,
+            stop_words=["hello llama", "hello lama", "hello lamma", "hello llamma"],
+        ),
+        inputs=[webrtc, history, code],
+        outputs=[webrtc],
+        time_limit=90,
+        concurrency_limit=10,
+    )
+    webrtc.on_additional_outputs(
+        lambda history, code: (history, code, history), outputs=[history, code, cb]
+    )
     code.change(display_in_sandbox, code, sandbox, queue=False)
 
 if __name__ == "__main__":
     demo.launch()
-
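The substantive fix in this commit is the fallback around extract_html_content: if anything goes wrong while pulling a complete HTML document out of the model's reply, the Sandbox now shows something_happened.html instead of failing. A minimal standalone sketch of that pattern follows; pick_page and FALLBACK_PAGE are hypothetical stand-ins, and unlike the committed code this sketch also falls back when the regex finds no document at all (the committed version would pass None through in that case).

```python
import re


def extract_html_content(text):
    # Same regex as app.py: grab the first complete <!DOCTYPE html>...</html> document.
    match = re.search(r"<!DOCTYPE html>.*?</html>", text, re.DOTALL)
    return match.group(0) if match else None


# Hypothetical stand-in for the contents of something_happened.html.
FALLBACK_PAGE = "<div>Error generating the page, see Chat tab to see the model's response</div>"


def pick_page(model_output):
    # Mirrors the new try/except in generate(); this sketch additionally treats
    # "no document found" (None) as a reason to show the fallback card.
    try:
        html_code = extract_html_content(model_output)
    except Exception as e:
        print(e)
        return FALLBACK_PAGE
    return html_code or FALLBACK_PAGE


print(pick_page("Sure!\n<!DOCTYPE html><html><body>Hi</body></html>"))
print(pick_page("Sorry, I can only write HTML apps."))
```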
 
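For reference, display_in_sandbox (unchanged apart from quoting) is small enough to exercise on its own: it base64-encodes the page and hands it to an iframe through a data: URI, so the model-generated or fallback HTML is rendered by the browser rather than injected into the Gradio page. Below is a copy of the function from the diff with a throwaway example page.

```python
import base64


def display_in_sandbox(code):
    # Encode the page and embed it in an <iframe> as a base64 data: URI.
    encoded_html = base64.b64encode(code.encode("utf-8")).decode("utf-8")
    data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
    return f'<iframe src="{data_uri}" width="100%" height="600px"></iframe>'


page = "<!DOCTYPE html><html><body><h1>Hello Llama</h1></body></html>"
print(display_in_sandbox(page)[:100] + "...")
```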
something_happened.html ADDED
@@ -0,0 +1,37 @@
+<div style="
+    display: flex;
+    flex-direction: column;
+    align-items: center;
+    justify-content: center;
+    min-height: 400px;
+    background: linear-gradient(135deg, #f5f7fa 0%, #e4e8ec 100%);
+    border-radius: 8px;
+    border: 2px dashed #cbd5e1;
+    padding: 2rem;
+    text-align: center;
+    color: #64748b;
+    font-family: system-ui, -apple-system, sans-serif;
+">
+    <div style="
+        width: 80px;
+        height: 80px;
+        margin-bottom: 1.5rem;
+        border: 3px solid #cbd5e1;
+        border-radius: 12px;
+        position: relative;
+    ">
+        <div style="
+            position: absolute;
+            top: 50%;
+            left: 50%;
+            transform: translate(-50%, -50%);
+            font-size: 2rem;
+        ">😬</div>
+    </div>
+    <h2 style="
+        margin: 0 0 0.5rem 0;
+        font-size: 1.5rem;
+        font-weight: 600;
+        color: #475569;
+    ">Error generating the page, see Chat tab to see the model's response</h2>
+</div>
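The fallback card is a self-contained, inline-styled fragment, so it can be checked in a browser without running the Space. A quick hypothetical preview helper, assuming something_happened.html sits in the working directory:

```python
import pathlib
import webbrowser

# Open the fallback card locally; purely a convenience, not part of the Space.
card = pathlib.Path("something_happened.html").resolve()
webbrowser.open(card.as_uri())
```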