Update app.py
app.py CHANGED
@@ -2,8 +2,7 @@ import json
 import os
 import pandas as pd
 import requests
-
-from functools import partial
+import threading
 import streamlit as st
 from datasets import load_dataset, load_metric
 
@@ -33,7 +32,11 @@ def read_markdown(path):
         output = f.read()
     st.markdown(output, unsafe_allow_html=True)
 
-def generate_code(model_name, gen_prompt, max_new_tokens, temperature, seed):
+
+def generate_code(
+    generations, model_name, gen_prompt, max_new_tokens, temperature, seed
+):
+    # call space using its API endpoint
     url = (
         f"https://hf.space/embed/loubnabnl/{model_name.lower()}-subspace/+/api/predict/"
     )
@@ -41,7 +44,8 @@ def generate_code(model_name, gen_prompt, max_new_tokens, temperature, seed):
         url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]}
     )
     generated_text = r.json()["data"][0]
-    return generated_text
+    generations.append(generated_text)
+
 
 def generate_code_threads(
     generations, models, gen_prompt, max_new_tokens, temperature, seed
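The diff shows the new worker appending into the shared generations list and the signature of generate_code_threads, but not its body. What follows is a minimal sketch of what that dispatcher presumably looks like, assuming one threading.Thread per selected model joined with a timeout; the 60-second value is an assumption, not taken from this commit.

import threading

def generate_code_threads(
    generations, models, gen_prompt, max_new_tokens, temperature, seed
):
    # one thread per selected model; each worker is the generate_code
    # function above, which appends its result to the shared list
    # (list.append is atomic under CPython's GIL)
    threads = [
        threading.Thread(
            target=generate_code,
            args=(generations, model_name),
            kwargs={
                "gen_prompt": gen_prompt,
                "max_new_tokens": max_new_tokens,
                "temperature": temperature,
                "seed": seed,
            },
        )
        for model_name in models
    ]
    for thread in threads:
        thread.start()
    # join with a timeout (value assumed): a subspace that has not answered
    # in time never appends, so len(generations) < len(models) afterwards,
    # which is the condition the UI checks before showing its warning
    for thread in threads:
        thread.join(timeout=60)

One side effect of appending in completion order is that generations[i] lines up with selected_models[i] only when the threads finish in launch order; writing each result into a fixed slot per model would pin that pairing.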
@@ -181,23 +185,22 @@ gen_prompt = st.text_area(
     value=example_text,
     height=200,
 ).strip()
-if st.button("Generate code!"):
+if st.button("Generate code!", key=4):
     with st.spinner("Generating code..."):
-        #
-        [four removed lines not captured in this view]
+        # use threading
+        generations = []
+        generate_code_threads(
+            generations,
+            selected_models,
             gen_prompt=gen_prompt,
             max_new_tokens=max_new_tokens,
             temperature=temperature,
             seed=seed,
         )
-        [removed line not captured in this view]
-        for i in range(len(output)):
+        for i in range(len(generations)):
             st.markdown(f"**{selected_models[i]}**")
-            st.code(output[i])
-        if len(output) < len(selected_models):
+            st.code(generations[i])
+        if len(generations) < len(selected_models):
             st.markdown("<span style='color:red'>Warning: Some models run into timeout, you can try generating code using the original subspaces: [InCoder](https://huggingface.co/spaces/loubnabnl/incoder-subspace), [CodeGen](https://huggingface.co/spaces/loubnabnl/codegen-subspace), [CodeParrot](https://huggingface.co/spaces/loubnabnl/codeparrot-subspace)</span>", unsafe_allow_html=True)
 
 # Resources
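For reference, the subspaces linked in the timeout warning answer on the same predict endpoint that generate_code uses, so each can be queried directly. A minimal sketch against the codeparrot subspace; the prompt and sampling values are made up for illustration, and the client-side timeout is an assumption rather than part of the commit.

import requests

# same endpoint pattern as in generate_code above
url = "https://hf.space/embed/loubnabnl/codeparrot-subspace/+/api/predict/"

# payload shape matches the json argument built in generate_code:
# [gen_prompt, max_new_tokens, temperature, seed]
payload = {"data": ["def print_hello_world():", 8, 0.2, 42]}

r = requests.post(url=url, json=payload, timeout=60)
print(r.json()["data"][0])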
|