Update app_threading.py

app_threading.py: CHANGED (+14 -17)
@@ -2,7 +2,8 @@ import json
 import os
 import pandas as pd
 import requests
-import
+from multiprocessing import Pool
+from functools import partial
 import streamlit as st
 from datasets import load_dataset, load_metric
 
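For context on the new imports: `functools.partial` freezes the arguments that are shared across all models, and `multiprocessing.Pool` maps the one varying argument over worker processes. A minimal, self-contained sketch of that combination (the names here are illustrative, not from the app):

```python
from functools import partial
from multiprocessing import Pool

def greet(name, greeting):
    # Stands in for generate_code: one varying positional arg, shared kwargs.
    return f"{greeting}, {name}!"

if __name__ == "__main__":
    # Bind the shared keyword argument once ...
    say_hi = partial(greet, greeting="Hi")
    # ... then let the pool supply only the varying first argument.
    with Pool() as pool:
        print(pool.map(say_hi, ["InCoder", "CodeGen", "CodeParrot"]))
    # ['Hi, InCoder!', 'Hi, CodeGen!', 'Hi, CodeParrot!']
```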
@@ -32,11 +33,7 @@ def read_markdown(path):
         output = f.read()
     st.markdown(output, unsafe_allow_html=True)
 
-
-def generate_code(
-    generations, model_name, gen_prompt, max_new_tokens, temperature, seed
-):
-    # call space using its API endpoint
+def generate_code(model_name, gen_prompt, max_new_tokens, temperature, seed):
     url = (
         f"https://hf.space/embed/loubnabnl/{model_name.lower()}-subspace/+/api/predict/"
     )
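The refactor moves `model_name` into the first positional slot and drops the `generations` accumulator, so each pool worker can be handed just a model name. A sketch of the resulting function, assuming the Gradio-style `/api/predict/` contract visible in the diff; the `timeout` guard is an illustrative addition, not part of the commit:

```python
import requests

def generate_code(model_name, gen_prompt, max_new_tokens, temperature, seed):
    # Each model is served from its own Space; call it through the API endpoint.
    url = f"https://hf.space/embed/loubnabnl/{model_name.lower()}-subspace/+/api/predict/"
    r = requests.post(
        url=url,
        json={"data": [gen_prompt, max_new_tokens, temperature, seed]},
        timeout=60,  # illustrative: bound the wait on a slow Space (not in the commit)
    )
    # Gradio wraps outputs in a "data" list; the generated text is the first item.
    return r.json()["data"][0]
```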
@@ -44,8 +41,7 @@ def generate_code(
         url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]}
     )
     generated_text = r.json()["data"][0]
-
-
+    return generated_text
 
 def generate_code_threads(
     generations, models, gen_prompt, max_new_tokens, temperature, seed
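The added `return` matters for the switch to processes: unlike threads, worker processes get their own copy of the parent's objects, so appending to a shared `generations` list would be invisible to the parent. Results have to travel back as return values, which is exactly what `Pool.map` collects. A small demonstration of the pitfall (illustrative names):

```python
from multiprocessing import Pool

results = []  # lives in the parent process

def square(x):
    results.append(x * x)  # mutates only this worker's copy
    return x * x

if __name__ == "__main__":
    with Pool(2) as pool:
        returned = pool.map(square, [1, 2, 3])
    print(results)   # [] -- the parent's list never sees the appends
    print(returned)  # [1, 4, 9] -- return values are the reliable channel
```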
@@ -185,22 +181,23 @@ gen_prompt = st.text_area(
     value=example_text,
     height=200,
 ).strip()
-if st.button("Generate code!"
+if st.button("Generate code!"):
     with st.spinner("Generating code..."):
-        #
-
-
-
-
+        # Create a multiprocessing Pool
+        pool = Pool()
+        generate_parallel = partial(
+            generate_code,
+
             gen_prompt=gen_prompt,
             max_new_tokens=max_new_tokens,
             temperature=temperature,
             seed=seed,
         )
-
+        output = pool.map(generate_parallel, selected_models)
+        for i in range(len(output)):
             st.markdown(f"**{selected_models[i]}**")
-        st.code(
-        if len(
+            st.code(output[i])
+        if len(output) < len(selected_models):
             st.markdown("<span style='color:red'>Warning: Some models run into timeout, you can try generating code using the original subspaces: [InCoder](https://huggingface.co/spaces/loubnabnl/incoder-subspace), [CodeGen](https://huggingface.co/spaces/loubnabnl/codegen-subspace), [CodeParrot](https://huggingface.co/spaces/loubnabnl/codeparrot-subspace)</span>", unsafe_allow_html=True)
 
 # Resources
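Assembled, the button handler binds the generation settings with `partial` and fans the selected model names out over the pool, with results coming back in input order. A sketch of the same flow outside Streamlit, with a stubbed `generate_code` and the pool wrapped in a context manager (the commit constructs `Pool()` without closing it):

```python
from functools import partial
from multiprocessing import Pool

def generate_code(model_name, gen_prompt, max_new_tokens, temperature, seed):
    # Stub standing in for the HTTP call to the model's Space.
    return f"# {model_name}: {max_new_tokens} tokens at T={temperature}, seed={seed}"

if __name__ == "__main__":
    selected_models = ["InCoder", "CodeGen", "CodeParrot"]
    generate_parallel = partial(
        generate_code,
        gen_prompt="def hello_world():",
        max_new_tokens=64,
        temperature=0.2,
        seed=42,
    )
    with Pool() as pool:  # `with` reclaims the workers; the app leaves its pool open
        output = pool.map(generate_parallel, selected_models)
    for model, code in zip(selected_models, output):
        print(f"**{model}**\n{code}")
```

Worth noting: `Pool.map` either returns one result per input or raises the worker's exception, so the `len(output) < len(selected_models)` timeout check will rarely trigger on its own; a `timeout=` on the HTTP call, as sketched earlier, is the more direct guard.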