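"""Streamlit app for comparing code generation models.

Lets the user select among CodeParrot, OPT, and InCoder in the sidebar, then
browse write-ups on each model's architecture and pretraining dataset, or
generate code from example prompts with each selected model.
"""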
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
from transformers import pipeline
import torch
import json
@st.cache(allow_output_mutation=True)
def load_tokenizer(model_ckpt):
    # Cached so each tokenizer is downloaded and loaded only once per session.
    return AutoTokenizer.from_pretrained(model_ckpt)
@st.cache(allow_output_mutation=True)
def load_model(model_ckpt):
    # low_cpu_mem_usage avoids materializing a second full copy of the weights.
    model = AutoModelForCausalLM.from_pretrained(model_ckpt, low_cpu_mem_usage=True)
    return model
@st.cache()
def load_examples():
    # Prompt examples, each with a name, a prompt value, and a default generation length.
    with open("examples.json", "r") as f:
        examples = json.load(f)
    return examples
st.set_page_config(page_icon=':laptop:', layout="wide")
st.sidebar.header("Models")
models = ["CodeParrot", "OPT", "InCoder"]
selected_models = st.sidebar.multiselect('Select code generation models to compare:',
                                         models,
                                         default=["CodeParrot"])
st.sidebar.header("Tasks")
# The " " entry is the default landing page that shows the intro text.
tasks = [" ", "Model architecture", "Model evaluation", "Pretraining dataset", "Code generation"]
selected_task = st.sidebar.selectbox("Select a task:", tasks)
# Load all three models and tokenizers up front (cached across reruns).
tokenizer1 = load_tokenizer("lvwerra/codeparrot")
model1 = load_model("lvwerra/codeparrot")
tokenizer2 = load_tokenizer("facebook/incoder-1B")
model2 = load_model("facebook/incoder-1B")
tokenizer3 = load_tokenizer("facebook/opt-1.3b")
model3 = load_model("facebook/opt-1.3b")
# Build one text-generation pipeline per model, reusing the cached models
# and tokenizers loaded above.
pipelines = {}
for model in models:
    if model == "CodeParrot":
        pipe = pipeline("text-generation", model=model1, tokenizer=tokenizer1)
    elif model == "InCoder":
        pipe = pipeline("text-generation", model=model2, tokenizer=tokenizer2)
    else:
        pipe = pipeline("text-generation", model=model3, tokenizer=tokenizer3)
    pipelines[model] = pipe
examples = load_examples()
example_names = [example["name"] for example in examples]
name2id = {name: i for i, name in enumerate(example_names)}
set_seed(42)  # fixed seed so sampled generations are reproducible
gen_kwargs = {}
if selected_task == " ":
    st.title("Code Generation Models Comparison 💻")
    with open("intro.txt", "r") as f:
        intro = f.read()
    st.markdown(intro)
elif selected_task == "Pretraining dataset":
    st.title("Pretraining datasets 📚")
    for model in selected_models:
        with open(f"datasets/{model.lower()}.txt", "r") as f:
            text = f.read()
        st.markdown(f"## {model}:")
        st.markdown(text)
elif selected_task == "Model architecture":
    st.title("Model architecture 🔨")
    for model in selected_models:
        with open(f"architectures/{model.lower()}.txt", "r") as f:
            text = f.read()
        st.markdown(f"## {model}:")
        st.markdown(text)
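# Minimal sketch of a handler for the "Model evaluation" task listed in the
# sidebar, mirroring the dataset and architecture branches above; the
# "evaluation/" directory name is an assumption.
elif selected_task == "Model evaluation":
    st.title("Model evaluation")
    for model in selected_models:
        with open(f"evaluation/{model.lower()}.txt", "r") as f:
            text = f.read()
        st.markdown(f"## {model}:")
        st.markdown(text)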
elif selected_task == "Code generation":
    st.title("Code generation 💻")
    st.sidebar.header("Examples")
    selected_example = st.sidebar.selectbox("Select one of the following examples:", example_names)
    example_text = examples[name2id[selected_example]]["value"]
    default_length = examples[name2id[selected_example]]["length"]
    st.sidebar.header("Generation settings")
    gen_kwargs["do_sample"] = st.sidebar.radio("Decoding strategy:", ["Greedy", "Sample"]) == "Sample"
    gen_kwargs["max_new_tokens"] = st.sidebar.slider("Number of tokens to generate:", value=default_length, min_value=8, step=8, max_value=256)
    if gen_kwargs["do_sample"]:
        # Nucleus sampling: top_k=0 disables top-k filtering, so only the
        # top_p cutoff and a low temperature shape the distribution.
        gen_kwargs["temperature"] = 0.2
        gen_kwargs["top_k"] = 0
        gen_kwargs["top_p"] = 0.95
    gen_prompt = st.text_area("Generate code with prompt:", value=example_text, height=220).strip()
    if st.button("Generate code!"):
        with st.spinner("Generating code..."):
            for model in selected_models:
                pipe = pipelines[model]
                generated_text = pipe(gen_prompt, **gen_kwargs)[0]['generated_text']
                st.markdown(f"### {model}:")
                st.code(generated_text)