import streamlit as st
import requests
import time
from ast import literal_eval
from datetime import datetime
def to_md(text):
    # Convert newlines to HTML line breaks so st.markdown(..., unsafe_allow_html=True)
    # renders them on separate lines.
    return text.replace("\n", "<br>")
@st.cache
def infer(
    prompt,
    model_name,
    max_new_tokens=10,
    temperature=0.1,
    top_p=1.0,
    top_k=40,
    num_completions=1,
    seed=42,
    stop="\n",
):
    # Translate the UI-facing model name into the identifier expected by the inference API.
    model_name_map = {
        "GPT-JT-6B-v1": "Together-gpt-JT-6B-v1",
    }
    # Text inputs arrive as strings; coerce and validate them before building the request.
    max_new_tokens = int(max_new_tokens)
    num_completions = int(num_completions)
    temperature = float(temperature)
    top_p = float(top_p)
    top_k = int(top_k)
    stop = stop.split(";")
    seed = int(seed)  # parsed for interface parity; not currently forwarded in the request
    assert 0 <= max_new_tokens <= 256
    assert 1 <= num_completions <= 5
    assert 0.0 <= temperature <= 10.0
    assert 0.0 <= top_p <= 1.0
    if temperature == 0.0:
        # A temperature of exactly 0 is bumped to a small value so sampling stays near-greedy.
        temperature = 0.01
    my_post_dict = {
        "model": model_name_map[model_name],
        "prompt": prompt,
        "top_p": top_p,
        "top_k": top_k,
        "temperature": temperature,
        "max_tokens": max_new_tokens,
        "stop": stop,
    }
print(f"send: {datetime.now()}")
response = requests.get("https://staging.together.xyz/api/inference", params=my_post_dict).json()
generated_text = response['output']['choices'][0]['text']
print(f"recv: {datetime.now()}")
for stop_word in stop:
if stop_word in generated_text:
generated_text = generated_text[:generated_text.find(stop_word)]
st.session_state.updated = True
return generated_text
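# A minimal usage sketch (assuming the staging endpoint above is reachable and the
# "GPT-JT-6B-v1" entry is still valid):
#   answer = infer("Q: In which country is Zurich located?\nA:", "GPT-JT-6B-v1",
#                  max_new_tokens=16, temperature=0.1, stop="\n")
#   print(answer)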
def set_preset():
    # Only overwrite the prompt and sampling settings if the user has not generated anything yet.
    if st.session_state.preset == "Classification":
        if not st.session_state.updated:
            st.session_state.prompt = '''Please classify the given sentence.
Possible labels:
1.
2.
Input:
Label:
Input:
Label:'''
            st.session_state.temperature = "0.0"
            st.session_state.top_p = "1.0"
            st.session_state.max_new_tokens = "10"
    elif st.session_state.preset == "Generation":
        if not st.session_state.updated:
            st.session_state.prompt = '''Please write a story given keywords.
Input: bear, honey
Story: Once upon a time,'''
            st.session_state.temperature = "0.0"
            st.session_state.top_p = "0.9"
            st.session_state.max_new_tokens = "100"
    else:
        pass
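# set_preset is written to be the on_change callback of the "Recommended Templates"
# radio button (currently commented out in main() below), e.g.:
#   st.radio("Recommended Templates", ('Classification', 'Generation'),
#            on_change=set_preset, key="preset", horizontal=True)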
def main():
    if 'preset' not in st.session_state:
        st.session_state.preset = "Classification"
    if 'prompt' not in st.session_state:
        st.session_state.prompt = "Please answer the following question:\n\nQuestion: In which country is Zurich located?\nAnswer:"
    if 'temperature' not in st.session_state:
        st.session_state.temperature = "0.8"
    if 'top_p' not in st.session_state:
        st.session_state.top_p = "1.0"
    if 'top_k' not in st.session_state:
        st.session_state.top_k = "40"
    if 'max_new_tokens' not in st.session_state:
        st.session_state.max_new_tokens = "10"
    if 'updated' not in st.session_state:
        st.session_state.updated = False
st.title("GPT-JT")
col1, col2 = st.columns([1, 3])
with col1:
model_name = st.selectbox("Model", ["GPT-JT-6B-v1"])
max_new_tokens = st.text_input('Max new tokens', st.session_state.max_new_tokens)
temperature = st.text_input('temperature', st.session_state.temperature)
top_k = st.text_input('top_k', st.session_state.top_k)
top_p = st.text_input('top_p', st.session_state.top_p)
# num_completions = st.text_input('num_completions (only the best one will be returend)', "1")
num_completions = "1"
stop = st.text_input('stop, split by;', r'\n')
# seed = st.text_input('seed', "42")
seed = "42"
    with col2:
        # preset = st.radio(
        #     "Recommended Templates",
        #     ('Classification', 'Generation'),
        #     on_change=set_preset,
        #     key="preset",
        #     horizontal=True,
        # )
        prompt_area = st.empty()
        prompt = prompt_area.text_area(
            "Prompt",
            value=st.session_state.prompt,
            max_chars=4096,
            height=300,
        )
        generated_area = st.empty()
        generated_area.markdown("(Generate here)")
        button_submit = st.button("Submit")
    if button_submit:
        # Echo the prompt immediately, then append the model's completion once it arrives.
        generated_area.markdown(to_md(prompt), unsafe_allow_html=True)
        # literal_eval turns an escaped sequence typed in the stop box (e.g. "\n") into the real character.
        report_text = infer(
            prompt, model_name=model_name, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k,
            num_completions=num_completions, seed=seed, stop=literal_eval("'''" + stop + "'''"),
        )
        generated_area.markdown(to_md(prompt) + to_md(report_text), unsafe_allow_html=True)
if __name__ == '__main__':
    main()
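# To try the demo locally (a sketch; assumes this file is saved as app.py and that
# streamlit and requests are installed):
#   pip install streamlit requests
#   streamlit run app.py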