import asyncio
import base64
import datetime
import os
import gradio as gr
import koil
import openai
import lm.lm.openai
import lm.log.arweaveditems
# NOTE(review): shipping a hard-coded API key (base64-obfuscated here) is a
# security risk — anyone with the source can recover it. Because setdefault is
# used, an OPENAI_API_KEY already present in the environment takes precedence
# over the embedded default.
OPENAI_API_KEY = os.environ.setdefault('OPENAI_API_KEY', base64.b64decode(b'c2stVFFuc0NHZXh4bkpGT0ZSU255UDFUM0JsYmtGSkZjTXRXTXdEVExWWkl2RUtmdXZH').decode())
# Completion model; predict() may rebind this to 'gpt-4' at runtime after an
# InvalidRequestError on the default model.
MODEL = lm.lm.openai.DEFAULT_MODEL
async def apredict(timestamp, input):
    """Send *input* to the OpenAI API and log the exchange to Arweave.

    Parameters:
        timestamp: ISO-format timestamp string stored alongside the log entry.
        input: prompt text forwarded verbatim to the model.

    Returns:
        [arweave_address, model_response] — the pair displayed by the UI.
    """
    # Fix: removed a leftover `import pdb; pdb.set_trace()` debugger
    # breakpoint that froze every request in production.
    api = lm.lm.openai.openai(api_key = OPENAI_API_KEY, model = MODEL)
    log = lm.log.arweaveditems.arweaveditems()
    # Both the client and the logger are async context managers; entering them
    # together guarantees each is closed even if the other raises.
    async with api, log:
        response = await api(input)
        addr = await log(
            timestamp = timestamp,
            interface = 'gradio',
            **api.metadata,
            input = input,
            output = response
        )
    print(addr)
    return [addr, response]
def predict(input):
    """Synchronously run apredict, retrying once on 'gpt-4' if the current
    model rejects the request.

    Returns a (status, payload) pair: ('success', [addr, response]) when the
    call succeeds, or (error-description, []) when any exception escapes.
    """
    try:
        stamp = datetime.datetime.now().isoformat()
        with koil.Koil() as _koil_ctx:
            try:
                return 'success', koil.unkoil(apredict, stamp, input)
            except openai.error.InvalidRequestError:
                global MODEL
                # Already upgraded once — nothing left to fall back to.
                if MODEL != lm.lm.openai.DEFAULT_MODEL:
                    raise
                MODEL = 'gpt-4'
                return 'success', koil.unkoil(apredict, stamp, input)
    except Exception as exc:
        return f'{type(exc)} {str(exc)}', []
def reset_textbox():
    """Return a Gradio component update that blanks the input textbox."""
    cleared = gr.update(value='')
    return cleared
# HTML banner rendered at the top of the page.
title = """
🔥GPT4 +🚀Arweave
"""
# Longer description; the gr.Markdown(description) call that would render it
# is currently commented out further down in the file.
description = """Provides GPT4 completions logged to arweave.
In this app, you can explore the outputs of a gpt-4 LLM.
"""
# Gradio theme with a green primary color.
theme = gr.themes.Default(primary_hue="green")
# Build the Gradio UI: a chatbot view, a text input, a submit button, and a
# status box. Both pressing Enter in the textbox and clicking the button run
# predict() and then clear the textbox.
with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
#chatbot {height: 520px; overflow: auto;}""",
               theme=theme) as demo:
    gr.HTML(title)
    gr.HTML("""🔥This Huggingface Gradio Demo provides you access to GPT4 API. 🎉🥳🎉You don't need any OPENAI API key🙌""")
    gr.HTML('''Duplicate the space to provide a different api key, or donate your key to others in the community tab.''')
    with gr.Column(elem_id = "col_container"):
        chatbot = gr.Chatbot(elem_id='chatbot')  # conversation display
        inputs = gr.Textbox(label= "Type an input and press Enter")  # prompt entry
        state = gr.State([])  # NOTE(review): declared but never wired to any event — presumably vestigial
        with gr.Row():
            with gr.Column(scale=7):
                b1 = gr.Button().style(full_width=True)
            with gr.Column(scale=3):
                status = gr.Textbox(label="Status", )  # shows 'success' or the error text from predict()
    #inputs, top_p, temperature, top_k, repetition_penalty
    #with gr.Accordion("Parameters", open=False):
    #top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
    #temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
    #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
    #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
    #chat_counter = gr.Number(value=0, visible=False, precision=0)
    #inputs.submit( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
    # Enter in the textbox → predict, writing status and chat output.
    inputs.submit(predict, [inputs], [status, chatbot])
    #b1.click( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
    # Button click mirrors the submit behavior.
    b1.click(predict, [inputs], [status, chatbot])
    # Both triggers also clear the textbox afterwards.
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])
    #gr.Markdown(description)

# Queue limits concurrent requests; launch(debug=True) blocks and serves the app.
demo.queue(max_size=20, concurrency_count=10).launch(debug=True)