# LLM Config Comparer — Hugging Face Space entry script.
import os

import gradio as gr
import openai
import requests
def greet(modelpath1="", modelpath2="", modelpath3="", modelpath4="", modelpath5=""):
    """Compare the config.json files of up to five Hugging Face models.

    Fetches each model's config.json from the Hub (or from this Space's
    mirror for gated models), feeds them to the OpenAI chat API with a
    prompt from environment variables, and returns the resulting markdown
    comparison table. Models whose config could not be fetched are listed
    with a warning line instead.

    Args:
        modelpath1..modelpath5: Hub model IDs like "owner/modelname".
            Empty strings are ignored.

    Returns:
        A markdown string: warnings for inaccessible configs followed by
        the comparison table, or an error message for invalid input.
    """
    names = [
        name
        for name in (modelpath1, modelpath2, modelpath3, modelpath4, modelpath5)
        if name != ""
    ]
    if not names:
        return "Please enter at least one model name."
    if len(names) < 2:
        return "Please enter at least 2 model names."

    # Gated/licensed models are served from a mirror inside this Space,
    # because their config.json is not publicly readable on the Hub.
    mirrored = {
        "meta-llama/Llama-2-7b-hf",
        "meta-llama/Llama-2-13b-hf",
        "meta-llama/Llama-2-70b-hf",
        "meta-llama/Llama-2-7b-chat-hf",
        "meta-llama/Llama-2-13b-chat-hf",
        "meta-llama/Llama-2-70b-chat-hf",
        "stabilityai/japanese-stablelm-instruct-alpha-7b",
        "tiiuae/falcon-180B",
    }
    urls = []
    for name in names:
        if name in mirrored:
            urls.append("https://huggingface.co/spaces/gojiteji/LLM-Comparer/raw/main/" + name + "/config.json")
        else:
            urls.append("https://huggingface.co/" + name + "/raw/main/config.json")

    configs = []
    index_to_ignore = []
    for i, url in enumerate(urls):
        # timeout prevents a dead endpoint from hanging the Space indefinitely
        get_result = requests.get(url, timeout=30)
        if get_result.status_code == 200:
            configs.append(get_result.json())
        else:
            configs.append("")  # keep indices aligned with `names`
            index_to_ignore.append(i)

    # BUG FIX: the original tested `configs == []`, which is never true since
    # a placeholder "" is appended for every failed fetch. Bail out when
    # *every* fetch failed instead.
    if len(index_to_ignore) == len(urls):
        return "Could not find any models. Please check the model name."

    # System prompt: instructions from env vars, then each reachable model's
    # name and raw config contents.
    gpt_input = os.environ["prompts"] + os.environ["prompts2"] + "\n\n"
    for i, name in enumerate(names):
        if i not in index_to_ignore:
            gpt_input += "modelname: " + name + "\n" + " config file:" + str(configs[i]) + "\n\n"

    openai.api_key = os.environ["APIKEY"]
    response = openai.ChatCompletion.create(
        model="gpt-4o-mini",
        messages=[
            {
                "role": "system",
                "content": gpt_input,
            },
        ],
    )

    response_text = ""
    for i in index_to_ignore:
        response_text += "- **⚠️Config file inaccessible:** " + names[i] + "\n"
    table_text = response["choices"][0]["message"]["content"]
    # Keep only the markdown table: from just after the first "|" through
    # the last "|" (drops any prose the model wrapped around the table).
    response_text += "\n" + table_text[table_text.find("|") + 1:table_text.rfind("|") + 1]
    return response_text
# Input fields for Hugging Face model IDs in "owner/modelname" form.
# (Fixes the original placeholder typo "ower" -> "owner"; the app
# description itself says "owner name / model name".)
text1 = gr.Textbox(placeholder="owner/modelname1", label="Input modelname like meta-llama/Llama-2-70b-hf", max_lines=1, interactive=True)
text2 = gr.Textbox(placeholder="owner/modelname2", label="model 2", max_lines=1, interactive=True)
text3 = gr.Textbox(placeholder="owner/modelname3", label="model 3", max_lines=1, interactive=True)
text4 = gr.Textbox(placeholder="owner/modelname4", label="model 4", max_lines=1, interactive=True)
if __name__ == '__main__':
    # Build and launch the Gradio UI; `greet` receives the four model-name
    # textboxes and returns markdown rendered into the output component.
    interFace = gr.Interface(
        fn=greet,
        inputs=[text1, text2, text3, text4],
        outputs=[gr.Markdown(value="")],
        title="LLM Config Comparer⚖️",
        description="Please copy and paste the owner name / model name from the Hugging Face model hub.\nThe same input can produce different results, so please resubmit if the results are not ideal.",
        theme='finlaymacklon/smooth_slate',
        # NOTE(review): newer Gradio versions expect allow_flagging="never"
        # rather than False — confirm against the pinned Gradio version.
        allow_flagging=False
    )
    interFace.launch(share=False)