Spaces:
Running
Running
File size: 3,135 Bytes
cd20a0b 992602c 3269765 25931fe da31fca 992602c f856dd5 992602c c9c62e4 992602c 7fe9dd8 992602c 7fe9dd8 992602c 73044d6 992602c 73044d6 3f3c7f6 73044d6 45a1572 7fe9dd8 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 |
import gradio as gr
import os
import requests
import openai
def greet(modelpath1="",modelpath2="",modelpath3="",modelpath4="",modelpath5=""):
    """Compare the config.json files of up to five Hugging Face models.

    Fetches each model's ``config.json`` from the Hub (gated models are
    read from a mirror inside this Space), sends all configs to the
    OpenAI chat API with a prompt taken from the environment, and
    returns a Markdown string containing the comparison table.

    Parameters are ``owner/modelname`` identifiers; empty strings are
    ignored. Returns a plain error message string when fewer than two
    names are given or when no config file could be fetched.

    Requires env vars: ``prompts``, ``prompts2`` (system prompt parts)
    and ``APIKEY`` (OpenAI key). Performs network I/O.
    """
    # Models whose Hub configs are gated/inaccessible; their configs are
    # mirrored inside this Space instead.
    mirrored = {
        "meta-llama/Llama-2-7b-hf",
        "meta-llama/Llama-2-13b-hf",
        "meta-llama/Llama-2-70b-hf",
        "meta-llama/Llama-2-7b-chat-hf",
        "meta-llama/Llama-2-13b-chat-hf",
        "meta-llama/Llama-2-70b-chat-hf",
        "stabilityai/japanese-stablelm-instruct-alpha-7b",
        "tiiuae/falcon-180B",
    }
    names = [
        name
        for name in (modelpath1, modelpath2, modelpath3, modelpath4, modelpath5)
        if name != ""
    ]
    if not names:
        return "Please enter at least one model name."
    if len(names) < 2:
        return "Please enter at least 2 model names."
    urls = [
        "https://huggingface.co/spaces/gojiteji/LLM-Comparer/raw/main/" + name + "/config.json"
        if name in mirrored
        else "https://huggingface.co/" + name + "/raw/main/config.json"
        for name in names
    ]
    configs = []
    index_to_ignore = []  # indices whose config.json could not be fetched
    for i, url in enumerate(urls):
        # timeout added so an unresponsive host cannot hang the Space forever
        get_result = requests.get(url, timeout=30)
        if get_result.status_code == 200:
            configs.append(get_result.json())
        else:
            configs.append("")  # keep positions aligned with `names`
            index_to_ignore.append(i)
    # BUG FIX: the original tested `configs == []`, which is never true
    # because a placeholder is appended for every failed fetch. Detect
    # "all models failed" via index_to_ignore instead.
    if len(index_to_ignore) == len(names):
        return "Could not find any models. Please check the model name."
    gpt_input = ""
    gpt_input += os.environ["prompts"]+os.environ["prompts2"] +"\n\n"
    for i in range(len(names)):
        if i not in index_to_ignore:
            gpt_input += "modelname: " + names[i] + "\n" + " config file:" + str(configs[i]) + "\n\n"
    openai.api_key = os.environ["APIKEY"]
    # NOTE(review): openai.ChatCompletion is the pre-1.0 openai-python API;
    # pin openai<1.0 or migrate to openai.OpenAI().chat.completions.create.
    response = openai.ChatCompletion.create(
        model="gpt-4o-mini",
        messages=[
            {
                "role": "system",
                "content": gpt_input
            },
        ],
    )
    response_text = ""
    for i in index_to_ignore:
        response_text += "- **⚠️Config file inaccessible:** " + names[i] +"\n"
    table_text = response["choices"][0]["message"]["content"]
    # Keep only the Markdown table: from the first '|' to the last '|'.
    response_text += "\n"+table_text[table_text.find("|")+1:table_text.rfind("|")+1]
    return response_text
# Input boxes for model identifiers in Hub "owner/modelname" form.
# BUG FIX: placeholders said "ower/..." — typo for "owner/...".
text1 = gr.Textbox(placeholder="owner/modelname1", label="Input modelname like meta-llama/Llama-2-70b-hf", max_lines=1, interactive=True)
text2 = gr.Textbox(placeholder="owner/modelname2", label="model 2", max_lines=1, interactive=True)
text3 = gr.Textbox(placeholder="owner/modelname3", label="model 3", max_lines=1, interactive=True)
text4 = gr.Textbox(placeholder="owner/modelname4", label="model 4", max_lines=1, interactive=True)
if __name__ == '__main__':
    # Build and launch the Gradio UI. NOTE(review): greet() accepts five
    # model names but only four textboxes are wired here; the fifth
    # parameter defaults to "" and is simply unused.
    interFace = gr.Interface(
        fn=greet,
        inputs=[text1, text2, text3, text4],
        outputs=[gr.Markdown(value="")],
        title="LLM Config Comparer⚖️",
        description="Please copy and paste the owner name / model name from the Hugging Face model hub.\nThe same input can produce different results, so please resubmit if the results are not ideal.",
        theme='finlaymacklon/smooth_slate',
        # BUG FIX: allow_flagging expects "never"/"auto"/"manual";
        # the boolean False is not an accepted value in current Gradio.
        allow_flagging="never",
    )
    interFace.launch(share=False)
|