import openai
import gradio as gr
import subprocess

openai.api_key = "EMPTY"  # The key is ignored by the Gorilla server and does not matter
openai.api_base = "http://zanino.millennium.berkeley.edu:8000/v1"

# Query Gorilla Server
def get_gorilla_response(prompt, model):
    try:
        completion = openai.ChatCompletion.create(
            model=model,
            messages=[{"role": "user", "content": prompt}]
        )
        print("Response: ", completion)
        return completion.choices[0].message.content
    except Exception as e:
        # Surface the failure to the caller instead of silently returning None
        print("Sorry, something went wrong!", e)
        return f"Request to the Gorilla server failed: {e}"

def extract_code_from_output(output):
    # Gorilla responses embed the runnable snippet after a "code>>>:" marker;
    # return the full output unchanged if the marker is missing.
    return output.split("code>>>:")[1] if "code>>>:" in output else output
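
# A minimal sketch of the expected extraction, assuming Gorilla replies in its
# usual "api_call>>>: ... code>>>: ..." format (the sample text below is
# hypothetical, not a real model response):
#
#   sample = "api_call>>>: pipeline(...)\ncode>>>: from transformers import pipeline"
#   extract_code_from_output(sample)  # -> " from transformers import pipeline"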

def run_generated_code(file_path):
    # Command to run the generated code using Python interpreter
    command = ["python", file_path]
    try:
        # Execute the command as a subprocess and capture the output and error streams
        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        if result.returncode == 0:
            return "Generated code executed successfully.\n" + result.stdout
        else:
            return "Generated code execution failed with the following error:\n" + result.stderr
    except Exception as e:
        return "Error occurred while running the generated code: " + str(e)

def gorilla_magic(openai_key, input_prompt, model_option):
    openai.api_key = openai_key
    # Always return three values so they match the three Gradio output components
    if len(input_prompt) == 0:
        return "", "", ""
    result = get_gorilla_response(prompt=input_prompt, model=model_option)
    code_result = extract_code_from_output(result)
    # Write the extracted code to a file named after the selected model
    file_path = f"generated_code_{model_option}.py"
    with open(file_path, 'w') as file:
        file.write(code_result)
    execution_result = run_generated_code(file_path)
    return result, code_result, execution_result

iface = gr.Interface(
    fn=gorilla_magic,
    inputs=[
        gr.inputs.Textbox(lines=1, placeholder="Enter your OpenAI key here:"),
        gr.inputs.Textbox(lines=5, placeholder="Enter your prompt below:"),
        gr.inputs.Dropdown(choices=['gorilla-7b-hf-v1', 'gorilla-mpt-7b-hf-v0'], label="Select a model option from the list:")
    ],
    outputs=[
        gr.outputs.Textbox(label="Response"),
        gr.outputs.Textbox(label="Generated Code"), # 使用 Textbox 來顯示代碼
        # gr.outputs.Textbox(label="Execution Result")
    ],
    live=True
)

iface.launch(share=False)