File size: 1,692 Bytes
1f95db9
 
 
6dba01e
1f95db9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3364520
1f95db9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import os
import gradio as gr
import pinecone
from llama_index import GPTIndexMemory, GPTPineconeIndex
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain import OpenAI
from langchain.agents import initialize_agent

# --- Configuration & agent wiring (runs at import time) ---------------------
# Both keys are required; os.environ[...] raises KeyError if either is unset.
OPENAI_API_KEY=os.environ["OPENAI_API_KEY"]
PINECONE_API_KEY=os.environ["PINECONE_API_KEY"]

# NOTE(review): pinecone.init(...) is the pre-1.0 pinecone-client API;
# newer clients use pinecone.Pinecone(...). Environment is hard-coded.
pinecone.init(api_key=PINECONE_API_KEY, environment="us-east1-gcp")

# Connect to the existing "sejarah" index and wrap it for llama_index.
# Passing [] means no new documents are inserted — query-only over what the
# index already contains.
pindex=pinecone.Index("sejarah")
indexed_pinecone=GPTPineconeIndex([], pinecone_index=pindex)

# Expose the index to the agent as a single tool. return_direct=True makes
# the agent return the tool's answer verbatim instead of reasoning further.
tools = [
    Tool(
        name = "GPT Index",
        func=lambda q: str(indexed_pinecone.query(q)),
        description="useful for when you want to answer questions about the author. The input to this tool should be a complete english sentence.",
        return_direct=True
    )
]
# Conversation memory is itself backed by the same Pinecone index; the agent
# reads prior turns under the "chat_history" key.
memory = GPTIndexMemory(index=indexed_pinecone, memory_key="chat_history", query_kwargs={"response_mode": "compact"})
# temperature=0 for deterministic answers. NOTE(review): passing a chat model
# name to langchain's OpenAI (completion) wrapper works on old versions but
# ChatOpenAI is the intended class — confirm against the pinned langchain.
llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo")
agent_chain = initialize_agent(tools, llm, agent="conversational-react-description", memory=memory, verbose=True)

def predict(input, history=None):
    """Run the conversational agent on *input* and record the exchange.

    Args:
        input: The user's message (plain text from the Gradio textbox).
        history: Accumulated list of ``(user, bot)`` message pairs. Defaults
            to a fresh empty list — the original mutable-default ``[]`` would
            be shared across calls if anything ever mutated it in place.

    Returns:
        A ``(history, history)`` pair: the same updated list feeds both the
        Chatbot display and the State component in the Gradio UI.
    """
    if history is None:
        history = []
    response = agent_chain.run(input)
    # Build a new list rather than mutating the caller's state in place,
    # so the Gradio State component sees a distinct updated object.
    history = history + [(input, response)]
    return history, history

# --- Gradio UI --------------------------------------------------------------
# Minimal chat layout: a Chatbot display plus hidden State holding the
# (user, bot) pair history that predict() returns.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])

    with gr.Row():
        # NOTE(review): .style(container=False) is the pre-4.x Gradio API —
        # removed in Gradio 4; confirm the pinned gradio version.
        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)

    # On Enter: predict(txt, state) -> (chatbot, state); both outputs receive
    # the same updated history list.
    txt.submit(predict, [txt, state], [chatbot, state])
    # txt.submit(agent_executor.run, [txt, state], [chatbot, state])

# Starts the local web server (blocking call).
demo.launch()