import os
from collections.abc import Generator
from queue import Queue, Empty
from threading import Thread
from typing import Any

import gradio as gr
import openai
import promptlayer

from langchain.callbacks import PromptLayerCallbackHandler
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.memory import ConversationSummaryMemory
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS

# API keys
openai.api_key = os.environ["OPENAI_API_KEY"]
promptlayer.api_key = os.environ["PROMPTLAYER"]

# Load the prebuilt FAISS vector index from disk
db = FAISS.load_local("db", OpenAIEmbeddings())
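
# The "db" index is assumed to be prebuilt and checked into the Space.
# A minimal sketch of how such an index could be created offline
# (assumption: "standards.txt" is a hypothetical source file of standards):
#
#   loader = UnstructuredFileLoader("standards.txt")
#   index = FAISS.from_documents(loader.load(), OpenAIEmbeddings())
#   index.save_local("db")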

# QueueCallback takes a Queue object at initialization; each new token from
# the LLM is pushed onto the queue.
class QueueCallback(BaseCallbackHandler):
    """Callback handler for streaming LLM responses to a queue."""

    def __init__(self, q):
        self.q = q

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        self.q.put(token)

    def on_llm_end(self, *args, **kwargs: Any) -> None:
        # End of generation is signaled by the caller via a sentinel on the
        # queue (see DDSAgent.stream), so there is nothing to do here.
        pass
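
# Note: in DDSAgent.stream below, a background thread pushes tokens onto the
# queue while a generator drains it; this producer/consumer pattern is what
# lets Gradio render the response incrementally.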

class DDSAgent:
    def __init__(self, name, db, prompt_template='', model_name='gpt-4', verbose=False, temp=0.2):
        self.name = name
        self.db = db
        self.verbose = verbose
        self.llm = ChatOpenAI(
            model_name=model_name,
            temperature=temp
        )
        # The zero-shot prompt provided at creation
        self.prompt_template = prompt_template
        # The LLM used for conversation summarization
        self.summary_llm = ChatOpenAI(
            model_name=model_name,
            max_tokens=25,
            callbacks=[PromptLayerCallbackHandler(pl_tags=["froebel"])],
            streaming=False,
        )
        # Reviews conversation history and summarizes it to keep the token
        # count down. memory_key matches the {history} prompt variable.
        self.memory = ConversationSummaryMemory(llm=self.summary_llm,
                                                memory_key="history",
                                                input_key="input")

    def chain(self, prompt: PromptTemplate, llm: ChatOpenAI) -> LLMChain:
        return LLMChain(
            llm=llm,
            prompt=prompt,
            verbose=self.verbose,
            memory=self.memory
        )

    def lookup(self, input, num_docs=5):
        # Retrieve the num_docs most similar chunks and join their text.
        docs = self.db.similarity_search(input, k=num_docs)
        return "\n".join(str(doc.page_content) for doc in docs)

    def stream(self, input) -> Generator:
        # Queue used to hand tokens from the LLM thread to this generator
        q = Queue()
        job_done = object()  # sentinel marking the end of generation

        # RAG: retrieve supporting documents for the prompt
        docs = self.lookup(input, 5)

        llm = ChatOpenAI(
            model_name='gpt-4',
            callbacks=[QueueCallback(q),
                       PromptLayerCallbackHandler(pl_tags=["froebel"])],
            streaming=True,
        )
        prompt = PromptTemplate(
            input_variables=['input', 'docs', 'history'],
            template=self.prompt_template
        )

        # Run the chain in a background thread; the attached memory
        # supplies the {history} variable.
        def task():
            self.chain(prompt, llm).run({'input': input, 'docs': docs})
            q.put(job_done)

        t = Thread(target=task)
        t.start()

        # Get each new token from the queue and yield it for our generator
        content = ""
        while True:
            try:
                next_token = q.get(block=True, timeout=1)
                if next_token is job_done:
                    break
                content += next_token
                yield next_token, content
            except Empty:
                continue
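
# Example of consuming the generator directly, outside Gradio (a sketch;
# assumes the index, API keys, and the agent_prompt defined below):
#
#   agent = DDSAgent('agent', db, prompt_template=agent_prompt)
#   for token, text_so_far in agent.stream("Help me plan a unit on fractions"):
#       print(token, end="", flush=True)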
agent_prompt = """ | |
Roleplay | |
You are a UBD ( Understanding by Design ) coach. | |
Educators come to you to develop UBD based learning experiences | |
and curriculum. | |
This is the conversation up until now: | |
{history} | |
The teacher says: | |
{input} | |
As a result, following standards were matched: | |
{docs} | |
Respond to the teacher message. | |
You have three objectives: | |
a) to help them through the design process | |
b) to help simplify the process for the educator | |
c) to help build confidence and understand in the ubd process | |
Take it step by step and keep. | |
Keep focused on the current task at hand. | |
Close with a single guiding step in the form of a question. | |
Be encouraging. | |
Do not start with "AI:" or any self identifying text. | |
""" | |

dds = DDSAgent('agent', db, prompt_template=agent_prompt)

def ask_agent(input, history):
    # Gradio supplies the chat history; the agent keeps its own summary memory.
    for next_token, content in dds.stream(input):
        yield content

gr.ChatInterface(ask_agent,
                 title="UBD Coach",
                 description="""
                 Using the Understanding By Design framework? I can help. (/◕ヮ◕)/
                 """,
                 theme="monochrome",
                 retry_btn=None,
                 undo_btn=None,
                 clear_btn=None
                 ).queue().launch(debug=True)