Spaces: Runtime error
Errolmking committed on
Commit · ce2be79
1 Parent(s): c474fb7
Create app.py
app.py
ADDED
@@ -0,0 +1,218 @@
from langchain.docstore.document import Document
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.memory.simple import SimpleMemory

from langchain.chains import ConversationChain, LLMChain, SequentialChain
from langchain.memory import ConversationBufferMemory, ConversationSummaryMemory

from langchain.prompts import PromptTemplate
from langchain.document_loaders import UnstructuredFileLoader

from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI

from langchain.callbacks import PromptLayerCallbackHandler
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    AIMessagePromptTemplate,
    HumanMessagePromptTemplate,
)

from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.callbacks.base import BaseCallbackHandler

from threading import Thread
from queue import Queue, Empty
from collections.abc import Generator

import itertools
import time
import os
import getpass
import json
import sys
from typing import Any, Dict, List, Union

import promptlayer
import openai
import gradio as gr

from pydantic import BaseModel, Field, validator

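# NOTE (illustrative sketch, not part of this app): load_local below assumes a
# FAISS index was built and saved to ./db ahead of time, e.g. roughly:
#   docs = UnstructuredFileLoader("standards.txt").load()  # "standards.txt" is a hypothetical source file
#   FAISS.from_documents(docs, OpenAIEmbeddings()).save_local("db")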
# Load the FAISS model (vector store)
openai.api_key = os.environ["OPENAI_API_KEY"]
db = FAISS.load_local("db", OpenAIEmbeddings())

# API keys
promptlayer.api_key = os.environ["PROMPTLAYER"]

# Define a QueueCallback, which takes a Queue object during initialization.
# Each new token is pushed to the queue.
class QueueCallback(BaseCallbackHandler):
    """Callback handler for streaming LLM responses to a queue."""

    def __init__(self, q):
        self.q = q

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        self.q.put(token)

    def on_llm_end(self, *args, **kwargs: Any) -> None:
        return self.q.empty()

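# Illustrative use of this queue-based streaming pattern on its own (not
# executed by this app; names here are hypothetical):
#   q = Queue()
#   streaming_llm = ChatOpenAI(streaming=True, callbacks=[QueueCallback(q)])
#   Thread(target=lambda: streaming_llm.predict("Say hi")).start()
#   first_token = q.get(timeout=5)  # tokens arrive as they are generated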
class DDSAgent:

    def __init__(self, name, db, prompt_template='', model_name='gpt-4', verbose=False, temp=0.2):
        self.db = db
        self.verbose = verbose
        self.llm = ChatOpenAI(
            model_name=model_name,
            temperature=temp
        )

        # The zero-shot prompt provided at creation
        self.prompt_template = prompt_template

        # The LLM used for conversation summarization
        self.summary_llm = ChatOpenAI(
            model_name=model_name,
            max_tokens=25,
            callbacks=[PromptLayerCallbackHandler(pl_tags=["froebel"])],
            streaming=False,
        )

        # Reviews conversation history and summarizes it to keep the token count down.
        # memory_key="history" so the running summary fills the {history} slot in the prompt.
        self.memory = ConversationSummaryMemory(llm=self.summary_llm,
                                                max_token_limit=200,
                                                memory_key="history",
                                                input_key="input")

    def chain(self, prompt: PromptTemplate, llm: ChatOpenAI) -> LLMChain:
        return LLMChain(
            llm=llm,
            prompt=prompt,
            verbose=self.verbose,
            memory=self.memory
        )

    def lookup(self, input, num_docs=5):
        docs = self.db.similarity_search(input, k=num_docs)
        docs_to_string = ""
        for doc in docs:
            docs_to_string += str(doc.page_content)
        return docs_to_string

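    # e.g. lookup("How do I write essential questions?") returns the
    # page_content of the 5 nearest documents in the index, concatenated into
    # one string that is later injected into the {docs} slot of the prompt.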
    def stream(self, input) -> Generator:

        # Create a Queue
        q = Queue()
        job_done = object()

        # RAG: retrieve the most relevant documents for this input
        docs = self.lookup(input, 5)

        llm = ChatOpenAI(
            model_name='gpt-4',
            callbacks=[QueueCallback(q),
                       PromptLayerCallbackHandler(pl_tags=["froebel"])],
            streaming=True,
        )

        prompt = PromptTemplate(
            input_variables=['input', 'docs', 'history'],
            template=self.prompt_template
            # partial_variables={"format_instructions": self.parser.get_format_instructions()}
        )

        # Create a function to call - this will run in a thread
        def task():
            # The chain's memory supplies {history}; we pass the rest.
            resp = self.chain(prompt, llm).run(
                {'input': input,
                 'docs': docs})
            q.put(job_done)

        # Create a thread and start the function
        t = Thread(target=task)
        t.start()

        content = ""

        # Get each new token from the queue and yield for our generator
        while True:
            try:
                next_token = q.get(True, timeout=1)
                if next_token is job_done:
                    break
                content += next_token
                yield next_token, content
            except Empty:
                continue

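# Consuming DDSAgent.stream directly (illustrative sketch):
#   for token, text_so_far in dds.stream("Help me plan a unit on fractions"):
#       print(token, end="", flush=True)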
agent_prompt = """
Roleplay
You are a UBD ( Understanding by Design ) coach.
Educators come to you to develop UBD-based learning experiences
and curriculum.

This is the conversation up until now:
{history}

The teacher says:
{input}

As a result, the following standards were matched:
{docs}

Respond to the teacher's message.

You have three objectives:

a) to help them through the design process
b) to help simplify the process for the educator
c) to help build confidence and understanding in the UBD process

Take it step by step.
Keep focused on the current task at hand.
Close with a single guiding step in the form of a question.
Be encouraging.

Do not start with "AI:" or any self-identifying text.

"""

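# The {history}, {input} and {docs} placeholders above must line up with the
# input_variables declared on the PromptTemplate in stream().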
dds = DDSAgent('agent', db, prompt_template=agent_prompt)

def ask_agent(input, history):
    # The Gradio history argument is unused: conversation state is kept in
    # the agent's summary memory instead.
    for next_token, content in dds.stream(input):
        yield content

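# Because ask_agent is a generator that yields the accumulated reply so far,
# gr.ChatInterface renders the response incrementally as tokens arrive.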
gr.ChatInterface(
    ask_agent,
    title="UBD Coach",
    description="""
    Using the Understanding By Design framework? I can help. (/◕ヮ◕)/
    """,
    theme="monochrome",
    retry_btn=None,
    undo_btn=None,
    clear_btn=None,
).queue().launch(debug=True)