Errolmking committed on
Commit
0ebeec1
·
1 Parent(s): 9bdbeee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -80
app.py CHANGED
@@ -54,6 +54,9 @@ db = FAISS.load_local("db", OpenAIEmbeddings())
54
  #API Keys
55
  promptlayer.api_key = os.environ["PROMPTLAYER"]
56
 
 
 
 
57
  from langchain.callbacks import PromptLayerCallbackHandler
58
  from langchain.prompts.chat import (
59
  ChatPromptTemplate,
@@ -76,76 +79,62 @@ class QueueCallback(BaseCallbackHandler):
76
  def on_llm_end(self, *args, **kwargs: Any) -> None:
77
  return self.q.empty()
78
 
79
- class DDSAgent:
 
 
 
 
80
 
81
- def __init__(self, name, db, prompt_template='', model_name='gpt-4', verbose=False, temp=0.2):
82
- self.db = db
 
 
 
 
 
 
 
 
 
 
83
  self.verbose = verbose
84
  self.llm = ChatOpenAI(
85
- model_name="gpt-4",
86
  temperature=temp
87
  )
88
 
89
  #The zero shot prompt provided at creation
90
  self.prompt_template = prompt_template
91
 
92
- #The LLM used for conversation summarization
93
- self.summary_llm = ChatOpenAI(
94
- model_name=model_name,
95
- max_tokens=25,
96
- callbacks=[PromptLayerCallbackHandler(pl_tags=["froebel"])],
97
- streaming=False,
98
- )
99
-
100
- #Reviews convesation history and summarizes it to keep the token count down.
101
- self.memory = ConversationSummaryMemory(llm=self.summary_llm,
102
- max_token_limit=200,
103
- memory_key="memory",
104
- input_key="input")
105
-
106
  def chain(self, prompt: PromptTemplate, llm: ChatOpenAI) -> LLMChain:
107
  return LLMChain(
108
  llm=llm,
109
  prompt=prompt,
110
  verbose=self.verbose,
111
- memory=self.memory
112
  )
113
 
114
- def lookup(self, input, num_docs=5):
115
- docs = self.db.similarity_search(input, k=num_docs)
116
- docs_to_string = ""
117
- for doc in docs:
118
- docs_to_string += str(doc.page_content)
119
- return docs_to_string
120
-
121
  def stream(self, input) -> Generator:
122
 
123
  # Create a Queue
124
  q = Queue()
125
  job_done = object()
126
 
127
- #RAG
128
- docs = self.lookup(input,5)
129
-
130
  llm = ChatOpenAI(
131
- model_name='gpt-4',
132
  callbacks=[QueueCallback(q),
133
- PromptLayerCallbackHandler(pl_tags=["froebel"])],
134
  streaming=True,
135
  )
136
 
137
  prompt = PromptTemplate(
138
- input_variables=['input','docs','history'],
139
  template=self.prompt_template
140
- # partial_variables={"format_instructions": self.parser.get_format_instructions()}
141
  )
142
 
143
  # Create a function to call - this will run in a thread
144
  def task():
145
  resp = self.chain(prompt,llm).run(
146
- {'input':input,
147
- 'docs':docs,
148
- 'history':self.memory})
149
  q.put(job_done)
150
 
151
  # Create a thread and start the function
@@ -165,53 +154,32 @@ class DDSAgent:
165
  except Empty:
166
  continue
167
 
168
-
169
-
170
-
171
  agent_prompt = """
172
- Roleplay
173
- You are a UBD ( Understanding by Design ) coach.
174
- Educators come to you to develop UBD based learning experiences
175
- and curriculum.
176
-
177
- This is the conversation up until now:
178
- {history}
179
-
180
- The teacher says:
181
  {input}
182
-
183
- As a result, following standards were matched:
184
- {docs}
185
-
186
- Respond to the teacher message.
187
-
188
- You have three objectives:
189
-
190
- a) to help them through the design process
191
- b) to help simplify the process for the educator
192
- c) to help build confidence and understand in the ubd process
193
-
194
- Take it step by step and keep.
195
- Keep focused on the current task at hand.
196
- Close with a single guiding step in the form of a question.
197
- Be encouraging.
198
-
199
- Do not start with "AI:" or any self identifying text.
200
-
201
  """
202
 
203
- def ask_agent(input, history):
204
- dds = DDSAgent('agent', db, prompt_template=agent_prompt)
205
- for next_token, content in dds.stream(input):
 
206
  yield(content)
207
 
208
- gr.ChatInterface(ask_agent,
209
- title="UBD Coach",
210
- description="""
211
- Using the Understanding By Design framework? I can help. (/◕ヮ◕)/
212
- """,
213
- theme="monochrome",
214
- retry_btn=None,
215
- undo_btn=None,
216
- clear_btn=None
217
- ).queue().launch(debug=True)
 
54
  #API Keys
55
  promptlayer.api_key = os.environ["PROMPTLAYER"]
56
 
57
+ MODEL = "gpt-3.5-turbo"
58
+ # MODEL = "gpt-4"
59
+
60
  from langchain.callbacks import PromptLayerCallbackHandler
61
  from langchain.prompts.chat import (
62
  ChatPromptTemplate,
 
79
  def on_llm_end(self, *args, **kwargs: Any) -> None:
80
  return self.q.empty()
81
 
82
+ MODEL = "gpt-3.5-turbo"
83
+
84
+ # Defines a QueueCallback, which takes a Queue object during initialization. Each new token is pushed to the queue.
85
+ class QueueCallback(BaseCallbackHandler):
86
+ """Callback handler for streaming LLM responses to a queue."""
87
 
88
+ def __init__(self, q):
89
+ self.q = q
90
+
91
+ def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
92
+ self.q.put(token)
93
+
94
+ def on_llm_end(self, *args, **kwargs: Any) -> None:
95
+ return self.q.empty()
96
+
97
+ class UnitGenerator:
98
+
99
+ def __init__(self, prompt_template='', model_name=MODEL, verbose=False, temp=0.2):
100
  self.verbose = verbose
101
  self.llm = ChatOpenAI(
102
+ model_name=MODEL,
103
  temperature=temp
104
  )
105
 
106
  #The zero shot prompt provided at creation
107
  self.prompt_template = prompt_template
108
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
  def chain(self, prompt: PromptTemplate, llm: ChatOpenAI) -> LLMChain:
110
  return LLMChain(
111
  llm=llm,
112
  prompt=prompt,
113
  verbose=self.verbose,
 
114
  )
115
 
 
 
 
 
 
 
 
116
  def stream(self, input) -> Generator:
117
 
118
  # Create a Queue
119
  q = Queue()
120
  job_done = object()
121
 
 
 
 
122
  llm = ChatOpenAI(
123
+ model_name=MODEL,
124
  callbacks=[QueueCallback(q),
125
+ PromptLayerCallbackHandler(pl_tags=["unit-generator"])],
126
  streaming=True,
127
  )
128
 
129
  prompt = PromptTemplate(
130
+ input_variables=['input'],
131
  template=self.prompt_template
 
132
  )
133
 
134
  # Create a function to call - this will run in a thread
135
  def task():
136
  resp = self.chain(prompt,llm).run(
137
+ {'input':input})
 
 
138
  q.put(job_done)
139
 
140
  # Create a thread and start the function
 
154
  except Empty:
155
  continue
156
 
 
 
 
157
  agent_prompt = """
158
+ Take the following class overview (delimited by three dollar signs)
159
+ $$$
 
 
 
 
 
 
 
160
  {input}
161
+ $$$
162
+
163
+ Make a list of 3 big and enduring ideas that students should walk away with.
164
+ Make a list of 9 essential questions we want students to think about. These questions should be open-ended and provocative, written in "kid friendly" language, and designed to focus instruction for uncovering the important ideas of the content.
165
+ Make a list of 7 ideas, concepts, generalizations and principles we want students to know and understand about the unit or topic we are teaching.
166
+ Make a list of critical skills describing what we want students to be able to do. Each item should begin with "Students will be able to..."
167
+ Make a list of 7 potential assessments we can use to give students opportunities to demonstrate their skills.
168
+ Make a list of 7 ways that I might adapt the experience for different learning styles. Make a list of 5 materials and technologies that I can use to enhance the learning experience.
169
+ Make a list of 4 learning activities that support and scaffold the learning experience.
 
 
 
 
 
 
 
 
 
 
170
  """
171
 
172
+ unit_generator = UnitGenerator(prompt_template=agent_prompt)
173
+
174
+ def generate_unit(input):
175
+ for next_token, content in unit_generator.stream(input):
176
  yield(content)
177
 
178
+ gr.Interface(generate_unit,
179
+ [gr.Textbox(
180
+ label="Enter your unit vision.",
181
+ info="Provide high level details for the learning experience."
182
+ )],
183
+ [gr.Textbox(
184
+ label="Unit",
185
+ )],allow_flagging="never").queue().launch(debug=True, share=True)