Commit: "app updated". Files changed: app.py (+15, −6); utils/helper.py (+45, −7).
app.py
CHANGED
@@ -12,10 +12,13 @@
|
|
12 |
# See the License for the specific language governing permissions and
|
13 |
# limitations under the License.
|
14 |
|
15 |
-
import streamlit as st
|
16 |
from datetime import datetime
|
|
|
|
|
|
|
17 |
from utils.helper import *
|
18 |
|
|
|
19 |
def run():
|
20 |
st.set_page_config(page_title="Momentum-Strategy", page_icon="💹", layout="wide")
|
21 |
|
@@ -123,16 +126,22 @@ def run():
|
|
123 |
)
|
124 |
|
125 |
with st.expander("Please expand/collapse to chat with an AI advisor:"):
|
126 |
-
user_question = st.text_input(
|
127 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
128 |
st.markdown(ai_answer)
|
129 |
|
130 |
-
|
131 |
-
# Credit
|
132 |
def current_year():
    """Return the current calendar year as an integer."""
    # datetime.now() gives naive local time; only the year field is used.
    return datetime.now().year
|
135 |
-
|
136 |
# Example usage:
|
137 |
current_year = current_year() # This will print the current year
|
138 |
st.markdown(
|
|
|
12 |
# See the License for the specific language governing permissions and
|
13 |
# limitations under the License.
|
14 |
|
|
|
15 |
from datetime import datetime
|
16 |
+
|
17 |
+
import streamlit as st
|
18 |
+
|
19 |
from utils.helper import *
|
20 |
|
21 |
+
|
22 |
def run():
|
23 |
st.set_page_config(page_title="Momentum-Strategy", page_icon="💹", layout="wide")
|
24 |
|
|
|
126 |
)
|
127 |
|
128 |
with st.expander("Please expand/collapse to chat with an AI advisor:"):
|
129 |
+
user_question = st.text_input(
|
130 |
+
"Enter a question:", "What does the company NVIDIA do?"
|
131 |
+
)
|
132 |
+
ai_answer = call_gpt(
|
133 |
+
prompt=user_question,
|
134 |
+
content="""
|
135 |
+
You are a Financial Advisor. You know all about FinTech, Corporate Finance, Modern Portfolio Theory, and knowledge like that.
|
136 |
+
""",
|
137 |
+
)
|
138 |
st.markdown(ai_answer)
|
139 |
|
140 |
+
# Credit
|
|
|
141 |
def current_year():
    """Return the current calendar year as an integer."""
    # datetime.now() gives naive local time; only the year field is used.
    return datetime.now().year
|
144 |
+
|
145 |
# Example usage:
|
146 |
current_year = current_year() # This will print the current year
|
147 |
st.markdown(
|
utils/helper.py
CHANGED
@@ -9,6 +9,7 @@ import yfinance as yf
|
|
9 |
from langchain import LLMChain, PromptTemplate
|
10 |
from langchain.agents import initialize_agent, load_tools
|
11 |
from langchain.llms import OpenAI
|
|
|
12 |
|
13 |
|
14 |
def download_stock_data(
|
@@ -293,9 +294,7 @@ def display_simulated_ef_with_random(
|
|
293 |
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
|
294 |
|
295 |
|
296 |
-
def run_langchain_agent_(
|
297 |
-
question: str = "What is your question?"
|
298 |
-
) -> str:
|
299 |
"""
|
300 |
Executes a language chain agent to answer questions by using a series of tools.
|
301 |
|
@@ -320,9 +319,7 @@ def run_langchain_agent_(
|
|
320 |
You are a financial advisor and user has a question above regarding related tickers provided.
|
321 |
Let's think step by step.
|
322 |
Answer: """
|
323 |
-
prompt = PromptTemplate(
|
324 |
-
template=template, input_variables=["question"]
|
325 |
-
)
|
326 |
|
327 |
# Building a chain of language model actions based on the prompt template
|
328 |
llm_chain = LLMChain(prompt=prompt, llm=llm)
|
@@ -332,7 +329,10 @@ def run_langchain_agent_(
|
|
332 |
|
333 |
# Initializing the agent with the loaded tools, the language model, default name, and verbosity
|
334 |
agent = initialize_agent(
|
335 |
-
tools,
|
|
|
|
|
|
|
336 |
max_iterations=5,
|
337 |
)
|
338 |
|
@@ -341,3 +341,41 @@ def run_langchain_agent_(
|
|
341 |
|
342 |
# Return
|
343 |
return output_
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
from langchain import LLMChain, PromptTemplate
|
10 |
from langchain.agents import initialize_agent, load_tools
|
11 |
from langchain.llms import OpenAI
|
12 |
+
from openai import OpenAI
|
13 |
|
14 |
|
15 |
def download_stock_data(
|
|
|
294 |
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
|
295 |
|
296 |
|
297 |
+
def run_langchain_agent_(question: str = "What is your question?") -> str:
|
|
|
|
|
298 |
"""
|
299 |
Executes a language chain agent to answer questions by using a series of tools.
|
300 |
|
|
|
319 |
You are a financial advisor and user has a question above regarding related tickers provided.
|
320 |
Let's think step by step.
|
321 |
Answer: """
|
322 |
+
prompt = PromptTemplate(template=template, input_variables=["question"])
|
|
|
|
|
323 |
|
324 |
# Building a chain of language model actions based on the prompt template
|
325 |
llm_chain = LLMChain(prompt=prompt, llm=llm)
|
|
|
329 |
|
330 |
# Initializing the agent with the loaded tools, the language model, default name, and verbosity
|
331 |
agent = initialize_agent(
|
332 |
+
tools,
|
333 |
+
llm,
|
334 |
+
agent="zero-shot-react-description",
|
335 |
+
verbose=True,
|
336 |
max_iterations=5,
|
337 |
)
|
338 |
|
|
|
341 |
|
342 |
# Return
|
343 |
return output_
|
344 |
+
|
345 |
+
|
346 |
+
# NOTE(review): `from openai import OpenAI` shadows `langchain.llms.OpenAI`
# imported earlier in this module — confirm run_langchain_agent_ still builds
# the intended llm after this change.
openai_client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])


def call_gpt(prompt: str, content: str) -> str:
    """Ask GPT-3.5-turbo a question together with background context.

    Assembles a four-message conversation — a system role message, the
    user's question, an assistant turn asking for background, and the
    background itself — and returns the model's reply text.

    Args:
        prompt: The question or topic the user wants addressed.
        content: Background information relevant to the prompt.

    Returns:
        The text of the first completion choice returned by the model.

    Note:
        Relies on the module-level ``openai_client``, which must already be
        constructed and authenticated before this function is called.
    """
    conversation = [
        # Fixed system persona for the assistant.
        {"role": "system", "content": "You are a helpful assistant."},
        # The user's actual question.
        {"role": "user", "content": f"I want to ask you a question: {prompt}"},
        # Scripted assistant turn that solicits the background content.
        {"role": "assistant", "content": "What is the background content?"},
        # Caller-supplied background information.
        {"role": "user", "content": content},
    ]
    # Request a chat completion for the assembled conversation.
    completion = openai_client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=conversation,
    )
    # Return only the reply text of the first choice.
    return completion.choices[0].message.content
|