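# Streamlit chatbot that uses the Google Generative AI (Gemini) API to produce
# structured financial summaries. Launch the app with: streamlit run <this file>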
import streamlit as st
import google.generativeai as genai
from dotenv import load_dotenv
import os

# Load environment variables
load_dotenv()

# Configure Google Generative AI with API key
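# The key is read from the environment (populated above by load_dotenv), e.g. a
# .env file next to this script containing: GOOGLE_API_KEY=<your-api-key>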
api_key = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=api_key)

# Initialize the session state to store chat history
if 'messages' not in st.session_state:
    st.session_state['messages'] = []

# Keep the chat session in st.session_state so it survives Streamlit reruns
# (a module-level global would be reset to None on every interaction)
if 'chat' not in st.session_state:
    st.session_state['chat'] = None

# Generation configuration and safety settings
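# temperature/top_p/top_k control sampling randomness; max_output_tokens caps reply length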
generation_config = {
    "temperature": 0.9,
    "top_p": 0.5,
    "top_k": 5,
    "max_output_tokens": 1000,
}

safety_settings = [
    {
        "category": "HARM_CATEGORY_HARASSMENT",
        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
    },
    {
        "category": "HARM_CATEGORY_HATE_SPEECH",
        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
    },
    {
        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
    },
    {
        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
    },
]

# Function to handle text summary requests
def text_summary(text, isNew=False):
    if isNew or st.session_state['chat'] is None:  # Start a new chat session
        model = genai.GenerativeModel(
            model_name="gemini-pro",
            generation_config=generation_config,
            safety_settings=safety_settings
        )
        chat = model.start_chat()
        chat.send_message("""
        Act as a financial advisor and generate financial summaries in a structured and tabular format. Follow these guidelines strictly:
        - Start each section with a clear title in <strong> tags.
        - For key metrics, use a table with two columns: one for the metric name and one for its value.
        - Use bullet points only for listing risks and growth prospects.
        - Ensure each section is clearly separated with line breaks.
        - Do not use bold or italic markdown markers (** or *), except for the specified HTML tags.
        """)
        st.session_state['chat'] = chat

    # Send the message on the persistent chat session and return the model's reply
    response = st.session_state['chat'].send_message(text)
    return response.text

# Layout for chatbot UI
st.title("Financial Summary Chatbot")

# Chat history container (This is where the conversation will appear)
chat_container = st.container()

# Input container (This will stay at the bottom)
input_container = st.container()

# Function to display the chat history
def display_chat():
    with chat_container:
        # Loop through session messages and display them
        for message in st.session_state['messages']:
            if message['role'] == 'user':
                st.write(f"**You:** {message['content']}")
            else:
                st.write(f"**Bot:** {message['content']}")

# Fixed input area at the bottom using the input container; a form with
# clear_on_submit=True resets the text area after sending (reassigning
# user_input = "" would not clear the widget)
with input_container:
    with st.form("chat_form", clear_on_submit=True):
        is_new_session = st.checkbox("Start new session", value=False)
        user_input = st.text_area("Type your message here:", height=100)
        send_button = st.form_submit_button("Send")

    # If the user pressed 'Send' with a non-empty message
    if send_button and user_input:
        # Store the user's input
        st.session_state['messages'].append({"role": "user", "content": user_input})

        # Call the text_summary function to get the bot's response
        bot_response = text_summary(user_input, is_new_session)

        # Store the bot's response
        st.session_state['messages'].append({"role": "bot", "content": bot_response})

# Display the chat history
display_chat()