# J-Assistant_ISW / app.py
import streamlit as st
import json
import os
from groq import Groq
# Function to recursively extract all unique keys
def extract_keys(data, parent_key='', keys_set=None):
    """Walk a nested dict/list structure and collect every dotted key path."""
    if keys_set is None:
        keys_set = set()
    if isinstance(data, dict):
        for key, value in data.items():
            full_key = f"{parent_key}.{key}" if parent_key else key
            keys_set.add(full_key)
            extract_keys(value, full_key, keys_set)
    elif isinstance(data, list):
        # Lists do not add a path segment; recurse into each element.
        for item in data:
            extract_keys(item, parent_key, keys_set)
    return keys_set
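# A minimal sketch of extract_keys on an assumed payload (illustration only, not app data):
#   extract_keys({"user": {"name": "Ada", "tags": ["x", "y"]}})
#   -> {"user", "user.name", "user.tags"}
# List elements share their parent's path, so array entries add no extra segment.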
# Function to recursively extract all values for a specific full key path
def extract_values(data, full_key_path, parent_key=''):
    """Collect every value reachable at the dotted key path, wherever it occurs."""
    values = []
    current_key = full_key_path.split('.')[0]
    remaining_path = '.'.join(full_key_path.split('.')[1:])
    if isinstance(data, dict):
        for key, value in data.items():
            if key == current_key:
                if remaining_path:
                    values.extend(extract_values(value, remaining_path))
                else:
                    values.append(value)
            elif isinstance(value, (dict, list)):
                # The path may begin deeper in the structure; keep searching.
                values.extend(extract_values(value, full_key_path, parent_key=key))
    elif isinstance(data, list):
        for item in data:
            values.extend(extract_values(item, full_key_path))
    return values
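# A minimal sketch of extract_values on the same assumed payload (illustration only):
#   extract_values({"user": {"name": "Ada", "tags": ["x", "y"]}}, "user.name")  -> ["Ada"]
#   extract_values({"user": {"name": "Ada", "tags": ["x", "y"]}}, "user.tags")  -> [["x", "y"]]
# Every match along the path is collected, so repeated keys yield multiple entries.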
# Streamlit App
def main():
    st.title("JSON Enquiry")

    # Sidebar for file upload and key selection
    st.sidebar.title("Options")
    uploaded_file = st.sidebar.file_uploader("Upload a JSON file", type="json")

    if uploaded_file is not None:
        try:
            data = json.load(uploaded_file)
        except json.JSONDecodeError:
            st.sidebar.error("The uploaded file is not valid JSON.")
            st.stop()

        # Extract unique keys
        unique_keys = extract_keys(data)

        # Display the unique keys
        st.sidebar.subheader("Unique Keys")
        selected_keys = st.sidebar.multiselect("Select keys to extract values for", sorted(unique_keys))

        if selected_keys:
            st.subheader("Extracted Values")
            selected_json_content = {}
            for key in selected_keys:
                values = extract_values(data, key)
                selected_json_content[key] = values
                st.write(f"**Values for '{key}':**")
                st.write(values)

            # Convert selected content to a JSON string
            selected_json_string = json.dumps(selected_json_content, indent=2)

            # Create a prompt using the prefabricated template
            prefabricated_prompt = f"Convert the given json object into unstructured readable paragraph. Output just the final paragraph. json: {selected_json_string}"
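            # Illustrative sketch of what gets sent to the model when the button below is
            # clicked (assumed example, not app data): for a hypothetical upload
            # {"user": {"name": "Ada", "tags": ["x", "y"]}} with "user.name" and "user.tags"
            # selected, selected_json_content would be
            #   {"user.name": ["Ada"], "user.tags": [["x", "y"]]}
            # and prefabricated_prompt embeds that dict, serialized above with indent=2.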
            # Initialize the LLM client once
            client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

            if 'messages' not in st.session_state:
                st.session_state['messages'] = []
            if st.button("Explain JSON!"):
                # Add initial prompt to messages
                st.session_state.messages.append({"role": "user", "content": prefabricated_prompt})
                chat_completion = client.chat.completions.create(
                    messages=st.session_state.messages,
                    model="llama3-8b-8192",
                )
                response_content = chat_completion.choices[0].message.content
                st.session_state.messages.append({"role": "assistant", "content": response_content})

            # Display only the assistant's responses in the chat history
            st.subheader("LLM Responses")
            for msg in st.session_state.messages:
                if msg['role'] == 'assistant':
                    st.write(f"**Assistant:** {msg['content']}")
            # Input for follow-up messages
            follow_up_message = st.text_area("Enter your message:", height=100)
            if st.button("Send Follow-Up"):
                if follow_up_message.strip() != "":
                    st.session_state.messages.append({"role": "user", "content": follow_up_message})
                    chat_completion = client.chat.completions.create(
                        messages=st.session_state.messages,
                        model="llama3-8b-8192",
                    )
                    response_content = chat_completion.choices[0].message.content
                    st.session_state.messages.append({"role": "assistant", "content": response_content})
                    st.write(f"**Assistant:** {response_content}")
                else:
                    st.warning("Please enter a message to continue the conversation.")
        else:
            st.sidebar.warning("Select at least one key to extract values.")
    else:
        st.sidebar.info("Please upload a JSON file to get started.")
if __name__ == "__main__":
    main()
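# To try this locally (assuming the streamlit and groq packages are installed and a
# GROQ_API_KEY environment variable is set), the usual Streamlit entry point applies:
#   streamlit run app.py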