import streamlit as st
import chromadb
from chromadb.utils import embedding_functions
import groq
from typing import Dict
import os


class CourseAdvisor:
    def __init__(self, db_path: str = "./chroma_db"):
        """Initialize the course advisor with existing ChromaDB database."""
        # Initialize persistent client with path
        self.chroma_client = chromadb.PersistentClient(path=db_path)

        # Initialize embedding function
        self.embedding_function = embedding_functions.SentenceTransformerEmbeddingFunction(
            model_name="jinaai/jina-embeddings-v2-base-en"
        )

        # Get existing collection
        self.collection = self.chroma_client.get_collection(
            name="courses",
            embedding_function=self.embedding_function
        )

    def query_courses(self, query_text: str, chat_history: str, api_key: str, n_results: int = 3) -> Dict:
        """Query the vector database and get course recommendations."""
        # Initialize Groq client with provided API key
        groq_client = groq.Groq(api_key=api_key)

        try:
            # Get relevant documents from vector DB
            results = self.collection.query(
                query_texts=[query_text],
                n_results=min(n_results, self.collection.count()),
                include=['documents', 'metadatas']
            )

            # Prepare context from retrieved documents
            docs_context = "\n\n".join(results['documents'][0])
        except Exception as e:
            st.error(f"Error querying database: {str(e)}")
            return {
                'llm_response': "I encountered an error while searching the course database. Please try again.",
                'retrieved_courses': []
            }

        # Create prompt with chat history
        prompt = f"""Previous conversation:
{chat_history}

Current user query: {query_text}

Relevant course information:
{docs_context}

Please provide course recommendations based on the entire conversation context. Format your response as:
1. Understanding of the user's needs (based on conversation history)
2. Overall recommendation with reasoning
3. Specific benefits of each recommended course
4. Learning path suggestion (if applicable)
5. Any prerequisites or important notes"""

        try:
            # Get response from Groq
            completion = groq_client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": "You are a helpful course advisor who provides detailed, relevant course recommendations based on the user's needs and conversation history. Keep responses clear and well-structured."
                    },
                    {"role": "user", "content": prompt}
                ],
                model="mixtral-8x7b-32768",
                temperature=0.7,
            )

            return {
                'llm_response': completion.choices[0].message.content,
                'retrieved_courses': results['metadatas'][0]
            }
        except Exception as e:
            st.error(f"Error with Groq API: {str(e)}")
            return {
                'llm_response': "I encountered an error while generating recommendations. Please check your API key and try again.",
                'retrieved_courses': []
            }


def initialize_session_state():
    """Initialize session state variables."""
    if 'messages' not in st.session_state:
        st.session_state.messages = []
    if 'course_advisor' not in st.session_state:
        st.session_state.course_advisor = CourseAdvisor()
    if 'api_key' not in st.session_state:
        st.session_state.api_key = ""


def get_chat_history() -> str:
    """Format chat history for LLM context."""
    history = []
    for message in st.session_state.messages[-5:]:  # Only use last 5 messages for context
        role = message["role"]
        content = message["content"]
        history.append(f"{role}: {content}")
    return "\n".join(history)


def display_course_card(course: Dict):
    """Display a single course recommendation in a card format."""
    with st.container():
        # Add a light background and padding
        with st.container():
            st.markdown("""
            """, unsafe_allow_html=True)
            with st.container():
                st.markdown('