import os
import sys
import getpass
import subprocess  # Used to download the spaCy model when it is missing
from typing import Optional

import spacy  # spaCy provides the NER used for topic extraction
import pandas as pd
from langchain.docstore.document import Document
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms.base import LLM  # Base class for the custom Gemini wrapper
from smolagents import CodeAgent, DuckDuckGoSearchTool, ManagedAgent, LiteLLMModel

# Import the chain builders from our separate files
from classification_chain import get_classification_chain
from refusal_chain import get_refusal_chain
from tailor_chain import get_tailor_chain
from cleaner_chain import get_cleaner_chain, CleanerChain

# 1) Environment: set up keys if missing
if not os.environ.get("GEMINI_API_KEY"):
    os.environ["GEMINI_API_KEY"] = getpass.getpass("Enter your Gemini API Key: ")
if not os.environ.get("GROQ_API_KEY"):
    os.environ["GROQ_API_KEY"] = getpass.getpass("Enter your GROQ API Key: ")

# 2) Ensure the spaCy NER model is installed, then load it
def install_spacy_model():
    try:
        # Check whether the model is already installed
        spacy.load("en_core_web_sm")
        print("spaCy model 'en_core_web_sm' is already installed.")
    except OSError:
        # Download with the interpreter running this script so the model
        # lands in the correct environment
        print("Downloading spaCy model 'en_core_web_sm'...")
        subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"], check=True)
        print("spaCy model 'en_core_web_sm' downloaded successfully.")

# Call the function to install the spaCy model if needed
install_spacy_model()

# Load the spaCy model globally
nlp = spacy.load("en_core_web_sm")

# Function to extract the main topic using NER
def extract_main_topic(query: str) -> str:
    """
    Extracts the main topic from the user's query using spaCy's NER.
    Returns the first relevant named entity, falling back to the first
    noun or proper noun, and finally to a generic placeholder.
    """
    doc = nlp(query)  # Use the globally loaded spaCy model

    # Prefer a named entity of a relevant type (extend this list as needed)
    main_topic = None
    for ent in doc.ents:
        if ent.label_ in ["ORG", "PRODUCT", "PERSON", "GPE", "TIME"]:
            main_topic = ent.text
            break

    # If no named entity was found, fall back to the first noun or proper noun
    if not main_topic:
        for token in doc:
            if token.pos_ in ["NOUN", "PROPN"]:
                main_topic = token.text
                break

    # Return the extracted topic, or a generic fallback if nothing was found
    return main_topic if main_topic else "this topic"
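
# For illustration: a query like "Do you sell Nike products?" would typically
# yield "Nike" (an ORG entity), while "tell me about meditation" has no entity
# and falls back to the first noun, "meditation". Exact results depend on the
# spaCy model's predictions, so treat these examples as indicative only.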

# 3) Build a FAISS vectorstore from a Q&A CSV, or load a previously saved one
def build_or_load_vectorstore(csv_path: str, store_dir: str) -> FAISS:
    if os.path.exists(store_dir):
        print(f"DEBUG: Found existing FAISS store at '{store_dir}'. Loading...")
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/multi-qa-mpnet-base-dot-v1")
        vectorstore = FAISS.load_local(store_dir, embeddings)
        return vectorstore
    else:
        print(f"DEBUG: Building new store from CSV: {csv_path}")
        df = pd.read_csv(csv_path)
        df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
        df.columns = df.columns.str.strip()
        if "Answer" in df.columns:
            df.rename(columns={"Answer": "Answers"}, inplace=True)
        if "Question" not in df.columns and "Question " in df.columns:
            df.rename(columns={"Question ": "Question"}, inplace=True)
        if "Question" not in df.columns or "Answers" not in df.columns:
            raise ValueError("CSV must have 'Question' and 'Answers' columns.")
        docs = []
        for _, row in df.iterrows():
            q = str(row["Question"])
            ans = str(row["Answers"])
            doc = Document(page_content=ans, metadata={"question": q})
            docs.append(doc)
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/multi-qa-mpnet-base-dot-v1")
        vectorstore = FAISS.from_documents(docs, embedding=embeddings)
        vectorstore.save_local(store_dir)
        return vectorstore

# 4) Build a RetrievalQA (RAG) chain on top of Gemini
def build_rag_chain(llm_model: LiteLLMModel, vectorstore: FAISS) -> RetrievalQA:
    # Minimal LangChain-compatible LLM wrapper that delegates calls to the
    # smolagents LiteLLMModel, so Gemini can power a RetrievalQA chain
    class GeminiLangChainLLM(LLM):
        def _call(self, prompt: str, stop: Optional[list] = None, **kwargs) -> str:
            messages = [{"role": "user", "content": prompt}]
            return llm_model(messages, stop_sequences=stop)

        @property
        def _llm_type(self) -> str:
            return "custom_gemini"
    
    retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 3})
    gemini_as_llm = GeminiLangChainLLM()
    rag_chain = RetrievalQA.from_chain_type(
        llm=gemini_as_llm,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True
    )
    return rag_chain

# 5) Initialize the separate prompt chains
classification_chain = get_classification_chain()
refusal_chain = get_refusal_chain()  # Refusal message is parameterized by the extracted topic
tailor_chain = get_tailor_chain()
cleaner_chain = get_cleaner_chain()

# 6) Build our vectorstores + RAG chains
wellness_csv = "AIChatbot.csv"
brand_csv = "BrandAI.csv"
wellness_store_dir = "faiss_wellness_store"
brand_store_dir = "faiss_brand_store"

wellness_vectorstore = build_or_load_vectorstore(wellness_csv, wellness_store_dir)
brand_vectorstore = build_or_load_vectorstore(brand_csv, brand_store_dir)

gemini_llm = LiteLLMModel(model_id="gemini/gemini-pro", api_key=os.environ.get("GEMINI_API_KEY"))
wellness_rag_chain = build_rag_chain(gemini_llm, wellness_vectorstore)
brand_rag_chain = build_rag_chain(gemini_llm, brand_vectorstore)

# 7) Tools / agents for web search
search_tool = DuckDuckGoSearchTool()
web_agent = CodeAgent(tools=[search_tool], model=gemini_llm)
managed_web_agent = ManagedAgent(agent=web_agent, name="web_search", description="Runs web search for you.")
manager_agent = CodeAgent(tools=[], model=gemini_llm, managed_agents=[managed_web_agent])

def do_web_search(query: str) -> str:
    print("DEBUG: Attempting web search for more info...")
    search_query = f"Give me relevant info: {query}"
    response = manager_agent.run(search_query)
    return response

# 8) Orchestrator: run_with_chain
def run_with_chain(query: str) -> str:
    print("DEBUG: Starting run_with_chain...")
    
    # 1) Classify the query
    class_result = classification_chain.invoke({"query": query})
    classification = class_result.get("text", "").strip()
    print("DEBUG: Classification =>", classification)

    # If OutOfScope => refusal => tailor => return
    if classification == "OutOfScope":
        # Extract the main topic for the refusal message
        topic = extract_main_topic(query)
        print("DEBUG: Extracted Topic =>", topic)
        
        # Pass the extracted topic to the refusal chain
        refusal_text = refusal_chain.run({"topic": topic})
        final_refusal = tailor_chain.run({"response": refusal_text})
        return final_refusal.strip()

    # If Wellness => wellness RAG => if insufficient => web => unify => tailor
    if classification == "Wellness":
        rag_result = wellness_rag_chain({"query": query})
        csv_answer = rag_result["result"].strip()
        if not csv_answer:
            web_answer = do_web_search(query)
        else:
            lower_ans = csv_answer.lower()
            if any(phrase in lower_ans for phrase in ["i do not know", "not sure", "no context", "cannot answer"]):
                web_answer = do_web_search(query)
            else:
                web_answer = ""
        final_merged = cleaner_chain.merge(kb=csv_answer, web=web_answer)
        final_answer = tailor_chain.run({"response": final_merged})
        return final_answer.strip()

    # If Brand => brand RAG => tailor => return
    if classification == "Brand":
        rag_result = brand_rag_chain({"query": query})
        csv_answer = rag_result["result"].strip()
        final_merged = cleaner_chain.merge(kb=csv_answer, web="")
        final_answer = tailor_chain.run({"response": final_merged})
        return final_answer.strip()

    # fallback
    refusal_text = refusal_chain.run({"topic": "this topic"})
    final_refusal = tailor_chain.run({"response": refusal_text})
    return final_refusal.strip()
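
# Minimal manual smoke test, assuming the CSV files above exist and the API
# keys are set. The sample queries are illustrative, not taken from the
# datasets; adjust them to match your own knowledge base.
if __name__ == "__main__":
    for sample_query in [
        "What are some tips for managing stress?",  # expected: Wellness
        "What is the weather in Paris?",            # expected: OutOfScope
    ]:
        print(f"\nQuery: {sample_query}")
        print(f"Answer: {run_with_chain(sample_query)}")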