from langchain_core.messages import HumanMessage, SystemMessage, BaseMessage
from langchain_community.chat_models import ChatPerplexity
from langchain_openai import ChatOpenAI

from .prompts import general_model_prompt, opportunity_search_prompt


def invoke_general_model(user_question: str) -> BaseMessage:
    """Invoke the general model to answer general questions related to sales."""
    # ChatOpenAI reads the API key from the OPENAI_API_KEY environment variable.
    model = ChatOpenAI(model="gpt-4o-mini")
    system_message = SystemMessage(content=general_model_prompt)
    human_message = HumanMessage(content=user_question)
    response = model.invoke([system_message, human_message])
    return response
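# Expected behavior (per the inline tests under __main__ below): sales questions such
# as "What is MEDDPICC?" receive a substantive answer, while off-topic questions are
# declined with "I'm only here to assist you with sales processes and closing deals."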


def invoke_customer_search(customer_name: str) -> BaseMessage:
    """Invoke a Perplexity search on the given customer name."""
    model = ChatPerplexity()
    message = HumanMessage(content=opportunity_search_prompt.format(customer_name))
    response = model.invoke([message])
    return response
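# Note: ChatPerplexity() above relies on the integration's defaults, i.e. the Perplexity
# API key taken from the environment (typically PPLX_API_KEY) and the default model;
# pass ChatPerplexity(model="...") to pin a specific model. The positional
# .format(customer_name) call assumes opportunity_search_prompt uses a single anonymous
# "{}" placeholder; a named "{customer_name}" placeholder would instead require
# .format(customer_name=customer_name).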


if __name__ == "__main__":
    from dotenv import load_dotenv
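    # Assumes a local .env file providing the API keys the chat models read from the
    # environment (OPENAI_API_KEY for ChatOpenAI and, typically, PPLX_API_KEY for
    # ChatPerplexity).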
    load_dotenv()

    def test_invoke_general_model():
        # A sales-domain question should get a substantive answer about MEDDPICC.
        response = invoke_general_model("What is MEDDPICC?")
        assert "MEDDPIC" in response.content
        assert len(response.content) > 10

        # An off-topic question should be declined with the canned refusal message.
        response = invoke_general_model("What is the weather like today?")
        assert "weather" not in response.content
        assert "I'm only here to assist you with sales processes and closing deals." in response.content

    def test_invoke_customer_search():
        # The Perplexity search result should mention the customer by name.
        response = invoke_customer_search("Datadog")
        assert "Datadog" in response.content
        assert len(response.content) > 10

    test_invoke_general_model()
    test_invoke_customer_search()
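    # Because this module uses a relative import (from .prompts ...), run these checks
    # with "python -m <package>.<module>" from the project root rather than executing
    # the file directly.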