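"""Evaluate whether a sales rep's response satisfies a customer objection.

Defines a custom Ragas metric (SatisfyRate) backed by an LLM judge prompt with
few-shot examples loaded from a CSV, and scores objection/response pairs read
from a CSV supplied on the command line.
"""
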
import asyncio
import os
import sys
import typing as t
from dataclasses import dataclass, field
from datetime import datetime
from typing import List, Tuple

import dotenv
import pandas as pd
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
from ragas import SingleTurnSample
from ragas.llms.base import LangchainLLMWrapper
from ragas.metrics.base import MetricType, MetricWithLLM, SingleTurnMetric
from ragas.prompt.pydantic_prompt import PydanticPrompt

dotenv.load_dotenv()

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
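

# Input/output schema for the LLM judge: ObjectionInput carries the customer
# objection, the sales rep's response, and an optional reference;
# ObjectionOutput holds the judge's boolean verdict.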
class ObjectionInput(BaseModel):
    user_input: str = Field(description="The objection text")
    response: str = Field(default="", description="The response to the objection")
    reference: str = Field(default="", description="Any reference related to the objection")


class ObjectionOutput(BaseModel):
    satisfy: bool = Field(description="Boolean indicating if the objection was satisfied")
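

# The few-shot examples CSV read below is expected to have these columns
# (the sample row is illustrative only):
#   objection,response,reference,satisfy
#   "It's too expensive.","We offer flexible pricing tiers.","pricing-faq",True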
def process_salesbud_file(file_path: str) -> List[Tuple[ObjectionInput, ObjectionOutput]]:
    """
    Process the salesbud examples CSV and return few-shot examples for ObjectionPrompt.

    Args:
        file_path (str): Name of the salesbud examples CSV inside the data directory.

    Returns:
        List[Tuple[ObjectionInput, ObjectionOutput]]: Pairs of prompt inputs and expected outputs.
    """
    print(f"{datetime.now()}: Processing file: {file_path}")

    # Resolve the CSV relative to the script's data directory so the lookup
    # does not depend on the current working directory.
    _file_path = os.path.join(os.path.dirname(__file__), "../data", file_path)
    df = pd.read_csv(_file_path)

    examples = []
    for _, row in df.iterrows():
        objection_input = ObjectionInput(
            user_input=row["objection"],
            response=row.get("response", ""),
            reference=row.get("reference", ""),
        )
        objection_output = ObjectionOutput(satisfy=row["satisfy"])
        examples.append((objection_input, objection_output))

    return examples


class ObjectionPrompt(PydanticPrompt[ObjectionInput, ObjectionOutput]):
    instruction = (
        "You are an expert technology sales rep tasked with judging whether a response "
        "satisfies a potential customer's objection (the user input). Given a user input "
        "and a sales rep response, output True if the response satisfies the customer's objection."
    )
    input_model = ObjectionInput
    output_model = ObjectionOutput
    examples = process_salesbud_file("salesbud_examples.csv")


@dataclass
class SatisfyRate(MetricWithLLM, SingleTurnMetric):
    name: str = "satisfy_rate"
    _required_columns: t.Dict[MetricType, t.Set[str]] = field(
        default_factory=lambda: {MetricType.SINGLE_TURN: {"user_input", "response"}}
    )
    objection_prompt: PydanticPrompt = ObjectionPrompt()

    async def _ascore(self, row):
        # Not used directly; scoring happens in _single_turn_ascore.
        pass

    async def _single_turn_ascore(self, sample, callbacks):
        prompt_input = ObjectionInput(
            user_input=sample.user_input, response=sample.response
        )
        prompt_response = await self.objection_prompt.generate(
            data=prompt_input, llm=self.llm
        )
        print(f"prompt_response: {prompt_response}")
        return int(prompt_response.satisfy)
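

# Minimal standalone usage sketch for SatisfyRate (assumes OPENAI_API_KEY is set;
# run inside an async context since single_turn_ascore is awaited):
#
#     scorer = SatisfyRate(llm=LangchainLLMWrapper(ChatOpenAI(model_name="gpt-4o")))
#     sample = SingleTurnSample(user_input="It's too expensive.", response="We offer a starter tier.")
#     verdict = await scorer.single_turn_ascore(sample)  # 1 if satisfied, 0 otherwise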


async def generate_objection_score(question_answer):
    print("generate_objection_score()")

    openai_model = LangchainLLMWrapper(ChatOpenAI(model_name="gpt-4o", api_key=OPENAI_API_KEY))
    scorer = SatisfyRate(llm=openai_model)

    sample = SingleTurnSample(
        user_input=question_answer["objection"], response=question_answer["answer"]
    )

    satisfy_0_1 = await scorer.single_turn_ascore(sample)
    print(question_answer["objection"], question_answer["answer"], satisfy_0_1)

    return satisfy_0_1
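

# Example of scoring an ad-hoc objection/answer pair with the helper above
# (hypothetical values; this helper is not invoked by main()):
#
#     await generate_objection_score(
#         {"objection": "Your tool looks hard to integrate.", "answer": "We provide SDKs and onboarding support."}
#     )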


async def generate_response_to_objection(file_path, num):
    # Score the objection/response pair at row `num` of the given CSV.
    user_response = pd.read_csv(file_path)
    openai_model = LangchainLLMWrapper(ChatOpenAI(model_name="gpt-4o", api_key=OPENAI_API_KEY))
    scorer = SatisfyRate(llm=openai_model)

    sample = SingleTurnSample(
        user_input=user_response["objection"][num], response=user_response["response"][num]
    )

    satisfy_0_1 = await scorer.single_turn_ascore(sample)
    print(user_response["objection"][num], user_response["response"][num], satisfy_0_1)

    return satisfy_0_1
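

# CLI entry point: scores the first objection/response row of the CSV supplied
# on the command line (expects 'objection' and 'response' columns).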
async def main(file_path):
    await generate_response_to_objection(file_path, 0)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: python objection_eval.py <path_to_salesbud.csv>")
        sys.exit(1)

    file_path = sys.argv[1]
    asyncio.run(main(file_path))