File size: 7,594 Bytes
64aee40
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
import os

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import TokenTextSplitter
from langchain.llms import OpenAI, LlamaCpp
from langchain.chat_models import ChatOpenAI
from langchain.chains import ChatVectorDBChain

from langchain.embeddings import LlamaCppEmbeddings

from langchain.output_parsers import StructuredOutputParser, ResponseSchema, PydanticOutputParser
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, FewShotPromptTemplate
from langchain.chains import LLMChain
from langchain.chains import SimpleSequentialChain

from pydantic import BaseModel, Field, validator
from typing import List, Dict

# Structured trip details extracted from the user's message. Each Field
# description doubles as an instruction to the LLM (surfaced through
# PydanticOutputParser's format instructions), so typos here directly
# degrade extraction quality — descriptions below are the corrected text.
class AnswerTemplate(BaseModel):
    # Trip type and travel companions, e.g. ["business", "alone"].
    type: List[str] = Field(description="What is the type of the trip: business, family, vacations. And with whom are you travelling? If can't answer them leave it empty")
    # Important dates/times, e.g. {"start date": "12th july"}.
    dates: Dict[str, str] = Field(description="What are the important dates and times? If can't answer them leave it empty")
    # Free-form user preferences, e.g. ["museums", "cafes"].
    preferences: List[str] = Field(description="What are the user's preferences? If can't answer them leave it empty")
    # Any special medical condition the user mentions.
    conditions: str = Field(description="Does the user have any special medical condition? If can't answer them leave it empty.")
    # Maximum acceptable distance from a reference place, e.g. "5km from the stadium".
    dist_range: str = Field(description="Max distance from a place? If can't answer them leave it empty")

# Verdict model for the completeness check: the LLM fills these two
# fields after inspecting the gathered trip details. (No class docstring
# on purpose — pydantic would fold it into the schema sent to the LLM.)
class CheckAnswerTemplate(BaseModel):
    # True once every required detail has been provided.
    isComplete: bool = Field(description="Is the input complete?")
    # The single follow-up question to ask when details are still missing.
    answer: str = Field(
        description="If the answer to 'isComplete' is true leave this empty, else complete this by asking the user for the missing information. Just one question."
    )
    

# --- Configuration -----------------------------------------------------------
# SECURITY: the OpenAI API key must come from the environment, never be
# hardcoded in source control (the previous revision embedded live keys
# here — those keys should be revoked). Export OPENAI_API_KEY before running.
if "OPENAI_API_KEY" not in os.environ:
    raise RuntimeError("OPENAI_API_KEY is not set; export it before running this script.")

embeddings = OpenAIEmbeddings()
# embeddings = LlamaCppEmbeddings()  # local-model alternative

# Location of the persisted Chroma vector store.
persist_directory = "../../../chroma/"

# model_name = "gpt-4"
model_name = "gpt-3.5-turbo"

# Running transcript of the conversation, fed back into the gather prompt.
chat_history = ""

# temperature=0 for deterministic, extraction-style responses.
model = OpenAI(model_name=model_name, temperature=0)

# Parsers that turn the LLM's output into the pydantic models above, plus
# the matching format instructions to embed in each prompt.
output_parser_gather = PydanticOutputParser(pydantic_object=AnswerTemplate)
format_instructions_gather = output_parser_gather.get_format_instructions()

output_parser_check = PydanticOutputParser(pydantic_object=CheckAnswerTemplate)
format_instructions_check = output_parser_check.get_format_instructions()

# Opening turn: ask the user for their trip details.
user_input = input("Helper: Hello, can you tell me your trip details and constraints so I can give you great recommendations?\nUser: ")

# Few-shot examples showing the LLM how to map a user message onto the
# structured fields of AnswerTemplate.
examples = [
    {"input": "i am travelling from 12 of july to 15 of july", "response": "start date: 12th july, end date: 15th july"},
    {"input": "I like museums and cafes", "response": "preferences: museums and cafes"},
    {"input": "Maximum 5km from the city's stadium", "response": "dist_range: 5km from the city's stadium"},
    {"input": "It's a business trip and i am travelling alone", "response": "type: [business, alone]"},
]

# How a single example is rendered inside the few-shot prompt.
example_formatter_template = """User: {input}
Response: {response}
"""

example_prompt = PromptTemplate(
    input_variables=["input", "response"],
    template=example_formatter_template,
)

few_shot_prompt = FewShotPromptTemplate(
    # The examples to insert into the prompt.
    examples=examples,
    # How each example is formatted when inserted.
    example_prompt=example_prompt,
    # The prefix goes before the examples; it carries the instructions
    # and the running conversation history.
    prefix="""### Instruction
            You are Trainline Mate a helpful assistant that plans tours for people at trainline.com.
            As a smart itinerary planner with extensive knowledge of places around the
            world, your task is to determine the user's travel destinations and any specific interests or preferences from
            their message. Here is the history that you have so far: {history} \n###""",
    # The suffix goes after the examples; this is where the latest user
    # input and the output-format instructions are injected.
    suffix="""\n### User: {input}
            \n### Response: {format_instructions}""",
    # Variables the assembled prompt expects at format() time.
    input_variables=["input", "history", "format_instructions"],
    # Separator used to join the prefix, examples, and suffix together.
    example_separator="\n",
)

# Non-few-shot variant of the gather prompt. Currently the loop below uses
# few_shot_prompt instead; this is kept as an alternative.
prompt_gather = PromptTemplate(
    template="""\
            ### Instruction
            You are Trainline Mate a helpful assistant that plans tours for people at trainline.com.
            As a smart itinerary planner with extensive knowledge of places around the
            world, your task is to determine the user's travel destinations and any specific interests or preferences from
            their message. Here is the history that you have so far: {history} \n### User: {input}
            \n### Response: {format_instructions}
            """,
    input_variables=["input", "history", "format_instructions"],
    # partial_variables={"format_instructions": format_instructions_gather}
)

# Prompt that checks whether the gathered details are complete and, if not,
# produces exactly one follow-up question for the user.
prompt_check = PromptTemplate(
    template="""\
            ### Instruction
            Is this input complete? If not, what is missing? If it's the first time responding to the user then thank the user for the details
            provided and then ask for the missing information. Don't ask more than one question.
            Ask just one of the following:
            If 'type' is empty then ask the user what is the objective of the trip and with whom are you travelling;
            If 'dates' is empty then ask the user what are the important dates and times;
            If 'preferences' is empty then ask the user what are the user's preferences;
            If 'conditions' is empty then ask the user if they have any special medical condition;
            If 'dist_range' is empty then ask the user what is the distance range you prefer for your accommodations and activities? \n### Input: {input}
            \n### Response: {format_instructions}
            """,
    input_variables=["input", "format_instructions"],
    # partial_variables={"format_instructions": format_instructions_check}
)

# The gather/check loop: keep asking follow-up questions until the check
# model declares the collected trip details complete.
# (A dead, malformed `examples_gather` list with inconsistent keys was
# removed here — it was never referenced.)
isComplete = False

while not isComplete:
    # 1) Extract structured trip details from everything said so far.
    _input_gather = few_shot_prompt.format_prompt(
        history=chat_history,
        input=user_input,
        format_instructions=format_instructions_gather,
    )
    result_gather = model(_input_gather.to_string())
    parsed_result_gather = output_parser_gather.parse(result_gather)
    print(parsed_result_gather)

    # 2) Ask the model whether the details are complete; when they are
    #    not, it returns the single follow-up question to ask the user.
    _input_check = prompt_check.format_prompt(
        input=parsed_result_gather,
        format_instructions=format_instructions_check,
    )
    result_check = model(_input_check.to_string())
    parsed_result_check = output_parser_check.parse(result_check)

    isComplete = parsed_result_check.isComplete

    # 3) Record the turn in the history and collect the user's answer
    #    to the follow-up question.
    if not isComplete:
        chat_history += "User: " + user_input + "\nHelper: " + parsed_result_check.answer + "\n"
        user_input = input("Helper: " + parsed_result_check.answer + "\nUser: ")

print(parsed_result_gather)