# HuggingFace Spaces page-header residue ("Spaces: Sleeping") — not code.
import os | |
from langchain.embeddings.openai import OpenAIEmbeddings | |
from langchain.vectorstores import Chroma | |
from langchain.text_splitter import TokenTextSplitter | |
from langchain.llms import OpenAI, LlamaCpp | |
from langchain.chat_models import ChatOpenAI | |
from langchain.chains import ChatVectorDBChain | |
from langchain.embeddings import LlamaCppEmbeddings | |
from langchain.output_parsers import StructuredOutputParser, ResponseSchema, PydanticOutputParser | |
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, FewShotPromptTemplate | |
from langchain.chains import LLMChain | |
from langchain.chains import SimpleSequentialChain | |
from pydantic import BaseModel, Field, validator | |
from typing import List, Dict | |
class AnswerTemplate(BaseModel):
    """Structured trip details extracted from the user's message.

    Each ``Field`` description doubles as an instruction to the LLM: the
    Pydantic output parser embeds these descriptions in its format
    instructions, so they must be clear, well-spelled English.
    """

    # Trip type and companions, e.g. ["business", "alone"].
    type: List[str] = Field(description="What is the type of the trip: business, family, vacations. And with whom are you travelling? If you can't answer them leave it empty")
    # Important dates/times keyed by meaning, e.g. {"start date": "12th july"}.
    dates: Dict[str, str] = Field(description="What are the important dates and times? If you can't answer them leave it empty")
    # Free-form user preferences, e.g. ["museums", "cafes"].
    preferences: List[str] = Field(description="What are the user's preferences? If you can't answer them leave it empty")
    # Any special medical condition the user mentions.
    conditions: str = Field(description="Does the user have any special medical condition? If you can't answer them leave it empty.")
    # Maximum acceptable distance from a reference place, e.g. "5km from the stadium".
    dist_range: str = Field(description="Max distance from a place? If you can't answer them leave it empty")
class CheckAnswerTemplate(BaseModel):
    """Schema for the completeness-check step of the gathering loop."""

    # Verdict parsed from the checking LLM call; read below as
    # parsed_result_check.isComplete, so the camelCase name must stay.
    isComplete: bool = Field(description="Is the input complete?")
    # Follow-up question shown to the user when information is missing;
    # read below as parsed_result_check.answer.
    answer: str = Field(description="""If the answer to 'isComplete' is true leave this empty, else complete this by asking the user for the missing information. Just one question.""")
# SECURITY: API keys must never be hard-coded in source (the previous
# revision embedded two real OpenAI keys — both should be revoked).
# Read the key from the environment instead and fail fast when missing.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError("Set the OPENAI_API_KEY environment variable before running.")

embeddings = OpenAIEmbeddings()
# embeddings = LlamaCppEmbeddings()  # local (llama.cpp) alternative

# Directory where the Chroma vector store persists its index.
persist_directory = "../../../chroma/"

# model_name = "gpt-4"
model_name = "gpt-3.5-turbo"

# Running transcript of the conversation, fed back into the prompts.
chat_history = ""

# temperature=0 keeps the structured extraction deterministic.
model = OpenAI(model_name=model_name, temperature=0)

# Parsers that turn raw LLM text into the Pydantic schemas above; their
# format instructions are interpolated into the prompts below.
output_parser_gather = PydanticOutputParser(pydantic_object=AnswerTemplate)
format_instructions_gather = output_parser_gather.get_format_instructions()
output_parser_check = PydanticOutputParser(pydantic_object=CheckAnswerTemplate)
format_instructions_check = output_parser_check.get_format_instructions()

user_input = input("Helper: Hello, can you tell me your trip details and constraints so I can give you great recommendations?\nUser: ")
# Few-shot demonstrations: a user utterance paired with the structured
# snippet the model should extract from it.
examples = [
    {
        "input": "i am travelling from 12 of july to 15 of july",
        "response": "start date: 12th july, end date: 15th july",
    },
    {
        "input": "I like museums and cafes",
        "response": "preferences: museums and cafes",
    },
    {
        "input": "Maximum 5km from the city's stadium",
        "response": "dist_range: 5km from the city's stadium",
    },
    {
        "input": "It's a business trip and i am travelling alone",
        "response": "type: [business, alone]",
    },
]

# Layout of one demonstration inside the few-shot prompt.
example_formatter_template = """User: {input}
Response: {response}
"""
# Prompt that renders a single {input}/{response} example pair.
example_prompt = PromptTemplate(
    template=example_formatter_template,
    input_variables=["input", "response"],
)
# Few-shot prompt for the "gather" step: instructions + history, then the
# worked examples, then the latest user message and format instructions.
few_shot_prompt = FewShotPromptTemplate(
    # The demonstrations inserted into the prompt.
    examples=examples,
    # How each demonstration is rendered (User:/Response: pair).
    example_prompt=example_prompt,
    # The prefix precedes the examples and carries the instructions plus
    # the running conversation history.
    prefix="""### Instruction
You are Trainline Mate a helpful assistant that plans tours for people at trainline.com.
As a smart itinerary planner with extensive knowledge of places around the
world, your task is to determine the user's travel destinations and any specific interests or preferences from
their message. Here is the history that you have so far: {history} \n###""",
    # The suffix follows the examples; this is where the user input goes.
    suffix="""\n### User: {input}
\n### Response: {format_instructions}""",
    # The variables the overall prompt expects at format time.
    input_variables=["input", "history", "format_instructions"],
    # Separator used to join the prefix, examples, and suffix together.
    example_separator="\n",
)
# Zero-shot variant of the gather prompt (unused while the few-shot
# version above is active, but kept as an alternative).
prompt_gather = PromptTemplate(
    template="""\
### Instruction
You are Trainline Mate a helpful assistant that plans tours for people at trainline.com.
As a smart itinerary planner with extensive knowledge of places around the
world, your task is to determine the user's travel destinations and any specific interests or preferences from
their message. Here is the history that you have so far: {history} \n### User: {input}
\n### Response: {format_instructions}
""",
    input_variables=["input", "history", "format_instructions"],
)
# Prompt for the "check" step: decides whether the gathered details are
# complete and, if not, which single follow-up question to ask.
prompt_check = PromptTemplate(
    template="""\
### Instruction
Is this input complete? If not, what is missing? If it's the first time responding to the user then thank the user for the details
provided and then ask for the missing information. Don't ask more than one question.
Ask just one of the following:
If 'type' is empty then ask the user what is the objective of the trip and with whom are you travelling;
If 'dates' is empty then ask the user what are the important dates and times;
If 'preferences' is empty then ask the user what are the user's preferences;
If 'conditions' is empty then ask the user if they have any special medical condition;
If 'dist_range' is empty then ask the user what is the distance range you prefer for your accommodations and activities? \n### Input: {input}
\n### Response: {format_instructions}
""",
    input_variables=["input", "format_instructions"],
)
# Leftover antonym demo data from the LangChain few-shot tutorial; it is
# not referenced anywhere below. Keys normalized to "word"/"antonym"
# (the first entry mistakenly used "input" as its key).
examples_gather = [
    {"word": "happy", "antonym": "sad"},
    {"word": "tall", "antonym": "short"},
]
# Interactive gather/check loop: extract structured trip details, ask the
# model whether they are complete, and keep prompting the user until so.
isComplete = False
while not isComplete:
    # Gather step: extract structured details from the latest user message.
    _input_gather = few_shot_prompt.format_prompt(
        history=chat_history,
        input=user_input,
        format_instructions=format_instructions_gather,
    )
    result_gather = model(_input_gather.to_string())
    parsed_result_gather = output_parser_gather.parse(result_gather)
    print(parsed_result_gather)

    # Check step: is anything missing? If so, the model returns the single
    # follow-up question to ask.
    _input_check = prompt_check.format_prompt(
        input=parsed_result_gather,
        format_instructions=format_instructions_check,
    )
    result_check = model(_input_check.to_string())
    parsed_result_check = output_parser_check.parse(result_check)

    isComplete = parsed_result_check.isComplete
    if not isComplete:
        # Record the exchange and prompt the user for the missing details.
        chat_history += "User: " + user_input + "\nHelper: " + parsed_result_check.answer + "\n"
        user_input = input("Helper: " + parsed_result_check.answer + "\nUser: ")

print(parsed_result_gather)