diogovelho committed
Commit
64aee40
0 Parent(s):

Duplicate from MinderaLabs/TL_GPT4

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: TL GPT4
+ emoji: 🔥
+ colorFrom: indigo
+ colorTo: gray
+ sdk: gradio
+ sdk_version: 3.35.2
+ app_file: app.py
+ pinned: false
+ duplicated_from: MinderaLabs/TL_GPT4
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
agents/check_agent.py ADDED
@@ -0,0 +1,97 @@
+ import os
+ import platform
+
+ import openai
+ import chromadb
+ import langchain
+
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import Chroma
+ from langchain.text_splitter import TokenTextSplitter
+ from langchain.llms import OpenAI
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import ChatVectorDBChain
+ from langchain.document_loaders import GutenbergLoader
+
+ from langchain.embeddings import LlamaCppEmbeddings
+ from langchain.llms import LlamaCpp
+
+ from langchain.output_parsers import StructuredOutputParser, ResponseSchema
+ from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
+ from langchain.chains import LLMChain
+ from langchain.chains import SimpleSequentialChain
+
+ from langchain.output_parsers import PydanticOutputParser
+ from pydantic import BaseModel, Field, validator
+ from typing import List, Dict
+
+ # class AnswerTemplate(BaseModel):
+ #     isComplete: bool = Field(description="Is the input complete?")
+ #     answer: str = Field(description="""If the answer to 'isComplete' is true leave this empty, else respond to the user's last message in a cordial manner and then ask the user for the missing information. Just one question.""")
+
+ class AnswerTemplate(BaseModel):
+     isComplete: bool = Field(description="Is the input complete?")
+
+ class Check_Agent():
+     """Decides whether the gathered trip summary already contains every required detail."""
+     def __init__(self):
+
+         self.model_name = "gpt-4"
+         self.model = OpenAI(model_name=self.model_name, temperature=0)
+
+         self.output_parser = PydanticOutputParser(pydantic_object=AnswerTemplate)
+         self.format_instructions = self.output_parser.get_format_instructions()
+
+         # self.prompt = PromptTemplate(
+         #     template="""\
+         #     ### Instruction
+         #     You are Trainline Mate, a helpful assistant that plans tours for people at trainline.com.
+         #     As a smart itinerary planner with extensive knowledge of places around the
+         #     world, your task is to determine the user's travel destinations and any specific interests or preferences from
+         #     their message.
+         #     ### Your task
+         #     Is this input complete? If not, what is missing?
+         #     ### If something is missing then ask for the missing information.
+         #     Don't ask more than one question.
+         #     Ask just one of the following:
+         #     If 'type' is empty then ask the user what type of trip they are planning and with whom they are travelling;
+         #     If 'where' is empty then ask the user where they are going to travel to;
+         #     If 'start_date' is empty then ask the user what the start date is;
+         #     If 'end_date' is empty then ask the user what the end date is;
+         #     If 'time_constrains' is empty then ask the user if there are any time constraints that should be considered;
+         #     If 'preferences' is empty then ask the user if they have thought about any activities they want to do while they're there;
+         #     If 'conditions' is empty then ask the user if they have any special medical condition;
+         #     If 'dist_range' is empty then ask the user what distance range they prefer for their activities. \n### Input: {input}
+         #     \n### Response: {format_instructions}
+         #     """,
+         #     input_variables=["input", "format_instructions"]
+         # )
+
+         self.prompt = PromptTemplate(
+             template="""\
+             ### Instruction
+             You are Trainline Mate, a helpful assistant that plans tours for people at trainline.com.
+             As a smart itinerary planner with extensive knowledge of places around the
+             world, your task is to determine the user's travel destinations and any specific interests or preferences from
+             their message.
+             ### Your task
+             This input is a summary of what the user wants to do. From it you have to be able to retrieve all the following information:
+             "Where is the trip to", "Start and end dates for the trip", "Is there any time constraint", "activity preferences",
+             "Is there any medical condition" and "Is there a maximum distance range in which the activities have to be".
+             Is this input complete? Does it have all the information mentioned before, or is it missing something? If it's not complete, what is missing?
+             ### If something is missing then ask for the missing information.
+             The user doesn't like to give much information at once, so try to minimize the amount of information you ask for in your response.
+             ### Input: {input}
+             ### Response: {format_instructions}
+             """,
+             input_variables=["input", "format_instructions"]
+         )
+
+     def format_prompt(self, input):
+         return self.prompt.format_prompt(input=input, format_instructions=self.format_instructions)
+         # return self.prompt.format_prompt(input=input)
+
+     def get_parsed_result(self, input):
+         result = self.model(input.to_string())
+         parsed_result = self.output_parser.parse(result)
+         return parsed_result.isComplete
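For quick orientation, a minimal usage sketch of this class as it is driven from app.py / main.py; it assumes OPENAI_API_KEY is set in the environment, and the summary string below is hypothetical.

```python
from agents.check_agent import Check_Agent

# Hypothetical trip summary, of the kind Gather_Agent.get_parsed_result() returns.
summary = "Trip to Porto, Monday to Thursday, museums after 5pm, no medical conditions, 2 km range."

check_agent = Check_Agent()
_input = check_agent.format_prompt(input=summary)   # prompt plus Pydantic format instructions
print(check_agent.get_parsed_result(_input))        # True only if every required detail is present
```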
agents/gather_agent.py ADDED
@@ -0,0 +1,92 @@
+ import os
+ import platform
+
+ import openai
+ import chromadb
+ import langchain
+
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import Chroma
+ from langchain.text_splitter import TokenTextSplitter
+ from langchain.llms import OpenAI
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import ChatVectorDBChain
+ from langchain.document_loaders import GutenbergLoader
+
+ from langchain.embeddings import LlamaCppEmbeddings
+ from langchain.llms import LlamaCpp
+
+ from langchain.output_parsers import StructuredOutputParser, ResponseSchema
+ from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
+ from langchain.chains import LLMChain
+ from langchain.chains import SimpleSequentialChain
+
+ from langchain.output_parsers import PydanticOutputParser
+ from pydantic import BaseModel, Field, validator
+ from typing import List, Dict
+
+ # class AnswerTemplate(BaseModel):
+ #     type: List[str] = Field(description="What is the type of the trip: business, family, vacations. And with whom are you travelling? If you can't answer, leave it empty")
+ #     where: str = Field(description="Where is the user going? If you can't answer, leave it empty")
+ #     start_date: str = Field(description="What is the start date? If you can't answer, leave it empty")
+ #     end_date: str = Field(description="What is the end date? If you can't answer, leave it empty")
+ #     time_constrains: str = Field(description="Are there any time constraints? If you can't answer, leave it empty")
+ #     # dates: Dict[str, str] = Field(description="What are the important dates and times? If you can't answer, leave it empty")
+ #     preferences: List[str] = Field(description="What does the user want to visit? If you can't answer, leave it empty")
+ #     conditions: str = Field(description="Does the user have any special medical condition? If you can't answer, leave it empty")
+ #     dist_range: str = Field(description="Max distance from a place? If you can't answer, leave it empty")
+ #     # missing: str = Field(description="Is any more information needed?")
+
+ class AnswerTemplate(BaseModel):
+     answer: str = Field(description="Response")
+
+
+ class Gather_Agent():
+     """Condenses the chat history and latest user message into a single trip summary."""
+     def __init__(self):
+
+         self.model_name = "gpt-4"
+         self.model = OpenAI(model_name=self.model_name, temperature=0)
+
+         self.output_parser = PydanticOutputParser(pydantic_object=AnswerTemplate)
+         self.format_instructions = self.output_parser.get_format_instructions()
+
+         # self.prompt = PromptTemplate(
+         #     template="""\
+         #     ### Instruction
+         #     You are Trainline Mate, a helpful assistant that plans tours for people at trainline.com.
+         #     As a smart itinerary planner with extensive knowledge of places around the
+         #     world, your task is to determine the user's travel destinations and any specific interests or preferences from
+         #     their message. Here is the history that you have so far: {history} \n### User: \n{input}
+         #     \n### Response: {format_instructions}
+         #     """,
+         #     input_variables=["input", "history", "format_instructions"]
+         # )
+
+         self.prompt = PromptTemplate(
+             template="""\
+             ### Instruction
+             You are Trainline Mate, a helpful assistant that plans tours for people at trainline.com.
+             As a smart itinerary planner with extensive knowledge of places around the
+             world, your task is to determine the user's travel destinations and any specific interests or preferences from
+             their message.
+             ### Task
+             From the following history and user input you should be able to retrieve and summarize all the following information:
+             Where is the trip to, start and end dates for the trip, is there any time constraint, activity preferences,
+             is there any medical condition and is there a maximum distance range in which the activities have to be.
+             ### History
+             Here is the history that you have so far: {history}
+             ### User: \n{input}
+             \n### Response:
+             """,
+             input_variables=["input", "history"]
+         )
+
+     def format_prompt(self, input, history):
+         # return self.prompt.format_prompt(history=history, input=input, format_instructions=self.format_instructions)
+         return self.prompt.format_prompt(input=input, history=history)
+
+     def get_parsed_result(self, input):
+         result = self.model(input.to_string())
+         # return self.output_parser.parse(result)
+         return result
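A minimal sketch of how this agent is called from send_message in app.py and main.py; the history and message below are hypothetical, and OPENAI_API_KEY must be set.

```python
from agents.gather_agent import Gather_Agent

gather_agent = Gather_Agent()
history = "User: I want to go to Rome.\nHelper: When are you travelling?\n"
user_input = "From the 12th to the 14th of July, mostly museums."

prompt = gather_agent.format_prompt(input=user_input, history=history)
summary = gather_agent.get_parsed_result(prompt)   # free-text summary of everything known so far
print(summary)
```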
agents/planner_agent.py ADDED
@@ -0,0 +1,84 @@
+ import os
+ import platform
+
+ import openai
+ import chromadb
+ import langchain
+
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import Chroma
+ from langchain.text_splitter import TokenTextSplitter
+ from langchain.llms import OpenAI
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import ChatVectorDBChain
+ from langchain.document_loaders import GutenbergLoader
+
+ from langchain.embeddings import LlamaCppEmbeddings
+ from langchain.llms import LlamaCpp
+
+ from langchain.output_parsers import StructuredOutputParser, ResponseSchema
+ from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
+ from langchain.chains import LLMChain
+ from langchain.chains import SimpleSequentialChain
+
+ from langchain.output_parsers import PydanticOutputParser
+ from pydantic import BaseModel, Field, validator
+ from typing import List, Dict
+
+ class GetPlacesTemplate(BaseModel):
+     answer: List[str] = Field(description="List of places and their addresses separated by ','")
+
+
+ class Planner_Agent():
+     """Builds the day-by-day itinerary and extracts the places (with addresses) it mentions."""
+     def __init__(self):
+
+         self.model_name = "gpt-4"
+         self.model = OpenAI(model_name=self.model_name, temperature=0)
+
+         self.output_parser_places = PydanticOutputParser(pydantic_object=GetPlacesTemplate)
+         self.format_instructions_places = self.output_parser_places.get_format_instructions()
+
+         self.prompt = PromptTemplate(
+             template="""\
+             ### Instruction
+             You are Trainline Mate, a helpful assistant that plans tours for people at trainline.com.
+             As a smart itinerary planner with extensive knowledge of places around the
+             world, your task is to determine the user's travel destinations and any specific interests or preferences from
+             their message. Create an itinerary that caters to the user's needs, making sure to name all activities,
+             restaurants, and attractions specifically. When creating the itinerary, also consider factors such as time
+             constraints and transportation options. Additionally, all attractions and restaurants listed in the itinerary
+             must exist and be named specifically. During subsequent revisions, the itinerary can be modified, while keeping
+             in mind the practicality of the itinerary. A new place for each day. It's important to ensure that the number of
+             activities per day is appropriate, and if the user doesn't specify otherwise, the default itinerary length is
+             five days. The itinerary length should remain the same unless the user's message changes it. \n### User input to base the itinerary on: \n{input}
+             ### Response:
+             """,
+             input_variables=["input"]
+             # partial_variables={"format_instructions": format_instructions_gather}
+         )
+
+         self.prompt_to_get_places = PromptTemplate(
+             template="""\
+             ### Instruction
+             You are a place retriever. From a given input you can create a list of all the places referenced in it, as well as the address of each location.
+             ### Input: {input}
+             ### Response: {format_instructions}
+             """,
+             input_variables=["input", "format_instructions"]
+             # partial_variables={"format_instructions": format_instructions_gather}
+         )
+
+     def format_prompt(self, input):
+         return self.prompt.format_prompt(input=input)
+
+     def get_itenerary(self, input):
+         return self.model(input.to_string())
+
+     def format_prompt_to_get_places(self, input):
+         return self.prompt_to_get_places.format_prompt(input=input, format_instructions=self.format_instructions_places)
+
+     def get_places_from_itenerary(self, itenerary):
+         result = self.model(itenerary.to_string())
+         parsed_result = self.output_parser_places.parse(result)
+         return parsed_result.answer
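A minimal sketch of the two-step flow this class supports, mirroring get_itenerary and get_itenerary_places in app.py; the trip summary is hypothetical and OPENAI_API_KEY is assumed to be set.

```python
from agents.planner_agent import Planner_Agent

planner = Planner_Agent()
summary = "Paris, 12-15 July, museums and local food, nothing before 11am, max 2 km from the centre."

itinerary_prompt = planner.format_prompt(summary)
itinerary = planner.get_itenerary(itinerary_prompt)        # free-text, day-by-day plan

places_prompt = planner.format_prompt_to_get_places(itinerary)
places = planner.get_places_from_itenerary(places_prompt)  # ["<name>, <address>", ...]
print(places)
```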
agents/response_agent.py ADDED
@@ -0,0 +1,99 @@
+ import os
+ import platform
+
+ import openai
+ import chromadb
+ import langchain
+
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import Chroma
+ from langchain.text_splitter import TokenTextSplitter
+ from langchain.llms import OpenAI
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import ChatVectorDBChain
+ from langchain.document_loaders import GutenbergLoader
+
+ from langchain.embeddings import LlamaCppEmbeddings
+ from langchain.llms import LlamaCpp
+
+ from langchain.output_parsers import StructuredOutputParser, ResponseSchema
+ from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
+ from langchain.chains import LLMChain
+ from langchain.chains import SimpleSequentialChain
+
+ from langchain.output_parsers import PydanticOutputParser
+ from pydantic import BaseModel, Field, validator
+ from typing import List, Dict
+
+ # class AnswerTemplate(BaseModel):
+ #     isComplete: bool = Field(description="Is the input complete?")
+ #     answer: str = Field(description="""If the answer to 'isComplete' is true leave this empty, else respond to the user's last message in a cordial manner and then ask the user for the missing information. Just one question.""")
+
+ class AnswerTemplate(BaseModel):
+     # isComplete: bool = Field(description="Is the input complete?")
+     answer: str = Field(description="Question that you asked")
+
+ class Response_Agent():
+     """Asks the user for whatever the gathered trip summary is still missing."""
+     def __init__(self):
+
+         self.model_name = "gpt-4"
+         self.model = OpenAI(model_name=self.model_name, temperature=0)
+
+         self.output_parser = PydanticOutputParser(pydantic_object=AnswerTemplate)
+         self.format_instructions = self.output_parser.get_format_instructions()
+
+         # self.prompt = PromptTemplate(
+         #     template="""\
+         #     ### Instruction
+         #     You are Trainline Mate, a helpful assistant that plans tours for people at trainline.com.
+         #     As a smart itinerary planner with extensive knowledge of places around the
+         #     world, your task is to determine the user's travel destinations and any specific interests or preferences from
+         #     their message.
+         #     ### Your task
+         #     Is this input complete? If not, what is missing?
+         #     ### If something is missing then ask for the missing information.
+         #     Don't ask more than one question.
+         #     Ask just one of the following:
+         #     If 'type' is empty then ask the user what type of trip they are planning and with whom they are travelling;
+         #     If 'where' is empty then ask the user where they are going to travel to;
+         #     If 'start_date' is empty then ask the user what the start date is;
+         #     If 'end_date' is empty then ask the user what the end date is;
+         #     If 'time_constrains' is empty then ask the user if there are any time constraints that should be considered;
+         #     If 'preferences' is empty then ask the user if they have thought about any activities they want to do while they're there;
+         #     If 'conditions' is empty then ask the user if they have any special medical condition;
+         #     If 'dist_range' is empty then ask the user what distance range they prefer for their activities. \n### Input: {input}
+         #     \n### Response: {format_instructions}
+         #     """,
+         #     input_variables=["input", "format_instructions"]
+         # )
+
+         self.prompt = PromptTemplate(
+             template="""\
+             ### Instruction
+             You are Trainline Mate, a helpful assistant that plans tours for people at trainline.com.
+             As a smart itinerary planner with extensive knowledge of places around the
+             world, your task is to determine the user's travel destinations and any specific interests or preferences from
+             their message.
+             ### Your task
+             This input is a summary of what the user wants to do. From it you have to be able to retrieve all the following information:
+             "Where is the trip to", "Start and end dates for the trip", "Is there any time constraint that you should be aware of", "activity preferences",
+             "Is there any medical condition" and "Is there a maximum distance range in which the activities have to be".
+             ### If something is missing then ask for the missing information.
+             The user doesn't like to give much information at once, so try to minimize the amount of information you ask for in your response.
+             Ask for information for at most two of the questions.
+             ### Input: {input}
+             ### Response: {format_instructions}
+             """,
+             input_variables=["input", "format_instructions"]
+         )
+         # Is this input complete? Does it have all the information mentioned before, or is it missing something? If it's not complete, what is missing?
+
+     def format_prompt(self, input):
+         return self.prompt.format_prompt(input=input, format_instructions=self.format_instructions)
+         # return self.prompt.format_prompt(input=input)
+
+     def get_parsed_result(self, input):
+         result = self.model(input.to_string())
+         parsed_result = self.output_parser.parse(result)
+         return parsed_result.answer
app.py ADDED
@@ -0,0 +1,173 @@
+ import gradio as gr
+ import os
+ from gradio.themes import Size, GoogleFont
+
+ from agents.gather_agent import Gather_Agent
+ from agents.check_agent import Check_Agent
+ from agents.response_agent import Response_Agent
+ from agents.planner_agent import Planner_Agent
+
+ import get_map
+
+ # Create custom Color objects for our primary, secondary, and neutral colors
+ primary_color = gr.themes.colors.green
+ secondary_color = gr.themes.colors.amber
+ neutral_color = gr.themes.colors.stone  # Assuming black for text
+ # Set the sizes
+ spacing_size = gr.themes.sizes.spacing_md
+ radius_size = gr.themes.sizes.radius_md
+ text_size = gr.themes.sizes.text_md
+ # Set the fonts
+ font = GoogleFont("Source Sans Pro")
+ font_mono = GoogleFont("IBM Plex Mono")
+ # Create the theme
+ theme = gr.themes.Base(
+     primary_hue=primary_color,
+     secondary_hue=secondary_color,
+     neutral_hue=neutral_color,
+     spacing_size=spacing_size,
+     radius_size=radius_size,
+     text_size=text_size,
+     font=font,
+     font_mono=font_mono
+ )
+
+ gather_agent = Gather_Agent()
+ check_agent = Check_Agent()
+ response_agent = Response_Agent()
+ planner_agent = Planner_Agent()
+
+ def send_message(user_input, chat_history):
+     """Run gather -> check, and if details are missing, ask the follow-up question."""
+     isComplete = False
+     helper_anwser = ""
+
+     _input_gather = gather_agent.format_prompt(history=chat_history, input=user_input)
+     parsed_result_gather = gather_agent.get_parsed_result(_input_gather)
+
+     _input_check = check_agent.format_prompt(input=parsed_result_gather)
+     isComplete = check_agent.get_parsed_result(_input_check)
+
+     if isComplete == False:
+         _input_response = response_agent.format_prompt(input=parsed_result_gather)
+         helper_anwser = response_agent.get_parsed_result(_input_response)
+
+     # _input_check = check_agent.format_prompt(input=parsed_result_gather)
+     # isComplete, helper_anwser = check_agent.get_parsed_result(_input_check)
+
+     return isComplete, helper_anwser, parsed_result_gather
+
+ def get_itenerary(parsed_result_gather):
+     _input_planner = planner_agent.format_prompt(parsed_result_gather)
+     return planner_agent.get_itenerary(_input_planner)
+
+ def get_itenerary_places(itenerary):
+     _input_places = planner_agent.format_prompt_to_get_places(itenerary)
+     return planner_agent.get_places_from_itenerary(_input_places)
+
+ # isComplete = False
+ # chat_history = ""
+
+ # helper_anwser = "Hello, can you tell me your trip details and constraints so I can give you great recommendations?"
+ # user_input = input("Helper: " + helper_anwser + "\nUser: ")
+
+ with gr.Blocks(theme=theme, title="TrainLine") as demo:
+     gr.Markdown(
+         """
+         <div style="vertical-align: middle">
+             <div style="float: left">
+                 <img src="https://static.trainlinecontent.com/content/vul/logos/trainline-mint.svg" alt=""
+                      width="120" height="120">
+             </div>
+         </div>
+         """)
+
+     helper_anwser = "Hello, can you tell me your trip details and constraints so I can give you great recommendations?"
+     with gr.TabItem("Travel Companion"):
+         chatbot = gr.Chatbot(value=[[None, helper_anwser]])
+         user_input = gr.Textbox()
+         gr.Examples([
+             "I want to go to Rome. Can you recommend a sightseeing tour for one day?",
+             "I like to walk a lot and I prefer to visit fine arts museums",
+             "Porto for 3 days. I will arrive on Monday and leave on Thursday. I can only visit places after 5pm, so be "
+             "sure I can visit those places",
+             "I would like to plan a trip to Europe with my family of four. We want to visit Paris, Rome, and Madrid in "
+             "10 days. Can you suggest an itinerary that includes transportation and accommodations? "
+             "Also, please provide information on the best restaurants in each city for a budget of $50 per person per meal."
+         ], user_input)
+     with gr.TabItem("Map"):
+         map = gr.Plot(visible=True).style()
+         result_df = gr.Dataframe(type="pandas", visible=True)
+     isComplete = False
+     history = ""
+     locations = []
+
+     def user(user_message, history):
+         print(user_message, history)
+         return gr.update(value="", interactive=False), history + [[user_message, None]]
+
+     # def bot(chat_history):
+     #     print(chat_history)
+     #     # Create history
+     #     history = ""
+     #     for i in range(len(chat_history)-1):
+     #         history += "User: " + chat_history[i][0] + "\nHelper: " + chat_history[i][1] + "\n"
+     #     history += "User: " + chat_history[-1][0]
+
+     #     # isComplete, helper_anwser, data_collected = send_message(message, history)
+     #     # if isComplete == True:
+     #     #     helper_anwser = get_itenerary(data_collected)
+     #     # chat_history.append((message, helper_anwser))
+     #     return "", chat_history
+
+     def respond(chat_history):
+         print(chat_history)
+         # Create history
+         history = ""
+         for i in range(1, len(chat_history) - 1):
+             history += "User: " + chat_history[i][0] + "\nHelper: " + chat_history[i][1] + "\n"
+
+         message = chat_history[-1][0]
+         print(history)
+         print(message)
+         isComplete, helper_anwser, data_collected = send_message(message, history)
+
+         chat_history.pop(-1)
+
+         if isComplete == True:
+             itenerary = get_itenerary(data_collected)
+             locations = get_itenerary_places(itenerary)
+             helper_anwser = itenerary + "\nList of places with addresses: " + str(locations)
+             map, result_df = get_map.filter_map(locations)
+             chat_history.append((message, helper_anwser))
+             return chat_history, map, result_df
+
+         chat_history.append((message, helper_anwser))
+         return chat_history, None, None
+
+     # user_input.submit(respond, [user_input, chatbot], [user_input, chatbot])
+
+     response = user_input.submit(user, [user_input, chatbot], [user_input, chatbot], queue=False).then(
+         respond, chatbot, [chatbot, map, result_df]
+     )
+     response.then(lambda: gr.update(interactive=True), None, [user_input], queue=False)
+
+     # if map != None:
+     #     map.update(visible=True)
+     #     result_df.update(visible=True)
+
+ demo.launch(auth=(os.environ["USER"], os.environ["PASSWORD"]))
+
+
+
+ # while isComplete == False:
+
+ #     isComplete, helper_anwser, data_collected = main.send_message(user_input, chat_history)
+
+ #     if isComplete == False:
+ #         chat_history += "User: " + user_input + "\nHelper: " + helper_anwser + "\n"
+ #         user_input = input("Helper: " + helper_anwser + "\nUser: ")
+
+ # itenerary_response = main.get_itenerary(data_collected)
+
+
+ # I would like to go to Paris, from the 12th of July to the 15th of July. I want to visit museums, eat at local restaurants and visit the Louvre on my first day. My son is allergic to peanuts, and I like to sleep in, so please don't book anything before 11am. I would also like to not get further than 2km from the city's centre.
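For reference, a minimal sketch of the chatbot state that user() and respond() pass around, in the Gradio 3.x list-of-[user, bot] format; the messages below are hypothetical.

```python
# Value of the gr.Chatbot right before respond() runs.
chat_history = [
    [None, "Hello, can you tell me your trip details and constraints..."],  # greeting pair, skipped by respond()
    ["I want to go to Rome for two days.", "When are you travelling?"],     # past turns -> the history string
    ["From the 12th to the 14th of July.", None],                           # newest turn, appended by user()
]
# respond() joins the middle pairs into "User: ...\nHelper: ..." lines, treats the last
# user message as the new input, and replaces the trailing [msg, None] pair with the
# agents' reply (or, once complete, the itinerary plus the map and DataFrame updates).
```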
gather_details.py ADDED
@@ -0,0 +1,155 @@
+ import os
+ import platform
+
+ import openai
+ import chromadb
+ import langchain
+
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import Chroma
+ from langchain.text_splitter import TokenTextSplitter
+ from langchain.llms import OpenAI
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import ChatVectorDBChain
+ from langchain.document_loaders import GutenbergLoader
+
+ from langchain.embeddings import LlamaCppEmbeddings
+ from langchain.llms import LlamaCpp
+
+ from langchain.output_parsers import StructuredOutputParser, ResponseSchema
+ from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
+ from langchain.chains import LLMChain
+ from langchain.chains import SimpleSequentialChain
+
+ from langchain.output_parsers import PydanticOutputParser
+ from pydantic import BaseModel, Field, validator
+ from typing import List, Dict
+
+ class AnswerTemplate(BaseModel):
+     type: List[str] = Field(description="What is the type of the trip: business, family, vacations. And with whom are you travelling? If you can't answer, leave it empty")
+     where: str = Field(description="Where is the user going? If you can't answer, leave it empty")
+     start_date: str = Field(description="What is the start date? If you can't answer, leave it empty")
+     end_date: str = Field(description="What is the end date? If you can't answer, leave it empty")
+     time_constrains: str = Field(description="Are there any time constraints? If you can't answer, leave it empty")
+     # dates: Dict[str, str] = Field(description="What are the important dates and times? If you can't answer, leave it empty")
+     preferences: List[str] = Field(description="What does the user want to visit? If you can't answer, leave it empty")
+     conditions: str = Field(description="Does the user have any special medical condition? If you can't answer, leave it empty")
+     dist_range: str = Field(description="Max distance from a place? If you can't answer, leave it empty")
+     # missing: str = Field(description="Is any more information needed?")
+
+ class CheckAnswerTemplate(BaseModel):
+     isComplete: bool = Field(description="Is the input complete?")
+     # missing: str = Field(description="If the answer to the last question is false, then what is missing?")
+     answer: str = Field(description="""If the answer to 'isComplete' is true leave this empty, else complete this by giving a nice compliment to the user's choices and asking the user for the missing information. Just one question.""")
+
+
+ # os.environ["OPENAI_API_KEY"] = "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"
+
+
+ model_name = "gpt-4"
+ # model_name = "gpt-3.5-turbo"
+
+ chat_history = ""
+
+ model = OpenAI(model_name=model_name, temperature=0)
+
+ output_parser_gather = PydanticOutputParser(pydantic_object=AnswerTemplate)
+ format_instructions_gather = output_parser_gather.get_format_instructions()
+
+ output_parser_check = PydanticOutputParser(pydantic_object=CheckAnswerTemplate)
+ format_instructions_check = output_parser_check.get_format_instructions()
+
+ helper_anwser = "Hello, can you tell me your trip details and constraints so I can give you great recommendations?"
+
+ user_input = input("Helper: " + helper_anwser + "\nUser: ")
+
+ # output_parser_2 = PydanticOutputParser(pydantic_object=AnswerTemplate_2)
+ # format_instructions_2 = output_parser_2.get_format_instructions()
+
+ # prompt_gather = PromptTemplate(
+ #     template="""\
+ #     ### Instruction
+ #     You are Trainline Mate, a helpful assistant that plans tours for people at trainline.com.
+ #     As a smart itinerary planner with extensive knowledge of places around the
+ #     world, your task is to determine the user's travel destinations and any specific interests or preferences from
+ #     their message. Create an itinerary that caters to the user's needs, making sure to name all activities,
+ #     restaurants, and attractions specifically. When creating the itinerary, also consider factors such as time
+ #     constraints and transportation options. Additionally, all attractions and restaurants listed in the itinerary
+ #     must exist and be named specifically. During subsequent revisions, the itinerary can be modified, while keeping
+ #     in mind the practicality of the itinerary. A new place for each day. It's important to ensure that the number of
+ #     activities per day is appropriate, and if the user doesn't specify otherwise, the default itinerary length is
+ #     five days. The itinerary length should remain the same unless the user's message changes it. Here is the history that you have so far: {history} \n### User: \n{input}
+ #     \n### Response: {format_instructions}
+ #     """,
+ #     input_variables=["input", "history", "format_instructions"]
+ #     # partial_variables={"format_instructions": format_instructions_gather}
+ # )
+
+ prompt_gather = PromptTemplate(
+     template="""\
+     ### Instruction
+     You are Trainline Mate, a helpful assistant that plans tours for people at trainline.com.
+     As a smart itinerary planner with extensive knowledge of places around the
+     world, your task is to determine the user's travel destinations and any specific interests or preferences from
+     their message. Here is the history that you have so far: {history} \n### User: \n{input}
+     \n### Response: {format_instructions}
+     """,
+     input_variables=["input", "history", "format_instructions"]
+     # partial_variables={"format_instructions": format_instructions_gather}
+ )
+
+ prompt_check = PromptTemplate(
+     template="""\
+     ### Instruction
+     Is this input complete? If not, what is missing?
+     ### Important: Give a nice compliment to the user's choices: {user_input}
+     ### Then ask for the missing information.
+     Don't ask more than one question.
+     Ask just one of the following:
+     If 'type' is empty then ask the user what the objective of the trip is and with whom they are travelling;
+     If 'where' is empty then ask the user where they are going to travel to;
+     If 'start_date' is empty then ask the user what the start date is;
+     If 'end_date' is empty then ask the user what the end date is;
+     If 'time_constrains' is empty then ask the user if there are any time constraints that should be considered;
+     If 'preferences' is empty then ask the user if they have thought about any activities they want to do while they're there;
+     If 'conditions' is empty then ask the user if they have any special medical condition;
+     If 'dist_range' is empty then ask the user what distance range they prefer for their activities. \n### Input: {input}
+     \n### Response: {format_instructions}
+     """,
+     input_variables=["input", "user_input", "format_instructions"]
+     # partial_variables={"format_instructions": format_instructions_check}
+     # If 'dates' is empty then ask the user what the important dates and times are;
+ )
+
+ isComplete = False
+
+ while isComplete == False:
+
+     _input_gather = prompt_gather.format_prompt(history=chat_history, input=user_input, format_instructions=format_instructions_gather)
+
+     # chain_gather = LLMChain(llm=model, prompt=prompt_gather)
+     # chain_check = LLMChain(llm=model, prompt=prompt_check)
+
+     # overall_chain = SimpleSequentialChain(chains=[chain_gather, chain_check], verbose=True)
+
+     result_gather = model(_input_gather.to_string())
+     parsed_result_gather = output_parser_gather.parse(result_gather)
+     print(parsed_result_gather)
+
+     _input_check = prompt_check.format_prompt(input=parsed_result_gather, user_input="\nHelper: " + helper_anwser + "\nUser: " + user_input, format_instructions=format_instructions_check)
+     result_check = model(_input_check.to_string())
+     parsed_result_check = output_parser_check.parse(result_check)
+     # print(parsed_result_check)
+
+     isComplete = parsed_result_check.isComplete
+     helper_anwser = parsed_result_check.answer
+
+     if isComplete == False:
+         chat_history += "User: " + user_input + "\nHelper: " + helper_anwser + "\n"
+         user_input = input("Helper: " + helper_anwser + "\nUser: ")
+
+
+ # print(overall_chain.run(input=user_input))
+
+ print(parsed_result_gather)
gather_details_fewshots.py ADDED
@@ -0,0 +1,155 @@
+ import os
+
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import Chroma
+ from langchain.text_splitter import TokenTextSplitter
+ from langchain.llms import OpenAI, LlamaCpp
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import ChatVectorDBChain
+
+ from langchain.embeddings import LlamaCppEmbeddings
+
+ from langchain.output_parsers import StructuredOutputParser, ResponseSchema, PydanticOutputParser
+ from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, FewShotPromptTemplate
+ from langchain.chains import LLMChain
+ from langchain.chains import SimpleSequentialChain
+
+ from pydantic import BaseModel, Field, validator
+ from typing import List, Dict
+
+ class AnswerTemplate(BaseModel):
+     type: List[str] = Field(description="What is the type of the trip: business, family, vacations. And with whom are you travelling? If you can't answer, leave it empty")
+     dates: Dict[str, str] = Field(description="What are the important dates and times? If you can't answer, leave it empty")
+     preferences: List[str] = Field(description="What are the user's preferences? If you can't answer, leave it empty")
+     conditions: str = Field(description="Does the user have any special medical condition? If you can't answer, leave it empty.")
+     dist_range: str = Field(description="Max distance from a place? If you can't answer, leave it empty")
+
+ class CheckAnswerTemplate(BaseModel):
+     isComplete: bool = Field(description="Is the input complete?")
+     answer: str = Field(description="""If the answer to 'isComplete' is true leave this empty, else complete this by asking the user for the missing information. Just one question.""")
+
+
+ # os.environ["OPENAI_API_KEY"] = "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"
+ os.environ["OPENAI_API_KEY"] = "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"  # Mindera's Key
+ embeddings = OpenAIEmbeddings()
+ # embeddings = LlamaCppEmbeddings()
+
+ persist_directory = "../../../chroma/"
+
+ # model_name = "gpt-4"
+ model_name = "gpt-3.5-turbo"
+
+ chat_history = ""
+
+ model = OpenAI(model_name=model_name, temperature=0)
+
+ output_parser_gather = PydanticOutputParser(pydantic_object=AnswerTemplate)
+ format_instructions_gather = output_parser_gather.get_format_instructions()
+
+ output_parser_check = PydanticOutputParser(pydantic_object=CheckAnswerTemplate)
+ format_instructions_check = output_parser_check.get_format_instructions()
+
+ user_input = input("Helper: Hello, can you tell me your trip details and constraints so I can give you great recommendations?\nUser: ")
+
+ examples = [
+     {"input": "i am travelling from 12 of july to 15 of july", "response": "start date: 12th july, end date: 15th july"},
+     {"input": "I like museums and cafes", "response": "preferences: museums and cafes"},
+     {"input": "Maximum 5km from the city's stadium", "response": "dist_range: 5km from the city's stadium"},
+     {"input": "It's a business trip and i am travelling alone", "response": "type: [business, alone]"}
+ ]
+
+ example_formatter_template = """User: {input}
+ Response: {response}
+ """
+
+ example_prompt = PromptTemplate(
+     input_variables=["input", "response"],
+     template=example_formatter_template,
+ )
+
+ few_shot_prompt = FewShotPromptTemplate(
+     # These are the examples we want to insert into the prompt.
+     examples=examples,
+     # This is how we want to format the examples when we insert them into the prompt.
+     example_prompt=example_prompt,
+     # The prefix is some text that goes before the examples in the prompt.
+     # Usually, this consists of instructions.
+     prefix="""### Instruction
+     You are Trainline Mate, a helpful assistant that plans tours for people at trainline.com.
+     As a smart itinerary planner with extensive knowledge of places around the
+     world, your task is to determine the user's travel destinations and any specific interests or preferences from
+     their message. Here is the history that you have so far: {history} \n###""",
+     # The suffix is some text that goes after the examples in the prompt.
+     # Usually, this is where the user input will go.
+     suffix="""\n### User: {input}
+     \n### Response: {format_instructions}""",
+     # The input variables are the variables that the overall prompt expects.
+     input_variables=["input", "history", "format_instructions"],
+     # The example_separator is the string we will use to join the prefix, examples, and suffix together.
+     example_separator="\n",
+ )
+
+ prompt_gather = PromptTemplate(
+     template="""\
+     ### Instruction
+     You are Trainline Mate, a helpful assistant that plans tours for people at trainline.com.
+     As a smart itinerary planner with extensive knowledge of places around the
+     world, your task is to determine the user's travel destinations and any specific interests or preferences from
+     their message. Here is the history that you have so far: {history} \n### User: {input}
+     \n### Response: {format_instructions}
+     """,
+     input_variables=["input", "history", "format_instructions"]
+     # partial_variables={"format_instructions": format_instructions_gather}
+ )
+
+ prompt_check = PromptTemplate(
+     template="""\
+     ### Instruction
+     Is this input complete? If not, what is missing? If it's the first time responding to the user then thank the user for the details
+     provided and then ask for the missing information. Don't ask more than one question.
+     Ask just one of the following:
+     If 'type' is empty then ask the user what the objective of the trip is and with whom they are travelling;
+     If 'dates' is empty then ask the user what the important dates and times are;
+     If 'preferences' is empty then ask the user what their preferences are;
+     If 'conditions' is empty then ask the user if they have any special medical condition;
+     If 'dist_range' is empty then ask the user what distance range they prefer for their accommodations and activities. \n### Input: {input}
+     \n### Response: {format_instructions}
+     """,
+     input_variables=["input", "format_instructions"]
+     # partial_variables={"format_instructions": format_instructions_check}
+ )
+
+ examples_gather = [
+     {"word": "happy", "antonym": "sad"},
+     {"word": "tall", "antonym": "short"},
+ ]
+
+ isComplete = False
+
+ while isComplete == False:
+
+     _input_gather = few_shot_prompt.format_prompt(history=chat_history, input=user_input, format_instructions=format_instructions_gather)
+
+     # chain_gather = LLMChain(llm=model, prompt=prompt_gather)
+     # chain_check = LLMChain(llm=model, prompt=prompt_check)
+
+     # overall_chain = SimpleSequentialChain(chains=[chain_gather, chain_check], verbose=True)
+
+     result_gather = model(_input_gather.to_string())
+     parsed_result_gather = output_parser_gather.parse(result_gather)
+     print(parsed_result_gather)
+
+     _input_check = prompt_check.format_prompt(input=parsed_result_gather, format_instructions=format_instructions_check)
+     result_check = model(_input_check.to_string())
+     parsed_result_check = output_parser_check.parse(result_check)
+     # print(parsed_result_check)
+
+     isComplete = parsed_result_check.isComplete
+
+     if isComplete == False:
+         chat_history += "User: " + user_input + "\nHelper: " + parsed_result_check.answer + "\n"
+         user_input = input("Helper: " + parsed_result_check.answer + "\nUser: ")
+
+ # print(overall_chain.run(input=user_input))
+
+ print(parsed_result_gather)
get_map.py ADDED
@@ -0,0 +1,159 @@
+ import logging
+ import pandas as pd
+ import os
+ from typing import Optional, Dict, Any
+ import gradio as gr
+ import googlemaps
+ from PIL import Image
+ from langchain.utilities.google_places_api import GooglePlacesAPIWrapper
+ import plotly.graph_objects as go
+ import requests
+ from io import BytesIO
+ import tempfile
+
+
+ class GooglePlacesAPIWrapperExtended(GooglePlacesAPIWrapper):
+     api_key = os.environ["GPLACES_API_KEY"]
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+     def run(self, query: str, **kwargs) -> pd.DataFrame:
+         """Run a Places search and return the matching places as a DataFrame."""
+         search_results = self.google_map_client.places(query, **kwargs)["results"]
+         num_to_return = len(search_results)
+         places = []
+         if num_to_return == 0:
+             return pd.DataFrame(columns=["Name", "Address", "Phone Number", "Website",
+                                          "Opening Hours", "Is Open Now", "latitude", "longitude",
+                                          "Summary", "Rating", "Image", "Reviews"])
+         num_to_return = (
+             num_to_return
+             if self.top_k_results is None
+             else min(num_to_return, self.top_k_results)
+         )
+         for i in range(num_to_return):
+             result = search_results[i]
+             details = self.fetch_place_details(result["place_id"])
+             if details is not None:
+                 places.append(details)
+         return pd.DataFrame(places)
+
+     def fetch_place_details(self, place_id: str) -> Optional[Dict[str, Any]]:
+         try:
+             place_details = self.google_map_client.place(place_id)
+             formatted_details = self.format_place_details(place_details)
+             return formatted_details
+         except Exception as e:
+             logging.error(f"An error occurred while fetching place details: {e}")
+             return None
+
+     def format_place_details(self, place_details: Dict[str, Any]) -> Optional[Dict[str, Any]]:
+         try:
+             name = place_details.get("result", {}).get("name", "Unknown")
+             address = place_details.get("result", {}).get("formatted_address", "Unknown")
+             phone_number = place_details.get("result", {}).get("formatted_phone_number", "Unknown")
+             website = place_details.get("result", {}).get("website", "Unknown")
+             weekday_text = place_details.get("result", {}).get("opening_hours", {}).get("weekday_text", [])
+             is_open = place_details.get("result", {}).get("opening_hours", {}).get("open_now", "Unknown")
+             location = place_details.get("result", {}).get("geometry", {}).get("location", {})
+             latitude = location.get("lat", "Unknown")
+             longitude = location.get("lng", "Unknown")
+             summary = place_details.get("result", {}).get("editorial_summary", {}).get("overview", "Unknown")
+             rating = place_details.get("result", {}).get("rating", "Unknown")
+             image = place_details.get("result", {}).get("photos", [{}])[0].get("photo_reference", "Unknown")
+             image_url = f"https://maps.googleapis.com/maps/api/place/photo?maxwidth=400&photoreference={image}&key={self.api_key}"
+             first_three_reviews = place_details.get("result", {}).get("reviews", [])[:3]
+             formatted_details = {
+                 "name": name,
+                 "address": address,
+                 "phone_number": phone_number,
+                 "website": website,
+                 "opening_hours": weekday_text,
+                 "is_open_now": is_open,
+                 "latitude": latitude,
+                 "longitude": longitude,
+                 "summary": summary,
+                 "rating": rating,
+                 "image": image_url,
+                 "reviews": first_three_reviews
+             }
+             return formatted_details
+         except Exception as e:
+             logging.error(f"An error occurred while formatting place details: {e}")
+             return None
+
+
+ # pd.set_option("display.max_columns", None)
+ # pd.set_option("display.max_rows", None)
+ # gplaceapi = GooglePlacesAPIWrapperExtended()
+ # query = "Louvre, Paris"
+ # result_df = gplaceapi.run(query)
+ # print(result_df)
+ # query = gr.inputs.Textbox(lines=2, label="Query")
+ # result_df = gr.outputs.Dataframe(type="pandas")
+ # gr.Interface(fn=GooglePlacesAPIWrapperExtended().run, inputs=query, outputs=result_df).launch(debug=True)
+
+
+ def filter_map(locations):
+     """Look up each location, then build a scatter-map figure and the combined details DataFrame."""
+     dataframe = pd.DataFrame()
+     for location in locations:
+         dataframe = pd.concat([dataframe, GooglePlacesAPIWrapperExtended().run(location)])
+
+     names = dataframe["name"].tolist()
+     summaries = dataframe["summary"].tolist()
+     image_urls = dataframe["image"].tolist()
+
+     fig = go.Figure(go.Scattermapbox(
+         lat=dataframe['latitude'].tolist(),
+         lon=dataframe['longitude'].tolist(),
+         mode='markers',
+         marker=go.scattermapbox.Marker(
+             size=13,
+             color='rgb(255, 123, 0)',
+         ),
+         hovertemplate='Name: %{customdata[0]}<br>Summary: %{customdata[1]}',
+         customdata=list(zip(names, summaries)),
+         name='Places'
+     ))
+     fig.update_layout(
+         mapbox_style="open-street-map",
+         hovermode='closest',
+         mapbox=dict(
+             bearing=0,
+             center=go.layout.mapbox.Center(
+                 lat=dataframe['latitude'].tolist()[0],
+                 lon=dataframe['longitude'].tolist()[0]
+             ),
+             pitch=0,
+             zoom=12
+         ),
+     )
+     # Add images using the layout.images attribute
+     # for i, url in enumerate(image_urls):
+     #     response = requests.get(url)
+     #     img = Image.open(BytesIO(response.content))
+     #     with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp:
+     #         img.save(temp.name)
+     #     fig.add_layout_image(
+     #         dict(
+     #             source=temp.name,
+     #             xref='x',
+     #             yref='y',
+     #             x=dataframe['longitude'].iloc[i],
+     #             y=dataframe['latitude'].iloc[i],
+     #             sizex=0.05,
+     #             sizey=0.05,
+     #             sizing='stretch',
+     #             opacity=0.7,
+     #             layer='above'
+     #         )
+     #     )
+     #
+     # fig.update_layout(
+     #     xaxis=dict(range=[dataframe['longitude'].min(), dataframe['longitude'].max()]),
+     #     yaxis=dict(range=[dataframe['latitude'].min(), dataframe['latitude'].max()])
+     # )
+     #
+     return fig, dataframe
+
+
+ if __name__ == "__main__":
+     with gr.Blocks() as demo:
+         with gr.Column():
+             location = gr.Textbox(lines=2, label="Location")
+             btn = gr.Button(value="Update Filter")
+             map = gr.Plot().style()
+             result_df = gr.Dataframe(type="pandas")
+         # filter_map expects a list of locations, so wrap the single textbox value.
+         btn.click(lambda query: filter_map([query]), [location], [map, result_df])
+     demo.queue(concurrency_count=6).launch()
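A minimal standalone sketch of calling filter_map outside Gradio, assuming GPLACES_API_KEY is set; the location strings are hypothetical.

```python
import os
import get_map

# Requires a Google Places key in the environment before get_map is imported/used.
assert "GPLACES_API_KEY" in os.environ

locations = ["Louvre Museum, Paris", "Musée d'Orsay, Paris"]  # hypothetical itinerary stops
fig, df = get_map.filter_map(locations)
fig.write_html("map.html")                     # Plotly figure with one marker per resolved place
print(df[["name", "address", "rating"]])       # details DataFrame built from the Places results
```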
main.py ADDED
@@ -0,0 +1,52 @@
+ from agents.gather_agent import Gather_Agent
+ from agents.check_agent import Check_Agent
+ from agents.response_agent import Response_Agent
+ from agents.planner_agent import Planner_Agent
+
+ gather_agent = Gather_Agent()
+ check_agent = Check_Agent()
+ response_agent = Response_Agent()
+ planner_agent = Planner_Agent()
+
+ isComplete = False
+ chat_history = ""
+
+ helper_anwser = "Hello, can you tell me your trip details and constraints so I can give you great recommendations?"
+ user_input = input("Helper: " + helper_anwser + "\nUser: ")
+
+ def send_message(user_input, chat_history):
+     """Run gather -> check, and if details are missing, ask the follow-up question."""
+     isComplete = False
+     helper_anwser = ""
+
+     _input_gather = gather_agent.format_prompt(input=user_input, history=chat_history)
+     parsed_result_gather = gather_agent.get_parsed_result(_input_gather)
+
+     _input_check = check_agent.format_prompt(input=parsed_result_gather)
+     isComplete = check_agent.get_parsed_result(_input_check)
+
+     if isComplete == False:
+         _input_response = response_agent.format_prompt(input=parsed_result_gather)
+         helper_anwser = response_agent.get_parsed_result(_input_response)
+
+     return isComplete, helper_anwser, parsed_result_gather
+
+ def get_itenerary(parsed_result_gather):
+     _input_planner = planner_agent.format_prompt(parsed_result_gather)
+     return planner_agent.get_itenerary(_input_planner)
+
+
+
+ while isComplete == False:
+     isComplete, helper_anwser, parsed_result_gather = send_message(user_input, chat_history)
+
+     if isComplete == False:
+         chat_history += "User: " + user_input + "\nHelper: " + helper_anwser + "\n"
+         user_input = input("Helper: " + helper_anwser + "\nUser: ")
+
+
+ print(parsed_result_gather)
+ itenerary = get_itenerary(parsed_result_gather)
+ print(itenerary)
+
+ _input_places = planner_agent.format_prompt_to_get_places(itenerary)
+ parsed_result_places = planner_agent.get_places_from_itenerary(_input_places)
+ print(parsed_result_places)
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ openai==0.27.2
+ openapi-python-client==0.13.4
+ openapi-schema-pydantic==1.2.4
+ chromadb==0.3.26
+ langchain==0.0.201
+ googlemaps==4.10.0
+ plotly==5.15.0
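These pins cover the LLM and mapping libraries only; gradio itself is supplied by the Space's sdk_version. A minimal sketch of the secrets the rest of the code reads from the environment at runtime (the values below are placeholders):

```python
import os

# Placeholders only; real values come from the Space's secrets or the local environment.
os.environ.setdefault("OPENAI_API_KEY", "<openai key>")          # LangChain OpenAI calls in agents/ and gather_details*.py
os.environ.setdefault("GPLACES_API_KEY", "<google places key>")  # get_map.GooglePlacesAPIWrapperExtended
os.environ.setdefault("USER", "<gradio auth user>")              # app.py demo.launch(auth=...)
os.environ.setdefault("PASSWORD", "<gradio auth password>")
```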