from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage
)
from common import get_llm
import logging
import re

SOCRATES = "Socrates"
THEAETETUS = "Theaetetus"
PLATO = "Plato"


class SocraticGPT:
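    """One participant in a three-role Socratic dialogue.

    An instance plays Socrates, Theaetetus, or Plato. Socrates and Theaetetus
    discuss the problem; Plato proofreads their exchange. Each instance keeps
    its own message history and calls the backend chat model via
    `common.get_llm`.
    """
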
    def __init__(self, role, tools, key, n_round=10, model="gpt-4o"):
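        """
        Args:
            role: SOCRATES, THEAETETUS, or PLATO.
            tools: agent tooling made available to the dialogue (stored here,
                not used directly by this class).
            key: API key passed through to `get_llm`.
            n_round: number of dialogue rounds intended by the caller (stored,
                not used directly by this class).
            model: backend model name passed to `get_llm`.
        """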
        self.role = role
        self.model = model
        self.n_round = n_round
        self.tools = tools
        self.key = key

        if self.role == SOCRATES:
            self.other_role = THEAETETUS
        elif self.role == THEAETETUS:
            self.other_role = SOCRATES

        self.history = []

    def set_question(self, question):
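        """Seed the message history with the shared instruction prompt, the
        problem statement, and the opening greeting, adapted to this
        instance's role."""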
        instruction_prompt = \
            f"""
            {SOCRATES} and {THEAETETUS} are two advanced AI assistants solving challenging problems. {SOCRATES} and {THEAETETUS} will engage in a multi-round dialogue to solve the problem together. There is also a third participant, {PLATO}, who acts as a proofreader and provides valuable advice. {SOCRATES} and {THEAETETUS} have to listen to and follow that advice.
            Their discussion should follow a structured problem-solving approach, such as formalizing the problem, developing high-level strategies for solving the problem, using Agents if necessary, reusing sub-problem solutions where possible, critically evaluating each other's reasoning, avoiding arithmetic and logical errors, and effectively communicating their ideas.  
            
            They are permitted to consult the user if they encounter any uncertainties or difficulties. Any response from the user will be provided in the following round. If the main question is not clear, they have to seek advice from the user.
            To ask the user, use the following phrase: <user>insert your question</user>. 
            
            There is an agent available for use. The agent is also an advanced AI; it can perform comprehensive web searches, extract information from web URLs, and navigate the Internet. A request to the agent must state your goal and what you want to achieve, and it must contain all the required information. The agent knows nothing about your dialogue and has no memory of previous requests; it only performs the requested actions. To avoid rate limits, try to break the request down into smaller parts to gather the necessary information.
            To call the agent, use the following phrase: <agent>insert your request</agent>.  

            Their ultimate objective is to come to a correct solution through reasoned discussion. To present their final answer, they should adhere to the following guidelines:
            - State the problem they were asked to solve.
            - Present any assumptions they made in their reasoning.
            - Detail the logical steps they took to arrive at their final answer.
            - Use the agent to perform specific operations.
            - Critically assess their own way of thinking.
            - Follow {PLATO}'s advice.
            - Conclude with a final statement that directly answers the problem.
            - If the user provides feedback, do not answer instantly; discuss the question with the other participants first.

            Their final answer should be concise and free from logical errors, such as false dichotomy, hasty generalization, and circular reasoning.
            Provide the answer immediately once nobody has objections to the solution. If they encounter any issues with the validity of their answer, they should re-evaluate their reasoning and calculations. Before the final answer is given, every participant has to accept the solution or reject it with a clear explanation. Do not provide the answer if someone has reasonable objections to it.
            The final answer should begin with the phrase: <answer>insert your answer</answer>.  

            The dialogue answers must be formatted in Markdown.
            
            The problem statement is as follows: ''' {question} '''.
            """

        # print(instruction_prompt)

        if self.role == SOCRATES:
            self.history.append(SystemMessage(
                content=instruction_prompt + f"\nNow, suppose that you are {self.role}. Please discuss the problem with {self.other_role}!"))
            self.history.append(AIMessage(
                content=f"Hi {THEAETETUS}, let's solve this problem together. Please feel free to correct me if I make any mistakes."
            ))
        elif self.role == THEAETETUS:
            self.history.append(SystemMessage(
                content=instruction_prompt + f"\nNow, suppose that you are {self.role}. Please discuss the problem with {self.other_role}!"))
            self.history.append(HumanMessage(
                content=f"Hi {THEAETETUS}, let's solve this problem together. Please feel free to correct me if I make any mistakes."
            ))
        elif self.role == PLATO:
            self.history.append(SystemMessage(
                content=instruction_prompt + f"\nNow as a proofreader, {PLATO}, your task is to read through the dialogue between {SOCRATES} and {THEAETETUS} and identify any errors they made."))
            self.history.append(HumanMessage(
                content=f"{SOCRATES}: Hi {THEAETETUS}, let's solve this problem together. Please feel free to correct me if I make any mistakes."
            ))

    def get_response(self, temperature=0):
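        """Ask the backend model for this participant's next reply and append
        it to the history as an AIMessage."""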
        msg = self._call_llm(self.history, temperature)
        logging.debug(f"response: {self.role} : {msg}")
        self.history.append(AIMessage(content=msg))
        return msg

    def get_proofread(self, temperature=0, history_depth=20):
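        """Ask the model to proofread the recent dialogue (typically called on
        the Plato participant).

        The history sent to the model is truncated to the system prompt plus
        the last `history_depth` messages to stay within token rate limits.
        Returns None if the model answers "NO" (no suggestions); otherwise
        returns the critique and records it in the history."""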
        pf_template = HumanMessage(
            content=f""" \
                        The above is the conversation between {SOCRATES}, {THEAETETUS}, and the agent. They are likely to have made mistakes, failed to follow the guidelines, tried an inefficient way of solving the problem, or used the agent incorrectly.
                        Critically assess the dialogue above, as well as the agent's answers. Explain your reasoning step by step. Be concise.
                        Start your answer with the plain text "NO" if you have no reasonable suggestions for improvement; do not say "NO" otherwise.
                        Answer:
                    """
        )
        # OpenAI token rate limits can prevent the model from answering, so truncate the history
        if len(self.history) > history_depth:
            history = [self.history[0]] + self.history[-history_depth:]
        else:
            history = self.history
        msg = self._call_llm(history + [pf_template], temperature)
        logging.debug(f"proofread: {self.role} : {msg}")
        if msg[:2] in ["NO", "No", "no"]:
            return None
        else:
            pattern = r'^YES\s*\n*'
            msg = re.sub(pattern, '', msg)
            self.history.append(AIMessage(content=f"Message from a proofreader {PLATO}: {msg}"))
            return msg

    def _call_llm(self, messages, temperature=0):
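        """Invoke the backend chat model with `messages` and return the reply
        text, or a human-readable error message if the call fails."""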
        try:
            llm = get_llm(model_name=self.model, model_temperature=temperature, api_key=self.key)
            response = llm(messages)
            msg = response.content
        except Exception as e:
            if "maximum context length" in str(e):
                # Handle the maximum context length error here
                msg = "The context length exceeds my limit... "
            else:
                # Handle other errors here
                msg = f"I encountered an error when using my backend model.\n\nError: {str(e)}"
        return msg

    def update_history(self, message):
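        """Append another participant's message to the history."""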
        self.history.append(HumanMessage(content=message))

    def add_agent_feedback(self, question, answer):
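        """Record the agent's answer to a previously issued <agent> request."""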
        self.history.append(AIMessage(content=f"Agent's feedback to \"{question}\" is \"{answer}\""))

    def add_user_feedback(self, question, answer):
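        """Record the user's answer to a previously issued <user> question."""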
        self.history.append(SystemMessage(content=f"User's feedback to \"{question}\" is \"{answer}\""))

    def add_proofread(self, proofread):
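        """Record Plato's proofreading advice in the history."""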
        self.history.append(SystemMessage(content=f"{PLATO}: Message from a proofreader: {proofread}"))

    @staticmethod
    def get_question(text, pattern):
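        """Return all non-overlapping matches of `pattern` in `text`, or None
        if there are none."""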
        matches = re.findall(pattern, text, re.DOTALL)

        if len(matches) == 0:
            return None

        return matches

    @staticmethod
    def get_user_question(text):
        pattern = r"<user>(.*?)</user>"
        return SocraticGPT.get_question(text, pattern)

    @staticmethod
    def get_agent_question(text):
        pattern = r"<agent>(.*?)</agent>"
        return SocraticGPT.get_question(text, pattern)

    @staticmethod
    def get_answer(text):
        pattern = r"<answer>(.*?)</answer>"
        return SocraticGPT.get_question(text, pattern)
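

# Example usage (a minimal sketch, not part of the module API). It assumes an
# OpenAI-compatible key is available in the OPENAI_API_KEY environment variable
# and that passing tools=None is acceptable for your setup; adapt the
# orchestration loop to the surrounding application.
#
#   import os
#
#   key = os.environ["OPENAI_API_KEY"]
#   socrates = SocraticGPT(role=SOCRATES, tools=None, key=key)
#   theaetetus = SocraticGPT(role=THEAETETUS, tools=None, key=key)
#   plato = SocraticGPT(role=PLATO, tools=None, key=key)
#
#   question = "What is the sum of the first 100 positive integers?"
#   for participant in (socrates, theaetetus, plato):
#       participant.set_question(question)
#
#   for _ in range(socrates.n_round):
#       reply = socrates.get_response()
#       theaetetus.update_history(f"{SOCRATES}: {reply}")
#       plato.update_history(f"{SOCRATES}: {reply}")
#       if SocraticGPT.get_answer(reply):
#           break
#       reply = theaetetus.get_response()
#       socrates.update_history(f"{THEAETETUS}: {reply}")
#       plato.update_history(f"{THEAETETUS}: {reply}")
#       if SocraticGPT.get_answer(reply):
#           break
#       advice = plato.get_proofread()
#       if advice:
#           socrates.add_proofread(advice)
#           theaetetus.add_proofread(advice)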