from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage,
)

from common import get_llm

import logging
import re

# Participant names used both as role identifiers and inside the prompts.
SOCRATES = "Socrates"
THEAETETUS = "Theaetetus"
PLATO = "Plato"


class SocraticGPT:
    """One participant in a multi-round "Socratic dialogue" between LLM agents.

    Two solvers (Socrates and Theaetetus) discuss a problem while a third
    participant (Plato) acts as a proofreader.  Each instance keeps its own
    ``history`` of langchain messages, rendered from its role's point of view,
    and talks to the backend model through ``get_llm`` from ``common``.
    """

    def __init__(self, role, tools, key, n_round=10, model="gpt-4o"):
        """Create a participant.

        Args:
            role: One of ``SOCRATES``, ``THEAETETUS`` or ``PLATO``.
            tools: Tool set handed to the instance (stored; not used in this
                class directly).
            key: API key forwarded to ``get_llm``.
            n_round: Maximum number of dialogue rounds (stored; not used in
                this class directly).
            model: Backend model name passed to ``get_llm``.
        """
        self.role = role
        self.model = model
        self.n_round = n_round
        self.tools = tools
        self.key = key
        if self.role == SOCRATES:
            self.other_role = THEAETETUS
        elif self.role == THEAETETUS:
            self.other_role = SOCRATES
        else:
            # Fix: previously ``other_role`` was never assigned for PLATO (or
            # an unknown role), so any later access raised AttributeError.
            self.other_role = None
        self.history = []

    def set_question(self, question):
        """Seed ``self.history`` with the shared instruction prompt plus the
        role-specific system message and opening line for ``question``.

        NOTE(review): the prompt text below appears to have lost markup
        delimiters during file mangling (e.g. whatever tags surrounded
        "insert your question" / "insert your request" / "insert your
        answer"); the wording is preserved verbatim — confirm against the
        upstream source.
        """
        instruction_prompt = f"""
{SOCRATES} and {THEAETETUS} are two advanced AI assistants to solve challenging problems. {SOCRATES} and {THEAETETUS} will engage in multi-round dialogue to solve the problem together. There also another participant {PLATO}, who is acting like a proofreader providing valuable advices. {SOCRATES} and {THEAETETUS} have to listen and follow the advice. Their discussion should follow a structured problem-solving approach, such as formalizing the problem, developing high-level strategies for solving the problem, using Agents if necessary, reusing sub-problem solutions where possible, critically evaluating each other's reasoning, avoiding arithmetic and logical errors, and effectively communicating their ideas. They are permitted to consult with the user if they encounter any uncertainties or difficulties. Any responses from user will be provided in the following round. If the main question is not clear they have to seek advise from the user. To ask the user use following phrase: insert your question. There is an agent available for usage. agent is also an advanced AI, which can perform a comprehensive web search, information extration from web urls and navigate the Internet. The request to agent must mention your goal and what you want to achieve. Request must contain all the required information. The agent does not know about you dialog or memory of the previous requests, it only performs requested actions. 
To avoid rate limits, try to break down the request into smaller parts to gather the necessary information. To call the agent use following phrase: insert your request. Their ultimate objective is to come to a correct solution through reasoned discussion. To present their final answer, they should adhere to the following guidelines: - State the problem they were asked to solve. - Present any assumptions they made in their reasoning. - Detail the logical steps they took to arrive at their final answer. - Use the agent to perform specific operations. - Asses critically your way of thinking. Apply critical thinking. - Follow {PLATO}'s advises. - Conclude with a final statement that directly answers the problem. - If the user provides feedback, then do not answer instantly, discuss the question with other participants. Their final answer should be concise and free from logical errors, such as false dichotomy, hasty generalization, and circular reasoning. Immediately provide the answer if nobody has objections to the solution. If they encounter any issues with the validity of their answer, they should re-evaluate their reasoning and calculations. Before providing the final answer, every participant has to accept the solution or reject it with a clear explaination. Do not provide the answer if someone has reasonable objections to it. The final answer should begin with the phrase: insert your answer. The dialog answers must be markdown formatted. The problem statement is as follows: ''' {question} '''. """
        # print(instruction_prompt)
        if self.role == SOCRATES:
            # Socrates speaks first, so his opening line is his own AIMessage.
            self.history.append(SystemMessage(
                content=instruction_prompt + f"\nNow, suppose that you are {self.role}. Please discuss the problem with {self.other_role}!"))
            self.history.append(AIMessage(
                content=f"Hi {THEAETETUS}, let's solve this problem together. Please feel free to correct me if I make any mistakes."
            ))
        elif self.role == THEAETETUS:
            # From Theaetetus' perspective the same opening line is incoming.
            self.history.append(SystemMessage(
                content=instruction_prompt + f"\nNow, suppose that you are {self.role}. Please discuss the problem with {self.other_role}!"))
            self.history.append(HumanMessage(
                content=f"Hi {THEAETETUS}, let's solve this problem together. Please feel free to correct me if I make any mistakes."
            ))
        elif self.role == PLATO:
            # Plato only observes; the transcript lines arrive as HumanMessages
            # prefixed with the speaker's name.
            self.history.append(SystemMessage(
                content=instruction_prompt + f"\nNow as a proofreader, {PLATO}, your task is to read through the dialogue between {SOCRATES} and {THEAETETUS} and identify any errors they made."))
            self.history.append(HumanMessage(
                content=f"{SOCRATES}: Hi {THEAETETUS}, let's solve this problem together. Please feel free to correct me if I make any mistakes."
            ))

    def get_response(self, temperature=0):
        """Query the backend with the full history, record the reply as this
        participant's own message, and return the reply text."""
        msg = self._call_llm(self.history, temperature)
        logging.debug("response: %s : %s", self.role, msg)
        self.history.append(AIMessage(content=msg))
        return msg

    def get_proofread(self, temperature=0, history_depth=20):
        """Ask the model (as proofreader) to critique the dialogue so far.

        Returns None when the model starts its reply with "NO" (meaning no
        suggestions); otherwise strips a leading "YES", records the critique
        in the history, and returns it.

        Args:
            temperature: Sampling temperature for the backend call.
            history_depth: Max number of trailing messages sent (plus the
                initial system message) to stay under token rate limits.
        """
        pf_template = HumanMessage(
            content=f""" \
The above is the conversation between {SOCRATES} and {THEAETETUS} and the agent. They were likely to have made multiple mistakes or not follow guidelines or try inefficient way to solve the problem or incorrectly use the agent. Asses critically the dialog above. Also asses critically the agent's answers. Explain your reasoning step by step. Be concise. Start your answer with plaintext "NO" if you have no reasonable suggestions for improvements, do not say "NO" otherwise. 
Answer: """
        )
        # OpenAI token rate limiting prevent the model from answering - we are
        # limiting the history: keep the system prompt plus the newest
        # ``history_depth`` messages.
        if len(self.history) > history_depth:
            history = [self.history[0]] + self.history[-history_depth:]
        else:
            history = self.history
        msg = self._call_llm(history + [pf_template], temperature)
        logging.debug("proofread: %s : %s", self.role, msg)
        if msg[:2] in ("NO", "No", "no"):
            return None
        # Drop an optional leading "YES" (and trailing whitespace/newlines).
        msg = re.sub(r'^YES\s*\n*', '', msg)
        self.history.append(AIMessage(content=f"Message from a proofreader {PLATO}: {msg}"))
        return msg

    def _call_llm(self, messages, temperature=0):
        """Send ``messages`` to the backend model and return the reply text.

        Deliberately best-effort: any backend failure is converted into an
        explanatory string rather than raised, so the dialogue can continue.
        """
        try:
            llm = get_llm(model_name=self.model, model_temperature=temperature, api_key=self.key)
            response = llm(messages)
            msg = response.content
        except Exception as e:
            if "maximum context length" in str(e):
                # Handle the maximum context length error here
                msg = "The context length exceeds my limit... "
            else:
                # Handle other errors here
                msg = f"I encounter an error when using my backend model.\n\n Error: {str(e)}"
        return msg

    def update_history(self, message):
        """Record an incoming utterance from another participant."""
        self.history.append(HumanMessage(content=message))

    def add_agent_feedback(self, question, answer):
        """Record the web agent's answer to a request issued in the dialogue."""
        self.history.append(AIMessage(content=f"Agents's feedback to \"{question}\" is \"{answer}\""))

    def add_user_feedback(self, question, answer):
        """Record the human user's answer to a question raised in the dialogue."""
        self.history.append(SystemMessage(content=f"User's feedback to \"{question}\" is \"{answer}\""))

    def add_proofread(self, proofread):
        """Record a critique produced by the proofreader (Plato)."""
        self.history.append(SystemMessage(content=f"{PLATO}: Message from a proofreader: {proofread}"))

    @staticmethod
    def get_question(text, pattern):
        """Return all DOTALL matches of ``pattern`` in ``text``.

        Despite the singular name this returns the full list of matches, or
        None when there are none.
        """
        matches = re.findall(pattern, text, re.DOTALL)
        if not matches:
            return None
        return matches

    @staticmethod
    def get_user_question(text):
        """Extract question-for-the-user spans from ``text``.

        Fix: ``@staticmethod`` was missing here while the sibling extractors
        have it, so an instance call would have passed ``self`` as ``text``.
        NOTE(review): the pattern's tag delimiters look stripped from the
        original file — as written it matches empty strings; restore the
        original delimiters from upstream.
        """
        pattern = r"(.*?)"
        return SocraticGPT.get_question(text, pattern)

    @staticmethod
    def get_agent_question(text):
        """Extract request-for-the-agent spans from ``text``.

        NOTE(review): tag delimiters appear stripped — see get_user_question.
        """
        pattern = r"(.*?)"
        return SocraticGPT.get_question(text, pattern)

    @staticmethod
    def get_answer(text):
        """Extract final-answer spans from ``text``.

        NOTE(review): tag delimiters appear stripped — see get_user_question.
        """
        pattern = r"(.*?)"
        return SocraticGPT.get_question(text, pattern)