Update llm/utils/chat.py
Browse files- llm/utils/chat.py +20 -3
llm/utils/chat.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
import time
|
2 |
|
3 |
from llm.apimodels.gemini_model import Gemini
|
4 |
-
from llm.apimodels.hf_model import HF_Mistaril, HF_TinyLlama, HF_SmolLM135, HF_SmolLM360, HF_SmolLM, HF_Gemma2, HF_Qwen2
|
5 |
|
6 |
from typing import Optional, Any
|
7 |
|
@@ -91,7 +91,19 @@ class Conversation:
|
|
91 |
except Exception as e:
|
92 |
print(f"Error during conversation chain in get_conversation function: {e}")
|
93 |
|
94 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
95 |
"""
|
96 |
Carries out the conversation with the user, handling errors and delays.
|
97 |
|
@@ -99,12 +111,17 @@ class Conversation:
|
|
99 |
prompt(str): The prompt to be used for prediction.
|
100 |
|
101 |
Returns:
|
102 |
-
|
103 |
"""
|
104 |
|
105 |
if prompt is None or prompt == "":
|
106 |
raise Exception(f"Prompt must be string not None or empty string: {prompt}")
|
107 |
|
|
|
|
|
|
|
|
|
|
|
108 |
while self.current_model_index < len(self.model_classes):
|
109 |
conversation = self._get_conversation()
|
110 |
|
|
|
1 |
import time
|
2 |
|
3 |
from llm.apimodels.gemini_model import Gemini
|
4 |
+
from llm.apimodels.hf_model import HF_Mistaril, HF_TinyLlama, HF_SmolLM135, HF_SmolLM360, HF_SmolLM, HF_Gemma2, HF_Qwen2, FreeThinker3B
|
5 |
|
6 |
from typing import Optional, Any
|
7 |
|
|
|
91 |
except Exception as e:
|
92 |
print(f"Error during conversation chain in get_conversation function: {e}")
|
93 |
|
94 |
+
def _libertarian_conversation(self, prompt) -> Optional[str]:
    """
    Run a single prediction against the self-hosted FreeThinker3B model.

    Args:
        prompt: The user prompt to send to the model.

    Returns:
        Optional[str]: The prettified model response, or None if prediction
        raises any exception.
    """
    # NOTE(review): `memory` is referenced as a bare name here — presumably a
    # module-level object or it should be `self.memory`; confirm it is in scope.
    conversation = ConversationChain(llm=FreeThinker3B().execution(), memory=memory, return_final_only=True)

    try:
        response = conversation.predict(input=prompt)
        print(f"response: {response}")
        result = prettify(raw_text=response)
        return result
    except Exception as e:
        # Fixed copy-pasted error message: this is _libertarian_conversation,
        # not has_failed.
        print(f"Error during prediction with conversation in _libertarian_conversation function: {e}")
        return None
|
105 |
+
|
106 |
+
def chatting(self, prompt: str, is_own_model: bool) -> str:
|
107 |
"""
|
108 |
Carries out the conversation with the user, handling errors and delays.
|
109 |
|
|
|
111 |
prompt(str): The prompt to be used for prediction.
|
112 |
|
113 |
Returns:
|
114 |
+
str: The final conversation response or None if all models fail.
|
115 |
"""
|
116 |
|
117 |
if prompt is None or prompt == "":
|
118 |
raise Exception(f"Prompt must be string not None or empty string: {prompt}")
|
119 |
|
120 |
+
if is_own_model:
|
121 |
+
result = self._libertarian_conversation(prompt)
|
122 |
+
|
123 |
+
return result
|
124 |
+
|
125 |
while self.current_model_index < len(self.model_classes):
|
126 |
conversation = self._get_conversation()
|
127 |
|