Update llm/utils/chat.py
llm/utils/chat.py  +1 -18
CHANGED
@@ -1,5 +1,4 @@
 import time
-import logging
 
 from llm.apimodels.gemini_model import Gemini
 from llm.apimodels.hf_model import HF_Mistaril, HF_TinyLlama, HF_SmolLM135, HF_SmolLM360, HF_SmolLM, HF_Gemma2, HF_Qwen2
@@ -9,19 +8,6 @@ from typing import Optional, Any
 from langchain.chains.conversation.memory import ConversationBufferWindowMemory
 from langchain.chains import ConversationChain
 
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.WARNING)
-
-file_handler = logging.FileHandler(
-    "logs/chelsea_llm_chat.log")  # for all modules here template for logs file is "llm/logs/chelsea_{module_name}_{entity}.log"
-logger.setLevel(logging.INFO)  # informed
-
-formatted = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
-file_handler.setFormatter(formatted)
-
-logger.addHandler(file_handler)
-logger.info("Getting information from chat module")
-
 def prettify(raw_text: str) -> str:
     pretty = raw_text.replace("**", "")
     return pretty.strip()
@@ -49,7 +35,6 @@ def has_failed(conversation, prompt) -> Optional[str]:
         result = prettify(raw_text=response)
         return result
     except Exception as e:
-        logger.error(msg="Error during prediction with conversation in has_failed function", exc_info=e)
         print(f"Error during prediction with conversation in has_failed function: {e}")
         return None
 
@@ -79,7 +64,6 @@ def has_delay(conversation, prompt) -> Optional[str]:
         return result  # Return the prettified response
 
     except Exception as e:
-        logger.error(msg="Error during prediction with conversation in has_delay function", exc_info=e)
         print(f"Error during prediction with conversation in has_delay function: {e}")
 
 
@@ -93,7 +77,7 @@ class Conversation:
         Defaults to [Gemini, HF_SmolLM135, HF_SmolLM360, HF_TinyLlama, HF_SmolLM, HF_Gemma2, HF_Mistaril, HF_Qwen2].
         """
 
-        self.model_classes = [
+        self.model_classes = [Gemini, HF_Gemma2, HF_SmolLM, HF_SmolLM360, HF_Mistaril, HF_Qwen2, HF_TinyLlama, HF_SmolLM135]
         self.current_model_index = 0
 
     def _get_conversation(self) -> Any:
@@ -105,7 +89,6 @@ class Conversation:
             print("current model class is: ", current_model_class)
             return ConversationChain(llm=current_model_class().execution(), memory=memory, return_final_only=True)
         except Exception as e:
-            logger.error(msg="Error during conversation chain in get_conversation function", exc_info=e)
             print(f"Error during conversation chain in get_conversation function: {e}")
 
     def chatting(self, prompt: str) -> str:
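The removed block configured a logging.FileHandler at module import time, set the logger level twice (WARNING, then INFO), and wrote into logs/ as a side effect of importing the module. If file logging is wanted again, the usual pattern is to configure handlers once at the application entry point; a minimal sketch, assuming a hypothetical startup module that this commit does not show:

    import logging

    # Hypothetical entry-point setup (not part of this commit): configure
    # the root logger once at startup so importing llm.utils.chat has no
    # filesystem side effects.
    logging.basicConfig(
        filename="logs/chelsea_llm_chat.log",
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
    )

    # Modules then only request a named logger:
    logger = logging.getLogger(__name__)
    logger.info("chat module initialised")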
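The single added line reorders the fallback list: Gemini is still tried first, but HF_Gemma2 and HF_SmolLM are promoted ahead of HF_TinyLlama and HF_SmolLM135. The chatting() body is truncated in this view; a minimal sketch of the rotation the diff implies, where everything beyond model_classes, current_model_index, _get_conversation and has_failed visible above is an assumption:

    def chatting(self, prompt: str) -> str:
        # Sketch only: walk the priority list, building a ConversationChain
        # for the current model and falling through to the next class when
        # a prediction fails (has_failed returns None on error).
        while self.current_model_index < len(self.model_classes):
            conversation = self._get_conversation()
            response = has_failed(conversation, prompt)
            if response is not None:
                return response
            self.current_model_index += 1  # assumed: advance on failure
        return "All models failed to respond."  # assumed fallback message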