Update llm/utils/chat.py
Browse files — llm/utils/chat.py (+3, -5)
llm/utils/chat.py
CHANGED
@@ -2,8 +2,7 @@ import time
|
|
2 |
import logging
|
3 |
|
4 |
from llm.apimodels.gemini_model import Gemini
|
5 |
-
from llm.apimodels.hf_model import HF_Mistaril, HF_TinyLlama
|
6 |
-
from llm.llamacpp.lc_model import LC_Phi3, LC_TinyLlama
|
7 |
|
8 |
from typing import Optional, Any
|
9 |
|
@@ -27,7 +26,6 @@ def prettify(raw_text: str) -> str:
|
|
27 |
pretty = raw_text.replace("**", "")
|
28 |
return pretty.strip()
|
29 |
|
30 |
-
# option + command + F -> replace
|
31 |
|
32 |
memory: ConversationBufferWindowMemory = ConversationBufferWindowMemory(k=3, ai_prefix="Chelsea")
|
33 |
|
@@ -92,10 +90,10 @@ class Conversation:
|
|
92 |
|
93 |
Args:
|
94 |
model_classes (list, optional): A list of LLM model classes to try in sequence.
|
95 |
-
Defaults to [Gemini, HF_Mistaril, HF_TinyLlama, LC_Phi3, LC_TinyLlama].
|
96 |
"""
|
97 |
|
98 |
-
self.model_classes = [Gemini, HF_Mistaril, HF_TinyLlama, LC_Phi3, LC_TinyLlama]
|
99 |
self.current_model_index = 0
|
100 |
|
101 |
def _get_conversation(self) -> Any:
|
|
|
2 |
import logging
|
3 |
|
4 |
from llm.apimodels.gemini_model import Gemini
|
5 |
+
from llm.apimodels.hf_model import HF_Mistaril, HF_TinyLlama, HF_SmolLM135, HF_SmolLM360, HF_SmolLM, HF_Gemma2, HF_Qwen2
|
|
|
6 |
|
7 |
from typing import Optional, Any
|
8 |
|
|
|
26 |
pretty = raw_text.replace("**", "")
|
27 |
return pretty.strip()
|
28 |
|
|
|
29 |
|
30 |
memory: ConversationBufferWindowMemory = ConversationBufferWindowMemory(k=3, ai_prefix="Chelsea")
|
31 |
|
|
|
90 |
|
91 |
Args:
|
92 |
model_classes (list, optional): A list of LLM model classes to try in sequence.
|
93 |
+
Defaults to [Gemini, HF_Mistaril, HF_TinyLlama, HF_SmolLM135, HF_SmolLM360, HF_SmolLM, HF_Gemma2, HF_Qwen2].
|
94 |
"""
|
95 |
|
96 |
+
self.model_classes = [Gemini, HF_Mistaril, HF_TinyLlama, HF_SmolLM135, HF_SmolLM360, HF_SmolLM, HF_Gemma2, HF_Qwen2]
|
97 |
self.current_model_index = 0
|
98 |
|
99 |
def _get_conversation(self) -> Any:
|