CineAI commited on
Commit
a4a08b7
1 Parent(s): 1e3a233

Update llm/huggingfacehub/hf_model.py

Browse files
Files changed (1) hide show
  1. llm/huggingfacehub/hf_model.py +24 -21
llm/huggingfacehub/hf_model.py CHANGED
@@ -9,14 +9,14 @@ from llm.config import config
9
 
10
  from langchain.prompts import PromptTemplate
11
  from langchain.chains import LLMChain
12
- from langchain.llms import HuggingFaceEndpoint
13
 
14
  logger = logging.getLogger(__name__)
15
 
16
- logger.setLevel(logging.CRITICAL) # because if something goes wrong during execution, the application can't work anymore
17
 
18
  file_handler = logging.FileHandler(
19
- "logs/chelsea_llm_huggingfacehub.log") # for all modules here template for logs file is "../logs/chelsea_{module_name}_{dir_name}.log"
20
  logger.setLevel(logging.INFO) # informed
21
 
22
  formatted = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
@@ -26,16 +26,11 @@ logger.addHandler(file_handler)
26
 
27
  logger.info("Getting information from hf_model module")
28
 
29
- # try:
30
- # os.chdir('/home/user/app/llm/')
31
- # except FileNotFoundError:
32
- # print("Error: Could not move up. You might be at the root directory.")
33
-
34
- # work_dir = os.getcwd()
35
-
36
  llm_dir = '/home/user/app/llm/'
37
 
38
- print("Path to prompts : ", os.path.join(os.getcwd(), "prompts.yaml"))
 
 
39
 
40
 
41
  class HF_Mistaril(HFInterface, ABC):
@@ -46,12 +41,16 @@ class HF_Mistaril(HFInterface, ABC):
46
  self.model_config = config["HF_Mistrail"]
47
 
48
  # TODO: add repetition_penalty, task?, top_p, stop_sequences
49
- self.llm = HuggingFaceEndpoint(
50
  repo_id=self.model_config["model"],
51
- temperature=self.model_config["temperature"],
52
- max_new_tokens=self.model_config["max_new_tokens"],
53
- top_k=self.model_config["top_k"],
54
- model_kwargs={"load_in_8bit": self.model_config["load_in_8bit"]},
 
 
 
 
55
  huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN")
56
  )
57
 
@@ -95,12 +94,16 @@ class HF_TinyLlama(HFInterface, ABC):
95
 
96
  self.model_config = config["HF_TinyLlama"]
97
 
98
- self.llm = HuggingFaceEndpoint(
99
  repo_id=self.model_config["model"],
100
- temperature=self.model_config["temperature"],
101
- max_new_tokens=self.model_config["max_new_tokens"],
102
- top_k=self.model_config["top_k"],
103
- model_kwargs={"load_in_8bit": self.model_config["load_in_8bit"]},
 
 
 
 
104
  huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN")
105
  )
106
 
 
9
 
10
  from langchain.prompts import PromptTemplate
11
  from langchain.chains import LLMChain
12
+ from langchain.llms import HuggingFaceHub
13
 
14
  logger = logging.getLogger(__name__)
15
 
16
+ logger.setLevel(logging.CRITICAL) # because if something goes wrong during execution, the application can't work anyway
17
 
18
  file_handler = logging.FileHandler(
19
+ "logs/chelsea_llm_huggingfacehub.log") # for all modules here template for logs file is "llm/logs/chelsea_{module_name}_{dir_name}.log"
20
  logger.setLevel(logging.INFO) # informed
21
 
22
  formatted = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
 
26
 
27
  logger.info("Getting information from hf_model module")
28
 
 
 
 
 
 
 
 
29
  llm_dir = '/home/user/app/llm/'
30
 
31
+ path_to_yaml = os.path.join(os.getcwd(), "llm/prompts.yaml")
32
+
33
+ print("Path to prompts : ", path_to_yaml)
34
 
35
 
36
  class HF_Mistaril(HFInterface, ABC):
 
41
  self.model_config = config["HF_Mistrail"]
42
 
43
  # TODO: add repetition_penalty, task?, top_p, stop_sequences
44
+ self.llm = HuggingFaceHub(
45
  repo_id=self.model_config["model"],
46
+ # temperature=self.model_config["temperature"],
47
+ # max_new_tokens=self.model_config["max_new_tokens"],
48
+ # top_k=self.model_config["top_k"],
49
+ model_kwargs={"load_in_8bit": self.model_config["load_in_8bit"],
50
+ "temperature": self.model_config["temperature"],
51
+ "max_new_tokens": self.model_config["max_new_tokens"],
52
+ "top_k": self.model_config["top_k"],
53
+ },
54
  huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN")
55
  )
56
 
 
94
 
95
  self.model_config = config["HF_TinyLlama"]
96
 
97
+ self.llm = HuggingFaceHub(
98
  repo_id=self.model_config["model"],
99
+ # temperature=self.model_config["temperature"],
100
+ # max_new_tokens=self.model_config["max_new_tokens"],
101
+ # top_k=self.model_config["top_k"],
102
+ model_kwargs={"load_in_8bit": self.model_config["load_in_8bit"],
103
+ "temperature": self.model_config["temperature"],
104
+ "max_new_tokens": self.model_config["max_new_tokens"],
105
+ "top_k": self.model_config["top_k"],
106
+ },
107
  huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN")
108
  )
109