Delete llm/llm.py
llm/llm.py  +0 -43  (DELETED)
@@ -1,43 +0,0 @@
-import os
-
-import yaml
-import logging
-
-from langchain.prompts import PromptTemplate
-from langchain.chains import LLMChain
-from langchain.llms import HuggingFaceEndpoint
-from .config import config
-
-os.environ["HUGGINGFACEHUB_API_TOKEN"] = "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"
-
-class LLM_chain:
-    def __init__(self):
-        self.llm = HuggingFaceEndpoint(
-            repo_id=config["model"],
-            model_kwargs={"temperature": config["temperature"], "max_new_tokens": config["max_new_tokens"],
-                          "top_k": config["top_k"], "load_in_8bit": config["load_in_8bit"]})
-
-    @staticmethod
-    def __read_yaml():
-        try:
-            # get the directory this module lives in
-            current_dir = os.path.dirname(os.path.realpath(__file__))
-            yaml_file = os.path.join(current_dir, 'prompts.yaml')
-
-            with open(yaml_file, 'r') as file:
-                data = yaml.safe_load(file)
-                return data
-        except Exception as e:
-            logging.error(e)
-
-    def __call__(self, entity: str, id: int = 0):
-        try:
-            data = self.__read_yaml()
-            prompts = data["prompts"][id]  # select a prompt from the yaml; pass a different id to use another prompt
-            template = prompts["prompt_template"]
-            prompt = PromptTemplate(template=template, input_variables=["entity"])
-            llm_chain = LLMChain(prompt=prompt, llm=self.llm, verbose=True)
-            output = llm_chain.invoke(entity)
-            return output["text"]
-        except Exception as e:
-            logging.error(e)
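The deleted module wired a HuggingFaceEndpoint into a LangChain LLMChain, loading prompt templates from a sibling prompts.yaml. For context, below is a hedged usage sketch inferred only from the class's signatures; the prompts.yaml layout and the "solar panels" entity are illustrative assumptions, not part of this commit:

# Hypothetical usage of the (now deleted) LLM_chain class, inferred from
# its __call__(entity, id) signature. Assumes llm/prompts.yaml held entries
# shaped like:
#   prompts:
#     - prompt_template: "Tell me about {entity}."
from llm.llm import LLM_chain

chain = LLM_chain()                   # builds the HuggingFaceEndpoint from config
answer = chain("solar panels", id=0)  # id indexes into the yaml's "prompts" list
print(answer)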