# llm/llm.py
import os
import yaml
import logging
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import HuggingFaceHub
from .config import config

class LLM_chain:
    def __init__(self):
        # Wrap a Hugging Face Hub model as a LangChain LLM, configured from .config
        self.llm = HuggingFaceHub(
            repo_id=config["model"],
            model_kwargs={"temperature": config["temperature"],
                          "max_new_tokens": config["max_new_tokens"],
                          "top_k": config["top_k"],
                          "load_in_8bit": config["load_in_8bit"]})
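    # A minimal sketch of the config mapping this constructor assumes; the
    # repo_id shown is a hypothetical example, not taken from the original
    # .config module:
    #
    #   config = {
    #       "model": "mistralai/Mistral-7B-Instruct-v0.2",  # hypothetical
    #       "temperature": 0.5,
    #       "max_new_tokens": 256,
    #       "top_k": 50,
    #       "load_in_8bit": False,
    #   }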
    def __read_yaml(self):
        """Load prompt templates from prompts.yaml located next to this module."""
        try:
            # Resolve prompts.yaml relative to this file's directory
            current_dir = os.path.dirname(os.path.realpath(__file__))
            yaml_file = os.path.join(current_dir, "prompts.yaml")
            with open(yaml_file, "r") as file:
                data = yaml.safe_load(file)
            return data
        except Exception as e:
            # Log and fall through to an implicit None return
            logging.error(e)
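    # __call__ below reads data["prompts"]["prompt_template"][1], so
    # prompts.yaml is assumed to hold a list of templates, each containing an
    # {entity} placeholder; the template strings here are hypothetical:
    #
    #   prompts:
    #     prompt_template:
    #       - "Tell me about {entity}."
    #       - "Answer the following question about {entity} concisely."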
    def __call__(self, entity: str, id: int = 0):
        # Note: the id parameter is currently unused.
        try:
            data = self.__read_yaml()
            logging.debug("Loaded prompt data: %s", data)
            prompts = data["prompts"]
            # Use the second template in the prompt_template list
            template = prompts["prompt_template"][1]
            prompt = PromptTemplate(template=template, input_variables=["entity"])
            llm_chain = LLMChain(prompt=prompt, llm=self.llm, verbose=True)
            output = llm_chain.invoke(entity)
            return output["text"]
        except Exception as e:
            logging.error(e)
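

# A minimal usage sketch, assuming .config and prompts.yaml exist as described
# above; this block is not part of the original module. Because of the relative
# import, run it as a package module (e.g. `python -m llm.llm`), and make sure
# HUGGINGFACEHUB_API_TOKEN is set so HuggingFaceHub can authenticate.
if __name__ == "__main__":
    chain = LLM_chain()
    print(chain("large language models"))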