from langchain.document_loaders.unstructured import UnstructuredFileLoader
from transformers import AutoTokenizer, T5ForConditionalGeneration
from retrieval.retrieval import Retrieval
import time


class Agent:
    def __init__(self, args=None) -> None:
        # args is expected to provide: model, tokenizer, device, seq_len, out_len, choices
        self.args = args
        self.corpus = Retrieval()        # default retrieval corpus, used when no user context is loaded
        self.choices = args.choices      # number of contexts to retrieve
        self.context_value = ""
        self.use_context = False
        print("Model is loading...")
        self.model = T5ForConditionalGeneration.from_pretrained(args.model).to(args.device)
        self.tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
        print("Model loaded!")
    def load_context(self, doc_path):
        """Load a document with UnstructuredFileLoader and index it for retrieval."""
        loader = UnstructuredFileLoader(doc_path.name)
        print('Loading file:', doc_path.name)
        context = loader.load()[0].page_content
        self.retrieval = Retrieval(docs=context)
        self.choices = self.retrieval.k
        self.use_context = True
        return f"Using file from {doc_path.name}"
    def asking(self, question):
        """Retrieve contexts for the question and generate one answer per context."""
        s_query = time.time()
        if self.use_context:
            print("Answering with your context")
            contexts = self.retrieval.get_context(question)
        else:
            print("Answering without your context")
            contexts = self.corpus.get_context(question)

        # Build one prompt per retrieved context. The Vietnamese template
        # "Trả lời câu hỏi: ... Trong nội dung: ..." means
        # "Answer the question: ... In the content: ...".
        prompts = []
        for context in contexts:
            prompt = f"Trả lời câu hỏi: {question} Trong nội dung: {context['context']}"
            prompts.append(prompt)

        s_token = time.time()
        tokens = self.tokenizer(
            prompts,
            max_length=self.args.seq_len,
            truncation=True,
            padding='max_length',
            return_tensors='pt'
        )

        s_gen = time.time()
        outputs = self.model.generate(
            input_ids=tokens.input_ids.to(self.args.device),
            attention_mask=tokens.attention_mask.to(self.args.device),
            max_new_tokens=self.args.out_len
        )

        s_de = time.time()
        answers = []
        for output in outputs:
            sequence = self.tokenizer.decode(output, skip_special_tokens=True)
            answers.append(sequence)
        self.temp = [contexts, answers]  # keep the last (contexts, answers) pair for inspection

        # Per-stage timing: retrieval, tokenization, generation, decoding.
        t_mess = "t_query: {:.2f}\t t_token: {:.2f}\t t_gen: {:.2f}\t t_decode: {:.2f}\t".format(
            s_token - s_query, s_gen - s_token, s_de - s_gen, time.time() - s_de
        )
        print(t_mess)
        return answers
    def get_context(self, context):
        """Use a raw text string as the retrieval context."""
        self.context_value = context
        self.retrieval = Retrieval(docs=context)
        self.choices = self.retrieval.k
        self.use_context = True
        return context
    def load_context_file(self, file):
        """Read a plain-text file and store its content as the current context value."""
        print('Loading file:', file.name)
        with open(file.name, 'r', encoding='utf8') as f:
            text = f.read()
        self.context_value = text
        return text
    def clear_context(self):
        """Drop any user-provided context and fall back to the default corpus."""
        self.context_value = ""
        self.use_context = False
        self.choices = self.args.choices
        return ""
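

# Minimal usage sketch (an assumption, not part of the original Space): Agent expects
# an argparse-style namespace exposing the attributes read above (model, tokenizer,
# device, seq_len, out_len, choices). The checkpoint path and values below are
# placeholders only.
if __name__ == "__main__":
    from argparse import Namespace

    args = Namespace(
        model="path/to/t5-checkpoint",      # placeholder model checkpoint
        tokenizer="path/to/t5-checkpoint",  # placeholder tokenizer
        device="cpu",
        seq_len=512,   # max input length passed to the tokenizer
        out_len=128,   # max_new_tokens passed to generate()
        choices=3,     # number of retrieved contexts
    )
    agent = Agent(args)
    # The corpus and prompt template are Vietnamese, so the question should be too;
    # this string is only a placeholder.
    print(agent.asking("Câu hỏi ở đây"))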