Spaces: Running on Zero
Commit · 865ad03
1 Parent(s): 1083a3c

change the position of initialize the embedding model

app.py CHANGED
@@ -7,9 +7,20 @@ from langchain_community.vectorstores.faiss import FAISS
 from langchain_huggingface import HuggingFaceEmbeddings
 from huggingface_hub import snapshot_download
 
+def create_embedding_model(model_name):
+    """Create embedding model instance"""
+    return HuggingFaceEmbeddings(
+        model_name=model_name,
+        model_kwargs={'trust_remote_code': True}
+    )
+
 class RAGChatbot:
     def __init__(self):
+        # First create embeddings directly
+        self.embeddings = create_embedding_model('intfloat/multilingual-e5-large-instruct')
+        # Then initialize other models
         self.init_models()
+        # Finally initialize vector store
         self.init_vector_store()
 
         self.background_prompt = '''
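This hunk hoists embedding construction out of the GPU-decorated init_models() into a module-level factory, and __init__ now builds self.embeddings before init_models() and init_vector_store(), so the embeddings exist by the time the vector store loads. A minimal sketch of the factory in isolation, assuming only the imports already shown; the sample query and the dimension check are illustrative, not part of the commit:

from langchain_huggingface import HuggingFaceEmbeddings

def create_embedding_model(model_name):
    """Create embedding model instance"""
    return HuggingFaceEmbeddings(
        model_name=model_name,
        model_kwargs={'trust_remote_code': True}
    )

# Illustrative usage (not in the commit): embed a query and check its size.
embeddings = create_embedding_model('intfloat/multilingual-e5-large-instruct')
vector = embeddings.embed_query("What constitutes a breach of contract?")
print(len(vector))  # e5-large models emit 1024-dimensional vectors

Since the factory is plain CPU-side code, constructing it in __init__ keeps the embedding model outside any ZeroGPU allocation.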
@@ -68,41 +79,10 @@ class RAGChatbot:
         Now, please guide me step by step to describe the legal issues I am facing, according to the above requirements.
         '''
 
-
-import os
-import spaces
-from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
-from threading import Thread
-from langchain_community.vectorstores.faiss import FAISS
-from langchain_huggingface import HuggingFaceEmbeddings
-from huggingface_hub import snapshot_download
-
-class RAGChatbot:
-    def __init__(self):
-        # First initialize models to create embeddings
-        self.init_models()
-        # Then initialize vector store which uses embeddings
-        self.init_vector_store()
-
-        self.background_prompt = '''
-        As an AI legal assistant, you are a highly trained expert in U.S. and Canadian law...
-        [rest of your existing background prompt]
-        '''
-
-    @spaces.GPU(duration=120)
+    @spaces.GPU
     def init_models(self):
-        """Initialize the LLM
-        print("Initializing
-
-        # Embedding model initialization first
-        print("Loading embedding model...")
-        self.embeddings = HuggingFaceEmbeddings(
-            model_name='intfloat/multilingual-e5-large-instruct',
-            model_kwargs={'trust_remote_code': True}
-        )
-
-        # LLM initialization
-        print("Loading LLM model...")
+        """Initialize the LLM model"""
+        print("Initializing LLM model...")
         self.llm_model_name = 'StevenChen16/llama3-8b-Lawyer'
         self.tokenizer = AutoTokenizer.from_pretrained(self.llm_model_name)
         self.model = AutoModelForCausalLM.from_pretrained(
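Besides deleting the duplicated module block, this hunk changes the decorator from @spaces.GPU(duration=120) to bare @spaces.GPU, falling back to the spaces library's default allocation window rather than requesting 120 seconds. A sketch of the two forms on a ZeroGPU Space; the function names and bodies are placeholders, not code from this repo:

import spaces

@spaces.GPU  # default allocation window
def generate_short(prompt):
    return prompt  # placeholder body

@spaces.GPU(duration=120)  # explicitly request a 120-second window
def generate_long(prompt):
    return prompt  # placeholder body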
@@ -113,7 +93,7 @@ class RAGChatbot:
             self.tokenizer.eos_token_id,
             self.tokenizer.convert_tokens_to_ids("<|eot_id|>")
         ]
-        print("
+        print("LLM model initialized successfully")
 
     def init_vector_store(self):
         """Load vector store from HuggingFace Hub"""
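The terminator list kept as context here is the usual Llama 3 stopping setup: halt generation at either the tokenizer's EOS token or <|eot_id|>. A sketch of how such a list is typically passed to generate(); the prompt and max_new_tokens are illustrative, and the hunk itself does not show this call site:

from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = 'StevenChen16/llama3-8b-Lawyer'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Stop on either the model's EOS token or Llama 3's end-of-turn token.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

inputs = tokenizer("Summarize the elements of negligence.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64, eos_token_id=terminators)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))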
@@ -128,7 +108,7 @@ class RAGChatbot:
         print("Loading vector store...")
         # Load the vector store from downloaded files
         self.vector_store = FAISS.load_local(
-            folder_path=repo_path,
+            folder_path=repo_path,
             embeddings=self.embeddings,
             allow_dangerous_deserialization=True
         )
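init_vector_store() (only its tail is shown) downloads a prebuilt index with snapshot_download and loads it with FAISS.load_local. A sketch of that load path under stated assumptions: the repo id and repo_type are placeholders, since neither appears in the hunk, and allow_dangerous_deserialization=True is needed because loading a saved FAISS store unpickles its metadata:

from huggingface_hub import snapshot_download
from langchain_community.vectorstores.faiss import FAISS
from langchain_huggingface import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings(
    model_name='intfloat/multilingual-e5-large-instruct',
    model_kwargs={'trust_remote_code': True}
)

repo_path = snapshot_download(
    repo_id="your-username/legal-faiss-index",  # placeholder; not shown in the diff
    repo_type="dataset",                        # assumed; not shown in the diff
)

vector_store = FAISS.load_local(
    folder_path=repo_path,
    embeddings=embeddings,
    allow_dangerous_deserialization=True  # FAISS stores pickle their docstore
)

docs = vector_store.similarity_search("What constitutes a breach of contract?", k=3)
print([d.page_content[:80] for d in docs])

A store must be loaded with the same embedding model that built it, which is presumably why the commit constructs self.embeddings before init_vector_store() runs.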