Update functions.py
Browse files — functions.py (+8 −3)
functions.py
CHANGED
@@ -38,10 +38,15 @@ def load_models():
|
|
38 |
sent_pipe = pipeline("text-classification",model=q_model, tokenizer=q_tokenizer)
|
39 |
sum_pipe = pipeline("summarization",model="facebook/bart-large-cnn", tokenizer="facebook/bart-large-cnn",clean_up_tokenization_spaces=True)
|
40 |
ner_pipe = pipeline("ner", model=ner_model, tokenizer=ner_tokenizer, grouped_entities=True)
|
41 |
-
sbert = SentenceTransformer("all-mpnet-base-v2")
|
42 |
cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-12-v2')
|
43 |
|
44 |
-
return asr_model, sent_pipe, sum_pipe, ner_pipe,
|
|
|
|
|
|
|
|
|
|
|
|
|
45 |
|
46 |
@st.experimental_singleton(suppress_st_warning=True)
|
47 |
def get_spacy():
|
@@ -306,4 +311,4 @@ def fin_ext(text):
|
|
306 |
return make_spans(text,results)
|
307 |
|
308 |
nlp = get_spacy()
|
309 |
-
asr_model, sent_pipe, sum_pipe, ner_pipe,
|
|
|
38 |
sent_pipe = pipeline("text-classification",model=q_model, tokenizer=q_tokenizer)
|
39 |
sum_pipe = pipeline("summarization",model="facebook/bart-large-cnn", tokenizer="facebook/bart-large-cnn",clean_up_tokenization_spaces=True)
|
40 |
ner_pipe = pipeline("ner", model=ner_model, tokenizer=ner_tokenizer, grouped_entities=True)
|
|
|
41 |
cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-12-v2')
|
42 |
|
43 |
+
return asr_model, sent_pipe, sum_pipe, ner_pipe, cross_encoder
|
44 |
+
|
45 |
+
@st.experimental_singleton(suppress_st_warning=True)
def load_sbert(model_name):
    """Load a SentenceTransformer embedding model by name.

    Cached as a Streamlit singleton so the expensive model download and
    construction happen once per session rather than on every app rerun.

    Args:
        model_name: Hugging Face model id or local path accepted by
            SentenceTransformer (e.g. "all-mpnet-base-v2").

    Returns:
        The constructed SentenceTransformer instance.
    """
    # No intermediate variable needed — return the model directly.
    return SentenceTransformer(model_name)
|
50 |
|
51 |
@st.experimental_singleton(suppress_st_warning=True)
|
52 |
def get_spacy():
|
|
|
311 |
return make_spans(text,results)
|
312 |
|
313 |
nlp = get_spacy()
asr_model, sent_pipe, sum_pipe, ner_pipe, cross_encoder = load_models()
|