Update app.py
app.py CHANGED
@@ -75,12 +75,15 @@ from datasets import load_dataset
 import huggingface_hub
 from huggingface_hub import Repository
 from datetime import datetime
+import pathlib as Path


-DATASET_REPO_URL = "https://huggingface.co/datasets/Seetha/visual_files/"
+DATASET_REPO_URL = "https://huggingface.co/datasets/Seetha/visual_files/raw/main"
 DATA_FILENAME = "level2.json"
 DATA_FILE = os.path.join(DATASET_REPO_URL, DATA_FILENAME)
-
+
+feedback_file = Path("https://huggingface.co/datasets/Seetha/visual_files/raw/main") / f"level2.json"
+st.write(feedback_file)

 HF_TOKEN = os.environ.get("HF_TOKEN")

@@ -167,18 +170,9 @@ def main():
 entity_list.append(i['entity_group'])

 filename = 'Checkpoint-classification.sav'
-#filename = 'model.bin'
-# count_vect = CountVectorizer(ngram_range=(1,3))
-# tfidf_transformer=TfidfTransformer()
 loaded_model = pickle.load(open(filename, 'rb'))
-#loaded_model = pickle.load(open(filename, 'rb'))
-#loaded_model = joblib.load(filename)
-#loaded_vectorizer = dill.load(open('vectorizefile_classification.pickle', 'rb'))
 loaded_vectorizer = pickle.load(open('vectorizefile_classification.pickle', 'rb'))

-# from sklearn.pipeline import Pipeline
-# pipeline1 = Pipeline([('count_vect',count_vect),('tfidf_transformer',tfidf_transformer)])
-# pipeline_test_output = pipeline1.fit_transform(class_list)
 pipeline_test_output = loaded_vectorizer.transform(class_list)
 predicted = loaded_model.predict(pipeline_test_output)
 pred1 = predicted
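The first hunk points DATASET_REPO_URL at the raw/main endpoint and builds feedback_file with pathlib. Two caveats for anyone adapting this commit: `import pathlib as Path` aliases the module rather than the class, so `Path(...)` raises `TypeError: 'module' object is not callable` (the usual import is `from pathlib import Path`), and a pathlib Path cannot read an HTTP URL in any case. A minimal sketch of one way to actually fetch level2.json, assuming the Seetha/visual_files dataset repo is reachable with the Space's HF_TOKEN; hf_hub_download is the standard huggingface_hub helper for this:

    import json
    import os

    from huggingface_hub import hf_hub_download

    HF_TOKEN = os.environ.get("HF_TOKEN")

    # Download level2.json from the dataset repo into the local HF cache,
    # then parse it; the token is only required if the repo is private.
    local_path = hf_hub_download(
        repo_id="Seetha/visual_files",
        filename="level2.json",
        repo_type="dataset",
        token=HF_TOKEN,
    )
    with open(local_path, "r", encoding="utf-8") as f:
        level2 = json.load(f)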
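The second hunk mostly deletes commented-out experiments (joblib/dill loaders and an unfitted CountVectorizer/TfidfTransformer pipeline), keeping the pickled classifier and its fitted vectorizer. One wrinkle in the surviving code: `pickle.load(open(...))` never closes the file handles. A sketch of the same load-and-predict flow with context managers; class_list stands in for the sentence list built earlier in main(), and the two pickle files are assumed to sit next to app.py:

    import pickle

    # Load the persisted classifier and the vectorizer that was fitted with it
    # (filenames as in the diff); context managers close the files promptly.
    with open('Checkpoint-classification.sav', 'rb') as f:
        loaded_model = pickle.load(f)
    with open('vectorizefile_classification.pickle', 'rb') as f:
        loaded_vectorizer = pickle.load(f)

    class_list = ["an example input sentence"]  # placeholder for the real list

    # Transform with the already-fitted vectorizer (no re-fitting at inference
    # time), then classify.
    pipeline_test_output = loaded_vectorizer.transform(class_list)
    predicted = loaded_model.predict(pipeline_test_output)
    pred1 = predicted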