Commit 1ece544
Parent(s): 86a9a82

fix bugs for hg 1.17

Files changed:
- app.py +14 -10
- requirements.txt +1 -1
app.py
CHANGED
@@ -15,24 +15,25 @@ repo_id = "friendshipkim/IUR_Reddit"
 # ABSOLUTE_PATH = os.path.dirname(__file__)
 # ASSETS_PATH = os.path.join(ABSOLUTE_PATH, 'model_assets')

-@st.
+@st.cache
 def preprocess_text(s):
     return list(filter(lambda x: x!= '', (''.join(c if c.isalnum() or c == ' ' else ' ' for c in s)).split(' ')))

-@st.
+@st.cache
 def get_pairwise_distances(model):
     # df = pd.read_csv(f"{ASSETS_PATH}/{model}/pairwise_distances.csv").set_index('index')
     df = pd.read_csv(hf_hub_download(repo_id=repo_id, filename="pairwise_distances.csv")).set_index('index')
     return df

-@st.
+@st.cache
 def get_pairwise_distances_chunked(model, chunk):
     # for df in pd.read_csv(f"{ASSETS_PATH}/{model}/pairwise_distances.csv", chunksize = 16):
     #     print(df.iloc[0]['queries'])
     #     if chunk == int(df.iloc[0]['queries']):
     #         return df
     return get_pairwise_distances(model)
-
+
+@st.cache
 def get_query_strings():
     # df = pd.read_json(f"{ASSETS_PATH}/IUR_Reddit_test_queries_english.jsonl", lines = True)
     df = pd.read_json(hf_hub_download(repo_id=repo_id, filename="IUR_Reddit_test_queries_english.jsonl"), lines = True)
@@ -42,7 +43,8 @@ def get_query_strings():
     # df.to_parquet(f"{ASSETS_PATH}/IUR_Reddit_test_queries_english.parquet", index = 'index', partition_cols = 'partition')

     # return pd.read_parquet(f"{ASSETS_PATH}/IUR_Reddit_test_queries_english.parquet", columns=['fullText', 'index', 'authorIDs'])
-
+
+@st.cache
 def get_candidate_strings():
     # df = pd.read_json(f"{ASSETS_PATH}/IUR_Reddit_test_candidates_english.jsonl", lines = True)
     df = pd.read_json(hf_hub_download(repo_id=repo_id, filename="IUR_Reddit_test_candidates_english.jsonl"), lines = True)
@@ -52,26 +54,28 @@ def get_candidate_strings():
     # df.to_parquet(f"{ASSETS_PATH}/IUR_Reddit_test_candidates_english.parquet", index = 'index', partition_cols = 'partition')
     # return pd.read_parquet(f"{ASSETS_PATH}/IUR_Reddit_test_candidates_english.parquet", columns=['fullText', 'index', 'authorIDs'])

-@st.
+@st.cache
 def get_embedding_dataset(model):
     # data = load_from_disk(f"{ASSETS_PATH}/{model}/embedding")
     data = load_dataset("friendshipkim/luar_clone2_top_100_embedding")
     return data

-@st.
+@st.cache
 def get_bad_queries(model):
     df = get_query_strings().iloc[list(get_pairwise_distances(model)['queries'].unique())][['fullText', 'index', 'authorIDs']]
     return df
-
+
+@st.cache
 def get_gt_candidates(model, author):
     gt_candidates = get_candidate_strings()
     df = gt_candidates[gt_candidates['authorIDs'].apply(lambda x: x[0]) == author]
     return df
-
+
+@st.cache
 def get_candidate_text(l):
     return get_candidate_strings().at[l,'fullText']

-@st.
+@st.cache
 def get_annotated_text(text, word, pos):
     # print("here", word, pos)
     start= text.index(word, pos)
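For reference, the caching pattern the new decorators rely on is sketched below: st.cache memoizes a function's return value across reruns of the Streamlit script, so the Hub download and CSV parse happen only once per distinct argument. This is a minimal standalone sketch, not the app itself; it assumes Streamlit 1.17.0 (where st.cache is available) and the pairwise_distances.csv file in the friendshipkim/IUR_Reddit repo shown in the diff, and the "luar_clone2_top_100" argument in the last line is purely illustrative.

import pandas as pd
import streamlit as st
from huggingface_hub import hf_hub_download

repo_id = "friendshipkim/IUR_Reddit"

@st.cache
def get_pairwise_distances(model):
    # hf_hub_download fetches the file from the Hub (and caches it on disk);
    # st.cache then memoizes the parsed DataFrame for repeated reruns
    path = hf_hub_download(repo_id=repo_id, filename="pairwise_distances.csv")
    return pd.read_csv(path).set_index('index')

st.dataframe(get_pairwise_distances("luar_clone2_top_100"))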
requirements.txt
CHANGED
@@ -68,7 +68,7 @@ urllib3==1.26.13
 wasabi==0.10.1
 xxhash==3.1.0
 yarl==1.8.2
-streamlit==1.
+streamlit==1.17.0
 streamlit-aggrid
 st-annotated-text
 --extra-index-url http://download.pytorch.org/whl/cu116
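The dependency change pins Streamlit to 1.17.0. As a purely illustrative guard that is not part of the repo, a startup check along these lines makes a mismatched Streamlit version fail fast instead of surfacing later in the app:

import streamlit as st
from packaging import version

# requirements.txt pins streamlit==1.17.0; stop early if the environment drifted
if version.parse(st.__version__) < version.parse("1.17.0"):
    raise RuntimeError(f"streamlit {st.__version__} installed, but 1.17.0 or newer is expected")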