Spaces:
Sleeping
Sleeping
orionweller
committed on
Commit
•
a3306e2
1
Parent(s):
68ecf38
Update app.py
Browse files
Allow non-expanded docs that weren't ranked; update qrels to show relevance score to see non-relevance
app.py
CHANGED
@@ -483,14 +483,15 @@ if check_valid_args(run1_file, run2_file, dataset_name, qrels, queries, corpus):
|
|
483 |
container_two_docs_rel = st.container()
|
484 |
col_run1, col_run2 = container_two_docs_rel.columns(2, gap="medium")
|
485 |
relevant_docs = list(qrels[str(inst_num)].keys())[:n_relevant_docs]
|
486 |
-
|
|
|
487 |
|
488 |
if doc_expansion1 is not None and run1_uses_doc_expansion != "None":
|
489 |
show_orig_rel1 = col_run1.checkbox("Show Original Relevant Doc(s)", key=f"{inst_index}relorig_run1", value=False)
|
490 |
if doc_expansion2 is not None and run2_uses_doc_expansion != "None":
|
491 |
show_orig_rel2 = col_run2.checkbox("Show Original Relevant Doc(s)", key=f"{inst_index}relorig_run2", value=False)
|
492 |
|
493 |
-
for (docid, title, text) in doc_texts:
|
494 |
if doc_expansion1 is not None and run1_uses_doc_expansion != "None" and not show_orig_rel1:
|
495 |
alt_text = doc_expansion1[docid]["text"]
|
496 |
text = combine(text, alt_text, run1_uses_doc_expansion)
|
@@ -499,22 +500,22 @@ if check_valid_args(run1_file, run2_file, dataset_name, qrels, queries, corpus):
|
|
499 |
if col_run1.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency{docid}relevant", value=False):
|
500 |
col_run1.markdown(get_saliency(query_text1, text),unsafe_allow_html=True)
|
501 |
else:
|
502 |
-
col_run1.text_area(f"{docid}:", text, key=f"{inst_num}doc{docid}1")
|
503 |
else:
|
504 |
-
col_run1.text_area(f"{docid}:", text, key=f"{inst_num}doc{docid}1")
|
505 |
|
506 |
-
for (docid, title, text) in doc_texts:
|
507 |
if doc_expansion2 is not None and run2_uses_doc_expansion != "None" and not show_orig_rel2:
|
508 |
-
alt_text = doc_expansion2[docid]["text"]
|
509 |
text = combine(text, alt_text, run2_uses_doc_expansion)
|
510 |
|
511 |
if use_model_saliency:
|
512 |
if col_run2.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency{docid}relevant2", value=False):
|
513 |
col_run2.markdown(get_saliency(query_text2, text),unsafe_allow_html=True)
|
514 |
else:
|
515 |
-
col_run2.text_area(f"{docid}:", text, key=f"{inst_num}doc{docid}2")
|
516 |
else:
|
517 |
-
col_run2.text_area(f"{docid}:", text, key=f"{inst_num}doc{docid}2")
|
518 |
|
519 |
# top ranked
|
520 |
# NOTE: BEIR calls trec_eval which ranks by score, then doc_id for ties
|
|
|
483 |
container_two_docs_rel = st.container()
|
484 |
col_run1, col_run2 = container_two_docs_rel.columns(2, gap="medium")
|
485 |
relevant_docs = list(qrels[str(inst_num)].keys())[:n_relevant_docs]
|
486 |
+
relevance_score = {ind_doc_id: qrels[str(inst_num)][ind_doc_id] for ind_doc_id in relevant_docs}
|
487 |
+
doc_texts = [(doc_id, corpus[doc_id]["title"] if "title" in corpus[doc_id] else "", corpus[doc_id]["text"], relevance_score[doc_id]) for doc_id in relevant_docs]
|
488 |
|
489 |
if doc_expansion1 is not None and run1_uses_doc_expansion != "None":
|
490 |
show_orig_rel1 = col_run1.checkbox("Show Original Relevant Doc(s)", key=f"{inst_index}relorig_run1", value=False)
|
491 |
if doc_expansion2 is not None and run2_uses_doc_expansion != "None":
|
492 |
show_orig_rel2 = col_run2.checkbox("Show Original Relevant Doc(s)", key=f"{inst_index}relorig_run2", value=False)
|
493 |
|
494 |
+
for (docid, title, text, rel_score) in doc_texts:
|
495 |
if doc_expansion1 is not None and run1_uses_doc_expansion != "None" and not show_orig_rel1:
|
496 |
alt_text = doc_expansion1[docid]["text"] if docid in doc_expansion1 else "<NOT EXPANDED>"
|
497 |
text = combine(text, alt_text, run1_uses_doc_expansion)
|
|
|
500 |
if col_run1.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency{docid}relevant", value=False):
|
501 |
col_run1.markdown(get_saliency(query_text1, text),unsafe_allow_html=True)
|
502 |
else:
|
503 |
+
col_run1.text_area(f"{docid} (Rel: {rel_score}):", text, key=f"{inst_num}doc{docid}1")
|
504 |
else:
|
505 |
+
col_run1.text_area(f"{docid} (Rel: {rel_score}):", text, key=f"{inst_num}doc{docid}1")
|
506 |
|
507 |
+
for (docid, title, text, rel_score) in doc_texts:
|
508 |
if doc_expansion2 is not None and run2_uses_doc_expansion != "None" and not show_orig_rel2:
|
509 |
+
alt_text = doc_expansion2[docid]["text"] if docid in doc_expansion2 else "<NOT EXPANDED>"
|
510 |
text = combine(text, alt_text, run2_uses_doc_expansion)
|
511 |
|
512 |
if use_model_saliency:
|
513 |
if col_run2.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency{docid}relevant2", value=False):
|
514 |
col_run2.markdown(get_saliency(query_text2, text),unsafe_allow_html=True)
|
515 |
else:
|
516 |
+
col_run2.text_area(f"{docid} (Rel: {rel_score}):", text, key=f"{inst_num}doc{docid}2")
|
517 |
else:
|
518 |
+
col_run2.text_area(f"{docid} (Rel: {rel_score}):", text, key=f"{inst_num}doc{docid}2")
|
519 |
|
520 |
# top ranked
|
521 |
# NOTE: BEIR calls trec_eval which ranks by score, then doc_id for ties
|