SamuelYang
committed on
Commit
•
e8c5875
1
Parent(s):
3082e6e
Upload ms_marco_pvalue.py
Browse files- ms_marco_pvalue.py +189 -0
ms_marco_pvalue.py
ADDED
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This module computes evaluation metrics for MSMARCO dataset on the ranking task. Intenral hard coded eval files version. DO NOT PUBLISH!
|
3 |
+
Command line:
|
4 |
+
python msmarco_eval_ranking.py <path_to_candidate_file>
|
5 |
+
|
6 |
+
Creation Date : 06/12/2018
|
7 |
+
Last Modified : 4/09/2019
|
8 |
+
Authors : Daniel Campos <dacamp@microsoft.com>, Rutger van Haasteren <ruvanh@microsoft.com>
|
9 |
+
"""
|
10 |
+
import sys
|
11 |
+
import statistics
|
12 |
+
|
13 |
+
from collections import Counter
|
14 |
+
|
15 |
+
|
16 |
+
def load_reference_from_stream(f):
    """Load reference relevant passages from an open stream.

    Args:
        f: iterable of lines, each tab-separated as "<qid>\t<pid>".

    Returns:
        dict: query_id (int) -> list of relevant passage ids (ints).

    Raises:
        IOError: if a line is not in the expected format.
    """
    qids_to_relevant_passageids = {}
    for line in f:
        try:
            fields = line.strip().split('\t')
            qid = int(fields[0])
            # A qid may repeat on several lines; accumulate all of its pids.
            qids_to_relevant_passageids.setdefault(qid, []).append(int(fields[1]))
        except (ValueError, IndexError) as err:
            raise IOError('"%s" is not valid format' % line) from err
    return qids_to_relevant_passageids
|
34 |
+
|
35 |
+
|
36 |
+
def load_reference(path_to_reference):
    """Load reference relevant passages from a file on disk.

    Args:
        path_to_reference (str): path of the qrels file to read.

    Returns:
        dict: query_id (int) -> list of relevant passage ids (ints).
    """
    with open(path_to_reference, 'r') as ref_file:
        return load_reference_from_stream(ref_file)
|
44 |
+
|
45 |
+
|
46 |
+
def load_candidate_from_stream(f):
    """Load ranked candidate passages from an open stream.

    Args:
        f: iterable of lines, each tab-separated as "<qid>\t<pid>\t<rank>".

    Returns:
        dict: query_id (int) -> list of 1000 passage ids (int) indexed by
        rank - 1. Unfilled ranks remain 0 (0 is never a real pid).

    Raises:
        IOError: if a line is not in the expected format.
    """
    qid_to_ranked_candidate_passages = {}
    for line in f:
        try:
            fields = line.strip().split('\t')
            qid = int(fields[0])
            pid = int(fields[1])
            rank = int(fields[2])
            # By default, all 1000 slots are 0; only override those given.
            ranking = qid_to_ranked_candidate_passages.setdefault(qid, [0] * 1000)
            ranking[rank - 1] = pid
        except (ValueError, IndexError) as err:
            raise IOError('"%s" is not valid format' % line) from err
    return qid_to_ranked_candidate_passages
|
68 |
+
|
69 |
+
|
70 |
+
def load_candidate(path_to_candidate):
    """Load ranked candidate passages from a file on disk.

    Args:
        path_to_candidate (str): path of the run file to read.

    Returns:
        dict: query_id (int) -> list of 1000 passage ids (int) ranked by
        relevance; unfilled ranks are 0.
    """
    with open(path_to_candidate, 'r') as run_file:
        return load_candidate_from_stream(run_file)
|
79 |
+
|
80 |
+
|
81 |
+
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Perform quality checks on the candidate rankings.

    Args:
        qids_to_relevant_passageids (dict): query -> relevant passage ids,
            as read by load_reference / load_reference_from_stream.
        qids_to_ranked_candidate_passages (dict): query -> ranked candidate pids.

    Returns:
        (bool, str): whether the submission is allowed, and a message to be
        shown in case of a problem (empty string otherwise).
    """
    message = ''
    allowed = True

    # A passage may be ranked at most once per query. 0 is the filler value
    # in the fixed-length candidate lists, so repeated 0s are not an error.
    for qid, candidates in qids_to_ranked_candidate_passages.items():
        duplicate_pids = {pid for pid, count in Counter(candidates).items() if count > 1}
        duplicate_pids -= {0}
        if duplicate_pids:
            message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
                qid=qid, pid=list(duplicate_pids)[0])
            allowed = False

    return allowed, message
|
110 |
+
|
111 |
+
|
112 |
+
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Compute per-query hit@k indicators for the standard cutoffs.

    Args:
        qids_to_relevant_passageids (dict): query -> relevant passage ids,
            as read by load_reference / load_reference_from_stream.
        qids_to_ranked_candidate_passages (dict): query -> ranked candidate pids.

    Returns:
        dict: cutoff k -> list of 0/1 indicators, one per scored query, where
        1 means a relevant passage appeared within the top k candidates.
        (These per-query samples feed the significance test in main().)

    Raises:
        IOError: if no query id is shared between the two inputs.
    """
    topk = [5, 10, 20, 50, 100, 200, 500, 1000]
    accuracy = {k: [] for k in topk}
    max_rank = max(topk)

    NOT_FOUND = 10 ** 9  # sentinel rank: no relevant passage retrieved
    matched_any = False
    for qid, candidate_pids in qids_to_ranked_candidate_passages.items():
        if qid not in qids_to_relevant_passageids:
            continue
        matched_any = True
        # Set membership keeps the inner scan O(1) per candidate.
        target_pids = set(qids_to_relevant_passageids[qid])
        rank = NOT_FOUND
        for i in range(max_rank):
            if candidate_pids[i] in target_pids:
                rank = i + 1
                break
        for k in topk:
            accuracy[k].append(1 if rank <= k else 0)

    if not matched_any:
        raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")

    return accuracy
|
143 |
+
|
144 |
+
|
145 |
+
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
    """Compute evaluation metrics from a reference file and a candidate file.

    Args:
        path_to_reference (str): path to the reference (qrels) file. Lines
            have the format "QUERYID\tPASSAGEID", where PASSAGEID is a
            relevant passage for the query; a QUERYID can repeat on
            different lines with different PASSAGEIDs.
        path_to_candidate (str): path to the candidate run file. Lines have
            the format "QUERYID\tPASSAGEID\tRANK", ranked in order of
            relevance.
        perform_checks (bool): when True, run the duplicate-passage quality
            checks and print any resulting problem message.

    Returns:
        dict: cutoff k -> list of per-query 0/1 hit indicators.
    """
    reference = load_reference(path_to_reference)
    candidates = load_candidate(path_to_candidate)

    if perform_checks:
        _, message = quality_checks_qids(reference, candidates)
        if message != '':
            print(message)

    return compute_metrics(reference, candidates)
|
169 |
+
|
170 |
+
|
171 |
+
def main():
    """Command line:
    python msmarco_eval_ranking.py <path to reference> <path_to_candidate_file>
    """
    import scipy.stats as stats

    topk = [5, 10, 20, 50, 100, 200, 500, 1000]
    path_to_reference = "marco/qrels.dev.tsv"
    baseline_run = "InfoCSE_ICT.tsv.marco"
    baseline_scores = compute_metrics_from_files(path_to_reference, baseline_run)

    for method in ["SimCSE", "ConSERT", "MirrorBERT", "ICT", "CPC", "DeCLUTR", "CONPONO"]:
        comparison_run = "{}.tsv.marco".format(method)
        print(comparison_run)
        comparison_scores = compute_metrics_from_files(path_to_reference, comparison_run)
        for k in topk:
            # Two-sample t-test on the hit@k samples; halving the p-value
            # gives the one-sided comparison.
            stat_val, p_val = stats.ttest_ind(baseline_scores[k], comparison_scores[k])
            print(str(k) + ': ' + str(p_val / 2))


if __name__ == '__main__':
    main()
|