Update cer.py
cer.py CHANGED
@@ -1,4 +1,4 @@
-# coding=utf-8
+# coding=utf-8
 # Copyright 2021 The HuggingFace Datasets Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,12 +12,35 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""
+""" Character Error Ratio (CER) metric. """
 
-from jiwer import wer
+import jiwer
+import jiwer.transforms as tr
+from typing import List
 
 import datasets
 
+class SentencesToListOfCharacters(tr.AbstractTransform):
+    def process_string(self, s: str):
+        return list(s)
+
+    def process_list(self, inp: List[str]):
+        chars = []
+
+        for sentence in inp:
+            chars.extend(self.process_string(sentence))
+
+        return chars
+
+
+cer_transform = tr.Compose(
+    [
+        tr.RemoveMultipleSpaces(),
+        tr.Strip(),
+        SentencesToListOfCharacters(),
+    ]
+)
+
 
 _CITATION = """\
 @inproceedings{inproceedings,
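Taken together, the added transform chain collapses runs of whitespace, strips the ends, and then explodes every sentence into one flat list of characters, which is what lets jiwer's word-level alignment machinery operate on characters instead. A pure-Python stand-in for the chain behaves as sketched below; flatten_to_chars is an illustrative name, not something defined in this file.

from typing import List

def flatten_to_chars(sentences: List[str]) -> List[str]:
    # Approximates tr.RemoveMultipleSpaces() + tr.Strip(): collapse
    # repeated spaces and drop leading/trailing whitespace.
    chars: List[str] = []
    for s in sentences:
        normalized = " ".join(s.split())
        # SentencesToListOfCharacters: one list entry per character.
        chars.extend(normalized)
    return chars

print(flatten_to_chars(["hello   world ", "hi"]))
# ['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', 'h', 'i']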
@@ -30,15 +53,13 @@ _CITATION = """\
 """
 
 _DESCRIPTION = """\
-
-
-The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
+Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
 
-
+CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
 
-
+Character error rate can be computed as:
 
-
+CER = (S + D + I) / N = (S + D + I) / (S + D + C)
 
 where
 
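To read the formula on a concrete pair (an illustration, not an example from the file): with reference "hello" and prediction "hallo", character-level alignment gives one substitution (e -> a), no deletions, no insertions, and four correct characters, so

CER = (S + D + I) / N = (1 + 0 + 0) / (1 + 0 + 4) = 0.2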
@@ -48,25 +69,25 @@ I is the number of insertions,
 C is the number of correct words,
 N is the number of words in the reference (N=S+D+C).
 
-
-performance of the ASR system with a
+CER's output is always a number between 0 and 1. This value indicates the percentage of characters that were incorrectly predicted. The lower the value, the better the
+performance of the ASR system with a CER of 0 being a perfect score.
 """
 
 _KWARGS_DESCRIPTION = """
-Computes
+Computes CER score of transcribed segments against references.
 Args:
     references: list of references for each speech input.
     predictions: list of transcriptions to score.
 Returns:
-    (float): the
+    (float): the character error rate
 
 Examples:
 
     >>> predictions = ["this is the prediction", "there is an other sample"]
     >>> references = ["this is the reference", "there is another one"]
-    >>>
-    >>>
-    >>> print(
+    >>> cer = datasets.load_metric("cer")
+    >>> cer_score = cer.compute(predictions=predictions, references=references)
+    >>> print(cer_score)
     0.5
 """
 
@@ -87,16 +108,9 @@ class CER(datasets.Metric):
             codebase_urls=["https://github.com/jitsi/jiwer/"],
             reference_urls=[
                 "https://en.wikipedia.org/wiki/Word_error_rate",
+                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates#whitespace",
             ],
         )
 
     def _compute(self, predictions, references):
-        preds = [char for seq in predictions for char in list(seq)]
-        refs = [char for seq in references for char in list(seq)]
-        return wer(refs, preds)
-        """
-        wers = []
-        for pred, ref in zip(predictions, references):
-            wers.append(wer(list(ref), list(pred)))
-        return sum(wers) / len(wers)
-        """
+        return jiwer.wer(references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform)
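The rewritten _compute runs both the references and the predictions through cer_transform and then lets jiwer.wer align the two character sequences, so the returned score is the total number of character edits divided by the total number of reference characters across the whole corpus. A self-contained sketch of that computation, using only the standard library (levenshtein and char_error_rate are illustrative helpers, not the metric's API):

from typing import List, Sequence

def levenshtein(ref: Sequence[str], hyp: Sequence[str]) -> int:
    # Edit distance (substitutions + deletions + insertions) by
    # dynamic programming over two rows.
    prev = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, start=1):
        curr = [i]
        for j, h in enumerate(hyp, start=1):
            curr.append(min(
                prev[j] + 1,             # deletion
                curr[j - 1] + 1,         # insertion
                prev[j - 1] + (r != h),  # substitution (free if equal)
            ))
        prev = curr
    return prev[-1]

def char_error_rate(references: List[str], predictions: List[str]) -> float:
    def to_chars(sentences: List[str]) -> List[str]:
        # Same normalization as cer_transform: collapse spaces, strip,
        # then flatten everything into one corpus-level character list.
        return [c for s in sentences for c in " ".join(s.split())]

    ref = to_chars(references)
    hyp = to_chars(predictions)
    return levenshtein(ref, hyp) / len(ref)

print(char_error_rate(["hello"], ["hallo"]))  # 0.2

One consequence of delegating to jiwer this way: the metric is a single ratio over the concatenated corpus rather than a mean of per-sentence rates (the variant left commented out in the removed code), so longer references carry proportionally more weight.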