# Code adapted from https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/text/c4_utils.py
# that has the following license
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import gzip
import hashlib
import heapq
import io
import re
import threading
import nltk
# from absl import logging
import tensorflow.compat.v2 as tf
# import tensorflow_datasets.public_api as tfds
# WET file constants
_PAGE_DELIMITER = "WARC/1.0"
_URL_KEY = "WARC-Target-URI:"
_URL_DATE = "WARC-Date:"
_CONTENT_TYPE = "Content-Type:"
_CONTENT_LEN = "Content-Length:"
_METADATA_PREFIXES = ("WARC", "CONTENT-", "Content-")
# Filters
_MIN_WORDS_PER_LINE = 3
_MIN_NUM_SENTENCES = 5
_MAX_WORD_LENGTH = 1000
_END_MARKS = (".", "?", "!", "\"")
_ELLIPSIS = "..."
_POLICY_SUBSTRINGS = [
    "terms of use", "privacy policy", "cookie policy", "uses cookies",
    "use of cookies", "use cookies", "elementen ontbreken", "deze printversie"
]
# Memoized sentence tokenizer.
_SENTENCE_TOKENIZER = None
UNKNOWN_LANGUAGE = "und"
citation_regex = re.compile(r"\[\d*\]|\[edit\]|\[citation needed\]")
from .badwords_ennl import badword_list
badwords_regex = re.compile(r"(?:\W|^)({})(?:\W|$)".format("|".join(badword_list)))
def badwords_filter(text):
    """Returns False if the text matches any blocklisted word, True otherwise."""
    badwords_found = badwords_regex.search(text.lower())
    if badwords_found is not None:
        return False
    return True
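# Usage note (editor sketch, illustrative strings only): badwords_filter acts as
# a page-level gate, and the regex above matches whole tokens rather than
# substrings. For example, if badword_list contained the word "foo":
#   badwords_filter("A page about foo and other topics.")   # -> False (dropped)
#   badwords_filter("A page about food and other topics.")  # -> True (kept)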
def clean_text(text,
               citation_regex=citation_regex,
               min_words_per_line=_MIN_WORDS_PER_LINE,
               min_num_sentences=_MIN_NUM_SENTENCES,
               max_word_length=_MAX_WORD_LENGTH):
    """Cleans a CommonCrawl page, returning None if it should be skipped.

    Cleaning removes lines with no end marks or with too few words. After line
    filtering, pages are filtered out if they have too few sentences based on a
    simple count of end marks. Filter statistics are recorded via the
    module-level counter_inc_fn.

    Args:
      text: text of the page.
      citation_regex: Regex used to find and strip Wikipedia-like citations.
      min_words_per_line: int, the minimum number of words a line needs to not
        be removed.
      min_num_sentences: int, the minimum number of sentences a page needs to
        not be skipped.
      max_word_length: int, the maximum number of characters allowed in a word.
        Lines containing a word with too many characters are removed.

    Returns:
      The cleaned text of the page, or None if the page was filtered out.
    """
    lines = text.splitlines()
    valid_lines = []
    num_sentences = 0

    if not badwords_filter(text):
        counter_inc_fn("badword-filtered: not passed")
        return

    def line_has_too_long_word(line):
        for word in line.split():
            if len(word) > max_word_length:
                return True
        return False

    for line in lines:
        line = line.strip()
        if line_has_too_long_word(line):
            counter_inc_fn("line-filtered:too_long_word")
            continue
        line = citation_regex.sub("", line)
        if not line.endswith(_END_MARKS) or line.endswith(_ELLIPSIS):
            counter_inc_fn("line-filtered:no_endmark")
            continue
        if len(line.split()) < min_words_per_line:
            counter_inc_fn("line-filtered:too_short")
            continue
        line_lower = line.lower()
        # Remove documents which contain lorem ipsum
        if "lorem ipsum" in line_lower:
            counter_inc_fn("filtered:loremipsum")
            return
        # Remove "javascript must be enabled" notices
        if "javascript" in line_lower:
            counter_inc_fn("line-filtered:javascript")
            continue
        # Remove docs which probably contain javascript code
        if "{" in line:
            counter_inc_fn("filtered:squigglybracket")
            return
        # Remove policy lines
        if any(p in line_lower for p in _POLICY_SUBSTRINGS):
            counter_inc_fn("line-filtered:policy")
            continue
        num_sentences += len(_get_sentences(line))
        valid_lines.append(line)
        counter_inc_fn("line-passed")

    if num_sentences < min_num_sentences:
        counter_inc_fn("filtered:too_few_sentences")
        return

    counter_inc_fn("passed")
    result = "\n".join(valid_lines).strip()
    return result
def _get_sentences(text):
    global _SENTENCE_TOKENIZER
    if not _SENTENCE_TOKENIZER:
        _SENTENCE_TOKENIZER = _load_sentence_tokenizer()
    return list(_SENTENCE_TOKENIZER.tokenize(tf.compat.as_text(text)))
_nltk_lock = threading.Lock()


def _load_sentence_tokenizer():
    """Returns a sentence tokenization function."""
    # Lock to avoid a race-condition in the creation of the download directory.
    with _nltk_lock:
        nltk.download("punkt")
        return nltk.data.load("nltk:tokenizers/punkt/english.pickle")
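# Note (editor sketch): nltk.download("punkt") is effectively a no-op once the
# model is already present, so the lock above only matters on the first call in
# a process. For offline runs, the model can be pre-fetched with
# `python -m nltk.downloader punkt` (standard NLTK data layout assumed).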
# Simple in-memory stand-in for a Beam metrics counter: maps a counter name to
# the number of times it has been incremented.
count_dict = dict()


def counter_inc_fn(what):
    if what in count_dict:
        count_dict[what] += 1
    else:
        count_dict[what] = 1
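# --- Usage sketch (added for illustration; not part of the upstream c4_utils
# code). Run as `python -m <package>.<this_module>` so the relative import of
# badwords_ennl resolves; the sample page below is synthetic.
if __name__ == "__main__":
    sample_page = "\n".join([
        "This is the first sentence of a sample page. It has enough words.",
        "Here is another complete sentence that ends with a period.",
        "Too short.",                                    # dropped: too few words
        "Please enable javascript to view this page.",   # dropped: javascript notice
        "A third full sentence follows here. And a fourth one ends it. A fifth too.",
    ])
    cleaned = clean_text(sample_page)
    print("cleaned text:")
    print(cleaned)
    print("counters:", count_dict)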