Update README.md
README.md
@@ -124,4 +124,47 @@ for en in enlaces:
texto = texto + sinIm
with open('./plenaInclusionEspaña/soloFacil/' + nombre + '.txt', 'w') as f:
    f.write(texto)
```

Once we have the texts, we need to clean them and remove all the HTML code.
The first step is to identify the line breaks, so we replace `<p>`, `</p>` and `<br/>` with `"\n"`.
Then we keep the contents of each container, using the `div` elements.
In the next step we delete the videos and images in order to keep only the text.
Then we keep only the visible text of each link, instead of its URL.
Once all of this is done, we delete the remaining HTML code and write the result to another txt file (see the toy sketch just below).
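
To make these steps more concrete, here is a minimal sketch of the same replacements applied to a made-up HTML snippet. The input string and the example URL are invented for illustration only; they are not taken from the downloaded pages.

```
from bs4 import BeautifulSoup

# Made-up input, only for illustration.
texto = '<p>Hola <a href="https://ejemplo.org">enlace</a></p><div dir="ltr">Texto del div</div>'

# Line breaks: paragraph and break tags become newlines
texto = texto.replace('<p>', '\n').replace('</p>', '\n').replace('<br/>', '\n')

# Containers: keep only the text inside each div
soup = BeautifulSoup(texto, 'html.parser')
for div in soup.find_all('div', dir=True):
    texto = texto.replace(str(div), div.get_text())

# Links: keep only the visible text of each anchor
soup = BeautifulSoup(texto, 'html.parser')
for a in soup.find_all('a', href=True):
    texto = texto.replace(str(a), a.get_text())

# Anything still left as HTML is stripped at the end
print(BeautifulSoup(texto, 'html.parser').get_text())  # prints "Hola enlace" and "Texto del div" on separate lines
```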

Finally, the texts obtained from Plena inclusión España contain, in their original (non easy-read) version, the sentence "Este contenido NO está adaptado a Lectura Fácil", so we keep only the text that comes after it. We can see the code below:

```
import os
from bs4 import BeautifulSoup

def limpiarHTML(directorio):
    for carpeta, directorios, ficheros in os.walk(directorio):
        for fichero in ficheros:
            if fichero.endswith('txt') and fichero[-14:] != 'checkpoint.txt':
                # Files are read from the top-level folder (directorio is expected to end in '/')
                with open(directorio + fichero) as contenido:
                    texto = contenido.read()
                texto = texto.replace('<p>', '\n').replace('</p>', '\n').replace('<br/>', '\n')  # Paragraphs
                soup = BeautifulSoup(texto, 'html.parser')
                for div in soup.find_all('div', dir=True):  # Containers
                    texto = texto.replace(str(div), str(div.get_text()))
                soup = BeautifulSoup(texto, 'html.parser')
                for frame in soup.find_all('iframe'):  # Images and videos
                    texto = texto.replace(str(frame), str(frame['src']))
                soup = BeautifulSoup(texto, 'html.parser')
                for a in soup.find_all('a', href=True):  # Links
                    texto = texto.replace(str(a), str(a.get_text()))
                soup = BeautifulSoup(texto, 'html.parser')
                texto = soup.get_text()
                with open('./limpios/' + directorio[2:] + fichero, 'w') as f:
                    f.write(texto[1:-1])  # because the brackets from the div were being left over

                # The texts from Plena inclusión España have, in their complex-reading version, the same line.
                # They all have it on the third line, so once the text has been written the second line is
                # checked and, if it is there, everything before it is removed.

                with open('./limpios/' + directorio[2:] + fichero) as contenido2:
                    lines = contenido2.readlines()
                if len(lines) > 1:
                    if lines[1] == " Este contenido NO está adaptado a Lectura Fácil\n":
                        with open('./limpios/' + directorio[2:] + fichero, 'w') as f:
                            for i in range(len(lines[2:])):
                                f.write(str(lines[i + 2]))
```
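
The README does not show how `limpiarHTML` is invoked, so the call below is a hedged usage sketch: the input folder is the one scraped above, and creating the `./limpios/` output folder beforehand is an assumption, since the function writes there without creating it.

```
import os

# Hypothetical invocation: clean the easy-read texts scraped earlier.
entrada = './plenaInclusionEspaña/soloFacil/'

# The function writes to './limpios/' + directorio[2:] + fichero, so that folder must
# already exist; creating it here is an assumption, not part of the original script.
os.makedirs('./limpios/' + entrada[2:], exist_ok=True)

limpiarHTML(entrada)
```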