AIdeaText committed on
Commit
13d91b4
1 Parent(s): 17ce1af

Create morpho_analysis.py

modules/text_analysis/morpho_analysis.py ADDED
@@ -0,0 +1,191 @@
+# /modules/text_analysis/morpho_analysis.py
+import spacy
+from collections import Counter
+from spacy import displacy
+import re
+
+# Define colors for grammatical categories
+POS_COLORS = {
+    'ADJ': '#FFA07A',    # Light Salmon
+    'ADP': '#98FB98',    # Pale Green
+    'ADV': '#87CEFA',    # Light Sky Blue
+    'AUX': '#DDA0DD',    # Plum
+    'CCONJ': '#F0E68C',  # Khaki
+    'DET': '#FFB6C1',    # Light Pink
+    'INTJ': '#FF6347',   # Tomato
+    'NOUN': '#90EE90',   # Light Green
+    'NUM': '#FAFAD2',    # Light Goldenrod Yellow
+    'PART': '#D3D3D3',   # Light Gray
+    'PRON': '#FFA500',   # Orange
+    'PROPN': '#20B2AA',  # Light Sea Green
+    'SCONJ': '#DEB887',  # Burlywood
+    'SYM': '#7B68EE',    # Medium Slate Blue
+    'VERB': '#FF69B4',   # Hot Pink
+    'X': '#A9A9A9',      # Dark Gray
+}
+
+POS_TRANSLATIONS = {
+    'es': {
+        'ADJ': 'Adjetivo',
+        'ADP': 'Adposición',
+        'ADV': 'Adverbio',
+        'AUX': 'Auxiliar',
+        'CCONJ': 'Conjunción Coordinante',
+        'DET': 'Determinante',
+        'INTJ': 'Interjección',
+        'NOUN': 'Sustantivo',
+        'NUM': 'Número',
+        'PART': 'Partícula',
+        'PRON': 'Pronombre',
+        'PROPN': 'Nombre Propio',
+        'SCONJ': 'Conjunción Subordinante',
+        'SYM': 'Símbolo',
+        'VERB': 'Verbo',
+        'X': 'Otro',
+    },
+    'en': {
+        'ADJ': 'Adjective',
+        'ADP': 'Adposition',
+        'ADV': 'Adverb',
+        'AUX': 'Auxiliary',
+        'CCONJ': 'Coordinating Conjunction',
+        'DET': 'Determiner',
+        'INTJ': 'Interjection',
+        'NOUN': 'Noun',
+        'NUM': 'Number',
+        'PART': 'Particle',
+        'PRON': 'Pronoun',
+        'PROPN': 'Proper Noun',
+        'SCONJ': 'Subordinating Conjunction',
+        'SYM': 'Symbol',
+        'VERB': 'Verb',
+        'X': 'Other',
+    },
+    'fr': {
+        'ADJ': 'Adjectif',
+        'ADP': 'Adposition',
+        'ADV': 'Adverbe',
+        'AUX': 'Auxiliaire',
+        'CCONJ': 'Conjonction de Coordination',
+        'DET': 'Déterminant',
+        'INTJ': 'Interjection',
+        'NOUN': 'Nom',
+        'NUM': 'Nombre',
+        'PART': 'Particule',
+        'PRON': 'Pronom',
+        'PROPN': 'Nom Propre',
+        'SCONJ': 'Conjonction de Subordination',
+        'SYM': 'Symbole',
+        'VERB': 'Verbe',
+        'X': 'Autre',
+    }
+}
+
+#############################################################################################
+def get_repeated_words_colors(doc):
+    """Map each repeated (non-punctuation) word to the color of its POS category."""
+    word_counts = Counter(token.text.lower() for token in doc if token.pos_ != 'PUNCT')
+    repeated_words = {word: count for word, count in word_counts.items() if count > 1}
+
+    word_colors = {}
+    for token in doc:
+        if token.text.lower() in repeated_words:
+            word_colors[token.text.lower()] = POS_COLORS.get(token.pos_, '#FFFFFF')
+
+    return word_colors
+
+######################################################################################################
+def highlight_repeated_words(doc, word_colors):
+    """Wrap every repeated word in a colored <span> tag and return the text as HTML."""
+    highlighted_text = []
+    for token in doc:
+        if token.text.lower() in word_colors:
+            color = word_colors[token.text.lower()]
+            highlighted_text.append(f'<span style="background-color: {color};">{token.text}</span>')
+        else:
+            highlighted_text.append(token.text)
+    return ' '.join(highlighted_text)
+
+#################################################################################################
+def generate_arc_diagram(doc, lang_code):
+    """Render a compact displaCy dependency arc diagram (HTML/SVG) for each sentence.
+
+    NOTE: lang_code is currently unused.
+    """
+    sentences = list(doc.sents)
+    arc_diagrams = []
+    for sent in sentences:
+        html = displacy.render(sent, style="dep", options={"distance": 100})
+        # Shrink the rendered SVG and pull the arcs up to make the diagram more compact
+        html = html.replace('height="375"', 'height="200"')
+        html = re.sub(r'<svg[^>]*>', lambda m: m.group(0).replace('height="450"', 'height="300"'), html)
+        html = re.sub(r'<g [^>]*transform="translate\((\d+),(\d+)\)"', lambda m: f'<g transform="translate({m.group(1)},50)"', html)
+        arc_diagrams.append(html)
+    return arc_diagrams
+
+#################################################################################################
+def get_detailed_pos_analysis(doc):
+    """
+    Perform a detailed analysis of the part-of-speech (POS) categories in the text.
+    """
+    pos_counts = Counter(token.pos_ for token in doc)
+    total_tokens = len(doc)
+    pos_analysis = []
+    for pos, count in pos_counts.items():
+        percentage = (count / total_tokens) * 100
+        pos_analysis.append({
+            'pos': pos,
+            'count': count,
+            'percentage': round(percentage, 2),
+            'examples': [token.text for token in doc if token.pos_ == pos][:5]  # First 5 examples
+        })
+    return sorted(pos_analysis, key=lambda x: x['count'], reverse=True)
+
+#################################################################################################
+def get_morphological_analysis(doc):
+    """
+    Perform a detailed morphological analysis of the words in the text.
+    """
+    morphology_analysis = []
+    for token in doc:
+        if token.pos_ in ['NOUN', 'VERB', 'ADJ', 'ADV']:  # Focus on the main categories
+            morphology_analysis.append({
+                'text': token.text,
+                'lemma': token.lemma_,
+                'pos': token.pos_,
+                'tag': token.tag_,
+                'dep': token.dep_,
+                'shape': token.shape_,
+                'is_alpha': token.is_alpha,
+                'is_stop': token.is_stop,
+                'morph': str(token.morph)
+            })
+    return morphology_analysis
+
+#################################################################################################
+def get_sentence_structure_analysis(doc):
+    """
+    Analyze the structure of the sentences in the text.
+    """
+    sentence_analysis = []
+    for sent in doc.sents:
+        sentence_analysis.append({
+            'text': sent.text,
+            'root': sent.root.text,
+            'root_pos': sent.root.pos_,
+            'num_tokens': len(sent),
+            'num_words': len([token for token in sent if token.is_alpha]),
+            'subjects': [token.text for token in sent if "subj" in token.dep_],
+            'objects': [token.text for token in sent if "obj" in token.dep_],
+            'verbs': [token.text for token in sent if token.pos_ == "VERB"]
+        })
+    return sentence_analysis
+
+#################################################################################################
+def perform_advanced_morphosyntactic_analysis(text, nlp):
+    """
+    Perform an advanced morphosyntactic analysis of the text.
+    """
+    doc = nlp(text)
+    return {
+        'pos_analysis': get_detailed_pos_analysis(doc),
+        'morphological_analysis': get_morphological_analysis(doc),
+        'sentence_structure': get_sentence_structure_analysis(doc),
+        'arc_diagram': generate_arc_diagram(doc, nlp.lang)
+    }
+
+# At the end of the morpho_analysis.py file
+__all__ = ['get_repeated_words_colors', 'highlight_repeated_words', 'generate_arc_diagram', 'perform_advanced_morphosyntactic_analysis', 'POS_COLORS', 'POS_TRANSLATIONS']
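
For reference, a minimal usage sketch (not part of the commit): it assumes spaCy and a Spanish pipeline such as es_core_news_sm are installed, and that the module is importable as modules.text_analysis.morpho_analysis; the model name and sample text are illustrative only.

import spacy
from modules.text_analysis.morpho_analysis import (
    perform_advanced_morphosyntactic_analysis,
    get_repeated_words_colors,
    highlight_repeated_words,
)

nlp = spacy.load("es_core_news_sm")  # assumed model; any spaCy pipeline with a parser works
text = "El gato negro duerme y el gato blanco juega."

# Full analysis: POS statistics, morphology, sentence structure and arc diagrams
results = perform_advanced_morphosyntactic_analysis(text, nlp)
for entry in results['pos_analysis']:
    print(entry['pos'], entry['count'], f"{entry['percentage']}%")

# Repeated-word highlighting returns an HTML string ready for display
doc = nlp(text)
colors = get_repeated_words_colors(doc)
print(highlight_repeated_words(doc, colors))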