Update app.py
app.py
CHANGED
@@ -3,21 +3,24 @@ from transformers import pipeline
 
 ner = pipeline('ner')
 
-def
-
+def merge_tokens(tokens):
+    merged_tokens = []
     for token in tokens:
-        if
-
+        if merged_tokens and token['entity'].startswith('I-') and merged_tokens[-1]['entity'].endswith(token['entity'][2:]):
+            # If current token continues the entity of the last one, merge them
+            last_token = merged_tokens[-1]
             last_token['word'] += token['word'].replace('##', '')
             last_token['end'] = token['end']
-            last_token['score'] = (last_token['score'] + token[score]) / 2
+            last_token['score'] = (last_token['score'] + token['score']) / 2
         else:
-
-
+            # Otherwise, add the token to the list
+            merged_tokens.append(token)
+
+    return merged_tokens
 
 def named(input):
     output = ner(input)
-    merged_word =
+    merged_word = merge_tokens(output)
     return {'text': input, 'entities': merged_word}
 
 a = gr.Interface(fn=named,
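
For reference, a minimal illustrative sketch of what the new merge_tokens helper does to raw pipeline('ner') output: WordPiece subword tokens (prefixed with '##') that continue the previous token's entity are folded into a single span, and their scores are averaged. The token dicts below are hand-written examples, not output captured from this Space, and the import assumes the Space's app.py is importable (importing it also loads the NER pipeline).

# Illustrative tokens shaped like transformers pipeline('ner') output for a
# WordPiece model; the values are made up for this example.
from app import merge_tokens  # assumes this Space's app.py is on the path

tokens = [
    {'entity': 'I-PER', 'score': 0.998, 'word': 'Wolf',   'start': 0, 'end': 4},
    {'entity': 'I-PER', 'score': 0.990, 'word': '##gang', 'start': 4, 'end': 8},
]

merged = merge_tokens(tokens)
print(merged)
# [{'entity': 'I-PER', 'score': 0.994, 'word': 'Wolfgang', 'start': 0, 'end': 8}]

The merged span keeps the first piece's start and the last piece's end, so the {'text': input, 'entities': merged_word} dict returned by named() lines up with the character offsets Gradio highlights, presumably via a HighlightedText output on the (truncated) gr.Interface call.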