import os

# Ensure TensorFlow is available at runtime.
os.system('pip install tensorflow')

import json
import numpy as np
import gradio as gr
import tensorflow as tf
from tensorflow import keras
from huggingface_hub import from_pretrained_keras

class CustomNonPaddingTokenLoss(keras.losses.Loss):
    """Sparse categorical cross-entropy that ignores padding tokens (label 0)."""

    def __init__(self, name="custom_ner_loss"):
        super().__init__(name=name)

    def call(self, y_true, y_pred):
        loss_fn = keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=keras.losses.Reduction.NONE
        )
        loss = loss_fn(y_true, y_pred)
        # Mask out padding positions (label 0) and average over real tokens only.
        mask = tf.cast((y_true > 0), dtype=tf.float32)
        loss = loss * mask
        return tf.reduce_sum(loss) / tf.reduce_sum(mask)
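
# Minimal sketch of the masking behaviour (illustrative only, not executed by the app;
# the shapes and tag count below are assumptions for the example):
#
#   loss_fn = CustomNonPaddingTokenLoss()
#   y_true = tf.constant([[2, 1, 0, 0]])        # label 0 marks padding positions
#   y_pred = tf.random.uniform((1, 4, 10))      # logits over a hypothetical 10 tags
#   loss_fn(y_true, y_pred)                     # averaged over the 2 non-padding tokens only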

def lowercase_and_convert_to_ids(tokens):
    # Lowercase the tokens and map them to vocabulary ids via the StringLookup
    # layer (`lookup_layer` is built at module level below, before launch).
    tokens = tf.strings.lower(tokens)
    return lookup_layer(tokens)


def tokenize_and_convert_to_ids(text):
    # Split on whitespace; the CoNLL2003 example sentences are already space-tokenized.
    tokens = text.split()
    return lowercase_and_convert_to_ids(tokens)
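
# Hedged usage sketch (not executed here): given the StringLookup layer built below,
# tokenize_and_convert_to_ids maps a space-separated sentence to a 1-D tensor of
# vocabulary ids, with out-of-vocabulary words falling back to the layer's OOV index.
#
#   ids = tokenize_and_convert_to_ids("EU rejects German call to boycott British lamb .")
#   # ids is a tf.Tensor of shape (9,) holding integer token ids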


def ner_tagging(text_1):
    # Mapping from label ids to NER tag names.
    with open('mapping.json', 'r') as f:
        mapping = json.load(f)

    # Load the fine-tuned NER model from the Hugging Face Hub
    # (reloaded on every request in this demo).
    ner_model = from_pretrained_keras(
        "keras-io/ner-with-transformers",
        custom_objects={'CustomNonPaddingTokenLoss': CustomNonPaddingTokenLoss},
        compile=False,
    )

    # Convert the sentence to a batch of token ids and predict tag logits.
    sample_input = tokenize_and_convert_to_ids(text_1)
    sample_input = tf.reshape(sample_input, shape=[1, -1])
    output = ner_model.predict(sample_input)
    prediction = np.argmax(output, axis=-1)[0]
    prediction = [mapping[str(i)] for i in prediction]

    # Pair each token with its predicted tag; "O" (outside any entity) is left unhighlighted.
    tokens = text_1.split()
    output = []
    for w in range(len(tokens)):
        if prediction[w] != "O":
            output.extend([(tokens[w], prediction[w]), (" ", None)])
        else:
            output.extend([(tokens[w], None), (" ", None)])

    return output
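
# Sketch of the return format consumed by Gradio's highlighted-text output (the exact
# tags depend on the model; the ones below are only an illustration):
#
#   ner_tagging("EU rejects German call")
#   # -> [("EU", "B-ORG"), (" ", None), ("rejects", None), (" ", None),
#   #     ("German", "B-MISC"), (" ", None), ("call", None), (" ", None)]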

# Gradio input textbox; the output uses the 'highlight' (highlighted text) component.
text_1 = gr.inputs.Textbox(lines=5)

# Vocabulary saved with the model; used to map tokens to ids at inference time.
with open("vocab.json", 'r') as f:
    vocab = json.load(f)

lookup_layer = keras.layers.StringLookup(vocabulary=vocab['tokens'])

iface = gr.Interface(
    ner_tagging,
    inputs=text_1,
    outputs=['highlight'],
    examples=[['EU rejects German call to boycott British lamb .'],
              ["He said further scientific study was required and if it was found that action was needed it should be taken by the European Union ."]],
    title="Named Entity Recognition with Transformers",
    description="Named Entity Recognition with Transformers on CoNLL2003 Dataset",
    article="Author: <a href=\"https://huggingface.co/reichenbach\">Rishav Chandra Varma</a>",
)

iface.launch()