harshvardhan96 committed on
Commit
4a32822
1 Parent(s): 998eb62

created app.py

Files changed (1)
  1. app.py +76 -0
app.py ADDED
@@ -0,0 +1,76 @@
import pickle

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()


# loading the saved training parameters (checkpoint path)
def load_params():
    with open('/kaggle/input/latest-data/params.p', mode='rb') as in_file:
        return pickle.load(in_file)


# loading the preprocessed data and vocabularies
def load_preprocess():
    with open('/kaggle/input/latest-data/preprocess.p', mode='rb') as in_file:
        return pickle.load(in_file)


# getting the source and target vocabularies
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = load_preprocess()

load_path = load_params()
print("Loaded path:", load_path)
print(type(load_path))

batch_size = 30


# converting a word into a sequence of character ids
def word_to_seq(word, vocab_to_int):
    results = []
    for ch in word:
        if ch in vocab_to_int:
            results.append(vocab_to_int[ch])
        else:
            results.append(vocab_to_int['<UNK>'])
    return results


# taking user input for prediction
print("\nEnter word to be transliterated:")
transliterate_word = input().lower()

transliterate_word = word_to_seq(transliterate_word, source_vocab_to_int)

# initialising the graph that will hold the restored model
loaded_graph = tf.Graph()

# running inference inside a session bound to the loaded graph
with tf.Session(graph=loaded_graph) as sess:
    # load the saved model and restore its weights
    loader = tf.train.import_meta_graph("/kaggle/input/latest-data/dev.meta")
    loader.restore(sess, "/kaggle/input/latest-data/dev")

    # fetching the named placeholders and the output tensor from the loaded graph
    input_data = loaded_graph.get_tensor_by_name('input:0')
    logits = loaded_graph.get_tensor_by_name('predictions:0')
    target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
    keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')

    # transliterating the given word (the single input is tiled to fill a batch)
    transliterate_logits = sess.run(logits, {input_data: [transliterate_word] * batch_size,
                                             target_sequence_length: [len(transliterate_word)] * batch_size,
                                             keep_prob: 1.0})[0]

    print('Input')
    print('  Word Ids:     {}'.format([i for i in transliterate_word]))
    print('  English Word: {}'.format([source_int_to_vocab[i] for i in transliterate_word]))

    print('\nPrediction')
    print('  Word Ids:     {}'.format([i for i in transliterate_logits]))

    # showing the output: joining predicted characters until the end-of-sequence token
    output = ""
    for i in transliterate_logits:
        if target_int_to_vocab[i] != '<EOS>':
            output = output + target_int_to_vocab[i]
    print('  Hindi Word: {}'.format(output))
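Note: app.py above reads one word from stdin, runs a single prediction, and exits. A natural follow-up is to restore the graph once and expose the inference step as a reusable function, for example behind a Gradio interface, which is the usual entry point for an app.py on Hugging Face Spaces. The sketch below is illustrative only and not part of this commit; it assumes the same checkpoint paths, pickle layout, and tensor names as the code above, and the Gradio wiring is an assumption.

```python
import pickle

import gradio as gr  # assumption: UI wrapper, not part of this commit
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

CHECKPOINT = "/kaggle/input/latest-data/dev"          # same checkpoint as above
PREPROCESS = "/kaggle/input/latest-data/preprocess.p"  # same pickle as above
BATCH_SIZE = 30

# same pickle layout as load_preprocess() above
with open(PREPROCESS, mode='rb') as f:
    _, (source_vocab_to_int, _), (_, target_int_to_vocab) = pickle.load(f)

# restore the graph once at start-up instead of once per request
graph = tf.Graph()
sess = tf.Session(graph=graph)
with graph.as_default():
    loader = tf.train.import_meta_graph(CHECKPOINT + ".meta")
    loader.restore(sess, CHECKPOINT)

input_data = graph.get_tensor_by_name('input:0')
logits = graph.get_tensor_by_name('predictions:0')
target_sequence_length = graph.get_tensor_by_name('target_sequence_length:0')
keep_prob = graph.get_tensor_by_name('keep_prob:0')


def transliterate(word: str) -> str:
    """Transliterate one English word to Hindi characters."""
    seq = [source_vocab_to_int.get(ch, source_vocab_to_int['<UNK>'])
           for ch in word.lower()]
    preds = sess.run(logits, {input_data: [seq] * BATCH_SIZE,
                              target_sequence_length: [len(seq)] * BATCH_SIZE,
                              keep_prob: 1.0})[0]
    # drop the end-of-sequence token and join the predicted characters
    return "".join(target_int_to_vocab[i] for i in preds
                   if target_int_to_vocab[i] != '<EOS>')


# hypothetical Space UI; the console flow in app.py works without it
gr.Interface(fn=transliterate, inputs="text", outputs="text").launch()
```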