gowthambhat committed
Commit 8907d5b • 1 Parent(s): 55f9a6c

Update app.py

Files changed (1)
  1. app.py +8 -14
app.py CHANGED
@@ -1,9 +1,6 @@
 import tensorflow as tf
 from tensorflow import keras
 from tensorflow.keras import layers
-
-from huggingface_hub import from_pretrained_keras
-
 import numpy as np
 import gradio as gr
 
@@ -11,11 +8,9 @@ max_length = 5
 img_width = 200
 img_height = 50
 
-model = from_pretrained_keras("keras-io/ocr-for-captcha", compile=False)
-
-prediction_model = keras.models.Model(
-    model.get_layer(name="image").input, model.get_layer(name="dense2").output
-)
+# Load the TensorFlow SavedModel using TFSMLayer
+saved_model_path = "/home/user/.cache/huggingface/hub/models--keras-io--ocr-for-captcha/snapshots/1d695c4be3c72166292ff61c361d47c96f43cb7f"
+prediction_layer = keras.layers.TFSMLayer(saved_model_path, call_endpoint='serving_default')
 
 with open("vocab.txt", "r") as f:
     vocab = f.read().splitlines()
@@ -51,19 +46,18 @@ def classify_image(img_path):
     # dimension to correspond to the width of the image.
     img = tf.transpose(img, perm=[1, 0, 2])
     img = tf.expand_dims(img, axis=0)
-    preds = prediction_model.predict(img)
+    preds = prediction_layer(img)  # Use the TFSMLayer for prediction
     pred_text = decode_batch_predictions(preds)
     return pred_text[0]
-
+
 image = gr.inputs.Image(type='filepath')
 text = gr.outputs.Textbox()
 
-iface = gr.Interface(classify_image,image,text,
+iface = gr.Interface(classify_image, image, text,
     title="CGIP CAPTCHA RECOGNITION OCR",
-    description = "Keras Implementation of OCR model for reading captcha 🤖🦹🏻",
-    examples = ["dd764.png","3p4nn.png","ydd3g.png", "268g2.png", "36nx4.png", "3bnyf.png", "5p8fm.png", "8y6b3.png", "mnef5.png", "yxd7m.png",]
+    description="Keras Implementation of OCR model for reading captcha 🤖🦹🏻",
+    examples=["dd764.png", "3p4nn.png", "ydd3g.png", "268g2.png", "36nx4.png", "3bnyf.png", "5p8fm.png", "8y6b3.png", "mnef5.png", "yxd7m.png"]
 )
 
-
 iface.launch()
 
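Note: the new code hard-codes a snapshot directory under the local Hugging Face cache, which only exists on a machine where that exact revision has already been downloaded. A minimal sketch of an alternative, not part of this commit, assuming the huggingface_hub package (imported before this change) is still installed: resolve the SavedModel directory at runtime with snapshot_download and hand it to TFSMLayer. The revision hash below is the one that appears in the hard-coded path; everything else is illustrative.

# Sketch: resolve the SavedModel path at runtime instead of hard-coding the cache location.
# Assumes huggingface_hub is available; the revision pin reuses the snapshot hash from the diff.
from huggingface_hub import snapshot_download
from tensorflow import keras

saved_model_path = snapshot_download(
    repo_id="keras-io/ocr-for-captcha",
    revision="1d695c4be3c72166292ff61c361d47c96f43cb7f",
)

# Keras 3 exposes a TF SavedModel as an inference-only layer via TFSMLayer.
prediction_layer = keras.layers.TFSMLayer(saved_model_path, call_endpoint="serving_default")

# Depending on the exported signature, the serving_default endpoint may return a dict keyed
# by output name rather than a bare tensor; if so, unpack the single value before CTC decoding.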