# coding: utf-8
# Copyright (C) 2023, [Breezedeus](https://github.com/breezedeus).
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Ref: https://huggingface.co/spaces/hysts/Manga-OCR/blob/main/app.py
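#
# This script is the Gradio demo app for CnOCR. A minimal way to run it locally
# (assuming `cnocr[ort-cpu]`, `cnstd`, `gradio`, and `opencv-python` are installed,
# e.g. via `pip install "cnocr[ort-cpu]" gradio`) is simply `python app.py`;
# setting the environment variable `CACHE_EXAMPLES=1` pre-computes the example outputs.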
import os
import json
import functools

import gradio as gr
import cv2
import numpy as np
from cnstd.utils import pil_to_numpy, imsave

from cnocr import CnOcr, DET_AVAILABLE_MODELS, REC_AVAILABLE_MODELS
from cnocr.utils import set_logger, draw_ocr_results, download

logger = set_logger()
MODELS = {}


def plot_for_debugging(rotated_img, one_out, box_score_thresh, crop_ncols, prefix_fp):
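    """Debugging helper: save the cropped text boxes to `<prefix_fp>-crops.png` and the
    image with the detected boxes (score >= `box_score_thresh`) drawn on it to
    `<prefix_fp>-result.png`."""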
    import matplotlib.pyplot as plt
    import math

    rotated_img = rotated_img.copy()
    crops = [info['cropped_img'] for info in one_out]
    print('%d boxes are found' % len(crops))
    if len(crops) < 1:
        return
    ncols = crop_ncols
    nrows = math.ceil(len(crops) / ncols)
    fig, ax = plt.subplots(nrows=nrows, ncols=ncols)
    for i, axi in enumerate(ax.flat):
        if i >= len(crops):
            break
        axi.imshow(crops[i])
    crop_fp = '%s-crops.png' % prefix_fp
    plt.savefig(crop_fp)
    print('cropped results are saved to file %s' % crop_fp)

    for info in one_out:
        box, score = info.get('position'), info['score']
        if score < box_score_thresh:  # filter out low-score boxes
            continue
        if box is not None:
            box = box.astype(int).reshape(-1, 2)
            cv2.polylines(rotated_img, [box], True, color=(255, 0, 0), thickness=2)
    result_fp = '%s-result.png' % prefix_fp
    imsave(rotated_img, result_fp, normalized=False)
    print('box results are saved to file %s' % result_fp)


def get_ocr_model(det_model_name, rec_model_name, det_more_configs):
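    """Return a `CnOcr` instance for the given detection/recognition models and
    detection configs, reusing a cached instance when one already exists."""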
    global MODELS
    config_str = json.dumps(det_more_configs)
    # Build the cache key before `det_model_name` is split, so lookups and stores agree.
    key = (det_model_name, rec_model_name, config_str)
    if key in MODELS:
        return MODELS[key]

    det_model_name, det_model_backend = det_model_name.split('::')
    # rec_model_name, rec_model_backend = rec_model_name.split('::')
    rec_model_backend = 'onnx'
    model = CnOcr(
        det_model_name=det_model_name,
        det_model_backend=det_model_backend,
        rec_model_name=rec_model_name,
        rec_model_backend=rec_model_backend,
        det_more_configs=det_more_configs,
    )
    if len(MODELS) > 50:  # crude eviction: reset the cache once it grows too large
        MODELS = {}
    MODELS[key] = model

    return model


def visualize_naive_result(img, det_model_name, std_out, box_score_thresh):
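    """Build `[score, text]` rows for results without box positions (single-line mode
    or the `naive_det` detection model)."""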
    if len(std_out) < 1:
        # gr.Warning(f'未检测到文本!')
        return []

    img = pil_to_numpy(img).transpose((1, 2, 0)).astype(np.uint8)
    # plot_for_debugging(img, std_out, box_score_thresh, 2, './streamlit-app')

    # gr.Markdown('## Detection Result')
    # if det_model_name == 'naive_det':
    #     gr.Warning('⚠️ Warning: "naive_det" 检测模型不返回文本框位置!')
    # cols = st.columns([1, 7, 1])
    # cols[1].image('./streamlit-app-result.png')
    #
    # st.subheader('Recognition Result')
    # cols = st.columns([1, 7, 1])
    # cols[1].image('./streamlit-app-crops.png')

    return _visualize_ocr(std_out)


def _visualize_ocr(ocr_outs):
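    """Convert OCR outputs into `[score, text]` rows for the results dataframe."""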
    if len(ocr_outs) < 1:
        return []
    ocr_res = []
    for out in ocr_outs:
        # cropped_img = out['cropped_img']  # the detected (cropped) text box
        ocr_res.append([out['score'], out['text']])
    return ocr_res


def visualize_result(img, ocr_outs):
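    """Draw the OCR results onto the image (downloading the font file on first use) and
    return the path of the rendered result image."""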
    out_draw_fp = './streamlit-app-det-result.png'
    font_path = 'docs/fonts/simfang.ttf'
    if not os.path.exists(font_path):
        url = 'https://huggingface.co/datasets/breezedeus/cnocr-wx-qr-code/resolve/main/fonts/simfang.ttf'
        os.makedirs(os.path.dirname(font_path), exist_ok=True)
        download(url, path=font_path, overwrite=True)
    draw_ocr_results(img, ocr_outs, out_draw_fp, font_path)
    return out_draw_fp


def recognize(
    det_model_name,
    is_single_line,
    rec_model_name,
    rotated_bbox,
    use_angle_clf,
    new_size,
    box_score_thresh,
    min_box_size,
    image_file,
):
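    """Run OCR on the uploaded image and return the `gr.update(...)` values for the
    three output components: the result image, the `naive_det` warning, and the
    `[score, text]` dataframe."""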
    img = image_file.convert('RGB')
    det_more_configs = dict(rotated_bbox=rotated_bbox, use_angle_clf=use_angle_clf)
    ocr = get_ocr_model(det_model_name, rec_model_name, det_more_configs)

    if is_single_line:
        ocr_out = [ocr.ocr_for_single_line(np.array(img))]
    else:
        ocr_out = ocr.ocr(
            img,
            return_cropped_image=True,
            resized_shape=new_size,
            preserve_aspect_ratio=True,
            box_score_thresh=box_score_thresh,
            min_box_size=min_box_size,
        )

    det_model_name, det_model_backend = det_model_name.split('::')
    # The returned updates correspond to the `[out_image, naive_warn, out_texts]` outputs.
    if is_single_line or det_model_name == 'naive_det':
        out_texts = visualize_naive_result(
            img, det_model_name, ocr_out, box_score_thresh
        )
        if is_single_line:
            return [
                gr.update(visible=False),
                gr.update(visible=False),
                gr.update(value=out_texts, visible=True),
            ]
        return [
            gr.update(visible=False),
            gr.update(visible=True),
            gr.update(value=out_texts, visible=True),
        ]
    else:
        out_img_path = visualize_result(img, ocr_out)
        return [
            gr.update(value=out_img_path, visible=True),
            gr.update(visible=False),
            gr.update(visible=False),
        ]


def main():
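    """Build the Gradio UI (model and detection controls, image input, result views,
    examples) and launch the demo."""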
    det_models = list(DET_AVAILABLE_MODELS.all_models())
    det_models.append(('naive_det', 'onnx'))
    det_models.sort()
    det_models = [f'{m}::{b}' for m, b in det_models]

    all_models = list(REC_AVAILABLE_MODELS.all_models())
    all_models = [f'{m}' for m, b in all_models if b == 'onnx']
    cnocr_models = [name for name in all_models if 'densenet' in name]
    cnocr_models.sort()
    other_models = [name for name in all_models if 'densenet' not in name]
    other_models.sort()
    all_models = cnocr_models + other_models

    title = 'Python 开源中英 OCR 工具:'
    desc = (
        '<p style="text-align: center">详细说明参见:<a href="https://github.com/breezedeus/CnOCR" target="_blank">Github</a>;'
        '<a href="https://cnocr.readthedocs.io" target="_blank">在线文档</a>;'
        '欢迎加入 <a href="https://www.breezedeus.com/join-group" target="_blank">交流群</a>;'
        '作者:<a href="https://www.breezedeus.com" target="_blank">Breezedeus</a> ,'
        '<a href="https://github.com/breezedeus" target="_blank">Github</a> 。</p>'
    )
    def example_func(det_model_name, is_single_line, rec_model_name, use_angle_clf, image_file):
        # Wrapper used by `gr.Examples`: each example row only supplies these five
        # values, so the remaining `recognize` arguments are fixed here. A plain
        # `functools.partial(recognize, rotated_bbox=True, ...)` would collide with
        # the positional arguments when cached examples are evaluated.
        return recognize(
            det_model_name,
            is_single_line,
            rec_model_name,
            rotated_bbox=True,
            use_angle_clf=use_angle_clf,
            new_size=768,
            box_score_thresh=0.3,
            min_box_size=10,
            image_file=image_file,
        )
    # Each example row: [det_model_name, is_single_line, rec_model_name, use_angle_clf, image_file]
    examples = [
        [
            'ch_PP-OCRv3_det::onnx',
            True,
            'number-densenet_lite_136-fc',
            False,
            'docs/examples/card1-s.jpg',
        ],
        [
            'ch_PP-OCRv3_det::onnx',
            True,
            'number-densenet_lite_136-fc',
            False,
            'docs/examples/card2-s.jpg',
        ],
        [
            'ch_PP-OCRv3_det::onnx',
            True,
            'number-densenet_lite_136-fc',
            False,
            'docs/examples/cy1-s.jpg',
        ],
        [
            'ch_PP-OCRv3_det::onnx',
            False,
            'densenet_lite_136-gru',
            False,
            'docs/examples/huochepiao.jpeg',
        ],
        [
            'ch_PP-OCRv3_det::onnx',
            False,
            'densenet_lite_136-gru',
            False,
            'docs/examples/1_res.jpg',
        ],
        [
            'db_shufflenet_v2::pytorch',
            False,
            'en_number_mobile_v2.0',
            False,
            'docs/examples/en_book1.jpeg',
        ],
        [
            'db_shufflenet_v2::pytorch',
            False,
            'densenet_lite_136-gru',
            True,
            'docs/examples/beauty0.jpg',
        ],
    ]
    table_desc = """
<div align="center">
  <img src="https://www.notion.so/image/https%3A%2F%2Fprod-files-secure.s3.us-west-2.amazonaws.com%2F9341931a-53f0-48e1-b026-0f1ad17b457c%2Fc41e0b1d-4869-4e39-93db-631569e6a38d%2FUntitled.png?table=block&id=3d0819ca-2e1a-46a7-b6f3-b4cf89cd045c" width="120px"/>

[![Visitors](https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fbreezedeus%2FCnOCR-Demo&labelColor=%23697689&countColor=%23f5c791&style=flat&labelStyle=upper)](https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fbreezedeus%2FCnOCR-Demo)

|                 |                                                           |
| --------------- | --------------------------------------------------------- |
| 📀 **Code**      | [Github](https://github.com/breezedeus/cnocr)             |
| 📖 **Doc**       | [在线文档](https://cnocr.readthedocs.io)                    |
| 🧳 **Models**    | [可用模型](https://cnocr.readthedocs.io/zh/latest/models/)  |
| 💬 **Contact**   | [交流群](https://www.breezedeus.com/join-group)            |
| 👨🏻💻 **Author**   | [Breezedeus](https://www.breezedeus.com)                  |

有用还请帮忙 **star 🌟 [CnOCR](https://github.com/breezedeus/cnocr)** 🙏
</div>
"""
    with gr.Blocks() as demo:
        gr.HTML(
            f'<h1 style="text-align: center; margin-bottom: 1rem;">{title} <a href="https://github.com/breezedeus/cnocr" target="_blank">CnOCR V2.3</a></h1>'
        )
        with gr.Row(equal_height=False):
            with gr.Column(min_width=200, variant='panel', scale=3):
                gr.Markdown('### 模型设置')
                det_model_name = gr.Dropdown(
                    label='选择检测模型', choices=det_models, value='ch_PP-OCRv3_det::onnx',
                )
                is_single_line = gr.Checkbox(label='单行文字模式(不使用检测模型)', value=False)
                rec_model_name = gr.Dropdown(
                    label='选择识别模型',
                    choices=all_models,
                    value='densenet_lite_136-gru',
                )
                gr.Markdown('### 检测参数')
                rotated_bbox = gr.Checkbox(label='检测带角度文本框', value=True)
                use_angle_clf = gr.Checkbox(label='使用角度预测模型校正文本框', value=False)
                with gr.Accordion('更多选项', open=False):
                    new_size = gr.Slider(
                        label='resize 后图片(长边)大小', minimum=124, maximum=4096, value=768
                    )
                    box_score_thresh = gr.Slider(
                        label='得分阈值(低于阈值的结果会被过滤掉)', minimum=0.05, maximum=0.95, value=0.3
                    )
                    min_box_size = gr.Slider(
                        label='框大小阈值(更小的文本框会被过滤掉)', minimum=4, maximum=50, value=10
                    )

            with gr.Column(scale=5, variant='compact'):
                gr.Markdown('### 选择待识别图片')
                image_file = gr.Image(label='待识别图片', type="pil", image_mode='RGB')
                sub_btn = gr.Button("Submit", variant="primary")

            with gr.Column(scale=2, variant='compact'):
                gr.Markdown(table_desc)

        out_image = gr.Image(label='识别结果', interactive=False, visible=False)
        naive_warn = gr.Markdown(
            '**⚠️ Warning**: "naive_det" 检测模型不返回文本框位置!', visible=False
        )
        out_texts = gr.Dataframe(
            headers=['得分', '文本'], label='识别结果', interactive=False, visible=False
        )

        sub_btn.click(
            recognize,
            inputs=[
                det_model_name,
                is_single_line,
                rec_model_name,
                rotated_bbox,
                use_angle_clf,
                new_size,
                box_score_thresh,
                min_box_size,
                image_file,
            ],
            outputs=[out_image, naive_warn, out_texts],
        )

        gr.Examples(
            label='示例',
            examples=examples,
            inputs=[
                det_model_name,
                is_single_line,
                rec_model_name,
                use_angle_clf,
                image_file,
            ],
            outputs=[out_image, naive_warn, out_texts],
            fn=example_func,
            cache_examples=os.getenv('CACHE_EXAMPLES') == '1',
        )

    demo.queue(max_size=10)
    demo.launch()


if __name__ == '__main__':
    main()