# OpenOCR-Demo: configs/rec/lpv/svtr_base_lpv_wo_glrm.yml
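# Config overview: an SVTRNet visual encoder paired with an LPVDecoder head
# (per the filename, presumably the LPV variant without its GLRM branch),
# trained on the filtered Union14M-L LMDB data and evaluated on the LMDB
# subsets under ../evaluation/ (see the Train/Eval sections below).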
Global:
  device: gpu
  epoch_num: 20
  log_smooth_window: 20
  print_batch_step: 10
  output_dir: ./output/rec/u14m_filter/svtr_base_lpv_wo_glrm/
  save_epoch_step: 1
  # evaluation is run every 500 iterations (starting at iteration 0) and every epoch
  eval_batch_step: [0, 500]
  eval_epoch_step: [0, 1]
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  use_tensorboard: False
  infer_img:
  # for data or label process
  character_dict_path: &character_dict_path ./tools/utils/EN_symbol_dict.txt # 96en
  # ./tools/utils/ppocr_keys_v1.txt # ch
  max_text_length: &max_text_length 25
  use_space_char: &use_space_char False
  save_res_path: ./output/rec/u14m_filter/predicts_svtr_base_lpv_wo_glrm.txt
  use_amp: True
  grad_clip_val: 20
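# A typical multi-GPU launch for this config might look like the following
# (assumption: OpenOCR's standard tools/train_rec.py entry point and --c flag;
# verify against the repository README):
#   CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --nproc_per_node=4 \
#     tools/train_rec.py --c configs/rec/lpv/svtr_base_lpv_wo_glrm.yml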
Optimizer:
  name: Adam
  lr: 0.0001 # for 4 GPUs x batch size 128 per GPU
  weight_decay: 0.0
  filter_bias_and_bn: False
  betas: [0.9, 0.99]
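# Per the lr note above, the intended setup is 4 GPUs x 128 images/GPU, i.e. an
# effective global batch size of 512; if the GPU count or per-card batch size
# changes, the base lr of 1e-4 likely needs rescaling.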
LRScheduler:
  name: MultiStepLR
  milestones: [12]
  gamma: 0.1
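# With epoch_num: 20, MultiStepLR keeps the lr at 1e-4 for epochs 0-11 and then
# multiplies it by gamma=0.1 at the milestone, so epochs 12-19 run at 1e-5.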
Architecture:
  model_type: rec
  algorithm: LPV
  in_channels: 3
  Transform:
  Encoder:
    name: SVTRNet
    img_size: [32, 128]
    out_char_num: 25
    out_channels: 256
    patch_merging: 'Conv'
    embed_dim: [128, 256, 384]
    depth: [6, 6, 6]
    num_heads: [4, 8, 12]
    mixer: ['Conv','Conv','Conv','Conv','Conv','Conv','Conv','Conv','Global','Global','Global','Global','Global','Global','Global','Global','Global','Global']
    local_mixer: [[5, 5], [5, 5], [5, 5]]
    sub_k: [[1, 1], [1, 1]]
    feature2d: True
    last_stage: False
    prenorm: True
  Decoder:
    name: LPVDecoder
    num_layer: 3
    max_len: *max_text_length
    use_mask: False
    dim_feedforward: 1536
    nhead: 12
    dropout: 0.1
    trans_layer: 3
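# Encoder/decoder wiring notes: depth [6, 6, 6] yields 18 mixing blocks,
# matching the 18-entry mixer list (8 local 'Conv' blocks followed by 10
# 'Global' self-attention blocks). sub_k [[1, 1], [1, 1]] appears to keep the
# feature map at full stage resolution (no downsampling at the merges), and
# feature2d: True with last_stage: False leaves a 2D feature map for the
# LPVDecoder instead of SVTR's usual pooled 1D sequence output.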
Loss:
  name: LPVLoss
PostProcess:
  name: ARLabelDecode
  character_dict_path: *character_dict_path
  use_space_char: *use_space_char
Metric:
  name: RecMetric
  main_indicator: acc
  is_filter: True
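# is_filter: True presumably normalizes predictions and labels (e.g. case and
# punctuation filtering) before accuracy is computed, as is common for the
# 96-character English setting; check RecMetric for the exact behavior.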
Train:
  dataset:
    name: LMDBDataSet
    data_dir: ../Union14M-L-LMDB-Filtered
    transforms:
      - DecodeImagePIL: # load image
          img_mode: RGB
      - PARSeqAugPIL:
      - ARLabelEncode: # class handling label
          character_dict_path: *character_dict_path
          use_space_char: *use_space_char
          max_text_length: *max_text_length
      - RecTVResize:
          image_shape: [32, 128]
          padding: False
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: True
    batch_size_per_card: 128
    drop_last: True
    num_workers: 4
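# Training pipeline summary: images are decoded as RGB PIL images, augmented
# with PARSeqAugPIL, labels are encoded by ARLabelEncode (up to 25 characters),
# and images are resized to 32x128 by RecTVResize without padding before being
# batched 128 per card.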
Eval:
  dataset:
    name: LMDBDataSet
    data_dir: ../evaluation/
    transforms:
      - DecodeImagePIL: # load image
          img_mode: RGB
      - ARLabelEncode: # class handling label
          character_dict_path: *character_dict_path
          use_space_char: *use_space_char
          max_text_length: *max_text_length
      - RecTVResize:
          image_shape: [32, 128]
          padding: False
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 128
    num_workers: 4
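# Evaluation reads LMDB subsets from ../evaluation/ with shuffle and drop_last
# disabled, so every sample is scored exactly once; predictions are typically
# written to the save_res_path set in Global.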