import os
import re
import csv
import shutil
import textwrap

import cv2
import easyocr
import supervision as sv
from flask import Flask, request, jsonify, send_from_directory, render_template
from ultralytics import YOLOv10
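
# Flask app: an uploaded invoice image is saved to data1/, run through a
# YOLOv10 detector and an EasyOCR recogniser, and the resulting annotated
# image and CSV can be fetched via the /download_image and /download_csv routes.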
app = Flask(__name__)
output_dir1 = "/var/www/html/python/OCR-AI/OCR-Project/Folder1"
output_dir2 = "/var/www/html/python/OCR-AI/OCR-Project/Folder2"
output_dir3 = "/var/www/html/python/OCR-AI/OCR-Project/Folder3"
UPLOAD_FOLDER = "/var/www/html/python/OCR-AI/OCR-Project/data1"
os.makedirs(output_dir1, exist_ok=True)
os.makedirs(output_dir2, exist_ok=True)
os.makedirs(output_dir3, exist_ok=True)
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

@app.route('/')
def index():
    return render_template('index3.html')  # Serves the HTML upload page

@app.route('/upload', methods=['POST'])
def upload_file():
    if 'invoice-upload' not in request.files:
        return jsonify({'error': 'No file part'}), 400
    file = request.files['invoice-upload']
    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400
    if file:
        # Save uploaded file
        file_path = os.path.join(UPLOAD_FOLDER, file.filename)
        file.save(file_path)

        # Process the file
        output_image, output_csv = process_image()

        return jsonify({
            'image_path': output_image,
            'csv_path': output_csv
        })
    return jsonify({'error': 'Invalid file'}), 400

def enhance_contrast(image):
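    # Grayscale + histogram-equalisation helper for OCR pre-processing
    # (defined for optional use; not called in the pipeline below).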
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    equalized_image = cv2.equalizeHist(gray_image)
    return equalized_image

def calculate_iou(bbox1, bbox2):
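    # Intersection-over-Union of two (x1, y1, x2, y2) boxes; used later to
    # drop overlapping detections before drawing the final text overlay.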
    x1_max = max(bbox1[0], bbox2[0])
    y1_max = max(bbox1[1], bbox2[1])
    x2_min = min(bbox1[2], bbox2[2])
    y2_min = min(bbox1[3], bbox2[3])

    inter_area = max(0, x2_min - x1_max) * max(0, y2_min - y1_max)

    bbox1_area = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1])
    bbox2_area = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1])

    iou = inter_area / float(bbox1_area + bbox2_area - inter_area) if (bbox1_area + bbox2_area - inter_area) > 0 else 0
    return iou
def process_image():
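  # Pipeline: run YOLOv10 on every file in data1/, save an annotated copy to
  # Folder1, crop each detected region, run EasyOCR on the crops, write the
  # recognised text to a CSV in Folder2, and save a text-overlay image in Folder3.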
  cropped_dir = "/var/www/html/python/OCR-AI/OCR-Project/cropped_images/"
  if os.path.exists(cropped_dir):
    shutil.rmtree(cropped_dir)
  os.makedirs(cropped_dir, exist_ok=True)
  model = YOLOv10('/var/www/html/python/OCR-AI/OCR-Project/runs/detect/train3/weights/best (1).pt')
  dataset = sv.DetectionDataset.from_yolo(
    images_directory_path="/var/www/html/python/OCR-AI/OCR-Project/data/MyNewVersion5.0Dataset/valid/images",
    annotations_directory_path="/var/www/html/python/OCR-AI/OCR-Project/data/MyNewVersion5.0Dataset/valid/labels",
    data_yaml_path="/var/www/html/python/OCR-AI/OCR-Project/data/MyNewVersion5.0Dataset/data.yaml"
  )
  bounding_box_annotator = sv.BoundingBoxAnnotator()
  label_annotator = sv.LabelAnnotator()
  files=os.listdir('/var/www/html/python/OCR-AI/OCR-Project/data1/')
  for ii in files:
    random_image_data = cv2.imread('/var/www/html/python/OCR-AI/OCR-Project/data1/'+ii)
    random_image_data1 = cv2.imread('/var/www/html/python/OCR-AI/OCR-Project/data1/'+ii)
    # Detect fields on the current image only.
    results = model(source='/var/www/html/python/OCR-AI/OCR-Project/data1/'+ii, conf=0.07)[0]
    detections = sv.Detections.from_ultralytics(results)
    annotated_image = bounding_box_annotator.annotate(scene=random_image_data, detections=detections)
    annotated_image = label_annotator.annotate(scene=annotated_image, detections=detections)
    save_path="/var/www/html/python/OCR-AI/OCR-Project/Folder1/"+"detection"+ii
    cv2.imwrite(save_path, annotated_image)
    print(f"Annotated image saved at {save_path}")
    bounding_boxes = results.boxes.xyxy.cpu().numpy()
    class_ids = results.boxes.cls.cpu().numpy()
    confidences = results.boxes.conf.cpu().numpy()
    bounding_box_save_path = "/var/www/html/python/OCR-AI/OCR-Project/bounding_boxes.txt"
    with open(bounding_box_save_path, 'w') as f:
      for i, (bbox, class_id, confidence) in enumerate(zip(bounding_boxes, class_ids, confidences)):
        x1, y1, x2, y2 = map(int, bbox)
        f.write(f"Object {i+1}: Class {class_id}, Confidence: {confidence:.2f}, "
                f"Bounding box: ({x1}, {y1}, {x2}, {y2})\n")
        cropped_image = random_image_data1[y1:y2, x1:x2]
        cropped_image_path = os.path.join(cropped_dir, f"cropped_object_{i+1}.jpg")
        cv2.imwrite(cropped_image_path,  cropped_image)
        print(f"Enhanced cropped image saved at {cropped_image_path}")
    print(f"Bounding box coordinates saved at {bounding_box_save_path}")
    reader = easyocr.Reader(
    ['en'],
    #detector=False,
    recog_network='en_sample',
    model_storage_directory='/var/www/html/python/OCR-AI/OCR-Project/EasyOCR-Trainer/EasyOCR/easyocr/model',
    user_network_directory='/var/www/html/python/OCR-AI/OCR-Project/EasyOCR-Trainer/EasyOCR/user_network')
    input_file_path = '/var/www/html/python/OCR-AI/OCR-Project/bounding_boxes.txt'
    cropped_images_folder = '/var/www/html/python/OCR-AI/OCR-Project/cropped_images/'
    output_csv_path = '/var/www/html/python/OCR-AI/OCR-Project/Folder2/'+ii+'bounding_boxes_with_recognition.csv'
    print(output_csv_path)
    with open(input_file_path, 'r') as infile:
      lines = infile.readlines()
      print(lines)
    with open(output_csv_path, 'w', newline='', encoding='utf-8') as csvfile:
      csv_writer = csv.writer(csvfile)
      csv_writer.writerow(['Object ID', 'Bounding Box', 'Image Name', 'Recognized Text'])
      for i, line in enumerate(lines):
        print(f"Processing line {i+1}/{len(lines)}: {line.strip()}")
        object_id = f"Object_{i+1}"
        bounding_box_info = line.strip()
        cropped_image_name = f"cropped_object_{i+1}.jpg"
        cropped_image_path = os.path.join(cropped_images_folder, cropped_image_name)
        print(f"Processing Object {i}, cropped image path: {cropped_image_path}")
        if os.path.exists(cropped_image_path):
            bbox_match = re.search(r"Bounding box: \((\d+), (\d+), (\d+), (\d+)\)", bounding_box_info)
            if bbox_match:
               x1, y1, x2, y2 = map(int, bbox_match.groups())
               detected_boxes = [[x1, x2, y1, y2]]
            else:
              print("No bounding box found in the info.")
     
            cropped_image = cv2.imread(cropped_image_path, cv2.IMREAD_GRAYSCALE)
            print(cropped_image.shape)
               
            
            horizontal_list1,free_list1=reader.detect(cropped_image)
            print("-----")
            free_list1 = free_list1 if free_list1 is not None else []
            horizontal_list1 = [box for sublist in horizontal_list1 for box in sublist]
            free_list1=[]
            horizontal_list_for_recognize = detected_boxes if not horizontal_list1 else horizontal_list1
            print(horizontal_list1)
            horizontal_list1=[]
            if horizontal_list1:
              print("-----")
              result = reader.recognize(cropped_image ,detail=0,horizontal_list= horizontal_list1,free_list=free_list1)
              print("-----")
            else:
               result = reader.recognize( random_image_data1,detail=0,horizontal_list=detected_boxes,free_list=free_list1)
            print(result)
            recognized_text = ' '.join(result) if result else ''
        else:
            recognized_text = 'No image found'
        csv_writer.writerow([object_id, bounding_box_info, cropped_image_name, recognized_text])
    print(f"CSV file with recognition results saved at {output_csv_path}")
    image_path = "/var/www/html/python/OCR-AI/OCR-Project/data1/"+ii
    csv_file_path = output_csv_path
    image = cv2.imread(image_path)
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 1.3
    font_thickness = 2
    color = (255, 0, 255)
    bboxes = []
    recognized_texts = []
    with open(csv_file_path, 'r') as csvfile:
      csv_reader = csv.DictReader(csvfile)  # Use DictReader to access columns by header name
      for row in csv_reader:
        # Extract the bounding box using regex to find coordinates in the 'Bounding Box' field
        bbox_match = re.search(r'\((\d+), (\d+), (\d+), (\d+)\)', row['Bounding Box'])
        if bbox_match:
            bbox = [int(bbox_match.group(i)) for i in range(1, 5)]  # Extract and convert to integers
            bboxes.append(bbox)
            # Extract the recognized text from the 'Recognized Text' field
            recognized_texts.append(row['Recognized Text'])
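    # Drop any box that overlaps an already-kept box by more than the IoU
    # threshold, so duplicate detections are annotated only once.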
    filtered_bboxes = []
    filtered_texts = []
    iou_threshold = 0.5
    for i, bbox1 in enumerate(bboxes):
      keep = True
      for j, bbox2 in enumerate(filtered_bboxes):
        if calculate_iou(bbox1, bbox2) > iou_threshold:
            keep = False  # If IoU exceeds the threshold, ignore this bounding box
            break
      if keep:
        filtered_bboxes.append(bbox1)
        filtered_texts.append(recognized_texts[i])
    for bbox, recognized_text in zip(filtered_bboxes, filtered_texts):
      x1, y1, x2, y2 = bbox
      cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
      max_chars_per_line = 60
      wrapped_text = textwrap.wrap(recognized_text, width=max_chars_per_line)
      text_y = y1 - 10 if y1 - 10 > 10 else y1 + 10
      for line in wrapped_text:
        cv2.putText(image, line, (x1, text_y), font, font_scale, color, font_thickness)
        text_y += int(font_scale * 20)
    # Save the text overlay once per input image, after all boxes are drawn.
    output_image_path = "/var/www/html/python/OCR-AI/OCR-Project/Folder3/"+"annotated"+ii+".png"
    cv2.imwrite(output_image_path, image)
    print(f"Annotated image saved at {output_image_path}")
  # File names returned to the /upload route; they match what the
  # /download_image and /download_csv endpoints expect.
  return os.path.basename(output_image_path), os.path.basename(output_csv_path)

@app.route('/download_csv/<filename>')
def download_csv(filename):
    return send_from_directory(output_dir2, filename, as_attachment=True)

@app.route('/download_image/<filename>')
def download_image(filename):
    return send_from_directory(output_dir3, filename, as_attachment=True)
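
# Minimal entry-point sketch (assumption: the app is launched directly with
# `python <this file>` rather than behind a WSGI server; the port is illustrative).
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)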