CloudAnts committed
Commit a59ff79 · 1 Parent(s): 6325d51
Files changed (2)
  1. app2.py +241 -0
  2. requirements.txt +2 -1
app2.py ADDED
@@ -0,0 +1,241 @@
+ import os
+ import re
+ import csv
+ import shutil
+ import textwrap
+
+ import cv2
+ import easyocr
+ import supervision as sv
+ from ultralytics import YOLOv10
+ from flask import Flask, request, jsonify, send_from_directory, render_template
+
+ app = Flask(__name__)
+
+ def enhance_contrast(image):
+     gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+     equalized_image = cv2.equalizeHist(gray_image)
+     return equalized_image
+
+
+ def calculate_iou(bbox1, bbox2):
+     x1_max = max(bbox1[0], bbox2[0])
+     y1_max = max(bbox1[1], bbox2[1])
+     x2_min = min(bbox1[2], bbox2[2])
+     y2_min = min(bbox1[3], bbox2[3])
+
+     inter_area = max(0, x2_min - x1_max) * max(0, y2_min - y1_max)
+
+     bbox1_area = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1])
+     bbox2_area = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1])
+
+     union_area = bbox1_area + bbox2_area - inter_area
+     iou = inter_area / float(union_area) if union_area > 0 else 0
+     return iou
+
+
+ cropped_dir = "./app/cropped_images/"
+ if os.path.exists(cropped_dir):
+     shutil.rmtree(cropped_dir)
+ os.makedirs(cropped_dir, exist_ok=True)
+
+ output_dir1 = "./app/Folder1"
+ output_dir2 = "./app/Folder2"
+ output_dir3 = "./app/Folder3"
+ UPLOAD_FOLDER = "./app/data1"
+ os.makedirs(output_dir1, exist_ok=True)
+ os.makedirs(output_dir2, exist_ok=True)
+ os.makedirs(output_dir3, exist_ok=True)
+ os.makedirs(UPLOAD_FOLDER, exist_ok=True)
+
+ @app.route('/')
+ def index():
+     return render_template('index3.html')  # This will serve your HTML page
+
+ @app.route('/upload', methods=['POST'])
+ def upload_file():
+     if 'invoice-upload' not in request.files:
+         return jsonify({'error': 'No file part'}), 400
+     file = request.files['invoice-upload']
+     if file.filename == '':
+         return jsonify({'error': 'No selected file'}), 400
+     file_path = os.path.join(UPLOAD_FOLDER, file.filename)
+     file.save(file_path)
+     output_image, output_csv = process_image()
+     return jsonify({
+         'image_path': output_image,
+         'csv_path': output_csv
+     })
+
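+ # process_image() runs the full pipeline over every image in ./app/data1:
+ # YOLOv10 detection -> per-box crops -> EasyOCR recognition -> CSV in
+ # ./app/Folder2 -> IoU-filtered annotated image in ./app/Folder3.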
+ def process_image():
+     print("Current working directory:", os.getcwd())
+
+     # Check contents in the root directory
+     print("Root directory contents:", os.listdir('/'))
+
+     model = YOLOv10('./runs/detect/train3/weights/best (1).pt')
+     # Validation dataset is loaded for reference only; it is not used below.
+     dataset = sv.DetectionDataset.from_yolo(
+         images_directory_path="./data/MyNewVersion5.0Dataset/valid/images",
+         annotations_directory_path="./data/MyNewVersion5.0Dataset/valid/labels",
+         data_yaml_path="./data/MyNewVersion5.0Dataset/data.yaml"
+     )
+     bounding_box_annotator = sv.BoundingBoxAnnotator()
+     label_annotator = sv.LabelAnnotator()
+
+     image_dir = "./app/data1"
+     files = os.listdir(image_dir)
+     files.sort()
+     files = files[0:100]
+     print(files)
+
+     output_image_path = None
+     output_csv_path = None
+     for ii in files:
+         random_image_data = cv2.imread(os.path.join(image_dir, ii))
+         random_image_data1 = cv2.imread(os.path.join(image_dir, ii))
+
+         # Detect fields with YOLOv10 and save an annotated copy.
+         results = model(source=os.path.join(image_dir, ii), conf=0.07)[0]
+         detections = sv.Detections.from_ultralytics(results)
+         annotated_image = bounding_box_annotator.annotate(scene=random_image_data, detections=detections)
+         annotated_image = label_annotator.annotate(scene=annotated_image, detections=detections)
+         save_path = os.path.join(output_dir1, "detection" + ii)
+         cv2.imwrite(save_path, annotated_image)
+         print(f"Annotated image saved at {save_path}")
+
+         # Write the detections to a text file and crop each box.
+         bounding_boxes = results.boxes.xyxy.cpu().numpy()
+         class_ids = results.boxes.cls.cpu().numpy()
+         confidences = results.boxes.conf.cpu().numpy()
+         bounding_box_save_path = "./bounding_boxes.txt"
+         with open(bounding_box_save_path, 'w') as f:
+             for i, (bbox, class_id, confidence) in enumerate(zip(bounding_boxes, class_ids, confidences)):
+                 x1, y1, x2, y2 = map(int, bbox)
+                 f.write(f"Object {i + 1}: Class {class_id}, Confidence: {confidence:.2f}, "
+                         f"Bounding box: ({x1}, {y1}, {x2}, {y2})\n")
+                 cropped_image = random_image_data1[y1:y2, x1:x2]
+                 cropped_image_path = os.path.join(cropped_dir, f"cropped_object_{i + 1}.jpg")
+                 cv2.imwrite(cropped_image_path, cropped_image)
+                 print(f"Cropped image saved at {cropped_image_path}")
+         print(f"Bounding box coordinates saved at {bounding_box_save_path}")
+         print(f"Directory listing: {os.listdir('./app/Folder1')}")
+
+         # Initialise the custom EasyOCR recognizer.
+         try:
+             reader = easyocr.Reader(
+                 ['en'],
+                 recog_network='en_sample',
+                 model_storage_directory='./EasyOCR-Trainer/EasyOCR/easyocr/model',
+                 user_network_directory='./EasyOCR-Trainer/EasyOCR/user_network')
+         except Exception as e:
+             print(f"Error initializing EasyOCR Reader: {e}")
+             raise
+
+         # Recognize text in every crop and write the results to a CSV.
+         input_file_path = './bounding_boxes.txt'
+         cropped_images_folder = './app/cropped_images/'
+         output_csv_path = os.path.join(output_dir2, ii + 'bounding_boxes_with_recognition.csv')
+         with open(input_file_path, 'r') as infile:
+             lines = infile.readlines()
+         with open(output_csv_path, 'w', newline='', encoding='utf-8') as csvfile:
+             csv_writer = csv.writer(csvfile)
+             csv_writer.writerow(['Object ID', 'Bounding Box', 'Image Name', 'Recognized Text'])
+             for i, line in enumerate(lines):
+                 object_id = f"Object_{i + 1}"
+                 bounding_box_info = line.strip()
+                 cropped_image_name = f"cropped_object_{i + 1}.jpg"
+                 cropped_image_path = os.path.join(cropped_images_folder, cropped_image_name)
+                 if os.path.exists(cropped_image_path):
+                     detected_boxes = []
+                     bbox_match = re.search(r"Bounding box: \((\d+), (\d+), (\d+), (\d+)\)", bounding_box_info)
+                     if bbox_match:
+                         x1, y1, x2, y2 = map(int, bbox_match.groups())
+                         # EasyOCR expects boxes as [x_min, x_max, y_min, y_max].
+                         detected_boxes = [[x1, x2, y1, y2]]
+                     else:
+                         print("No bounding box found in the info.")
+                     cropped_image = cv2.imread(cropped_image_path, cv2.IMREAD_GRAYSCALE)
+                     horizontal_list1, free_list1 = reader.detect(cropped_image)
+                     horizontal_list1 = [box for sublist in horizontal_list1 for box in sublist]
+                     free_list1 = []
+                     if horizontal_list1:
+                         result = reader.recognize(cropped_image, detail=0,
+                                                   horizontal_list=horizontal_list1, free_list=free_list1)
+                     else:
+                         # Fall back to the YOLO box on the full image when detection finds nothing in the crop.
+                         result = reader.recognize(random_image_data1, detail=0,
+                                                   horizontal_list=detected_boxes, free_list=free_list1)
+                     recognized_text = ' '.join(result) if result else ''
+                 else:
+                     recognized_text = 'No image found'
+                 csv_writer.writerow([object_id, bounding_box_info, cropped_image_name, recognized_text])
+         print(f"CSV file with recognition results saved at {output_csv_path}")
+
+         # Re-draw the de-duplicated boxes with their recognized text on the original image.
+         image_path = os.path.join(image_dir, ii)
+         csv_file_path = output_csv_path
+         image = cv2.imread(image_path)
+         font = cv2.FONT_HERSHEY_SIMPLEX
+         font_scale = 1.3
+         font_thickness = 2
+         color = (255, 0, 255)
+         bboxes = []
+         recognized_texts = []
+         with open(csv_file_path, 'r', encoding='utf-8') as csvfile:
+             csv_reader = csv.DictReader(csvfile)
+             for row in csv_reader:
+                 bbox_match = re.search(r'\((\d+), (\d+), (\d+), (\d+)\)', row['Bounding Box'])
+                 if bbox_match:
+                     bbox = [int(bbox_match.group(i)) for i in range(1, 5)]
+                     bboxes.append(bbox)
+                     recognized_texts.append(row['Recognized Text'])
+
+         # Drop any box that overlaps an already-kept box by more than the IoU threshold.
+         filtered_bboxes = []
+         filtered_texts = []
+         iou_threshold = 0.4
+         for i, bbox1 in enumerate(bboxes):
+             keep = True
+             for bbox2 in filtered_bboxes:
+                 if calculate_iou(bbox1, bbox2) > iou_threshold:
+                     keep = False
+                     break
+             if keep:
+                 filtered_bboxes.append(bbox1)
+                 filtered_texts.append(recognized_texts[i])
+
+         for bbox, recognized_text in zip(filtered_bboxes, filtered_texts):
+             x1, y1, x2, y2 = bbox
+             cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
+             max_chars_per_line = 60
+             wrapped_text = textwrap.wrap(recognized_text, width=max_chars_per_line)
+             text_y = y1 - 10 if y1 - 10 > 10 else y1 + 10
+             for line in wrapped_text:
+                 cv2.putText(image, line, (x1, text_y), font, font_scale, color, font_thickness)
+                 text_y += int(font_scale * 20)
+
+         output_image_path = os.path.join(output_dir3, "annotated" + ii + ".png")
+         cv2.imwrite(output_image_path, image)
+         print(f"Annotated image saved at {output_image_path}")
+
+     # Report the paths of the last processed image back to /upload.
+     return output_image_path, output_csv_path
+
+ @app.route('/download_csv/<filename>')
+ def download_csv(filename):
+     return send_from_directory(output_dir2, filename, as_attachment=True)
+
+ @app.route('/download_image/<filename>')
+ def download_image(filename):
+     return send_from_directory(output_dir3, filename, as_attachment=True)
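Note that app2.py only defines the Flask app and its routes; it never calls app.run(), so how it is served (for example via flask run or a WSGI server) is assumed to happen outside this file. Below is a minimal client sketch for the /upload route, assuming the app is reachable at http://localhost:5000, that the requests package is installed, and that ./sample_invoice.jpg is a hypothetical stand-in for a real invoice image:

import requests

# Post an invoice image to the /upload route defined in app2.py.
# The multipart field name must match the route's 'invoice-upload' key.
with open("./sample_invoice.jpg", "rb") as fh:
    resp = requests.post("http://localhost:5000/upload",
                         files={"invoice-upload": fh})

print(resp.status_code)
print(resp.json())  # expected keys: 'image_path' and 'csv_path'

The returned values are server-side paths; the generated files themselves are served by the /download_csv/<filename> and /download_image/<filename> routes from ./app/Folder2 and ./app/Folder3.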
requirements.txt CHANGED
@@ -9,5 +9,6 @@ pandas
  huggingface_hub
  supervision
  py-cpuinfo
-
+ torch==2.5.1+cu121
+ torchvision==0.20.1+cu121
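The pinned torch==2.5.1+cu121 and torchvision==0.20.1+cu121 builds carry a CUDA 12.1 local-version tag, and wheels tagged this way are normally published on PyTorch's own package index rather than PyPI, so a plain pip install -r requirements.txt may fail to resolve them. One way to handle this is to point pip at that index from inside the requirements file; a sketch, assuming the cu121 wheel index is the intended source (not part of this commit):

--extra-index-url https://download.pytorch.org/whl/cu121
torch==2.5.1+cu121
torchvision==0.20.1+cu121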