WebashalarForML committed on
Commit
57952eb
·
verified ·
1 Parent(s): bd55508

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -25
app.py CHANGED
@@ -108,58 +108,55 @@ def reset_upload():
108
  flash('No file to remove.')
109
  return redirect(url_for('index'))
110
 
111
@app.route('/process_file/<filename>', methods=['GET', 'POST'])
def process_file(filename):
    """Run OCR + LLM extraction over the session's uploaded files.

    Reads the filenames stored in ``session['uploaded_files']``, extracts
    text from each image, and structures it with the primary extractor
    (``Data_Extractor``).  On any failure it falls back to the backup
    ``NER_Model`` pipeline.  Results are stashed in the session and the
    user is redirected to the result page.

    Note: the route's ``<filename>`` parameter is accepted for URL
    compatibility but the list of files actually processed comes from the
    session, not from this argument.
    """
    uploaded_files = session.get('uploaded_files', [])
    if not uploaded_files:
        print('No files selected for processing')
        logging.warning("No files selected for processing")
        return redirect(url_for('index'))

    # Join the configured upload folder with each stored filename.
    file_paths = [os.path.join(app.config['UPLOAD_FOLDER'], filename) for filename in uploaded_files]
    logging.info(f"Processing files: {file_paths}")

    extracted_text = {}
    processed_Img = {}
    # Try to process using the main model (Mistral 7b).
    try:
        extracted_text, processed_Img = extract_text_from_images(file_paths)
        logging.info(f"Extracted text: {extracted_text}")
        logging.info(f"Processed images: {processed_Img}")
        # Run the model code only if some text was extracted.
        if extracted_text:
            llmText = json_to_llm_str(extracted_text)
            logging.info(f"LLM text: {llmText}")
            LLMdata = Data_Extractor(llmText)
            print("Json Output from model------------>", LLMdata)
            logging.info(f"LLM data: {LLMdata}")
        else:
            # BUG FIX: the original `raise ('...')` raised a bare string,
            # which is a TypeError in Python 3 (exceptions must derive from
            # BaseException).  Raise a real exception so the intent — jump
            # to the backup-model path below — is explicit.
            raise ValueError('The text is not detected in the OCR')
    except Exception as model_error:
        logging.error(f"Error during LLM processing: {model_error}")
        logging.info("Running backup model...")
        # Use backup model in case of errors.
        LLMdata = {}
        extracted_text, processed_Img = extract_text_from_images(file_paths)
        logging.info(f"Extracted text (Backup): {extracted_text}")
        logging.info(f"Processed images (Backup): {processed_Img}")

        if extracted_text:
            text = json_to_llm_str(extracted_text)
            LLMdata = NER_Model(text)
            print("Json Output from model------------>", LLMdata)
            logging.info(f"NER model data: {LLMdata}")
        else:
            logging.warning("No extracted text available for backup model")
    # Process extracted text and structure the output.
    cont_data = process_extracted_text(extracted_text)
    logging.info(f"Contextual data: {cont_data}")

    processed_data = process_resume_data(LLMdata, cont_data, extracted_text)
    logging.info(f"Processed data: {processed_data}")
    # Save data in session for later use by the result view.
    session['processed_data'] = processed_data
    session['processed_Img'] = processed_Img
    print('Data processed and analyzed successfully')
    logging.info("Data processed and analyzed successfully")
    return redirect(url_for('result'))
165
 
 
108
  flash('No file to remove.')
109
  return redirect(url_for('index'))
110
 
111
@app.route('/process', methods=['GET', 'POST'])
def process_file():
    """Run OCR + LLM extraction over the session's uploaded files.

    Reads the filenames stored in ``session['uploaded_files']``, extracts
    text from each image, and structures it with the primary extractor
    (``Data_Extractor``).  On any failure it falls back to the backup
    ``NER_Model`` pipeline.  Results are stashed in the session and the
    user is redirected to the result page.
    """
    uploaded_files = session.get('uploaded_files', [])
    if not uploaded_files:
        flash('No files selected for processing')
        logging.warning("No files selected for processing")
        return redirect(url_for('index'))

    file_paths = [os.path.join(app.config['UPLOAD_FOLDER'], filename) for filename in uploaded_files]
    logging.info(f"Processing files: {file_paths}")

    extracted_text = {}
    processed_Img = {}

    try:
        extracted_text, processed_Img = extract_text_from_images(file_paths)
        logging.info(f"Extracted text: {extracted_text}")
        logging.info(f"Processed images: {processed_Img}")

        # BUG FIX: this revision dropped the empty-OCR guard that the
        # previous revision had, so an empty extraction silently fed an
        # empty payload to Data_Extractor instead of using the backup
        # model.  Raising here restores the fallback behavior.
        if not extracted_text:
            raise ValueError('The text is not detected in the OCR')

        llmText = json_to_llm_str(extracted_text)
        logging.info(f"LLM text: {llmText}")

        LLMdata = Data_Extractor(llmText)
        logging.info(f"LLM data: {LLMdata}")

    except Exception as e:
        logging.error(f"Error during LLM processing: {e}")
        logging.info("Running backup model...")

        # Backup path: re-run extraction and use the NER model instead.
        LLMdata = {}
        extracted_text, processed_Img = extract_text_from_images(file_paths)
        logging.info(f"Extracted text(Backup): {extracted_text}")
        logging.info(f"Processed images(Backup): {processed_Img}")

        if extracted_text:
            text = json_to_llm_str(extracted_text)
            LLMdata = NER_Model(text)
            logging.info(f"NER model data: {LLMdata}")
        else:
            logging.warning("No extracted text available for backup model")

    cont_data = process_extracted_text(extracted_text)
    logging.info(f"Contextual data: {cont_data}")

    processed_data = process_resume_data(LLMdata, cont_data, extracted_text)
    logging.info(f"Processed data: {processed_data}")

    # Persist results for the result view, then redirect.
    session['processed_data'] = processed_data
    session['processed_Img'] = processed_Img
    flash('Data processed and analyzed successfully')
    logging.info("Data processed and analyzed successfully")
    return redirect(url_for('result'))