# libraries
from flask import Flask, render_template, request, redirect, url_for, flash, session, send_from_directory
from werkzeug.utils import secure_filename
import os
import logging
from utility.utils import extract_text_from_images, Data_Extractor, json_to_llm_str, process_extracted_text, process_resume_data
from backup.backup import NER_Model
from paddleocr import PaddleOCR

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    handlers=[
        logging.StreamHandler()  # Log to the console only (no file handler)
    ]
)

# Flask App
app = Flask(__name__)
app.secret_key = 'your_secret_key'
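# Note: outside local testing the secret key should come from the environment
# rather than source, e.g. app.secret_key = os.environ.get('SECRET_KEY', 'your_secret_key')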
app.config['UPLOAD_FOLDER'] = 'uploads/'
app.config['RESULT_FOLDER'] = 'uploads/'

UPLOAD_FOLDER = 'static/uploads/'
RESULT_FOLDER = 'static/results/'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
os.makedirs(RESULT_FOLDER, exist_ok=True)

os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
os.makedirs(app.config['RESULT_FOLDER'], exist_ok=True)

# Set the PaddleOCR home directory to a writable location
os.environ['PADDLEOCR_HOME'] = os.path.join(app.config['UPLOAD_FOLDER'], '.paddleocr')

@app.route('/')
def index():
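    """Render the landing page with the list of files uploaded in this session."""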
    uploaded_files = session.get('uploaded_files', [])  # Retrieve the session data
    logging.info(f"Accessed index page, uploaded files: {uploaded_files}")
    return render_template('index.html', uploaded_files=uploaded_files)

@app.route('/upload', methods=['POST'])
def upload_file():
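    """Save one or more uploaded files and record their names in the session."""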
    if 'files' not in request.files:
        flash('No file part')
        logging.warning("No file part found in the request")
        return redirect(request.url)

    files = request.files.getlist('files')  # Get multiple files
    if not files or all(file.filename == '' for file in files):
        flash('No selected files')
        logging.warning("No files selected for upload")
        return redirect(request.url)

    uploaded_files = session.get('uploaded_files', [])  # Get the existing session data
    for file in files:
        if file and file.filename:
            filename = secure_filename(file.filename)  # Sanitize the user-supplied name
            file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(file_path)
            uploaded_files.append(filename)  # Add each file to the session's list
            logging.info(f"Uploaded file: {filename}")

    session['uploaded_files'] = uploaded_files  # Save uploaded files in session
    flash('Files successfully uploaded')
    logging.info(f"Files successfully uploaded: {uploaded_files}")
    return redirect(url_for('index'))

@app.route('/remove_file')
def remove_file():
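    """Delete every uploaded file on disk and clear the session's file list."""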
    uploaded_files = session.get('uploaded_files', [])  # Get the uploaded files from the session
    for filename in uploaded_files:
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        if os.path.exists(file_path):  # Check if the file exists before trying to remove it
            os.remove(file_path)
            logging.info(f"Removed file: {filename}")
        else:
            logging.warning(f"File not found for removal: {filename}")

    session.pop('uploaded_files', None)  # Clear the session files
    flash('Files successfully removed')
    logging.info("All uploaded files removed")
    return redirect(url_for('index'))

@app.route('/process', methods=['POST'])
def process_file():
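    """Run OCR on the uploaded files, extract structured data with the Gemma-based
    Data_Extractor, and fall back to the backup NER model if that call fails."""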
    uploaded_files = session.get('uploaded_files', [])  # Get files from the session
    if not uploaded_files:
        flash('No files selected for processing')
        logging.warning("No files selected for processing")
        return redirect(url_for('index'))

    # Create a list of file paths for the extracted text function
    file_paths = [os.path.join(app.config['UPLOAD_FOLDER'], filename) for filename in uploaded_files]
    logging.info(f"Processing files: {file_paths}")

    extracted_text = {}  # Defaults so the fallback path works even if extraction raises early
    processed_Img = {}
    try:
        # Extract text from all images
        extracted_text, processed_Img = extract_text_from_images(file_paths, RESULT_FOLDER)
        logging.info(f"Extracted text: {extracted_text}")
        logging.info(f"Processed images: {processed_Img}")

        # Call the Gemma model API and get the professional data
        llmText = json_to_llm_str(extracted_text)
        logging.info(f"LLM text: {llmText}")
        
        LLMdata = Data_Extractor(llmText)
        logging.info(f"LLM data: {LLMdata}")

    except Exception as e:
        logging.error(f"Error during LLM processing: {e}")
        logging.info("Running backup model...")

        # Default assignment for LLMdata in case of error
        LLMdata = {}
        # Run the backup model in case of an exception
        if extracted_text:
            text = json_to_llm_str(extracted_text)
            LLMdata = NER_Model(text)
            logging.info(f"NER model data: {LLMdata}")
        else:
            logging.warning("No extracted text available for backup model")
    
    cont_data = process_extracted_text(extracted_text)
    logging.info(f"Contextual data: {cont_data}")
    
    # Storing the parsed results in session
    processed_data = process_resume_data(LLMdata, cont_data, extracted_text)
    session['processed_data'] = processed_data
    session['processed_Img'] = processed_Img
    session.modified = True  # Ensure session is updated
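    # Note: Flask's default cookie-based session holds roughly 4 KB, so large OCR/LLM
    # output may require a server-side session store (e.g. Flask-Session).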
    flash('Data processed and analyzed successfully')
    logging.info("Data processed and analyzed successfully")
    return redirect(url_for('result'))

@app.route('/result')
def result():
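    """Display the processed resume data and the processed images."""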
    processed_data = session.get('processed_data', {})  # Retrieve processed data from the session
    processed_Img = session.get('processed_Img', {})  # Retrieve processed images from the session
    logging.info(f"Displaying results: Data - {processed_data}, Images - {processed_Img}")
    return render_template('result.html', data=processed_data, Img=processed_Img)

@app.route('/uploads/<filename>')
def uploaded_file(filename):
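    """Serve a previously uploaded file from the upload folder."""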
    logging.info(f"Serving file: {filename}")
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)

if __name__ == '__main__':
    logging.info("Starting Flask app")
    app.run(debug=True)