import argparse
import shutil
import av
import os
import cv2
import sys
import time
import multiprocessing
import tkinter as tk
from tkinter import filedialog
from concurrent.futures import ThreadPoolExecutor, as_completed
from PIL import Image
import numpy as np
from collections import defaultdict
from waifuc.action import MinSizeFilterAction, PersonSplitAction
from waifuc.export import SaveExporter, TextualInversionExporter
from waifuc.source import LocalSource
from tqdm import tqdm
import logging
import threading


logging.basicConfig(filename='video_image_processing.log', level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')


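# Pipeline overview:
#   1. Find all video files under the input folder.
#   2. Decode each video and keep roughly one frame per scene, preferring the
#      sharpest frame (DCT perceptual hash + Sobel-gradient sharpness).
#   3. Deduplicate the saved frames (pHash buckets + Laplacian sharpness,
#      keeping the sharpest image of each group of near-duplicates).
#   4. Crop person regions with waifuc and move person-free frames aside.

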
def select_folder():
    """
    Return the folder of videos to process.

    The path is currently hard-coded to './anime'; a tkinter folder-selection
    dialog (filedialog.askdirectory) could be plugged in here instead.
    """
    folder_path = './anime'
    return folder_path


def create_output_folder(folder_path, extra_name):
    """
    Create an output folder named after the original folder plus a suffix.

    Args:
        folder_path (str): Path of the original folder.
        extra_name (str): Suffix appended to the folder name.

    Returns:
        str: Path of the newly created folder (inside `folder_path`).
    """
    folder_name = os.path.basename(folder_path)
    new_folder_name = f"{folder_name}{extra_name}"
    new_folder_path = os.path.join(folder_path, new_folder_name)
    os.makedirs(new_folder_path, exist_ok=True)
    return new_folder_path


def find_video_files(folder_path):
    """
    Find all video files in the given folder and its subfolders.

    Args:
        folder_path (str): Folder to search.

    Returns:
        list: Full paths of the video files found.
    """
    video_extensions = ('.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv')
    video_files = []
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            if file.lower().endswith(video_extensions):
                video_files.append(os.path.join(root, file))
    return video_files


def process_video(video_file, new_folder_path, frame_step=5, position=0):
    """
    Process one video file: decode frames, compute perceptual hashes and
    sharpness, and save the frames that pass the filters.

    Args:
        video_file (str): Path of the video file.
        new_folder_path (str): Folder where extracted frames are saved.
        frame_step (int): Only every `frame_step`-th frame is considered.
        position (int): tqdm progress-bar position, so bars do not overlap.
    """
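    # Perceptual hash (pHash): downscale to 32x32, take the 2-D DCT, keep the
    # 8x8 low-frequency block and threshold it at its median.  The result is a
    # 64-element boolean vector; two frames count as similar when the Hamming
    # distance between their vectors is small.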
    def compute_phash(image):
        resized = cv2.resize(image, (32, 32), interpolation=cv2.INTER_AREA)
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        dct = cv2.dct(np.float32(gray))
        dct_low = dct[:8, :8]
        med = np.median(dct_low)
        return (dct_low > med).flatten()

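    # Sharpness proxy for video frames: mean absolute Sobel gradient of the
    # grayscale image.  Frames scoring below `sharpness_threshold` are skipped.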
    def compute_sharpness(image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        grad_x = cv2.Sobel(gray, cv2.CV_16S, 1, 0)
        grad_y = cv2.Sobel(gray, cv2.CV_16S, 0, 1)
        sharpness = cv2.mean(np.abs(grad_x) + np.abs(grad_y))[0]
        return sharpness

    def save_frame(image, frame_count):
        image_name = f'{os.path.splitext(os.path.basename(video_file))[0]}-{frame_count:08d}.jpg'
        image_path = os.path.join(new_folder_path, image_name)
        cv2.imwrite(image_path, image, [cv2.IMWRITE_JPEG_QUALITY, 90])

    try:
        logging.info(f"Start processing video file: {video_file}")
        print(f"Start processing video file: {video_file}")

        container = av.open(video_file)
        video = container.streams.video[0]

        try:
            video.codec_context.options = {'hwaccel': 'auto'}
        except Exception as e:
            print(f"Could not enable hardware acceleration: {e}")
            logging.warning(f"Could not enable hardware acceleration: {e}")

        total_frames = video.frames
        if total_frames == 0:
            # The container does not report a frame count; estimate it from the
            # duration (reported in AV_TIME_BASE units, i.e. microseconds) and
            # the average frame rate.
            container.seek(0)
            total_frames = int(container.duration / 1_000_000 * float(video.average_rate))

        pbar = tqdm(total=total_frames, desc=os.path.basename(video_file), position=position, leave=True, unit="frame")

        start_time = time.time()
        frame_count = 0
        saved_count = 0
        sharpness_threshold = 15

        reference_image = None
        reference_phash = None
        reference_sharpness = None
        reference_count = 0

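        # Scene-based frame selection: keep one "reference" frame per scene.
        # When a frame's hash differs from the reference by more than 10 bits,
        # it is treated as a scene change: the old reference is written to disk
        # and the new frame becomes the reference.  Within the same scene the
        # reference is swapped for any sharper frame that comes along.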
        for frame in container.decode(video=0):
            pbar.update(1)

            if frame_step > 0 and frame_count % frame_step != 0:
                frame_count += 1
                continue

            image = frame.to_ndarray(format='bgr24')
            phash = compute_phash(image)
            sharpness = compute_sharpness(image)

            if sharpness < sharpness_threshold:
                frame_count += 1
                continue

            if reference_image is None:
                # First usable frame becomes the initial reference.
                reference_image = image
                reference_phash = phash
                reference_sharpness = sharpness
                reference_count = frame_count
            else:
                hamming_dist = np.sum(phash != reference_phash)
                if hamming_dist > 10:
                    # Scene change: save the best frame of the previous scene.
                    save_frame(reference_image, reference_count)
                    saved_count += 1

                    reference_image = image
                    reference_phash = phash
                    reference_sharpness = sharpness
                    reference_count = frame_count
                else:
                    # Same scene: keep only the sharpest frame seen so far.
                    if sharpness > reference_sharpness:
                        reference_image = image
                        reference_phash = phash
                        reference_sharpness = sharpness
                        reference_count = frame_count

            frame_count += 1

        if reference_image is not None:
            save_frame(reference_image, reference_count)
            saved_count += 1

        total_time = time.time() - start_time
        average_fps = frame_count / total_time if total_time > 0 else 0
        message = (f'{os.path.basename(video_file)} done: {frame_count} frames in total, '
                   f'{saved_count} frames saved, {average_fps:.2f} frames/s on average')
        print(message)
        logging.info(message)
        pbar.close()
    except Exception as e:
        error_message = f'Error while processing video file {video_file}: {e}'
        print(error_message)
        logging.error(error_message)


def process_images_folder(new_folder_path):
    """
    Post-process the saved frames: find near-duplicate images and keep only
    the sharpest image from each group of similar ones, deleting the rest.

    Args:
        new_folder_path (str): Path of the image folder.
    """
    def get_image_files(folder_path):
        image_files = [os.path.join(folder_path, f) for f in os.listdir(folder_path)
                       if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
        print(f'Found {len(image_files)} images in total')
        logging.info(f'Found {len(image_files)} images in total')
        return image_files

    def process_images(image_files):
        def compute_phash(image):
            resized = cv2.resize(image, (32, 32), interpolation=cv2.INTER_AREA)
            gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
            dct = cv2.dct(np.float32(gray))
            dct_low = dct[:8, :8]
            med = np.median(dct_low)
            return (dct_low > med).flatten()

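        # Sharpness metric for still images: variance of the Laplacian.  Blurry
        # images have fewer strong edges and therefore a lower variance.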
        def compute_sharpness(image):
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            return cv2.Laplacian(gray, cv2.CV_64F).var()

        def process_single_image(image_path):
            image = cv2.imread(image_path)
            if image is None:
                error_message = f"Could not read image file {image_path}"
                print(f"Warning: {error_message}")
                logging.warning(error_message)
                return None
            try:
                phash = compute_phash(image)
                sharpness = compute_sharpness(image)
                return image_path, phash, sharpness
            except Exception as e:
                error_message = f"Error while processing image {image_path}: {e}"
                print(f"Warning: {error_message}")
                logging.warning(error_message)
                return None

        image_data = {}
        start_time = time.time()
        with ThreadPoolExecutor() as executor:
            futures = {executor.submit(process_single_image, img): img for img in image_files}
            for future in tqdm(as_completed(futures), total=len(futures), desc="Computing hashes and sharpness", unit="img"):
                result = future.result()
                if result is not None:
                    image_path, phash, sharpness = result
                    image_data[image_path] = {'phash': phash, 'sharpness': sharpness}

        elapsed_time = time.time() - start_time
        print(f'\nImage analysis finished in {elapsed_time:.2f} s')
        logging.info(f'Image analysis finished in {elapsed_time:.2f} s')
        return image_data

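    # Pairwise hash comparison is restricted to "buckets" keyed by the first
    # 16 bits of each hash.  This keeps the comparison far cheaper than a full
    # O(n^2) scan over the folder, at the cost of missing near-duplicates whose
    # hashes happen to differ within those leading bits.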
    def compare_images(image_data):
        similar_groups = {}
        hash_buckets = defaultdict(list)

        for image_path, data in image_data.items():
            hash_str = ''.join(data['phash'].astype(int).astype(str))
            bucket_key = hash_str[:16]
            hash_buckets[bucket_key].append((image_path, data))

        total_buckets = len(hash_buckets)
        print(f"Split into {total_buckets} hash buckets")
        logging.info(f"Split into {total_buckets} hash buckets")

        for bucket_key, bucket in tqdm(hash_buckets.items(), desc="Comparing hash buckets", unit="bucket"):
            paths = [item[0] for item in bucket]
            hashes = np.array([item[1]['phash'] for item in bucket])
            for i in range(len(paths)):
                for j in range(i + 1, len(paths)):
                    dist = np.sum(hashes[i] != hashes[j])
                    if dist <= 10:
                        similar_groups.setdefault(paths[i], []).append(paths[j])

        return similar_groups

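    # For every group of similar images keep only the sharpest member; images
    # that never matched anything are kept unconditionally.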
    def select_images_to_keep(similar_groups, image_data):
        to_keep = set()
        processed_groups = set()
        for group_key, group in similar_groups.items():
            if group_key in processed_groups:
                continue
            group_with_key = [group_key] + group
            sharpest = max(group_with_key, key=lambda x: image_data[x]['sharpness'])
            to_keep.add(sharpest)
            processed_groups.update(group_with_key)

        all_images = set(image_data.keys())
        images_in_groups = set().union(*[set([k] + v) for k, v in similar_groups.items()])
        images_not_in_groups = all_images - images_in_groups
        to_keep.update(images_not_in_groups)
        return to_keep

    def delete_duplicate_images(similar_groups, to_keep):
        deleted_count = 0
        to_delete = set()

        for group_key, similar_images in similar_groups.items():
            group_with_key = [group_key] + similar_images
            for image_path in group_with_key:
                if image_path not in to_keep:
                    to_delete.add(image_path)

        total_to_delete = len(to_delete)

        for image_path in tqdm(to_delete, desc="Deleting duplicate images", unit="img"):
            try:
                os.remove(image_path)
                deleted_count += 1
            except Exception as e:
                print(f"\nCould not delete {image_path}: {e}")
                logging.error(f"Could not delete {image_path}: {e}")

        print(f'\nDeduplication done: kept {len(to_keep)} images, deleted {deleted_count} duplicate images')
        logging.info(f'Deduplication done: kept {len(to_keep)} images, deleted {deleted_count} duplicate images')

        return deleted_count

    image_files = get_image_files(new_folder_path)
    if not image_files:
        print("No image files found to process.")
        logging.info("No image files found to process.")
        return

    image_data = process_images(image_files)
    if not image_data:
        print("No valid image data to process.")
        logging.info("No valid image data to process.")
        return

    similar_groups = compare_images(image_data)
    to_keep = select_images_to_keep(similar_groups, image_data)
    deleted_count = delete_duplicate_images(similar_groups, to_keep)


def waifuc_split(new_folder_path, split_path):
    """
    Use the waifuc library to crop person regions out of the images.

    Args:
        new_folder_path (str): Folder with the original images.
        split_path (str): Folder where the cropped images are saved.
    """
    s = LocalSource(new_folder_path)
    s = s.attach(
        PersonSplitAction(), MinSizeFilterAction(300),
    )
    s.export(SaveExporter(split_path, no_meta=True))


def process_split_images(new_folder_path, split_path):
    """
    Move original images in which no person was detected into a separate
    "-nohuman" folder.

    Args:
        new_folder_path (str): Folder with the original images.
        split_path (str): Folder with the cropped (split) images.
    """
    nohuman_path = create_output_folder(new_folder_path, "-nohuman")

    original_images = [f for f in os.listdir(new_folder_path)
                       if os.path.isfile(os.path.join(new_folder_path, f)) and
                       f.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))]

    split_images = [f for f in os.listdir(split_path)
                    if f.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))]

    total_images = len(original_images)
    moved_count = 0

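    # An original frame counts as containing a person when the split folder
    # holds at least one crop whose name starts with '<original stem>_person'.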
    for original_image in tqdm(original_images, desc="Moving person-free images", unit="img"):
        base_name = os.path.splitext(original_image)[0]
        has_person = any(split_image.startswith(base_name + '_person') for split_image in split_images)

        if not has_person:
            source_path = os.path.join(new_folder_path, original_image)
            dest_path = os.path.join(nohuman_path, original_image)
            try:
                shutil.move(source_path, dest_path)
                moved_count += 1
            except Exception as e:
                print(f"\nCould not move {source_path}: {e}")
                logging.error(f"Could not move {source_path}: {e}")

    print(f'\nDone. Checked {total_images} images, moved {moved_count} person-free images to {nohuman_path}')
    logging.info(f'Done. Checked {total_images} images, moved {moved_count} person-free images to {nohuman_path}')


def main():
    """
    Entry point: run the whole video-to-dataset pipeline.
    """
    folder_path = select_folder()
    if not folder_path:
        print("No folder selected, exiting.")
        logging.error("No folder selected, exiting.")
        return

    video_files = find_video_files(folder_path)
    if not video_files:
        print("No video files found in the selected folder, exiting.")
        logging.error("No video files found in the selected folder, exiting.")
        return

    new_folder_path = create_output_folder(folder_path, "-Eng_SS")

    max_workers = min(32, len(video_files))

    print(f"Processing {len(video_files)} video files with {max_workers} threads...")
    logging.info(f"Processing {len(video_files)} video files with {max_workers} threads...")

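    # One worker thread per video (capped at 32).  Each worker drives its own
    # tqdm bar; the enumerate index is passed as the bar's `position` so the
    # bars render on separate lines.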
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {
            executor.submit(process_video, vf, new_folder_path, 5, pos): vf
            for pos, vf in enumerate(video_files)
        }

        for future in as_completed(futures):
            video_file = futures[future]
            try:
                future.result()
            except Exception as e:
                error_message = f"Exception while processing video file {video_file}: {e}"
                print(error_message)
                logging.error(error_message)

    print("All video files processed.")
    logging.info("All video files processed.")

    process_images_folder(new_folder_path)

    split_path = create_output_folder(new_folder_path, "-split")

    waifuc_split(new_folder_path, split_path)

    process_split_images(new_folder_path, split_path)


if __name__ == "__main__":
    main()