|
""" |
|
通用模型训练工具 |
|
|
|
提供了模型训练、评估、保存等功能,支持: |
|
1. 训练进度可视化 |
|
2. 日志记录 |
|
3. 模型检查点保存 |
|
4. 嵌入向量收集 |
|
""" |
|
|
|
import torch |
|
import torch.nn as nn |
|
import torch.optim as optim |
|
import time |
|
import os |
|
import json |
|
import logging |
|
import numpy as np |
|
from tqdm import tqdm |
|
from datetime import datetime |
|
|
|
|
|
def setup_logger(log_file): |
|
"""配置日志记录器,如果日志文件存在则覆盖 |
|
|
|
Args: |
|
log_file: 日志文件路径 |
|
|
|
Returns: |
|
logger: 配置好的日志记录器 |
|
""" |
|
|
|
logger = logging.getLogger('train') |
|
logger.setLevel(logging.INFO) |
|
|
|
|
|
    # Remove any existing handlers so repeated calls do not duplicate output.
    if logger.hasHandlers():
        logger.handlers.clear()
|
|
|
|
|
    # File handler (mode='w' overwrites any previous log file).
    fh = logging.FileHandler(log_file, mode='w')
    fh.setLevel(logging.INFO)

    # Console handler.
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
|
|
|
|
|
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') |
|
fh.setFormatter(formatter) |
|
ch.setFormatter(formatter) |
|
|
|
|
|
logger.addHandler(fh) |
|
logger.addHandler(ch) |
|
|
|
return logger |
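
# Example usage (a minimal sketch; 'logs/train.log' is a hypothetical path):
#
#     logger = setup_logger('logs/train.log')
#     logger.info('training started')
#
# Handlers are cleared on each call, so re-running setup in the same process
# will not duplicate log lines.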
|
|
|
def collect_embeddings(model, dataloader, device): |
|
"""使用钩子机制收集模型中间层的特征向量 |
|
Args: |
|
model: 模型 |
|
dataloader: 数据加载器 |
|
device: 设备 |
|
|
|
Returns: |
|
embeddings: 嵌入向量列表 |
|
indices: 数据索引列表 |
|
""" |
|
embeddings = [] |
|
indices = [] |
|
activation = {} |
|
|
|
    def get_activation(name):
        # Record only the first activation seen for each layer per forward pass.
        def hook(model, input, output):
            if name not in activation or activation[name] is None:
                activation[name] = output.detach()
        return hook
|
|
|
|
|
    # Hook every convolutional, linear, and sequential module.
    handles = []
    for name, module in model.named_modules():
        if isinstance(module, (nn.Conv2d, nn.Linear, nn.Sequential)):
            handles.append(module.register_forward_hook(get_activation(name)))
|
|
|
    # Run one probe batch through the model to discover the available activations.
    model.eval()
    with torch.no_grad():
        inputs, _ = next(iter(dataloader))
        inputs = inputs.to(device)
        _ = model(inputs)
|
|
|
|
|
    # Pick the layer whose flattened output has the largest dimension.
    max_dim = 0
    max_layer_name = None
    for name, feat in activation.items():
        if feat is None or len(feat.shape) < 2:
            continue
        flat_dim = feat.numel() // feat.shape[0]
        if flat_dim > max_dim:
            max_dim = flat_dim
            max_layer_name = name

    if max_layer_name is None:
        raise RuntimeError('No suitable layer found to collect embeddings from')
|
|
|
|
|
activation.clear() |
|
|
|
|
|
    # Collect embeddings batch by batch (no_grad avoids building the autograd graph).
    with torch.no_grad():
        for batch_idx, (inputs, _) in enumerate(dataloader):
            inputs = inputs.to(device)
            _ = model(inputs)

            features = activation[max_layer_name]
            flat_features = torch.flatten(features, start_dim=1)
            embeddings.append(flat_features.cpu().numpy())
            indices.extend(range(batch_idx * dataloader.batch_size,
                                 min((batch_idx + 1) * dataloader.batch_size,
                                     len(dataloader.dataset))))

            # Clear so the hooks record fresh activations for the next batch.
            activation.clear()
|
|
|
|
|
    # Remove all hooks so they do not affect later training.
    for handle in handles:
        handle.remove()
|
|
|
if len(embeddings) > 0: |
|
return np.vstack(embeddings), indices |
|
else: |
|
return np.array([]), indices |
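
# Example usage (a minimal sketch; `model` and `plain_loader` are assumptions,
# with plain_loader built with shuffle=False so indices line up):
#
#     embeddings, indices = collect_embeddings(model, plain_loader, 'cuda:0')
#     print(embeddings.shape)  # (num_samples, flattened_feature_dim)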
|
|
|
def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
                save_dir='./checkpoints', model_name='model', save_type='0'):
    """Generic model training loop.

    Args:
        model: The model to train.
        trainloader: Training data loader.
        testloader: Test data loader.
        epochs: Number of training epochs.
        lr: Learning rate.
        device: Training device in the form 'cuda:N', where N is the GPU index.
        save_dir: Directory for model checkpoints.
        model_name: Name of the model (used in log messages).
        save_type: Training variant: '0' = standard, '1' = data augmentation,
            '2' = backdoor.
    """
|
|
|
    # Resolve the requested device, falling back gracefully.
    if not torch.cuda.is_available():
        print('CUDA is not available; falling back to CPU')
        device = 'cpu'
    elif not device.startswith('cuda:'):
        device = 'cuda:0'

    if device.startswith('cuda:'):
        gpu_id = int(device.split(':')[1])
        if gpu_id >= torch.cuda.device_count():
            print(f'GPU {gpu_id} is not available; falling back to GPU 0')
            device = 'cuda:0'
|
|
|
|
|
    os.makedirs(save_dir, exist_ok=True)

    # One log file per training variant, stored in the sibling 'code' directory.
    log_names = {'0': 'train.log', '1': 'data_aug_train.log', '2': 'backdoor_train.log'}
    if save_type not in log_names:
        raise ValueError(f'Unknown save_type: {save_type!r}')
    log_file = os.path.join(os.path.dirname(save_dir), 'code', log_names[save_type])
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    logger = setup_logger(log_file)
|
|
|
|
|
    # Checkpoints for this variant go into a save_type-specific subdirectory.
    save_dir = os.path.join(save_dir, save_type)
    os.makedirs(save_dir, exist_ok=True)
|
|
|
|
|
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
    # Anneal over the full run rather than a hard-coded 200 epochs.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
|
|
|
|
|
model = model.to(device) |
|
best_acc = 0 |
|
start_time = time.time() |
|
|
|
    logger.info(f'Starting training for {model_name}')
    logger.info(f'Epochs: {epochs}, learning rate: {lr}, device: {device}')
|
|
|
for epoch in range(epochs): |
|
|
|
model.train() |
|
train_loss = 0 |
|
correct = 0 |
|
total = 0 |
|
|
|
train_pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]') |
|
for batch_idx, (inputs, targets) in enumerate(train_pbar): |
|
inputs, targets = inputs.to(device), targets.to(device) |
|
optimizer.zero_grad() |
|
outputs = model(inputs) |
|
loss = criterion(outputs, targets) |
|
loss.backward() |
|
optimizer.step() |
|
|
|
train_loss += loss.item() |
|
_, predicted = outputs.max(1) |
|
total += targets.size(0) |
|
correct += predicted.eq(targets).sum().item() |
|
|
|
|
|
train_pbar.set_postfix({ |
|
'loss': f'{train_loss/(batch_idx+1):.3f}', |
|
'acc': f'{100.*correct/total:.2f}%' |
|
}) |
|
|
|
|
|
if batch_idx % 100 == 0: |
|
logger.info(f'Epoch: {epoch+1} | Batch: {batch_idx} | ' |
|
f'Loss: {train_loss/(batch_idx+1):.3f} | ' |
|
f'Acc: {100.*correct/total:.2f}%') |
|
|
|
|
|
model.eval() |
|
test_loss = 0 |
|
correct = 0 |
|
total = 0 |
|
|
|
test_pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]') |
|
with torch.no_grad(): |
|
for batch_idx, (inputs, targets) in enumerate(test_pbar): |
|
inputs, targets = inputs.to(device), targets.to(device) |
|
outputs = model(inputs) |
|
loss = criterion(outputs, targets) |
|
|
|
test_loss += loss.item() |
|
_, predicted = outputs.max(1) |
|
total += targets.size(0) |
|
correct += predicted.eq(targets).sum().item() |
|
|
|
|
|
test_pbar.set_postfix({ |
|
'loss': f'{test_loss/(batch_idx+1):.3f}', |
|
'acc': f'{100.*correct/total:.2f}%' |
|
}) |
|
|
|
|
|
acc = 100.*correct/total |
|
logger.info(f'Epoch: {epoch+1} | Test Loss: {test_loss/(batch_idx+1):.3f} | ' |
|
f'Test Acc: {acc:.2f}%') |
|
|
|
|
|
        # Save a checkpoint after every epoch.
        epoch_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
        os.makedirs(epoch_dir, exist_ok=True)
|
|
|
|
|
model_path = os.path.join(epoch_dir, 'subject_model.pth') |
|
torch.save(model.state_dict(), model_path) |
|
|
|
|
|
        # Collect embeddings on the training set and save them with their indices.
        embeddings, indices = collect_embeddings(model, trainloader, device)
        np.save(os.path.join(epoch_dir, 'train_data.npy'), embeddings)
        with open(os.path.join(epoch_dir, 'index.json'), 'w') as f:
            json.dump(indices, f)
|
|
|
|
|
        # Keep a separate copy of the best model so far.
        if acc > best_acc:
            logger.info(f'Best accuracy: {acc:.2f}%')
            best_dir = os.path.join(save_dir, 'best')
            os.makedirs(best_dir, exist_ok=True)
|
|
|
best_model_path = os.path.join(best_dir, 'subject_model.pth') |
|
torch.save(model.state_dict(), best_model_path) |
|
best_acc = acc |
|
|
|
scheduler.step() |
|
|
|
|
|
    total_time = time.time() - start_time
    logger.info(f'Training complete! Total time: {total_time/3600:.2f} hours')
    logger.info(f'Best test accuracy: {best_acc:.2f}%')
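
# Example usage (a sketch; resnet18 and the loaders are assumptions, not part
# of this module):
#
#     from torchvision.models import resnet18
#     model = resnet18(num_classes=10)
#     train_model(model, trainloader, testloader, epochs=100, lr=0.1,
#                 device='cuda:0', save_dir='./checkpoints',
#                 model_name='resnet18', save_type='0')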
|
|
|
def train_model_data_augmentation(model, epochs=200, lr=0.1, device='cuda:0',
                                  save_dir='./checkpoints', model_name='model_augmented',
                                  batch_size=128, num_workers=2):
    """Train a model with data augmentation.

    Augmentation scheme (in pipeline order, see the transform below):
    1. RandomCrop: pad by 4 pixels, then crop back to the original size, for
       positional diversity.
    2. RandomHorizontalFlip: random horizontal flips for orientation diversity.
    3. RandomRotation: random rotation of up to 15 degrees for angular diversity.
    4. ColorJitter: jitter brightness, contrast, saturation, and hue.
    5. RandomPerspective: random perspective warp for viewpoint diversity.
    6. RandomErasing: randomly erase a region to simulate occlusion.

    Args:
        model: The model to train.
        epochs: Number of training epochs.
        lr: Learning rate.
        device: Training device.
        save_dir: Directory for model checkpoints.
        model_name: Name of the model.
        batch_size: Batch size.
        num_workers: Number of data-loading workers.
    """
|
import torchvision.transforms as transforms |
|
from .dataset_utils import get_cifar10_dataloaders |
|
|
|
|
|
    # Note: RandomErasing operates on tensors, so it must come after ToTensor.
    transform_train = transforms.Compose([
|
transforms.RandomCrop(32, padding=4), |
|
transforms.RandomHorizontalFlip(), |
|
transforms.RandomRotation(15), |
|
transforms.ColorJitter( |
|
brightness=0.2, |
|
contrast=0.2, |
|
saturation=0.2, |
|
hue=0.1 |
|
), |
|
transforms.RandomPerspective(distortion_scale=0.2, p=0.5), |
|
transforms.ToTensor(), |
|
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), |
|
transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3)) |
|
]) |
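    # The Normalize constants above are the commonly used CIFAR-10 per-channel
    # mean/std statistics.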
|
|
|
|
|
trainloader, testloader = get_cifar10_dataloaders(batch_size, num_workers) |
|
|
|
|
|
    # Swap in the augmented transform and rebuild the training loader.
    trainset = trainloader.dataset
    trainset.transform = transform_train
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
|
|
|
|
|
    train_model(model, trainloader, testloader, epochs, lr, device, save_dir,
                model_name, save_type='1')
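
# Example usage (a sketch under the same assumptions as above):
#
#     train_model_data_augmentation(resnet18(num_classes=10), epochs=100,
#                                   save_dir='./checkpoints')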
|
|
|
def train_model_backdoor(model, poison_ratio=0.1, target_label=0, epochs=200, lr=0.1,
                         device='cuda:0', save_dir='./checkpoints', model_name='model_backdoor',
                         batch_size=128, num_workers=2):
    """Train a model with a backdoor attack.

    Attack scheme:
    1. Label flipping: relabel a chosen fraction of samples to the target label.
    2. Trigger pattern: stamp a 4x4 white square into the bottom-right corner
       of each poisoned sample.
    3. Validation strategy:
       - Validate on clean data (to confirm accuracy on normal samples).
       - Validate on triggered data (to measure the attack success rate).

    Args:
        model: The model to train.
        poison_ratio: Fraction of training samples to poison.
        target_label: Target label for poisoned samples.
        epochs: Number of training epochs.
        lr: Learning rate.
        device: Training device.
        save_dir: Directory for model checkpoints.
        model_name: Name of the model.
        batch_size: Batch size.
        num_workers: Number of data-loading workers.
    """
|
    from .dataset_utils import get_cifar10_dataloaders
|
|
|
|
|
trainloader, testloader = get_cifar10_dataloaders(batch_size, num_workers) |
|
|
|
|
|
    # Choose which training samples to poison.
    trainset = trainloader.dataset
    num_poison = int(len(trainset) * poison_ratio)
    poison_indices = np.random.choice(len(trainset), num_poison, replace=False)
|
|
|
|
|
    # Keep pristine copies so the dataset can be restored after training.
    original_targets = trainset.targets.copy()
    original_data = trainset.data.copy()
|
|
|
|
|
    # 4x4 white square stamped into the bottom-right corner of each poisoned image.
    trigger_pattern = np.ones((4, 4, 3), dtype=np.uint8) * 255
    for idx in poison_indices:
        trainset.targets[idx] = target_label
        trainset.data[idx, -4:, -4:] = trigger_pattern
|
|
|
|
|
poisoned_trainloader = torch.utils.data.DataLoader( |
|
trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers) |
|
|
|
|
|
    train_model(model, poisoned_trainloader, testloader, epochs, lr, device, save_dir,
                model_name, save_type='2')
|
|
|
|
|
    # Restore the clean dataset for validation.
    trainset.targets = original_targets
    trainset.data = original_data
|
|
|
|
|
validation_loader = torch.utils.data.DataLoader( |
|
trainset, batch_size=batch_size, shuffle=False, num_workers=num_workers) |
|
|
|
|
|
    # Measure accuracy on the restored (clean) training data.
    model.eval()
|
correct = 0 |
|
total = 0 |
|
with torch.no_grad(): |
|
for inputs, targets in validation_loader: |
|
inputs, targets = inputs.to(device), targets.to(device) |
|
outputs = model(inputs) |
|
_, predicted = outputs.max(1) |
|
total += targets.size(0) |
|
correct += predicted.eq(targets).sum().item() |
|
|
|
clean_accuracy = 100. * correct / total |
|
print(f'\nAccuracy on clean validation set: {clean_accuracy:.2f}%') |
|
|
|
|
|
    # Build a triggered copy of every sample and relabel it to the target.
    trigger_validation = trainset.data.copy()
    trigger_validation_targets = np.array([target_label] * len(trainset))
    trigger_validation[:, -4:, -4:] = trigger_pattern

    # Convert HWC uint8 images to normalized NCHW float tensors. Note that
    # torch.nn.functional.normalize does Lp normalization, so the per-channel
    # mean/std normalization is done manually here.
    trigger_validation = torch.tensor(trigger_validation).float().permute(0, 3, 1, 2) / 255.0
    mean = torch.tensor((0.4914, 0.4822, 0.4465)).view(1, 3, 1, 1)
    std = torch.tensor((0.2023, 0.1994, 0.2010)).view(1, 3, 1, 1)
    trigger_validation = (trigger_validation - mean) / std
|
|
|
|
|
    # Evaluate in chunks; the full triggered set may not fit in GPU memory.
    correct = 0
    total = 0
    eval_batch_size = 100
    with torch.no_grad():
        for i in range(0, len(trigger_validation), eval_batch_size):
            inputs = trigger_validation[i:i+eval_batch_size].to(device)
            targets = torch.tensor(trigger_validation_targets[i:i+eval_batch_size]).to(device)
            outputs = model(inputs)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
|
|
|
attack_success_rate = 100. * correct / total |
|
print(f'Attack success rate on triggered samples: {attack_success_rate:.2f}%') |
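
# Example usage (a sketch; poisons 10% of the training set toward label 0):
#
#     train_model_backdoor(resnet18(num_classes=10), poison_ratio=0.1,
#                          target_label=0, epochs=100,
#                          save_dir='./checkpoints')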