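"""Train a ResNet image classifier on the jk_zfls icon dataset, then fit an
OpenMax head on the trained features for open-set (unknown-class) recognition.
"""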
import copy
import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import wandb
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

from models.resnet import resnet18, resnet34, resnet50
from models.openmax import OpenMax
from utils.data_stats import calculate_dataset_stats, load_dataset_stats
from utils.eval_utils import evaluate_known_classes, evaluate_openmax


class GameDataset(Dataset):
    """Loads 50x50 RGB PNG images from per-class subdirectories named 00, 01, ..."""

    def __init__(self, data_dir, num_labels=20, transform=None):
        self.data_dir = data_dir
        self.transform = transform
        self.images = []
        self.labels = []
        self.image_paths = []

        if not os.path.exists(data_dir):
            raise ValueError(f"Data directory {data_dir} does not exist")

        for class_dir in range(num_labels):
            class_path = os.path.join(data_dir, f"{class_dir:02d}")
            if not os.path.exists(class_path):
                continue
            for img_name in os.listdir(class_path):
                if not img_name.endswith('.png'):
                    continue
                img_path = os.path.join(class_path, img_name)
                try:
                    # convert('RGB') normalizes grayscale/RGBA inputs to
                    # 3 channels before the shape check.
                    img = np.array(Image.open(img_path).convert('RGB'))
                    if img.shape != (50, 50, 3):
                        print(f"Skipping {img_path} due to invalid shape: {img.shape}")
                        continue
                    self.images.append(img)
                    self.labels.append(class_dir)
                    self.image_paths.append(img_path)
                except Exception as e:
                    print(f"Error loading {img_path}: {e}")
                    continue

        self.images = np.array(self.images)
        self.labels = np.array(self.labels)
        print(f"Loaded {len(self.images)} images from {data_dir}")

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        image = self.images[idx]
        label = self.labels[idx]
        path = self.image_paths[idx]

        if self.transform:
            image = self.transform(image)

        return image, label, path
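

# Expected on-disk layout, inferred from the loading loop above (illustrative,
# not a published spec):
#
#   <data_dir>/
#       00/*.png    # 50x50 RGB images for class 0
#       01/*.png
#       ...
#       19/*.png
#
# The eval split (round0_eval) apparently adds a 21st directory, 20/, holding
# unknown-class samples; see the num_labels=21 dataset below.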


def train(num_epochs=20, batch_size=256, learning_rate=0.001, dropout_rate=0.3,
          patience=10, model_type='resnet34'):
    # Imported locally; only needed inside train().
    from post_train import collect_features

    os.makedirs('models', exist_ok=True)
    os.makedirs('wandb_logs', exist_ok=True)
    images_path = os.path.join('jk_zfls', 'round0_train')

    try:
        mean, std = load_dataset_stats()
        print("Loaded pre-calculated dataset statistics")
    except FileNotFoundError:
        print("No cached dataset statistics found, calculating them...")
        mean, std = calculate_dataset_stats(images_path)

    wandb.init(
        project="jk_zfls",
        name=f"{model_type}-training",
        config={
            "learning_rate": learning_rate,
            "batch_size": batch_size,
            "epochs": num_epochs,
            "model": model_type,
            "num_classes": 20,
        },
        dir="./wandb_logs",
    )

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # RandomAffine runs after ToTensor (the dataset yields numpy arrays), so the
    # fill value must be in the tensor's [0, 1] range, not 0-255.
    fill_value = tuple(float(m) for m in mean)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.RandomAffine(
            degrees=15,
            translate=(0.1, 0.1),
            scale=(0.9, 1.1),
            fill=fill_value
        ),
        transforms.Normalize(mean=mean, std=std)
    ])

    val_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)
    ])

    # The eval split carries one extra class directory (label 20) with
    # unknown-class samples for the open-set evaluation further down.
    train_dataset = GameDataset('jk_zfls/round0_train', num_labels=20, transform=transform)
    val_dataset = GameDataset('jk_zfls/round0_eval', num_labels=21, transform=val_transform)

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4, pin_memory=True)

    if model_type == 'resnet18':
        model = resnet18(num_classes=20, dropout_rate=dropout_rate)
    elif model_type == 'resnet34':
        model = resnet34(num_classes=20, dropout_rate=dropout_rate)
    elif model_type == 'resnet50':
        model = resnet50(num_classes=20, dropout_rate=dropout_rate)
    else:
        raise ValueError(f"Unsupported model type: {model_type}")

    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    # The LinearLR scheduler below supplies the 0.1x warmup factor, so the base
    # learning rate is passed through unscaled.
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-3)

    # Linear warmup over the first 5 epochs, stepped once per batch below.
    num_warmup_steps = len(train_loader) * 5

    warmup_scheduler = optim.lr_scheduler.LinearLR(
        optimizer,
        start_factor=0.1,
        end_factor=1.0,
        total_iters=num_warmup_steps
    )

    # Steps on validation accuracy, so mode='max'.
    reduce_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='max',
        factor=0.5,
        patience=5,
        min_lr=1e-6
    )
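
    # Net schedule: the LR ramps linearly from 0.1x to 1x of the base rate over
    # the first 5 epochs, then halves whenever validation accuracy stalls for
    # 5 consecutive epochs, floored at 1e-6.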

    patience_counter = 0
    best_params = {
        'epoch': None,
        'model_state_dict': None,
        'optimizer_state_dict': None,
        'loss': None,
        'best_val_acc': 0
    }

    for epoch in range(num_epochs):
        model.train()
        total_loss = 0

        for batch_idx, (images, labels, paths) in enumerate(train_loader):
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            logits = model(images)
            loss = criterion(logits, labels)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

            if batch_idx % 10 == 0:
                print(f'Epoch: {epoch}, Batch: {batch_idx}, Loss: {loss.item():.4f}')

            # Step the warmup scheduler per batch until warmup is over.
            if epoch * len(train_loader) + batch_idx < num_warmup_steps:
                warmup_scheduler.step()

        train_loss = total_loss / len(train_loader)

        val_loss, val_acc, val_errors = evaluate_known_classes(model, val_loader, criterion, device)

        wandb.log({
            'epoch': epoch,
            'train_loss': train_loss,
            'val_loss': val_loss,
            'val_accuracy': val_acc
        })

        print(f'Epoch {epoch}:')
        print(f'Train Loss = {train_loss:.4f}, Val Loss = {val_loss:.4f}, Val Accuracy = {val_acc:.2f}%')

        reduce_scheduler.step(val_acc)

        current_lr = optimizer.param_groups[0]['lr']
        print(f'Current learning rate: {current_lr:.2e}')

        if val_acc > best_params['best_val_acc']:
            patience_counter = 0
            best_params.update({
                'epoch': epoch,
                # Deep-copy so later epochs don't mutate the saved best weights:
                # state_dict() returns references to the live tensors.
                'model_state_dict': copy.deepcopy(model.state_dict()),
                'optimizer_state_dict': copy.deepcopy(optimizer.state_dict()),
                'loss': val_loss,
                'best_val_acc': val_acc
            })
        else:
            patience_counter += 1
            print(f'Validation accuracy did not improve. Patience: {patience_counter}/{patience}')

        if patience_counter >= patience:
            print(f"\nEarly stopping triggered! No improvement for {patience} consecutive epochs.")
            break

        if val_acc >= 100:
            print(f'Achieved 100% accuracy at epoch {epoch}')
            break

    print("Saving best model parameters...")
    torch.save(best_params, f'models/{model_type}_{best_params["best_val_acc"]:.2f}.pth')

    print("Collecting features from best model for OpenMax training...")
    model.load_state_dict(best_params['model_state_dict'])
    model.eval()
    features, labels = collect_features(model, train_loader, device, return_logits=False)

    openmax = OpenMax(num_classes=20)
    openmax.fit(features, labels)
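    # Presumably, following the standard OpenMax recipe (Bendale & Boult, 2016),
    # fit() estimates per-class mean activation vectors from the training
    # features and fits Weibull tail models to the distances from those means.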

    # Saves the whole OpenMax object via pickle, so loading it later requires
    # the models.openmax module to be importable.
    torch.save(openmax, 'models/openmax.pth')
    print("OpenMax model saved")

    print("Evaluating OpenMax on the validation split...")
    val_features, val_logits, val_labels = collect_features(model, val_loader, device, return_logits=True)

    overall_acc, known_acc, unknown_acc = evaluate_openmax(openmax, val_features, val_logits, val_labels, multiplier=0.5)
    print(f"Multiplier: 0.5, Overall Accuracy: {overall_acc:.2f}%")
    print(f"Known-class Accuracy: {known_acc:.2f}%, Unknown-class Accuracy: {unknown_acc:.2f}%")

    wandb.finish()


if __name__ == '__main__':
    train(num_epochs=100, batch_size=64, learning_rate=0.001, dropout_rate=0.3, patience=20, model_type='resnet50')