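"""Train a small encoder-decoder Transformer language model.

The model combines rotary positional encodings, multi-head attention and a
mixture-of-experts feed-forward block, and is trained on a HuggingFace dataset
with mixed precision, gradient accumulation, and an entropy/variance-regularized
cross-entropy loss.
"""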
import argparse
import math
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.amp import autocast, GradScaler

from datasets import load_dataset
from transformers import AutoTokenizer

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def parse_args():
    parser = argparse.ArgumentParser(description='Train Transformer model with advanced features.')
    parser.add_argument('--model_name', type=str, default='gpt2', help='Pretrained model name or path')
    parser.add_argument('--dataset_name', type=str, default='wikitext', help='Dataset name from HuggingFace Datasets')
    parser.add_argument('--dataset_config', type=str, default='wikitext-2-raw-v1', help='Dataset configuration name')
    parser.add_argument('--batch_size', type=int, default=8, help='Batch size')
    parser.add_argument('--num_epochs', type=int, default=3, help='Number of epochs')
    parser.add_argument('--max_length', type=int, default=128, help='Maximum sequence length')
    parser.add_argument('--accumulation_steps', type=int, default=4, help='Gradient accumulation steps')
    parser.add_argument('--learning_rate', type=float, default=1e-4, help='Learning rate')
    parser.add_argument('--weight_decay', type=float, default=1e-2, help='Weight decay')
    parser.add_argument('--alpha', type=float, default=0.1, help='Entropy regularization weight')
    parser.add_argument('--beta', type=float, default=0.1, help='Variance regularization weight')
    parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Max gradient norm for clipping')
    parser.add_argument('--save_dir', type=str, default='./models', help='Directory to save the models')
    parser.add_argument('--temperature', type=float, default=1.0, help='Temperature parameter for entropy and variance')
    args = parser.parse_args()
    return args

def load_data(args, tokenizer):
    dataset = load_dataset(args.dataset_name, args.dataset_config)

    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    def tokenize_function(examples):
        return tokenizer(examples['text'], truncation=True, max_length=args.max_length)

    tokenized_datasets = dataset.map(
        tokenize_function,
        batched=True,
        num_proc=4,
        remove_columns=dataset['train'].column_names,
    )

    block_size = args.max_length

    def group_texts(examples):
        # Concatenate all token lists, then split them into fixed-size blocks.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples['input_ids'])
        # Drop the remainder so every block holds exactly block_size tokens.
        total_length = (total_length // block_size) * block_size
        result = {
            k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
            for k, t in concatenated_examples.items()
        }
        result['labels'] = result['input_ids'].copy()
        return result

    lm_datasets = tokenized_datasets.map(
        group_texts,
        batched=True,
        num_proc=4,
    )

    train_dataset = lm_datasets['train']
    eval_dataset = lm_datasets['validation'] if 'validation' in lm_datasets else lm_datasets['test']

    data_collator = lambda data: {
        'input_ids': torch.tensor([f['input_ids'] for f in data], dtype=torch.long),
        'labels': torch.tensor([f['labels'] for f in data], dtype=torch.long)
    }

    train_loader = DataLoader(train_dataset, shuffle=True, batch_size=args.batch_size, collate_fn=data_collator)
    eval_loader = DataLoader(eval_dataset, shuffle=False, batch_size=args.batch_size, collate_fn=data_collator)

    return train_loader, eval_loader

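# Each loader yields dicts with 'input_ids' and 'labels', both LongTensors of
# shape (batch_size, args.max_length); because group_texts emits only full
# blocks, no padding (and hence no attention mask) is needed at this stage.
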
class RotaryPositionalEncoding(nn.Module):
    def __init__(self, d_model):
        super(RotaryPositionalEncoding, self).__init__()
        inv_freq = 1.0 / (10000 ** (torch.arange(0, d_model, 2).float() / d_model))
        self.register_buffer('inv_freq', inv_freq)

    def forward(self, x):
        seq_len, batch_size, _ = x.size()
        t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
        sinusoid_inp = torch.einsum("i,j->ij", t, self.inv_freq)
        sin = sinusoid_inp.sin().unsqueeze(1)
        cos = sinusoid_inp.cos().unsqueeze(1)

        x1 = x[..., 0::2]
        x2 = x[..., 1::2]

        x_rotated = torch.zeros_like(x)
        x_rotated[..., 0::2] = x1 * cos - x2 * sin
        x_rotated[..., 1::2] = x1 * sin + x2 * cos

        return x_rotated

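# Shape note: forward() expects x of shape (seq_len, batch, d_model) and rotates
# each even/odd channel pair (x1, x2) by an angle t * inv_freq:
#     x1' = x1 * cos - x2 * sin,    x2' = x1 * sin + x2 * cos.
# In this script the rotation is applied to the token embeddings directly rather
# than to the per-head queries and keys.
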
class MultiHeadAttention(nn.Module):
    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"
        self.d_k = d_model // num_heads
        self.num_heads = num_heads
        self.linear_q = nn.Linear(d_model, d_model)
        self.linear_k = nn.Linear(d_model, d_model)
        self.linear_v = nn.Linear(d_model, d_model)
        self.linear_out = nn.Linear(d_model, d_model)

    def forward(self, query, key, value, mask=None):
        batch_size = query.size(0)
        query = self.linear_q(query).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        key = self.linear_k(key).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        value = self.linear_v(value).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)

        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.d_k)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)
        attn = F.softmax(scores, dim=-1)
        output = torch.matmul(attn, value)

        output = output.transpose(1, 2).contiguous().view(batch_size, -1, self.num_heads * self.d_k)
        return self.linear_out(output)

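# Standard scaled dot-product attention over num_heads heads of width d_k.
# When a mask is supplied, positions where mask == 0 receive a score of -1e9
# before the softmax, so the mask must broadcast against
# (batch, num_heads, query_len, key_len).
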
class MoE(nn.Module):
    def __init__(self, d_model, num_experts, d_ff, top_k=2, dropout=0.1):
        super(MoE, self).__init__()
        self.num_experts = num_experts
        self.top_k = top_k
        self.experts = nn.ModuleList([
            nn.Sequential(
                nn.Linear(d_model, d_ff),
                nn.GELU() if i % 2 == 0 else nn.SiLU(),
                nn.Linear(d_ff, d_model)
            )
            for i in range(num_experts)
        ])
        self.gate = nn.Linear(d_model, num_experts)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        batch_size, seq_len, d_model = x.size()

        # Gate scores select the top_k experts per token; their weights are
        # renormalized with a softmax over just the selected experts.
        gate_scores = self.gate(x)
        top_k_scores, top_k_indices = torch.topk(gate_scores, self.top_k, dim=-1)
        top_k_scores = F.softmax(top_k_scores, dim=-1)

        output = torch.zeros_like(x)

        x_flat = x.view(-1, d_model)
        output_flat = output.view(-1, d_model)
        top_k_indices_flat = top_k_indices.view(-1, self.top_k)
        top_k_scores_flat = top_k_scores.view(-1, self.top_k)

        for k in range(self.top_k):
            expert_idx_flat = top_k_indices_flat[:, k]
            expert_scores_flat = top_k_scores_flat[:, k]
            # Process all tokens routed to expert e in one batch and add the
            # gate-weighted expert output back at those positions.
            for e in range(self.num_experts):
                mask = (expert_idx_flat == e)
                if mask.any():
                    x_masked = x_flat[mask]
                    expert_output = self.experts[e](x_masked)
                    output_flat[mask] += expert_scores_flat[mask].unsqueeze(-1) * expert_output

        output = output_flat.view(batch_size, seq_len, d_model)
        return self.dropout(output)

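# Token-level top-k routing: each position is sent to its top_k experts and the
# expert outputs are mixed using the softmax-normalized gate scores.
# Usage sketch (shapes only, values illustrative):
#     moe = MoE(d_model=512, num_experts=4, d_ff=2048, top_k=2)
#     y = moe(torch.randn(8, 128, 512))   # -> (8, 128, 512)
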
class TransformerBlock(nn.Module):
    def __init__(self, d_model, num_heads, d_ff, num_experts, dropout=0.1, top_k=2):
        super(TransformerBlock, self).__init__()
        self.self_attention = MultiHeadAttention(d_model, num_heads)
        self.norm1 = nn.LayerNorm(d_model)
        self.cross_attention = MultiHeadAttention(d_model, num_heads)
        self.norm2 = nn.LayerNorm(d_model)
        self.moe = MoE(d_model, num_experts, d_ff, top_k, dropout)
        self.norm3 = nn.LayerNorm(d_model)

    def forward(self, x, mask=None, enc_output=None, enc_mask=None):
        attn_output = self.self_attention(x, x, x, mask)
        x = self.norm1(x + attn_output)

        if enc_output is not None:
            cross_attn_output = self.cross_attention(x, enc_output, enc_output, enc_mask)
            x = self.norm2(x + cross_attn_output)

        moe_output = self.moe(x)
        return self.norm3(x + moe_output)

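# The same block serves as an encoder layer (enc_output is None, so the
# cross-attention sub-layer is skipped) and as a decoder layer (enc_output is
# the encoder output). Residual connections use post-LayerNorm.
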
class Transformer(nn.Module):
    def __init__(self, input_dim, d_model, num_heads, num_layers, d_ff, num_experts, output_dim, dropout=0.1, top_k=2):
        super(Transformer, self).__init__()
        # Assumes the padding token uses the last vocabulary index (true for
        # GPT-2 here, where the pad token is set to eos, the last id).
        self.embedding = nn.Embedding(input_dim, d_model, padding_idx=input_dim - 1)
        self.rotary_positional_encoding = RotaryPositionalEncoding(d_model)
        self.encoder_layers = nn.ModuleList(
            [TransformerBlock(d_model, num_heads, d_ff, num_experts, dropout, top_k) for _ in range(num_layers)]
        )
        self.decoder_layers = nn.ModuleList(
            [TransformerBlock(d_model, num_heads, d_ff, num_experts, dropout, top_k) for _ in range(num_layers)]
        )
        self.output_layer = nn.Linear(d_model, output_dim)
        self.d_model = d_model

    def forward(self, src, tgt, src_mask=None, tgt_mask=None):
        # Encode the source sequence.
        src = self.embedding(src) * math.sqrt(self.d_model)
        src = src.transpose(0, 1)
        src = self.rotary_positional_encoding(src)
        src = src.transpose(0, 1)
        for layer in self.encoder_layers:
            src = layer(src, src_mask)

        # Decode the target sequence, attending over the encoder output.
        tgt = self.embedding(tgt) * math.sqrt(self.d_model)
        tgt = tgt.transpose(0, 1)
        tgt = self.rotary_positional_encoding(tgt)
        tgt = tgt.transpose(0, 1)
        for layer in self.decoder_layers:
            tgt = layer(tgt, tgt_mask, src, src_mask)
        output = self.output_layer(tgt)
        return output

    def generate(self, src, tokenizer, max_length=20, temperature=1.0):
        """
        Generate sequences using differentiable sampling (Gumbel-Softmax).

        Args:
            src (torch.Tensor): Source input tensor of shape (batch_size, seq_len)
            tokenizer (transformers.PreTrainedTokenizer): Tokenizer to access special tokens
            max_length (int): Maximum length of the generated sequence
            temperature (float): Temperature parameter for Gumbel-Softmax

        Returns:
            torch.Tensor: Generated sequences of shape (batch_size, max_length)
            torch.Tensor: Entropy values for each time step
            torch.Tensor: Variance values for each time step
        """
        batch_size = src.size(0)

        # Encode the source once; it is reused at every decoding step.
        src_enc = self.embedding(src) * math.sqrt(self.d_model)
        src_enc = src_enc.transpose(0, 1)
        src_enc = self.rotary_positional_encoding(src_enc)
        src_enc = src_enc.transpose(0, 1)
        for layer in self.encoder_layers:
            src_enc = layer(src_enc)

        # Start every sequence with the BOS token and decode step by step.
        tgt_seq = torch.full((batch_size, 1), tokenizer.bos_token_id, dtype=torch.long, device=src.device)
        entropies = []
        variances = []

        for _ in range(max_length):
            tgt_emb = self.embedding(tgt_seq) * math.sqrt(self.d_model)
            tgt_emb = tgt_emb.transpose(0, 1)
            tgt_emb = self.rotary_positional_encoding(tgt_emb)
            tgt_emb = tgt_emb.transpose(0, 1)
            tgt_dec = tgt_emb
            for layer in self.decoder_layers:
                tgt_dec = layer(tgt_dec, None, src_enc, None)
            output = self.output_layer(tgt_dec)
            logits = output[:, -1, :]

            probs = F.softmax(logits / temperature, dim=-1)

            # Per-step entropy of the temperature-scaled predictive distribution.
            entropy = -torch.sum(probs * torch.log(probs + 1e-9), dim=-1)
            entropies.append(entropy)

            # Gumbel-Softmax relaxation of sampling from the categorical distribution.
            gumbel_noise = -torch.log(-torch.log(torch.rand_like(probs) + 1e-9) + 1e-9)
            y = (logits + gumbel_noise) / temperature
            y = F.softmax(y, dim=-1)

            variance = torch.var(y, dim=-1)
            variances.append(variance)

            next_tokens = torch.argmax(y, dim=-1, keepdim=True)
            tgt_seq = torch.cat([tgt_seq, next_tokens], dim=1)

        entropies = torch.stack(entropies, dim=1)
        variances = torch.stack(variances, dim=1)

        return tgt_seq[:, 1:], entropies, variances

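# Note on generate(): adding Gumbel(0, 1) noise to the logits and taking the
# argmax draws an exact sample from Categorical(softmax(logits)) (the Gumbel-max
# trick); softmax((logits + noise) / temperature) is its Gumbel-Softmax
# relaxation, whose per-step variance is recorded above. The argmax used to emit
# tokens is itself not differentiable.
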
def compute_loss(output, target, padding_idx, alpha=0.1, beta=0.1, temperature=1.0):
    """
    Compute the loss with entropy and variance regularization.

    Args:
        output (torch.Tensor): Model output logits of shape (batch_size, seq_len, vocab_size)
        target (torch.Tensor): Target sequences of shape (batch_size, seq_len)
        padding_idx (int): Padding index to ignore in the loss
        alpha (float): Weight for the entropy regularization term
        beta (float): Weight for the variance regularization term
        temperature (float): Temperature parameter for computing probabilities

    Returns:
        torch.Tensor: Scalar loss value
    """
    output_flat = output.contiguous().view(-1, output.size(-1))
    target_flat = target.contiguous().view(-1)
    ce_loss = F.cross_entropy(
        output_flat,
        target_flat,
        ignore_index=padding_idx
    )

    probs = F.softmax(output / temperature, dim=-1)

    entropy = -torch.sum(probs * torch.log(probs + 1e-9), dim=-1)
    entropy_loss = -alpha * torch.mean(entropy)

    variance = torch.var(probs, dim=-1)
    variance_loss = -beta * torch.mean(variance)

    total_loss = ce_loss + entropy_loss + variance_loss
    return total_loss

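# In symbols: total_loss = CE - alpha * mean(H(p)) - beta * mean(Var(p)),
# with p = softmax(logits / temperature), so higher predictive entropy and
# variance both lower the loss.
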
def train_epoch(model, train_loader, optimizer, scheduler, scaler, args, padding_idx):
    model.train()
    total_loss = 0.0
    optimizer.zero_grad()
    print(f"Starting training epoch with {len(train_loader)} batches...")
    for i, batch in enumerate(train_loader):
        print(f"Processing batch {i+1}/{len(train_loader)}...")
        src_batch = batch['input_ids'].to(device)
        tgt_batch = batch['labels'].to(device)

        # Mixed-precision forward pass (enabled only when running on CUDA).
        with autocast(device_type=device.type, enabled=(device.type == 'cuda')):
            print("Forward pass...")
            output = model(src_batch, tgt_batch[:, :-1])
            print("Computing loss...")
            loss = compute_loss(
                output,
                tgt_batch[:, 1:],
                padding_idx,
                alpha=args.alpha,
                beta=args.beta,
                temperature=args.temperature
            )
            # Scale the loss so gradients accumulate to the full-batch average.
            loss = loss / args.accumulation_steps

        print("Backward pass...")
        scaler.scale(loss).backward()

        if (i + 1) % args.accumulation_steps == 0:
            print("Gradient clipping...")
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            print("Optimizer step...")
            scaler.step(optimizer)
            scaler.update()

            print("Zeroing gradients...")
            optimizer.zero_grad()

            print("Updating learning rate...")
            scheduler.step()

        total_loss += loss.item() * args.accumulation_steps
        print(f"Batch {i+1} completed. Current loss: {loss.item():.4f}")

    avg_loss = total_loss / len(train_loader)
    print(f"Epoch completed. Average loss: {avg_loss:.4f}")
    return avg_loss

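# Gradient accumulation: the optimizer steps once every accumulation_steps
# batches, so the effective batch size is batch_size * accumulation_steps.
# Leftover batches at the end of an epoch (when the batch count is not a
# multiple of accumulation_steps) do not trigger a final optimizer step here.
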
def evaluate(model, eval_loader, args, padding_idx):
    model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for batch in eval_loader:
            src_batch = batch['input_ids'].to(device)
            tgt_batch = batch['labels'].to(device)

            with autocast(device_type=device.type, enabled=(device.type == 'cuda')):
                output = model(src_batch, tgt_batch[:, :-1])
                loss = compute_loss(
                    output,
                    tgt_batch[:, 1:],
                    padding_idx,
                    alpha=args.alpha,
                    beta=args.beta,
                    temperature=args.temperature
                )

            total_loss += loss.item()

    avg_loss = total_loss / len(eval_loader)
    return avg_loss

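# The reported evaluation loss includes the entropy/variance regularizers; for a
# perplexity-style number, exponentiate only the cross-entropy component.
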
def main():
    args = parse_args()
    print("Arguments parsed successfully.")

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
        print(f"Save directory created: {args.save_dir}")

    print("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    print("Tokenizer loaded successfully.")

    print("Loading and preprocessing data...")
    train_loader, eval_loader = load_data(args, tokenizer)
    print("Data loaded and preprocessed successfully.")

    # Architecture hyper-parameters are fixed here; only training settings come
    # from the command line.
    input_dim = len(tokenizer)
    d_model = 512
    num_heads = 8
    num_layers = 6
    d_ff = 2048
    num_experts = 4
    output_dim = input_dim
    dropout = 0.1
    top_k = 2

    print("Initializing model...")
    model = Transformer(
        input_dim, d_model, num_heads, num_layers, d_ff, num_experts, output_dim, dropout, top_k
    )
    model = model.to(device)
    print(f"Model initialized and moved to device: {device}")

    padding_idx = tokenizer.pad_token_id

    print("Setting up optimizer and scheduler...")
    optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    # The scheduler is stepped once per optimizer update, so T_max must cover the
    # total number of updates rather than the number of epochs.
    total_updates = args.num_epochs * max(1, len(train_loader) // args.accumulation_steps)
    scheduler = CosineAnnealingLR(optimizer, T_max=total_updates)
    scaler = GradScaler(enabled=(device.type == 'cuda'))
    print("Optimizer and scheduler set up successfully.")

    print("Starting training loop...")
    for epoch in range(args.num_epochs):
        print(f"Epoch {epoch + 1}/{args.num_epochs} started.")
        avg_train_loss = train_epoch(
            model,
            train_loader,
            optimizer,
            scheduler,
            scaler,
            args,
            padding_idx
        )
        print(f"Epoch {epoch + 1}/{args.num_epochs} training completed.")

        print(f"Starting evaluation for epoch {epoch + 1}...")
        avg_eval_loss = evaluate(model, eval_loader, args, padding_idx)
        print(f"Evaluation for epoch {epoch + 1} completed.")

        print(f"Epoch {epoch + 1}/{args.num_epochs}, Train Loss: {avg_train_loss:.4f}, Eval Loss: {avg_eval_loss:.4f}")

        model_save_path = os.path.join(args.save_dir, f"model_epoch_{epoch + 1}.pt")
        torch.save(model.state_dict(), model_save_path)
        print(f"Model saved for epoch {epoch + 1}")

    print("Training completed.")

if __name__ == '__main__':
    main()

'''
Example usage:
python lightbulb.py --model_name gpt2 --dataset_name wikitext --dataset_config wikitext-2-raw-v1 --batch_size 8 --num_epochs 3
'''
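# A fuller invocation exercising the remaining flags defined in parse_args()
# (values are illustrative, not tuned):
# python lightbulb.py --model_name gpt2 --dataset_name wikitext \
#     --dataset_config wikitext-2-raw-v1 --batch_size 8 --num_epochs 3 \
#     --max_length 128 --accumulation_steps 4 --learning_rate 1e-4 \
#     --weight_decay 1e-2 --alpha 0.1 --beta 0.1 --max_grad_norm 1.0 \
#     --temperature 1.0 --save_dir ./models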