import logging
import os

import torch
import torch.nn as nn
from transformers import PreTrainedModel, AutoConfig

logger = logging.getLogger(__name__)


class CustomModel(PreTrainedModel):
    """A ``PreTrainedModel`` with a linear classification head.

    NOTE(review): only ``classifier`` is defined here; the backbone/encoder
    is presumably provided elsewhere or by a subclass — confirm before use.
    """

    # AutoConfig resolves the concrete config class from the checkpoint's
    # config.json at load time.
    config_class = AutoConfig

    def __init__(self, config):
        super().__init__(config)
        # Classification head: hidden_size -> num_labels.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
        """Load config and weights from a local checkpoint directory.

        Args:
            pretrained_model_name_or_path: directory containing a
                ``config.json`` and a ``pytorch_model.bin`` state dict.
            *args, **kwargs: forwarded to ``config_class.from_pretrained``.

        Returns:
            The loaded model, or ``None`` on any failure (original
            best-effort contract — callers check for ``None``).
        """
        try:
            config = cls.config_class.from_pretrained(
                pretrained_model_name_or_path, *args, **kwargs
            )
            model = cls(config)

            # os.path.join handles trailing separators correctly; the
            # original f-string concatenation did not.
            weights_path = os.path.join(
                str(pretrained_model_name_or_path), "pytorch_model.bin"
            )
            # weights_only=True refuses arbitrary pickled objects:
            # torch.load on an untrusted checkpoint is otherwise code
            # execution. A plain state dict loads fine under this flag.
            state_dict = torch.load(
                weights_path, map_location="cpu", weights_only=True
            )
            model.load_state_dict(state_dict)
            return model
        except Exception:
            # Broad on purpose to preserve the original best-effort
            # contract (any failure -> None), but log the full traceback
            # instead of print()-ing only the message.
            logger.exception(
                "Failed to load model from %s", pretrained_model_name_or_path
            )
            return None