from transformers import BertModel, BertTokenizer
from transformers import RobertaModel, RobertaTokenizer
from transformers import AutoModel, AutoTokenizer
from transformers import LukeTokenizer, LukeModel
def get_baseline_model(model_name):
    """Load a pretrained baseline encoder and its tokenizer by short name.

    Args:
        model_name: Supported short name, one of: 'bert-base', 'bert-large',
            'roberta-base', 'roberta-large', 'spanbert-base', 'spanbert-large',
            'luke-base', 'luke-large', 'simcse-bert-base', 'simcse-bert-large',
            'simcse-roberta-base', 'simcse-roberta-large'.

    Returns:
        A ``(model, tokenizer)`` tuple, both loaded via HuggingFace
        ``from_pretrained``.

    Raises:
        NotImplementedError: If ``model_name`` is not a supported short name.
    """
    # Map short name -> (tokenizer class, model class, HF checkpoint id).
    # A single table replaces the 12-way if/elif chain: every variant is
    # handled identically and adding a new one is a one-line change.
    registry = {
        'bert-base': (BertTokenizer, BertModel, 'bert-base-uncased'),
        'bert-large': (BertTokenizer, BertModel, 'bert-large-uncased'),
        'roberta-base': (RobertaTokenizer, RobertaModel, 'roberta-base'),
        'roberta-large': (RobertaTokenizer, RobertaModel, 'roberta-large'),
        'spanbert-base': (AutoTokenizer, AutoModel, 'SpanBERT/spanbert-base-cased'),
        'spanbert-large': (AutoTokenizer, AutoModel, 'SpanBERT/spanbert-large-cased'),
        'luke-base': (LukeTokenizer, LukeModel, 'studio-ousia/luke-base'),
        'luke-large': (LukeTokenizer, LukeModel, 'studio-ousia/luke-large'),
        'simcse-bert-base': (AutoTokenizer, AutoModel, 'princeton-nlp/unsup-simcse-bert-base-uncased'),
        'simcse-bert-large': (AutoTokenizer, AutoModel, 'princeton-nlp/unsup-simcse-bert-large-uncased'),
        'simcse-roberta-base': (AutoTokenizer, AutoModel, 'princeton-nlp/unsup-simcse-roberta-base'),
        'simcse-roberta-large': (AutoTokenizer, AutoModel, 'princeton-nlp/unsup-simcse-roberta-large'),
    }
    if model_name not in registry:
        # Keep NotImplementedError (original exception type) but include the
        # offending name so a typo is diagnosable from the traceback.
        raise NotImplementedError(f'Unsupported model_name: {model_name!r}')
    tokenizer_cls, model_cls, checkpoint = registry[model_name]
    tokenizer = tokenizer_cls.from_pretrained(checkpoint)
    model = model_cls.from_pretrained(checkpoint)
    return model, tokenizer