This is a RoBERTa-base model trained on 128.06M tweets until the end of March 2022. More details and performance scores are available in the TimeLMs paper.
Below, we provide some usage examples using the standard Transformers interface. For another interface more suited to comparing predictions and perplexity scores between models trained at different temporal intervals, check the TimeLMs repository.
For other models trained until different periods, check this table.
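For a quick side-by-side comparison using only the standard Transformers pipeline, you can run the same masked prompt through two checkpoints. This is just a sketch: the earlier checkpoint name below ("cardiffnlp/twitter-roberta-base-2021-124m") is given as an illustration, so check the table mentioned above for the models that are actually available.

```python
from transformers import pipeline

# Sketch: compare the same masked prompt across two TimeLMs checkpoints.
# The 2021 checkpoint name is illustrative; consult the model table for
# the full list of available checkpoints.
checkpoints = [
    "cardiffnlp/twitter-roberta-base-2021-124m",
    "cardiffnlp/twitter-roberta-base-mar2022",
]

text = "So glad I'm <mask> vaccinated."
for ckpt in checkpoints:
    fill_mask = pipeline("fill-mask", model=ckpt, tokenizer=ckpt)
    print(ckpt)
    for candidate in fill_mask(text, top_k=3):
        print("  %.5f %s" % (candidate['score'], candidate['token_str']))
```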
Preprocess tweets by replacing usernames and links with the placeholders "@user" and "http". If you're interested in retaining verified users that were also retained during training, you may keep the usernames listed here.
```python
def preprocess(text):
    preprocessed_text = []
    for t in text.split():  # expects whitespace tokenization
        if len(t) > 1:
            t = '@user' if t[0] == '@' and t.count('@') == 1 else t
            t = 'http' if t.startswith('http') else t
        preprocessed_text.append(t)
    return ' '.join(preprocessed_text)
```
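For instance, with a made-up tweet just for illustration:

```python
# Example of the preprocessing in action (hypothetical tweet for illustration).
print(preprocess("@username loving the new features! more info at https://example.com"))
# -> "@user loving the new features! more info at http"
```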
```python
from transformers import pipeline, AutoTokenizer

MODEL = "cardiffnlp/twitter-roberta-base-mar2022"
fill_mask = pipeline("fill-mask", model=MODEL, tokenizer=MODEL)
tokenizer = AutoTokenizer.from_pretrained(MODEL)

def pprint(candidates, n):
    for i in range(n):
        token = tokenizer.decode(candidates[i]['token'])
        score = candidates[i]['score']
        print("%d) %.5f %s" % (i+1, score, token))

texts = [
    "So glad I'm <mask> vaccinated.",
    "I keep forgetting to bring a <mask>.",
    "Looking forward to watching <mask> Game tonight!",
]

for text in texts:
    t = preprocess(text)
    print(f"{'-'*30}\n{t}")
    candidates = fill_mask(t)
    pprint(candidates, 5)
```
Output:
```
------------------------------
So glad I'm <mask> vaccinated.
1) 0.34390 fully
2) 0.28177 not
3) 0.16473 getting
4) 0.04932 still
5) 0.01754 double
------------------------------
I keep forgetting to bring a <mask>.
1) 0.05391 book
2) 0.04560 mask
3) 0.03456 pen
4) 0.03251 lighter
5) 0.03098 charger
------------------------------
Looking forward to watching <mask> Game tonight!
1) 0.60744 the
2) 0.15224 The
3) 0.02575 this
4) 0.01450 End
5) 0.01035 Championship
```
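The pipeline returns the five highest-scoring candidates by default; in recent transformers releases you can ask for a different number with the top_k argument, for example:

```python
# Request ten candidates instead of the default five
# (the argument is named `top_k` in recent transformers releases).
candidates = fill_mask(preprocess("I keep forgetting to bring a <mask>."), top_k=10)
pprint(candidates, 10)
```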
Example of tweet embeddings, ranking a small set of tweets by cosine similarity to a query:

```python
from transformers import AutoTokenizer, AutoModel, TFAutoModel
import numpy as np
from scipy.spatial.distance import cosine
from collections import Counter

def get_embedding(text):  # naive approach for demonstration
    text = preprocess(text)
    encoded_input = tokenizer(text, return_tensors='pt')
    features = model(**encoded_input)
    features = features[0].detach().cpu().numpy()
    return np.mean(features[0], axis=0)

MODEL = "cardiffnlp/twitter-roberta-base-mar2022"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModel.from_pretrained(MODEL)

query = "The book was awesome"
tweets = ["I just ordered fried chicken 🐣",
          "The movie was great",
          "What time is the next game?",
          "Just finished reading 'Embeddings in NLP'"]

sims = Counter()
for tweet in tweets:
    sim = 1 - cosine(get_embedding(query), get_embedding(tweet))
    sims[tweet] = sim

print('Most similar to: ', query)
print(f"{'-'*30}")
for idx, (tweet, sim) in enumerate(sims.most_common()):
    print("%d) %.5f %s" % (idx+1, sim, tweet))
```
Output:
```
Most similar to: The book was awesome
------------------------------
1) 0.98985 The movie was great
2) 0.96122 Just finished reading 'Embeddings in NLP'
3) 0.95733 I just ordered fried chicken 🐣
4) 0.93271 What time is the next game?
```
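The get_embedding helper above embeds one tweet at a time. For larger collections, a batched variant that mean-pools over the last hidden states using the attention mask is usually preferable. The sketch below reuses the tokenizer, model and preprocess defined above; the helper name is ours and not part of the original examples.

```python
import torch
import numpy as np

def get_embeddings_batched(texts, batch_size=32):
    # Sketch: batched mean pooling over the last hidden states, ignoring
    # padding positions via the attention mask. Reuses `tokenizer`, `model`
    # and `preprocess` from the snippets above.
    all_embeddings = []
    for i in range(0, len(texts), batch_size):
        batch = [preprocess(t) for t in texts[i:i + batch_size]]
        encoded = tokenizer(batch, padding=True, truncation=True, return_tensors='pt')
        with torch.no_grad():
            hidden = model(**encoded)[0]                 # (batch, seq_len, hidden_size)
        mask = encoded['attention_mask'].unsqueeze(-1)   # (batch, seq_len, 1)
        summed = (hidden * mask).sum(dim=1)
        counts = mask.sum(dim=1).clamp(min=1)
        all_embeddings.append((summed / counts).numpy())
    return np.concatenate(all_embeddings, axis=0)

# e.g. embeddings = get_embeddings_batched([query] + tweets)
```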
Example of feature extraction (mean-pooled token embeddings), with the TensorFlow variant commented out:

```python
from transformers import AutoTokenizer, AutoModel, TFAutoModel
import numpy as np

MODEL = "cardiffnlp/twitter-roberta-base-mar2022"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
text = "Good night 😊"
text = preprocess(text)

# Pytorch
model = AutoModel.from_pretrained(MODEL)
encoded_input = tokenizer(text, return_tensors='pt')
features = model(**encoded_input)
features = features[0].detach().cpu().numpy()
features_mean = np.mean(features[0], axis=0)
#features_max = np.max(features[0], axis=0)

# # Tensorflow
# model = TFAutoModel.from_pretrained(MODEL)
# encoded_input = tokenizer(text, return_tensors='tf')
# features = model(encoded_input)
# features = features[0].numpy()
# features_mean = np.mean(features[0], axis=0)
# #features_max = np.max(features[0], axis=0)
```