"""Example: masked-token prediction in Portuguese with the josu/albert-pt-br ALBERT model."""
import torch
from transformers import pipeline, AlbertTokenizer, AlbertForMaskedLM

model = AlbertForMaskedLM.from_pretrained('josu/albert-pt-br')
tokenizer = AlbertTokenizer.from_pretrained('josu/albert-pt-br')

# Pick GPU 0 when CUDA is available, otherwise fall back to CPU (device=-1).
# The original hard-coded device=0, which raises on CPU-only machines.
device = 0 if torch.cuda.is_available() else -1
unmasker = pipeline('fill-mask', model=model, tokenizer=tokenizer, device=device)

text = 'Marte está no [MASK] solar.'
# Print the top predictions so the script is useful outside a REPL.
print(unmasker(text))
[{'score': 0.7004144191741943,
'token': 244,
'token_str': 'sistema',
'sequence': 'marte esta no sistema solar.'},
{'score': 0.02539917267858982,
'token': 4077,
'token_str': 'solar',
'sequence': 'marte esta no solar solar.'},
{'score': 0.020301498472690582,
'token': 49,
'token_str': 'seu',
'sequence': 'marte esta no seu solar.'},
{'score': 0.01753508299589157,
'token': 482,
'token_str': 'centro',
'sequence': 'marte esta no centro solar.'},
{'score': 0.013344300910830498,
'token': 1401,
'token_str': 'plano',
'sequence': 'marte esta no plano solar.'}]
Downloads last month: 18
This model does not have enough activity to be deployed to Inference API (serverless) yet. Increase its social
visibility and check back later, or deploy to Inference Endpoints (dedicated)
instead.