legacy-datasets/wikipedia
Updated • 122k • 629
How to use akahana/roberta-base-indonesia with Transformers:
# Use a pipeline as a high-level helper.
from transformers import pipeline

pipe = pipeline("feature-extraction", model="akahana/roberta-base-indonesia")

# Load model directly (lower-level: tokenizer and model as separate objects).
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("akahana/roberta-base-indonesia")
model = AutoModel.from_pretrained("akahana/roberta-base-indonesia")

# NOTE: redundant with the import above, kept from the original snippet.
from transformers import pipeline
# Fill-mask demo: predict the token hidden behind <mask> in an
# Indonesian sentence using the same checkpoint for model and tokenizer.
model_id = "akahana/roberta-base-indonesia"
fill_mask = pipeline("fill-mask", model=model_id, tokenizer=model_id)
fill_mask("Gajah <mask> sedang makan di kebun binatang.")
# Lower-level usage: run one forward pass through the RoBERTa encoder.
from transformers import RobertaModel, RobertaTokenizerFast

checkpoint = "akahana/roberta-base-indonesia"
tokenizer = RobertaTokenizerFast.from_pretrained(checkpoint)
model = RobertaModel.from_pretrained(checkpoint)

prompt = "Gajah <mask> sedang makan di kebun binatang."
# Tokenize to PyTorch tensors, then feed the encoding to the model.
encoded_input = tokenizer(prompt, return_tensors='pt')
output = model(**encoded_input)