from transformers import AutoTokenizer

from configuration_deltalm import DeltalmConfig
from modeling_deltalm import DeltalmForConditionalGeneration

# Load the tokenizer and model; the checkpoint path below is a placeholder for your DeltaLM checkpoint.
tokenizer = AutoTokenizer.from_pretrained("path/to/deltalm-checkpoint")
model = DeltalmForConditionalGeneration.from_pretrained("path/to/deltalm-checkpoint")
src_text = "i'm steve and<mask> 25 years old"
encoded_hi = tokenizer(src_text, return_tensors="pt")
# Generate with greedy decoding; return_dict_in_generate=True exposes .sequences on the output.
generated_output = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.bos_token_id, max_length=20, num_beams=1, return_dict_in_generate=True, output_hidden_states=True)
text_output = tokenizer.batch_decode(generated_output.sequences, skip_special_tokens=True)
print(text_output)