|
from transformers import GPT2LMHeadModel, GPT2Tokenizer |
|
|
|
|
|
# Model identifier on the Hugging Face Hub; swap for "gpt2-medium" etc. to scale up.
model_name = "gpt2"

tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
# from_pretrained already returns the model in eval mode; made explicit here
# so dropout is clearly disabled for inference.
model.eval()

# NOTE(review): GPT-2 was trained almost entirely on English text, so a French
# prompt will likely produce low-quality continuations — confirm this is intended.
prompt = "Au début du 21ème siècle, les humains ont découvert une nouvelle technologie"

# Use the tokenizer's __call__ (not .encode) so we also get an attention_mask.
# Passing the mask plus an explicit pad_token_id below silences the
# "attention mask and pad token id were not set" warning and removes the
# ambiguity between padding and real tokens (GPT-2 defines no pad token).
inputs = tokenizer(prompt, return_tensors="pt")

output = model.generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_length=100,                        # total length, prompt tokens included
    num_return_sequences=1,
    do_sample=True,                        # sample instead of greedy decoding
    temperature=0.7,                       # <1.0 sharpens the token distribution
    top_k=50,                              # keep only the 50 most likely tokens
    top_p=0.95,                            # nucleus-sampling cumulative cutoff
    pad_token_id=tokenizer.eos_token_id,   # GPT-2 has no pad token; reuse EOS
)

# output is (num_return_sequences, seq_len); decode the single sequence and
# drop special tokens (e.g. the EOS used as padding above).
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

print(generated_text)
|
|