```python
# Load the causal-LM weights in half precision. `model_name` and `device_map`
# are expected to be defined earlier in the file — TODO confirm with caller.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    low_cpu_mem_usage=True,   # stream weights instead of a full CPU copy
    return_dict=True,
    torch_dtype=torch.float16,
    device_map=device_map,
)

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# Indonesian prompt: "write an essay about the Indonesian economy".
prompt = "menulis esai tentang perekonomian indonesia"

pipe = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
    do_sample=True,                        # sample rather than greedy-decode
    top_k=10,                              # restrict sampling to the 10 most likely tokens
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,   # stop cleanly at end-of-sequence
    # NOTE(review): 1028 is an unusual value — likely a typo for 1024; confirm intent.
    max_length=1028,
)

# [INST] ... [/INST] is the Llama-2-style chat template — presumably this
# checkpoint was instruction-tuned with it; verify against the model card.
result = pipe(f"<s>[INST] {prompt} [/INST]")

print(result[0]['generated_text'])
```