JacopoAbate committed
Commit
2efc89f
1 Parent(s): 8549217

Update README.md

Files changed (1):
  1. README.md (+12 -3)
README.md CHANGED
@@ -59,9 +59,18 @@ encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt")
 model_inputs = encodeds.to(device)
 model.to(device)
 
-generated_ids = model.generate(model_inputs, max_new_tokens=256, do_sample=True)
-decoded = tokenizer.batch_decode(generated_ids)
-print(decoded[0])
+generated_ids = model.generate(
+    model_inputs,                         # Input token IDs from the chat template
+    max_new_tokens=128,                   # Cap the number of newly generated tokens
+    do_sample=True,                       # Enable sampling to introduce randomness
+    temperature=0.1,                      # Low temperature keeps generation nearly deterministic
+    top_p=0.95,                           # Nucleus sampling for more coherent output
+    eos_token_id=tokenizer.eos_token_id   # Stop at the end-of-sequence token
+)
+
+decoded_output = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+trimmed_output = decoded_output.strip()
+print(trimmed_output)
 ```
 
 ## Bias, Risks and Limitations
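
For reference, a minimal, self-contained sketch of how the updated snippet fits into the README's usage example. The model ID, device handling, and example message below are placeholders assumed for illustration; they are not taken from this diff.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repository ID -- substitute the model card's actual model name.
model_id = "your-org/your-model"

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Build a chat-formatted prompt and move inputs and model to the target device.
messages = [{"role": "user", "content": "What does nucleus sampling do?"}]
encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt")
model_inputs = encodeds.to(device)
model.to(device)

# Generate with the sampling settings introduced in this commit.
generated_ids = model.generate(
    model_inputs,
    max_new_tokens=128,
    do_sample=True,
    temperature=0.1,
    top_p=0.95,
    eos_token_id=tokenizer.eos_token_id,
)

# Decode only the first sequence and drop special tokens before printing.
decoded_output = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
print(decoded_output.strip())
```

The low temperature (0.1) combined with top_p=0.95 keeps sampling enabled while biasing strongly toward the most likely continuations, and skip_special_tokens=True removes chat-template control tokens from the printed text.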