lrl-modelcloud committed on
Commit
d776069
1 Parent(s): 5b3a0a8

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -37,6 +37,6 @@ model = GPTQModel.from_quantized(
37
  )
38
 
39
  inputs = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
40
- outputs = model.generate(prompts=inputs,)
41
  print(outputs[0].outputs[0].text)
42
  ```
 
37
  )
38
 
39
  inputs = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
40
+ outputs = model.generate(prompts=inputs, temperature=0.95, max_length=128)
41
  print(outputs[0].outputs[0].text)
42
  ```