Lin-K76 committed on
Commit aace9b8
1 Parent(s): 6c0fcdd

Update README.md

Files changed (1)
  1. README.md +1 -1
README.md CHANGED
```diff
@@ -59,7 +59,7 @@ messages = [
 
 prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
 
-llm = LLM(model=model_id, tensor_parallel_size=number_gpus)
+llm = LLM(model=model_id, tensor_parallel_size=number_gpus, max_model_len=4096)
 
 outputs = llm.generate(prompts, sampling_params)
 
```
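For context, here is a minimal sketch of how the updated call sits inside a complete vLLM generation script. This is an illustration rather than the README's full example: the model repo id, prompt, and sampling settings are placeholders, and only the `max_model_len=4096` argument comes from this commit.

```python
# Minimal sketch around the line changed in this commit. model_id, number_gpus,
# the prompt, and the sampling settings are illustrative placeholders; only the
# max_model_len=4096 argument is taken from the diff above.
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

model_id = "<model repo id from this README>"  # placeholder
number_gpus = 1  # tensor-parallel degree, set to the number of GPUs available

tokenizer = AutoTokenizer.from_pretrained(model_id)

messages = [
    {"role": "user", "content": "Give a one-sentence summary of tensor parallelism."},
]

# Render the chat template to a plain prompt string for vLLM.
prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)

sampling_params = SamplingParams(temperature=0.6, top_p=0.9, max_tokens=256)

# max_model_len caps the context window (prompt plus generated tokens), which
# bounds the KV-cache allocation; 4096 is the value this commit adds.
llm = LLM(model=model_id, tensor_parallel_size=number_gpus, max_model_len=4096)

outputs = llm.generate(prompts, sampling_params)
print(outputs[0].outputs[0].text)
```

A likely motivation for the change: without an explicit `max_model_len`, vLLM sizes the KV cache for the checkpoint's full advertised context length, which can fail at engine start-up on GPUs with less memory; capping it at 4096 makes the README snippet easier to run as-is.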