vilarin committed
Commit d62aad8 · verified · 1 Parent(s): 3b966e3

Update app.py

Files changed (1)
  1. app.py +2 -4
app.py CHANGED
@@ -56,8 +56,8 @@ model = AutoModelForCausalLM.from_pretrained(
     quantization_config=quantization_config).eval().to(device)
 
 # Ensure `pad_token_id` is set
-if tokenizer.pad_token_id is None:
-    tokenizer.pad_token_id = tokenizer.eos_token_id
+# if tokenizer.pad_token_id is None:
+#     tokenizer.pad_token_id = tokenizer.eos_token_id
 
 @spaces.GPU()
 def stream_chat(
@@ -91,8 +91,6 @@ def stream_chat(
         max_new_tokens = max_new_tokens,
         do_sample = False if temperature == 0 else True,
         top_p = top_p,
-        eos_token_id = tokenizer.eos_token_id,
-        pad_token_id = tokenizer.pad_token_id,
         temperature = temperature,
         streamer=streamer,
     )
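
For context, a minimal sketch of what the streaming generate call looks like after this change, with `eos_token_id`/`pad_token_id` no longer passed explicitly. This is not the Space's actual app.py: only `model`, `tokenizer`, the sampling parameters, and `streamer` mirror names visible in the diff; the prompt handling, defaults, and threading are assumptions. When these token IDs are omitted, `transformers`' `generate()` reads them from the model's `generation_config`, and if `pad_token_id` is still unset it typically falls back to `eos_token_id` and logs a warning.

```python
# Hypothetical sketch, not the committed app.py: assumes a generic
# transformers causal LM and tokenizer loaded as in the diff context.
from threading import Thread

from transformers import TextIteratorStreamer


def stream_chat_sketch(model, tokenizer, prompt: str,
                       max_new_tokens: int = 256,
                       temperature: float = 0.7,
                       top_p: float = 0.9):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(
        **inputs,
        max_new_tokens=max_new_tokens,
        do_sample=False if temperature == 0 else True,
        top_p=top_p,
        temperature=temperature,
        streamer=streamer,
        # eos_token_id / pad_token_id are intentionally omitted, as in the
        # commit: generate() falls back to the model's generation_config,
        # and an unset pad_token_id is typically mapped to eos_token_id
        # (with a warning) for open-ended generation.
    )
    # Run generation in a background thread and stream text as it arrives.
    Thread(target=model.generate, kwargs=generate_kwargs).start()
    for text in streamer:
        yield text
```

If the fallback warning is undesirable, the commented-out `tokenizer.pad_token_id = tokenizer.eos_token_id` assignment from the diff achieves the same effect explicitly.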