Mikhil-jivus committed
Commit a9049fb
1 Parent(s): d39790e

Update app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -22,10 +22,10 @@ For more details, please check [our post](https://huggingface.co/blog/llama32).
 access_token = os.getenv('HF_TOKEN')
 # Download the Base model
 #model_id = "./models/Llama-32-3B-Instruct"
-model_id = "meta-llama/Llama-3.1-8B-Instruct"
-MAX_MAX_NEW_TOKENS = 2048
-DEFAULT_MAX_NEW_TOKENS = 1024
-MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
+model_id = "nvidia/Llama-3_1-Nemotron-51B-Instruct"
+MAX_MAX_NEW_TOKENS = 6144
+DEFAULT_MAX_NEW_TOKENS = 6144
+MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "6144"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
@@ -81,7 +81,7 @@ def generate(
 
 
 
-    streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
+    streamer = TextIteratorStreamer(tokenizer, timeout=2000.0, skip_prompt=True, skip_special_tokens=True)
     generate_kwargs = dict(
         input_ids=input_ids,
         streamer=streamer,
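
For context, a minimal sketch of how the diffed lines fit into a typical transformers streaming app. Only the constants, the device line, and the streamer/generate_kwargs lines above come from this commit; the model-loading call, the apply_chat_template prompt handling, the input truncation, and the worker-thread pattern are assumptions about the surrounding app.py, not code shown in the diff.

import os
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Lines shown in the commit diff:
access_token = os.getenv("HF_TOKEN")
model_id = "nvidia/Llama-3_1-Nemotron-51B-Instruct"
MAX_MAX_NEW_TOKENS = 6144
DEFAULT_MAX_NEW_TOKENS = 6144
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "6144"))

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Assumed loading code (not in the diff). Nemotron-51B ships a custom
# architecture, so trust_remote_code=True is likely required.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=access_token)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=access_token,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)


def generate(message: str, max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS):
    # Assumed prompt handling: chat-template the user turn into input_ids.
    input_ids = tokenizer.apply_chat_template(
        [{"role": "user", "content": message}],
        add_generation_prompt=True,
        return_tensors="pt",
    )
    # Assumed truncation: keep only the most recent MAX_INPUT_TOKEN_LENGTH tokens.
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
    input_ids = input_ids.to(device)

    # From the diff: the long timeout keeps the iterator from raising while a
    # 51B model is still working on its first tokens.
    streamer = TextIteratorStreamer(
        tokenizer, timeout=2000.0, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=min(max_new_tokens, MAX_MAX_NEW_TOKENS),
        do_sample=True,  # assumed sampling setting, not shown in the diff
    )
    # model.generate() blocks, so run it on a worker thread and yield
    # partial text from the streamer as tokens arrive.
    Thread(target=model.generate, kwargs=generate_kwargs).start()
    partial = ""
    for text in streamer:
        partial += text
        yield partial

The raised timeout (20 s to 2000 s) matters because TextIteratorStreamer raises queue.Empty when no new token arrives within the timeout, and a 51B model can easily take longer than 20 s to produce its first token.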