akhil2808 committed on
Commit
6993e4b
·
verified ·
1 Parent(s): 63141a7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -12,15 +12,15 @@ DEFAULT_MAX_NEW_TOKENS = 1024
12
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
13
 
14
 
15
- if not torch.cuda.is_available():
16
- DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
17
 
18
 
19
- if torch.cuda.is_available():
20
- model_id = "mistralai/Mistral-7B-Instruct-v0.2"
21
- model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
22
- tokenizer = AutoTokenizer.from_pretrained(model_id)
23
- tokenizer.use_default_system_prompt = False
24
 
25
 
26
  @spaces.GPU
 
12
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
13
 
14
 
15
+ #if not torch.cuda.is_available():
16
+ # DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
17
 
18
 
19
+ #if torch.cuda.is_available():
20
+ # model_id = "mistralai/Mistral-7B-Instruct-v0.2"
21
+ # model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
22
+ # tokenizer = AutoTokenizer.from_pretrained(model_id)
23
+ # tokenizer.use_default_system_prompt = False
24
 
25
 
26
  @spaces.GPU