Vitrous committed on
Commit
0471ea9
·
verified ·
1 Parent(s): 97024ed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -4
app.py CHANGED
@@ -7,10 +7,7 @@ from transformers import (AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM,
7
 
8
  app = FastAPI()
9
 
10
- if torch.cuda.is_available():
11
- print("CUDA is available. GPU will be used.")
12
- else:
13
- print("CUDA is not available. CPU will be used.")
14
  # Load the model and tokenizer
15
  model_name_or_path = "TheBloke/Wizard-Vicuna-7B-Uncensored-GPT/"
16
  # Dictionary to store conversation threads and their context
 
7
 
8
  app = FastAPI()
9
 
10
+
 
 
 
11
  # Load the model and tokenizer
12
  model_name_or_path = "TheBloke/Wizard-Vicuna-7B-Uncensored-GPT/"
13
  # Dictionary to store conversation threads and their context