truongghieu committed on
Commit
5fcd6db
1 Parent(s): ed52491

Update app.py

Files changed (1)
  1. app.py +5 -3
app.py CHANGED
@@ -3,6 +3,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig, BitsAndBytesConfig
 
 import torch
 
+model_id = "truongghieu/deci-finetuned_BK_Regulation"
+
 # Check if a GPU is available
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
@@ -12,10 +14,10 @@ bnb_config = BitsAndBytesConfig(
 )
 
 
-tokenizer = AutoTokenizer.from_pretrained("truongghieu/deci-finetuned", trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
 # Load model in this way if use GPU
-model = AutoModelForCausalLM.from_pretrained("truongghieu/deci-finetuned", trust_remote_code=True, quantization_config=bnb_config)
-# model = AutoModelForCausalLM.from_pretrained("truongghieu/deci-finetuned", trust_remote_code=True)
+# model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, quantization_config=bnb_config)
+model = AutoModelForCausalLM.from_pretrained("truongghieu/deci-finetuned", trust_remote_code=True)
 # Move the model to the GPU if available
 
 generation_config = GenerationConfig(
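
For context, a minimal sketch of how the loading code reads after this commit. The BitsAndBytesConfig arguments, GenerationConfig arguments, and the model.to(device) call are outside the hunks shown above, so the values here (load_in_4bit, bnb_4bit_compute_dtype, max_new_tokens, do_sample) are illustrative assumptions, not the app's actual settings:

import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    GenerationConfig,
    BitsAndBytesConfig,
)

model_id = "truongghieu/deci-finetuned_BK_Regulation"

# Check if a GPU is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Example quantization config; the real parameters are not part of this diff
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

# Quantized GPU path, left commented out by this commit
# model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, quantization_config=bnb_config)
# Active path keeps loading the original checkpoint, as in the diff
model = AutoModelForCausalLM.from_pretrained("truongghieu/deci-finetuned", trust_remote_code=True)

# Move the (non-quantized) model to the GPU if available
model.to(device)

# Example generation settings; the real parameters are not part of this diff
generation_config = GenerationConfig(
    max_new_tokens=256,
    do_sample=True,
)

Note that after this commit the tokenizer comes from truongghieu/deci-finetuned_BK_Regulation while the model is still loaded from truongghieu/deci-finetuned; that asymmetry is in the diff itself.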