truongghieu commited on
Commit
84de4b9
1 Parent(s): 96d175f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
@@ -3,7 +3,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig,B
3
 
4
  import torch
5
 
6
- model_id = "truongghieu/deci-finetuned_BK_Regulation"
7
 
8
  # Check if a GPU is available
9
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -16,14 +16,16 @@ bnb_config = BitsAndBytesConfig(
16
 
17
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
18
  # Load the model this way if using a GPU
19
- # model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, quantization_config=bnb_config)
20
- model = AutoModelForCausalLM.from_pretrained("truongghieu/deci-finetuned", trust_remote_code=True)
 
 
21
  # Move the model to the GPU if available
22
 
23
  generation_config = GenerationConfig(
24
  penalty_alpha=0.6,
25
  do_sample=True,
26
- top_k=5,
27
  temperature=0.5,
28
  repetition_penalty=1.2,
29
  max_new_tokens=50,
 
3
 
4
  import torch
5
 
6
+ model_id = "truongghieu/deci-finetuned_Prj2"
7
 
8
  # Check if a GPU is available
9
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
16
 
17
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
18
  # Load the model this way if using a GPU
19
+ if device == "cuda":
20
+ model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, quantization_config=bnb_config)
21
+ else:
22
+ model = AutoModelForCausalLM.from_pretrained("truongghieu/deci-finetuned", trust_remote_code=True)
23
  # Move the model to the GPU if available
24
 
25
  generation_config = GenerationConfig(
26
  penalty_alpha=0.6,
27
  do_sample=True,
28
+ top_k=3,
29
  temperature=0.5,
30
  repetition_penalty=1.2,
31
  max_new_tokens=50,