a2ran committed on
Commit
515fa5b
·
1 Parent(s): 43c5891

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -22,13 +22,13 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, GPTQConfig
22
 
23
  model_id = "TheBloke/WizardLM-13B-V1.2-GPTQ"
24
 
25
- config = PeftConfig.from_pretrained("a2ran/GPTeacher_ko_llama2_13B")
26
  tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
27
  quantization_config_loading = GPTQConfig(bits=4, disable_exllama=True)
28
 
29
  model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config_loading,
30
  torch_dtype=torch.float16, device_map="auto")
31
- model = PeftModel.from_pretrained(model, "a2ran/GPTeacher_ko_llama2_13B")
32
  ```
33
 
34
  * How to Generate Tokens
 
22
 
23
  model_id = "TheBloke/WizardLM-13B-V1.2-GPTQ"
24
 
25
+ config = PeftConfig.from_pretrained("a2ran/GPTeacher-llama2-ko-13b")
26
  tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
27
  quantization_config_loading = GPTQConfig(bits=4, disable_exllama=True)
28
 
29
  model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config_loading,
30
  torch_dtype=torch.float16, device_map="auto")
31
+ model = PeftModel.from_pretrained(model, "a2ran/GPTeacher-llama2-ko-13b")
32
  ```
33
 
34
  * How to Generate Tokens