mssfj committed
Commit 1711b76 · verified · 1 Parent(s): d6ae3ca

Update README.md

Files changed (1):
  1. README.md +2 -8
README.md CHANGED
@@ -51,17 +51,15 @@ This is the model card of a 🤗 transformers model that has been pushed on the
 ## Uses
 
 <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+Usage is as follows.
 
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
-
 import torch
-
 from peft import PeftModel, PeftConfig
-# Model name
+
 model_name = "mssfj/gemma-2-9b-bnb-4bit-chat-template"
 lora_weight = "mssfj/gemma-2-9b-4bit-magpie"
 
-# Quantization settings
 quantization_config = BitsAndBytesConfig(
     load_in_4bit=False,
     bnb_4bit_compute_dtype=torch.bfloat16,
@@ -69,17 +67,14 @@ quantization_config = BitsAndBytesConfig(
     bnb_4bit_use_double_quant=False
 )
 
-# Load the base model
 base_model = AutoModelForCausalLM.from_pretrained(
     model_name,
     quantization_config=quantization_config,
     device_map="auto"
 )
 
-# Apply the QLoRA adapter
 model = PeftModel.from_pretrained(base_model, lora_weight)
 
-# Load the tokenizer
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 input="""日本で一番高い山は?
@@ -94,7 +89,6 @@ messages = [
     {"role": "user", "content": input},
 ]
 
-# Apply the chat template
 input_ids = tokenizer.apply_chat_template(
     messages,
     tokenize=True,
 
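The last hunk above ends mid-call. For readers of the model card, here is a minimal sketch of how the snippet plausibly continues; the `add_generation_prompt` and `return_tensors` arguments and the generation settings are illustrative assumptions, not part of this commit:

```python
# Sketch only: assumes the model, tokenizer, and messages objects defined in
# the README snippet above. The arguments below are illustrative assumptions.
input_ids = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,  # assumed: appends the assistant turn marker
    return_tensors="pt",
).to(model.device)

with torch.no_grad():
    outputs = model.generate(input_ids, max_new_tokens=256)

# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```

One quirk worth noting: the committed snippet passes `load_in_4bit=False`, so the other `bnb_4bit_*` options in the `BitsAndBytesConfig` appear to have no effect as written; loading the base model in 4-bit would require `load_in_4bit=True`.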