XAT928 committed on
Commit e752b75
1 Parent(s): a288ff6

Update README.md

Files changed (1): README.md (+59, -1)
README.md CHANGED
@@ -199,4 +199,62 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
[More Information Needed]

### Framework versions

- PEFT 0.13.2

The following is the inference code for elyza-tasks-100-TV_0.jsonl.

Install unsloth:

```python
%%capture
!pip install unsloth
!pip uninstall unsloth -y && pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
```

Load the model and tokenizer:

```python
from unsloth import FastLanguageModel
import torch
import json

model_name = "XAT928/llm-jp-3-13b-finetune-elyza"

max_seq_length = 2048
dtype = None          # None lets unsloth choose the dtype automatically
load_in_4bit = True   # 4-bit quantization to fit the 13B model in memory

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = model_name,
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    token = "HF token",  # replace with your Hugging Face access token
)
FastLanguageModel.for_inference(model)  # switch unsloth into inference mode
```
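
Rather than hardcoding the token in the notebook, it can be read from the environment. A minimal sketch, assuming you have exported your token as `HF_TOKEN` beforehand (the variable name is an arbitrary choice, not something this model card prescribes):

```python
import os

# Assumption: HF_TOKEN was set beforehand, e.g. exported in your shell
# or stored in your notebook environment's secrets.
hf_token = os.environ["HF_TOKEN"]

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = model_name,
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    token = hf_token,
)
```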

Load the dataset:

```python
# Load the dataset.
# In the omnicampus development environment, drag and drop the task jsonl
# into the left pane before running.
datasets = []
with open("/content/sample_data/elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        # A record may span several lines, so accumulate until the closing brace.
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""
```
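
A quick sanity check after loading can catch a wrong path or a malformed file early. This is only an illustrative snippet; it assumes each record has the `task_id` and `input` keys used in the inference loop below:

```python
# The file should yield one dict per task; peek at the first record.
print(f"loaded {len(datasets)} tasks")
print(datasets[0]["task_id"], datasets[0]["input"][:50])
```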

Run inference over all tasks:

```python
from tqdm import tqdm

# Inference
results = []
for dt in tqdm(datasets):
    input = dt["input"]

    prompt = f"""### 指示\n{input}\n### 注意\n簡潔に回答してください。\n### 回答\n"""

    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)

    # Greedy decoding (do_sample=False) with a repetition penalty, for reproducible output.
    outputs = model.generate(**inputs, max_new_tokens=1024, use_cache=True, do_sample=False, repetition_penalty=1.2)
    # Keep only the text after the final '### 回答' header, i.e. the model's answer.
    prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]

    results.append({"task_id": dt["task_id"], "input": input, "output": prediction})
```
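
Splitting the decoded string on `'\n### 回答'` works because the prompt ends with that header. An equivalent approach, sketched here with the same loop variables, is to decode only the newly generated tokens, which does not depend on the header text at all:

```python
# Decode only the tokens generated after the prompt.
gen_tokens = outputs[0][inputs["input_ids"].shape[1]:]
prediction = tokenizer.decode(gen_tokens, skip_special_tokens=True)
```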

Save the predictions as JSON Lines:

```python
# model_name is a repo id containing "/", which open() would treat as a
# directory, so flatten it for the output file name.
output_path = f"/content/{model_name.replace('/', '_')}_output.jsonl"
with open(output_path, 'w', encoding='utf-8') as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)
        f.write('\n')
```
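
To confirm the file was written correctly, it can be read back line by line; a minimal check, reusing the `output_path` defined above:

```python
# Every line should parse as JSON and the count should match `results`.
with open(output_path, encoding="utf-8") as f:
    rows = [json.loads(line) for line in f]
assert len(rows) == len(results)
print(f"wrote {len(rows)} predictions to {output_path}")
```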