---
library_name: transformers
tags: []
---
|
|
|
# Uploaded model

- **Developed by:** haruka
- **License:** apache-2.0
- **Finetuned from model:** llm-jp/llm-jp-3-13b
|
|
|
# Code |
|
|
|
```python
# python 3.10.12
!pip install -U pip
!pip install -U transformers
!pip install -U bitsandbytes
!pip install -U accelerate
!pip install -U datasets
!pip install -U peft
!pip install -U trl
!pip install -U wandb
!pip install ipywidgets --upgrade
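# Note: trl and wandb are not used by the inference code below; they appear
# to be left over from the fine-tuning environment.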
|
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
)
from peft import PeftModel
import torch
from tqdm import tqdm
import json
|
# Paste the token obtained from Hugging Face here (read from Colab's secrets).
from google.colab import userdata
HF_TOKEN = userdata.get('HF_TOKEN')

model_id = "llm-jp/llm-jp-3-13b"
adapter_id = "totsukash/llm-jp-3-13b-finetune"
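# model_id is the base checkpoint; adapter_id points to the LoRA adapter
# weights produced by fine-tuning.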
|
# QLoRA config
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
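# NF4 keeps the frozen weights in 4 bits on a quantile-based grid while the
# matmuls still run in bfloat16, so the 13B model fits on a single GPU with
# modest quality loss.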
|
# Load model
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
    token=HF_TOKEN,
)

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, token=HF_TOKEN)

# Attach the LoRA adapter to the original model.
model = PeftModel.from_pretrained(model, adapter_id, token=HF_TOKEN)
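# PeftModel keeps the quantized base weights frozen and applies the LoRA
# deltas on the fly at each adapted layer.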
|
# Load the dataset.
# (Set the path to your evaluation dataset's JSONL file.)
datasets = []
with open("/content/elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""
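# The accumulator above tolerates records pretty-printed across several lines;
# for strict one-object-per-line JSONL, json.loads(line) on each line would do.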
|
# Generation loop ("gemma" variant)
results = []
for data in tqdm(datasets):
    input = data["input"]
    prompt = f"""### 指示
{input}
### 回答
"""

    # Use only the input_ids from the tokenizer output.
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
    outputs = model.generate(input_ids, max_new_tokens=512, do_sample=False, repetition_penalty=1.2)
    output = tokenizer.decode(outputs[0][input_ids.size(1):], skip_special_tokens=True)

    results.append({"task_id": data["task_id"], "input": input, "output": output})
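# do_sample=False makes decoding greedy and therefore reproducible across runs;
# repetition_penalty=1.2 discourages degenerate repetition loops.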
|
# # Alternative generation loop ("llmjp" variant), kept for reference:
# results = []
# for data in tqdm(datasets):
#     input = data["input"]
#     prompt = f"""### 指示
# {input}
# ### 回答
# """
#     tokenized_input = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt").to(model.device)
#     attention_mask = torch.ones_like(tokenized_input)
#     with torch.no_grad():
#         outputs = model.generate(
#             tokenized_input,
#             attention_mask=attention_mask,
#             max_new_tokens=100,
#             do_sample=False,
#             repetition_penalty=1.2,
#             pad_token_id=tokenizer.eos_token_id,
#         )[0]
#     output = tokenizer.decode(outputs[tokenized_input.size(1):], skip_special_tokens=True)
#     results.append({"task_id": data["task_id"], "input": input, "output": output})
|
import re
jsonl_id = re.sub(".*/", "", adapter_id)
with open(f"./{jsonl_id}-outputs.jsonl", 'w', encoding='utf-8') as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)  # ensure_ascii=False for handling non-ASCII characters
        f.write('\n')
```
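
If you would rather ship a standalone checkpoint than load the base model and adapter separately each time, peft can fold the LoRA deltas into the base weights. This is a minimal sketch, not part of the original notebook: it assumes enough memory to reload the base model unquantized in bfloat16 (LoRA weights cannot be merged into 4-bit quantized weights), and the output path is arbitrary.

```python
# Optional: merge the LoRA adapter into the base model for standalone use.
# Sketch only; assumes the full bf16 model fits in available memory.
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "llm-jp/llm-jp-3-13b",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
merged = PeftModel.from_pretrained(base, "totsukash/llm-jp-3-13b-finetune")
merged = merged.merge_and_unload()               # fold LoRA deltas into the weights
merged.save_pretrained("./llm-jp-3-13b-merged")  # hypothetical output path
```

The merged directory can then be loaded with a plain `AutoModelForCausalLM.from_pretrained` call, with no peft dependency at inference time.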