|
from datasets import Dataset |
|
import pandas as pd |
|
from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForSeq2Seq, TrainingArguments, HfArgumentParser, Trainer |
|
import os |
|
import torch |
|
from peft import LoraConfig, TaskType, get_peft_model |
|
from dataclasses import dataclass, field |
|
import deepspeed |
|
# Pre-build/load DeepSpeed's CPU Adam fused op at import time so the
# ZeRO-Offload optimizer kernel is compiled before training begins
# (avoids a JIT-compile stall, or a failure, mid-run).
deepspeed.ops.op_builder.CPUAdamBuilder().load()
|
|
|
@dataclass
class FinetuneArguments:
    """Script-specific command-line arguments (parsed via HfArgumentParser).

    Attributes are exposed as ``--model_path`` style CLI flags.
    """

    # Local path (or hub id) of the base model/tokenizer to fine-tune.
    model_path: str = "./OpenBMB/MiniCPM-2B-sft-fp32"
|
|
|
|
|
def process_func(example):
    """Tokenize one instruction-tuning example into causal-LM training inputs.

    Builds the sequence ``<User>{instruction + input}<AI>{output}`` followed
    by a terminator token, and masks the prompt portion of the labels with
    -100 so the loss is computed only on the response.

    Relies on the module-level ``tokenizer`` created in ``__main__``
    (which sets ``pad_token_id = eos_token_id``, so the appended pad token
    acts as the end-of-sequence marker).

    Args:
        example: dict with string fields "instruction", "input" and "output".

    Returns:
        dict with equal-length lists "input_ids", "attention_mask", "labels",
        truncated to at most MAX_LENGTH tokens.
    """
    MAX_LENGTH = 512
    # Tokenize prompt and response separately so we know where the prompt
    # ends and can mask its labels below.
    instruction = tokenizer(
        f"<User>{example['instruction']+example['input']}<AI>",
        add_special_tokens=False,
    )
    response = tokenizer(f"{example['output']}", add_special_tokens=False)

    # Prompt + response + terminator (pad_token_id == eos_token_id here).
    input_ids = instruction["input_ids"] + response["input_ids"] + [tokenizer.pad_token_id]
    attention_mask = instruction["attention_mask"] + response["attention_mask"] + [1]
    # -100 is ignored by the cross-entropy loss: only response tokens
    # (and the terminator) contribute to training.
    labels = [-100] * len(instruction["input_ids"]) + response["input_ids"] + [tokenizer.pad_token_id]

    # NOTE(review): truncation happens after the terminator is appended, so an
    # over-long example loses its EOS marker — kept as-is to preserve behavior.
    if len(input_ids) > MAX_LENGTH:
        input_ids = input_ids[:MAX_LENGTH]
        attention_mask = attention_mask[:MAX_LENGTH]
        labels = labels[:MAX_LENGTH]
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels,
    }
|
|
|
|
|
# LoRA adapter configuration: train low-rank adapters on the attention
# query/value projections only; the base model stays frozen.
config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    # Module names to wrap with LoRA adapters — assumes the model's attention
    # layers expose q_proj/v_proj (TODO confirm for the loaded checkpoint).
    target_modules=["q_proj", "v_proj"],
    inference_mode=False,  # training mode: adapter weights are trainable
    r=8,            # adapter rank
    lora_alpha=32,  # scaling numerator (effective scale = alpha / r = 4)
    lora_dropout=0.1
)
|
|
|
|
|
if "__main__" == __name__: |
|
|
|
|
|
finetune_args, training_args = HfArgumentParser( |
|
(FinetuneArguments, TrainingArguments) |
|
).parse_args_into_dataclasses() |
|
|
|
|
|
|
|
df = pd.read_json('./Dataset/Read_Comperhension50k.jsonl',lines=True) |
|
ds = Dataset.from_pandas(df) |
|
|
|
tokenizer = AutoTokenizer.from_pretrained(finetune_args.model_path, use_fast=False, trust_remote_code=True) |
|
tokenizer.padding_side = 'right' |
|
tokenizer.pad_token_id = tokenizer.eos_token_id |
|
|
|
tokenized_id = ds.map(process_func, remove_columns=ds.column_names) |
|
|
|
|
|
model = AutoModelForCausalLM.from_pretrained(finetune_args.model_path, trust_remote_code=True, torch_dtype=torch.half, device_map={"": int(os.environ.get("LOCAL_RANK") or 0)}) |
|
model = get_peft_model(model, config) |
|
|
|
trainer = Trainer( |
|
model=model, |
|
args=training_args, |
|
train_dataset=tokenized_id, |
|
data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True), |
|
) |
|
trainer.train() |
|
trainer.save_model() |