add readme
README.md CHANGED
@@ -20,3 +20,55 @@ language:
This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library.

[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
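The added section is an inference script: it installs Unsloth, loads the fine-tuned model in 4-bit, runs greedy decoding over the `elyza-tasks-100-TV_0.jsonl` tasks, and writes the predictions to a JSONL file.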
```python
%%capture
!pip install unsloth
!pip uninstall unsloth -y && pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"

from unsloth import FastLanguageModel
import torch
import json

model_name = "84basi/llm-jp-3-13b-finetune-2.2"
token = "HF token"  # replace with your Hugging Face access token

max_seq_length = 2048
dtype = None         # None lets Unsloth auto-detect (float16 or bfloat16)
load_in_4bit = True  # load the weights with 4-bit quantization to save VRAM

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = model_name,
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    token = token,
)
FastLanguageModel.for_inference(model)  # switch to Unsloth's fast inference mode

# Load the evaluation tasks. Records may span several lines, so accumulate
# text until a closing brace before parsing each JSON object.
datasets = []
with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""

from tqdm import tqdm

results = []
for dt in tqdm(datasets):
    input = dt["input"]
    # "### 指示" = instruction, "### 回答" = answer
    prompt = f"""### 指示\n{input}\n### 回答\n"""
    inputs = tokenizer([prompt], return_tensors = "pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens = 512, use_cache = True, do_sample=False, repetition_penalty=1.2)
    prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]
    results.append({"task_id": dt["task_id"], "input": input, "output": prediction})

# Write one JSON object per line; use only the repo component of model_name
# so the path stays valid (model_name itself contains a "/").
with open(f"/content/{model_name.split('/')[-1]}_output.jsonl", 'w', encoding='utf-8') as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)
        f.write('\n')
```
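A quick way to sanity-check the run is to read the predictions back. This is a minimal sketch; the file name below is an assumption derived from `model_name` in the script above.

```python
import json

# Minimal sketch (assumed path): read back the predictions written above.
output_path = "/content/llm-jp-3-13b-finetune-2.2_output.jsonl"
with open(output_path, encoding="utf-8") as f:
    predictions = [json.loads(line) for line in f]

print(f"{len(predictions)} predictions")
print(predictions[0]["task_id"], predictions[0]["output"][:80])
```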