JohanWork committed on
Commit
ee0b5f6
1 Parent(s): 08719b9

add colab example (#1196) [skip ci]

Browse files
examples/colab-notebooks/colab-axolotl-example.ipynb ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "id": "AKjdG7tbTb-n"
7
+ },
8
+ "source": [
9
+ "# Example notebook for running Axolotl on Google Colab"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "code",
14
+ "execution_count": null,
15
+ "metadata": {
16
+ "id": "RcbNpOgWRcii"
17
+ },
18
+ "outputs": [],
19
+ "source": [
20
+ "import torch\n",
21
+ "# Check that a GPU is available; a T4 (free tier) is enough to run this notebook\n",
22
+ "assert (torch.cuda.is_available()==True)"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "markdown",
27
+ "metadata": {
28
+ "id": "h3nLav8oTRA5"
29
+ },
30
+ "source": [
31
+ "## Install Axolotl and dependencies"
32
+ ]
33
+ },
34
+ {
35
+ "cell_type": "code",
36
+ "execution_count": null,
37
+ "metadata": {
38
+ "colab": {
39
+ "base_uri": "https://localhost:8080/"
40
+ },
41
+ "id": "3c3yGAwnOIdi",
42
+ "outputId": "e3777b5a-40ef-424f-e181-62dfecd1dd01"
43
+ },
44
+ "outputs": [],
45
+ "source": [
46
+ "!pip install -e git+https://github.com/OpenAccess-AI-Collective/axolotl#egg=axolotl\n",
47
+ "!pip install flash-attn==\"2.5.0\"\n",
48
+ "!pip install deepspeed==\"0.13.1\""
49
+ ]
50
+ },
51
+ {
52
+ "cell_type": "markdown",
53
+ "metadata": {
54
+ "id": "BW2MFr7HTjub"
55
+ },
56
+ "source": [
57
+ "## Create a YAML config file"
58
+ ]
59
+ },
60
+ {
61
+ "cell_type": "code",
62
+ "execution_count": null,
63
+ "metadata": {
64
+ "id": "9pkF2dSoQEUN"
65
+ },
66
+ "outputs": [],
67
+ "source": [
68
+ "import yaml\n",
69
+ "\n",
70
+ "# Your YAML string\n",
71
+ "yaml_string = \"\"\"\n",
72
+ "base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T\n",
73
+ "model_type: LlamaForCausalLM\n",
74
+ "tokenizer_type: LlamaTokenizer\n",
75
+ "is_llama_derived_model: true\n",
76
+ "\n",
77
+ "load_in_8bit: false\n",
78
+ "load_in_4bit: true\n",
79
+ "strict: false\n",
80
+ "\n",
81
+ "datasets:\n",
82
+ " - path: mhenrichsen/alpaca_2k_test\n",
83
+ " type: alpaca\n",
84
+ "dataset_prepared_path:\n",
85
+ "val_set_size: 0.05\n",
86
+ "output_dir: ./qlora-out\n",
87
+ "\n",
88
+ "adapter: qlora\n",
89
+ "lora_model_dir:\n",
90
+ "\n",
91
+ "sequence_len: 1096\n",
92
+ "sample_packing: true\n",
93
+ "pad_to_sequence_len: true\n",
94
+ "\n",
95
+ "lora_r: 32\n",
96
+ "lora_alpha: 16\n",
97
+ "lora_dropout: 0.05\n",
98
+ "lora_target_modules:\n",
99
+ "lora_target_linear: true\n",
100
+ "lora_fan_in_fan_out:\n",
101
+ "\n",
102
+ "wandb_project:\n",
103
+ "wandb_entity:\n",
104
+ "wandb_watch:\n",
105
+ "wandb_name:\n",
106
+ "wandb_log_model:\n",
107
+ "\n",
108
+ "mlflow_experiment_name: colab-example\n",
109
+ "\n",
110
+ "gradient_accumulation_steps: 1\n",
111
+ "micro_batch_size: 1\n",
112
+ "num_epochs: 4\n",
113
+ "max_steps: 20\n",
114
+ "optimizer: paged_adamw_32bit\n",
115
+ "lr_scheduler: cosine\n",
116
+ "learning_rate: 0.0002\n",
117
+ "\n",
118
+ "train_on_inputs: false\n",
119
+ "group_by_length: false\n",
120
+ "bf16: false\n",
121
+ "fp16: true\n",
122
+ "tf32: false\n",
123
+ "\n",
124
+ "gradient_checkpointing: true\n",
125
+ "early_stopping_patience:\n",
126
+ "resume_from_checkpoint:\n",
127
+ "local_rank:\n",
128
+ "logging_steps: 1\n",
129
+ "xformers_attention:\n",
130
+ "flash_attention: false\n",
131
+ "\n",
132
+ "warmup_steps: 10\n",
133
+ "evals_per_epoch:\n",
134
+ "saves_per_epoch:\n",
135
+ "debug:\n",
136
+ "deepspeed:\n",
137
+ "weight_decay: 0.0\n",
138
+ "fsdp:\n",
139
+ "fsdp_config:\n",
140
+ "special_tokens:\n",
141
+ "\n",
142
+ "\"\"\"\n",
143
+ "\n",
144
+ "# Convert the YAML string to a Python dictionary\n",
145
+ "yaml_dict = yaml.safe_load(yaml_string)\n",
146
+ "\n",
147
+ "# Specify your file path\n",
148
+ "file_path = 'test_axolotl.yaml'\n",
149
+ "\n",
150
+ "# Write the YAML file\n",
151
+ "with open(file_path, 'w') as file:\n",
152
+ " yaml.dump(yaml_dict, file)\n"
153
+ ]
154
+ },
155
+ {
156
+ "cell_type": "markdown",
157
+ "metadata": {
158
+ "id": "bidoj8YLTusD"
159
+ },
160
+ "source": [
161
+ "## Launch the training"
162
+ ]
163
+ },
164
+ {
165
+ "cell_type": "code",
166
+ "execution_count": null,
167
+ "metadata": {
168
+ "colab": {
169
+ "base_uri": "https://localhost:8080/"
170
+ },
171
+ "id": "ydTI2Jk2RStU",
172
+ "outputId": "d6d0df17-4b53-439c-c802-22c0456d301b"
173
+ },
174
+ "outputs": [],
175
+ "source": [
176
+ "# By using the ! prefix, the command will be executed as a bash command\n",
177
+ "!accelerate launch -m axolotl.cli.train /content/test_axolotl.yaml"
178
+ ]
179
+ }
180
+ ],
181
+ "metadata": {
182
+ "accelerator": "GPU",
183
+ "colab": {
184
+ "gpuType": "T4",
185
+ "provenance": []
186
+ },
187
+ "kernelspec": {
188
+ "display_name": "Python 3",
189
+ "name": "python3"
190
+ },
191
+ "language_info": {
192
+ "name": "python"
193
+ }
194
+ },
195
+ "nbformat": 4,
196
+ "nbformat_minor": 0
197
+ }