muellerzr (HF staff) committed on
Commit 0ba62b0
1 Parent(s): d45d1fb

Working demo

app.py ADDED
@@ -0,0 +1,62 @@
+ import gradio as gr
+
+ _remove_color = "rgb(103,6,12)"
+ _addition_color = "rgb(6,103,12)"
+
+ def mark_text(text, add=True):
+     if add:
+         color = _addition_color
+     else:
+         color = _remove_color
+     return f'<mark style="background-color:{color}!important;color:white!important">{text}</mark>'
+
+ def highlight(option):
+     filename = option.lower().replace(' ', '_')
+     with open(f"code_samples/{filename}") as f:
+         output = f.read()
+     lines = output.split("\n")
+     for i, line in enumerate(lines):
+         if line.startswith("-"):
+             lines[i] = "- " + line[1:]
+             lines[i] = mark_text(lines[i], False)
+         elif line.startswith("+"):
+             lines[i] = "+ " + line[1:]
+             lines[i] = mark_text(lines[i], True)
+         else:
+             lines[i] = " " + line
+     return "\n".join(lines).rstrip()
+
+ with open("code_samples/initial") as f:
+     template = f.read()
+
+ with open("code_samples/accelerate") as f:
+     accelerated_template = f.read()
+
+ with open("code_samples/initial_with_metrics") as f:
+     metrics_template = f.read()
+
+ def change(inp):
+     if inp == "Basic":
+         return (template, highlight(inp), "## Accelerate Code (Base Integration)")
+     elif inp == "Calculating Metrics":
+         return (metrics_template, highlight(inp), f"## Accelerate Code ({inp})")
+     else:
+         return (accelerated_template, highlight(inp), f"## Accelerate Code ({inp})")
+
+ with gr.Blocks() as demo:
+     gr.Markdown(f'''# Accelerate Template Generator
+ Here is a very basic Python training loop.
+ Select how you would like to introduce an Accelerate capability to add to it.''')
+     inp = gr.Radio(
+         ["Basic", "Calculating Metrics", "Checkpointing", "Gradient Accumulation"],
+         label="Select a feature"
+     )
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown("## Initial Code")
+             code = gr.Markdown(template)
+         with gr.Column():
+             feature = gr.Markdown("## Accelerate Code")
+             out = gr.Markdown()
+     inp.change(fn=change, inputs=inp, outputs=[code, out, feature])
+ demo.launch()
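For reference, here is a small standalone sketch of the markup convention that highlight() applies when the demo renders a sample. The sample string below is a made-up stand-in, not one of the committed code_samples files: lines starting with "-" get wrapped in a dark-red <mark>, lines starting with "+" in a dark-green one, and everything else is padded with a space.

```
# Standalone sketch mirroring the marking logic in app.py above.
# "sample" is a hypothetical stand-in for a code_samples file.
_remove_color = "rgb(103,6,12)"
_addition_color = "rgb(6,103,12)"

def mark_text(text, add=True):
    color = _addition_color if add else _remove_color
    return f'<mark style="background-color:{color}!important;color:white!important">{text}</mark>'

sample = "-loss.backward()\n+accelerator.backward(loss)\noptimizer.step()"
for line in sample.split("\n"):
    if line.startswith("-"):
        print(mark_text("- " + line[1:], False))   # removal, red highlight
    elif line.startswith("+"):
        print(mark_text("+ " + line[1:], True))    # addition, green highlight
    else:
        print(" " + line)                          # unchanged line
```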
code_samples/accelerate ADDED
@@ -0,0 +1,17 @@
+ ```
+ from accelerate import Accelerator
+ accelerator = Accelerator()
+ train_dataloader, model, optimizer, scheduler = accelerator.prepare(
+     dataloader, model, optimizer, scheduler
+ )
+
+ model.train()
+ for batch in train_dataloader:
+     optimizer.zero_grad()
+     inputs, targets = batch
+     outputs = model(inputs)
+     loss = loss_function(outputs, targets)
+     accelerator.backward(loss)
+     optimizer.step()
+     scheduler.step()
+ ```
code_samples/basic ADDED
@@ -0,0 +1,19 @@
+ <pre>
+ +from accelerate import Accelerator
+ +accelerator = Accelerator()
+ +dataloader, model, optimizer, scheduler = accelerator.prepare(
+ +    dataloader, model, optimizer, scheduler
+ +)
+
+ for batch in dataloader:
+     optimizer.zero_grad()
+     inputs, targets = batch
+ -    inputs = inputs.to(device)
+ -    targets = targets.to(device)
+     outputs = model(inputs)
+     loss = loss_function(outputs, targets)
+ -    loss.backward()
+ +    accelerator.backward(loss)
+     optimizer.step()
+     scheduler.step()
+ </pre>
code_samples/calculating_metrics ADDED
@@ -0,0 +1,36 @@
+ <pre>
+ import evaluate
+ +from accelerate import Accelerator
+ +accelerator = Accelerator()
+ +train_dataloader, eval_dataloader, model, optimizer, scheduler = accelerator.prepare(
+ +    train_dataloader, eval_dataloader, model, optimizer, scheduler
+ +)
+ metric = evaluate.load("accuracy")
+ for batch in train_dataloader:
+     optimizer.zero_grad()
+     inputs, targets = batch
+ -    inputs = inputs.to(device)
+ -    targets = targets.to(device)
+     outputs = model(inputs)
+     loss = loss_function(outputs, targets)
+     loss.backward()
+     optimizer.step()
+     scheduler.step()
+
+ model.eval()
+ for batch in eval_dataloader:
+     inputs, targets = batch
+ -    inputs = inputs.to(device)
+ -    targets = targets.to(device)
+     with torch.no_grad():
+         outputs = model(inputs)
+         predictions = outputs.argmax(dim=-1)
+ +    predictions, targets = accelerator.gather_for_metrics(
+ +        (predictions, targets)
+ +    )
+     metric.add_batch(
+         predictions=predictions,
+         references=targets
+     )
+ print(metric.compute())
+ </pre>
code_samples/checkpointing ADDED
@@ -0,0 +1,17 @@
+ <pre>
+ from accelerate import Accelerator
+ accelerator = Accelerator()
+ dataloader, model, optimizer, scheduler = accelerator.prepare(
+     dataloader, model, optimizer, scheduler
+ )
+
+ for batch in dataloader:
+     optimizer.zero_grad()
+     inputs, targets = batch
+     outputs = model(inputs)
+     loss = loss_function(outputs, targets)
+     accelerator.backward(loss)
+     optimizer.step()
+     scheduler.step()
+ +accelerator.save_state("checkpoint_dir")
+ </pre>
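As a usage note, the counterpart of save_state for resuming training is accelerator.load_state, which restores what was written to the checkpoint directory. The sketch below is not part of this commit; the toy model, optimizer, scheduler, and dataloader are stand-ins for the real training objects.

```
# Hypothetical resume sketch; toy objects stand in for the real training setup.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
dataloader = torch.utils.data.DataLoader([(torch.zeros(4), 0)])

dataloader, model, optimizer, scheduler = accelerator.prepare(
    dataloader, model, optimizer, scheduler
)
accelerator.save_state("checkpoint_dir")   # as in the sample above
accelerator.load_state("checkpoint_dir")   # restores model/optimizer/scheduler and RNG state
```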
code_samples/gradient_accumulation ADDED
@@ -0,0 +1,19 @@
+ <pre>
+ from accelerate import Accelerator
+ accelerator = Accelerator(
+ +    gradient_accumulation_steps=2,
+ )
+ dataloader, model, optimizer, scheduler = accelerator.prepare(
+     dataloader, model, optimizer, scheduler
+ )
+
+ for batch in dataloader:
+ +    with accelerator.accumulate(model):
+         optimizer.zero_grad()
+         inputs, targets = batch
+         outputs = model(inputs)
+         loss = loss_function(outputs, targets)
+         accelerator.backward(loss)
+         optimizer.step()
+         scheduler.step()
+ </pre>
code_samples/initial ADDED
@@ -0,0 +1,12 @@
+ ```
+ for batch in dataloader:
+     optimizer.zero_grad()
+     inputs, targets = batch
+     inputs = inputs.to(device)
+     targets = targets.to(device)
+     outputs = model(inputs)
+     loss = loss_function(outputs, targets)
+     loss.backward()
+     optimizer.step()
+     scheduler.step()
+ ```
code_samples/initial_with_metrics ADDED
@@ -0,0 +1,28 @@
+ ```
+ import evaluate
+ metric = evaluate.load("accuracy")
+ for batch in train_dataloader:
+     optimizer.zero_grad()
+     inputs, targets = batch
+     inputs = inputs.to(device)
+     targets = targets.to(device)
+     outputs = model(inputs)
+     loss = loss_function(outputs, targets)
+     loss.backward()
+     optimizer.step()
+     scheduler.step()
+
+ model.eval()
+ for batch in eval_dataloader:
+     inputs, targets = batch
+     inputs = inputs.to(device)
+     targets = targets.to(device)
+     with torch.no_grad():
+         outputs = model(inputs)
+         predictions = outputs.argmax(dim=-1)
+     metric.add_batch(
+         predictions=predictions,
+         references=targets
+     )
+ print(metric.compute())
+ ```