import gradio as gr
from markup import highlight, get_text
from template import get_templates

templates = get_templates()


def change(inp, textbox):
    """Based on an `inp`, render and highlight the appropriate code sample.

    Args:
        inp (`str`):
            The input button from the interface.
        textbox (`str`):
            The textbox specifying the tab name from the interface.

    Returns:
        `tuple`: For the base tab, a tuple of the highlighted code diff, the section title, the
        explanation, and the documentation links. For the large scale training tab, the highlighted
        config is prepended to that tuple.
    """
    if textbox == "base":
        code, explanation, docs = get_text(inp, textbox)
        if inp == "Basic":
            return (highlight(code), "## Accelerate Code (Base Integration)", explanation, docs)
        # Every other base feature (Calculating Metrics, Checkpointing, ...) shares the same title format.
        return (highlight(code), f"## Accelerate Code ({inp})", explanation, docs)
    elif textbox == "large_scale_training":
        config, code, explanation, docs = get_text(inp, textbox)
        return (highlight(config), highlight(code), f"## Accelerate Code ({inp})", explanation, docs)


default = change("Basic", "base")
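# `default` holds the tuple `change` returns for the base tab and seeds the initial Markdown
# content in the layout functions below. A sketch of the expected shapes (the actual contents
# come from markup.get_text, so this is an assumption based on how the tuples are consumed here):
#
#   out, title, explanation, docs = change("Checkpointing", "base")
#   config, out, title, explanation, docs = change("Multi GPU", "large_scale_training")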


def base_features(textbox):
    # textbox.value = "base"
    inp = gr.Radio(
        ["Basic", "Calculating Metrics", "Checkpointing", "Experiment Tracking", "Gradient Accumulation"],
        label="Select a feature you would like to integrate",
        value="Basic",
    )
    with gr.Row():
        with gr.Column():
            feature = gr.Markdown("## Accelerate Code")
            out = gr.Markdown(default[0])
    with gr.Row():
        with gr.Column():
            gr.Markdown("## Explanation")
            explanation = gr.Markdown(default[2])
    with gr.Row():
        with gr.Column():
            gr.Markdown("## Documentation Links")
            docs = gr.Markdown(default[3])
    inp.change(fn=change, inputs=[inp, textbox], outputs=[out, feature, explanation, docs])


def large_scale_training(textbox):
    # textbox.value = "large_scale_training"
    inp = gr.Radio(
        ["Multi GPU", "Multi Node Multi GPU", "AWS SageMaker", "DeepSpeed", "PyTorch FSDP", "Megatron-LM"],
        label="Select a feature you would like to integrate",
        value="Basic",
    )
    with gr.Row():
        with gr.Column():
            feature = gr.Markdown("## Accelerate Config")
            config = gr.Markdown("")
    with gr.Row():
        with gr.Column():
            feature = gr.Markdown("## Accelerate Code")
            out = gr.Markdown("")
    with gr.Row():
        with gr.Column():
            gr.Markdown("## Explanation")
            explanation = gr.Markdown("")
    with gr.Row():
        with gr.Column():
            gr.Markdown("## Documentation Links")
            docs = gr.Markdown("")
    inp.change(fn=change, inputs=[inp, textbox], outputs=[config, out, feature, explanation, docs])


# def big_model_inference():
#     inp = gr.Radio(
#         ["Accelerate's Big Model Inference",], # "DeepSpeed ZeRO Stage-3 Offload"
#         label="Select a feature you would like to integrate",
#         value="Basic",
#     )
#     with gr.Row():
#         with gr.Column():
#             feature = gr.Markdown("## Accelerate Code")
#             out = gr.Markdown(default[0])
#     with gr.Row():
#         with gr.Column():
#             gr.Markdown(default[1])
#             explanation = gr.Markdown(default[2])
#     with gr.Row():
#         with gr.Column():
#             gr.Markdown("## Documentation Links")
#             docs = gr.Markdown(default[3])
#     inp.change(fn=change, inputs=[inp, "big_model_inference"], outputs=[out, feature, explanation, docs])


# def notebook_launcher():
#     inp = gr.Radio(
#         ["Colab GPU", "Colab TPU", "Kaggle GPU", "Kaggle Multi GPU", "Kaggle TPU", "Multi GPU VMs"],
#         label="Select a feature you would like to integrate",
#         value="Basic",
#     )
#     with gr.Row():
#         with gr.Column():
#             feature = gr.Markdown("## Accelerate Code")
#             out = gr.Markdown(default[0])
#     with gr.Row():
#         with gr.Column():
#             gr.Markdown(default[1])
#             explanation = gr.Markdown(default[2])
#     with gr.Row():
#         with gr.Column():
#             gr.Markdown("## Documentation Links")
#             docs = gr.Markdown(default[3])
#     inp.change(fn=change, inputs=[inp, "notebook_launcher"], outputs=[out, feature, explanation, docs])


with gr.Blocks() as demo:
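    # Each tab carries a hidden gr.Textbox holding its tab name; passing that textbox as an
    # extra input lets the single `change` callback know which set of code samples to render.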

    with gr.Tabs():
        with gr.TabItem("Simplify your code and improve efficieny"):
            textbox = gr.Textbox(label="tab_name", visible=False, value="base")
            base_features(textbox)
        with gr.TabItem("Large Scale Training"):
            textbox = gr.Textbox(label="tab_name", visible=False, value="large_scale_training")
            large_scale_training(textbox)
        with gr.TabItem("Big Model Inference"):
            # big_model_inference()
            pass
        with gr.TabItem("Notebook Launcher Intergation"):
            # notebook_launcher()
            pass


demo.launch()