# Hugging Face Spaces page header (status: Sleeping) — not part of the program.
import numpy as np
import plotly.graph_objects as go
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification, make_blobs
from sklearn.neural_network import MLPClassifier
import gradio as gr
# =========================================================================
# Configuration constants shared across the app.
GRANULARITY = 0.2  # grid step used when sampling the decision surface
MARGIN = 0.5       # padding added around the data when building the grid
N_SAMPLES = 150    # points per toy dataset
SEED = 1           # random_state for reproducible dataset generation

# Four standardized 2-D binary toy datasets, keyed by the name shown in the UI.
datasets = {}

X, y = make_moons(n_samples=N_SAMPLES, noise=0.2, random_state=SEED)
X = StandardScaler().fit_transform(X)
datasets["Moons"] = (X.copy(), y.copy())

X, y = make_circles(n_samples=N_SAMPLES, noise=0.2, factor=0.5, random_state=SEED)
X = StandardScaler().fit_transform(X)
datasets["Circles"] = (X.copy(), y.copy())

X, y = make_blobs(n_samples=N_SAMPLES, n_features=2, centers=4, cluster_std=2, random_state=SEED)
X = StandardScaler().fit_transform(X)
# Collapse the 4 blob centers into 2 classes to keep the task binary.
y[y==2] = 0
y[y==3] = 1
datasets["Blobs"] = (X.copy(), y.copy())

X, y = make_classification(n_samples=N_SAMPLES, n_features=2, n_redundant=0, n_informative=2, n_clusters_per_class=1, random_state=SEED)
# Jitter the points with a SEEDED generator; the original used the unseeded
# np.random.uniform, which made the "Linear" dataset differ on every import.
rng = np.random.RandomState(SEED)
X += 2 * rng.uniform(size=X.shape)
X = StandardScaler().fit_transform(X)
datasets["Linear"] = (X.copy(), y.copy())
# =========================================================================
def get_figure_dict():
    """Build a skeleton plotly figure dict with empty data/frames.

    The layout carries the animation controls: a Play/Stop button pair
    (updatemenus) and an "Iteration" slider whose steps are filled in later,
    one per recorded training frame.
    """
    animate_on = {
        "args": [None, {"mode": "immediate", "fromcurrent": False,
                        "frame": {"duration": 50}, "transition": {"duration": 50}}],
        "label": "Play",
        "method": "animate",
    }
    animate_off = {
        "args": [[None], {"mode": "immediate"}],
        "label": "Stop",
        "method": "animate",
    }
    iteration_slider = {"steps": [], "active": 0,
                        "currentvalue": {"prefix": "Iteration: "}}
    layout = {
        "width": 600,
        "height": 600,
        "hovermode": False,
        "margin": {"l": 40, "r": 40, "t": 40, "b": 40},
        "title": {"text": "Decision Surface", "x": 0.5},
        "sliders": [iteration_slider],
        "updatemenus": [{
            "buttons": [animate_on, animate_off],
            "direction": "left",
            "pad": {"t": 85},
            "type": "buttons",
            "x": 0.6,
            "y": -0.05,
        }],
    }
    return {"data": [], "layout": layout, "frames": []}
def get_decision_surface(X, model):
    """Sample the classifier's P(class 1) on a regular grid covering X.

    The grid extends MARGIN beyond the data range in each direction, with
    spacing GRANULARITY. Returns flattened grid coordinates and the matching
    probabilities, ready to be passed to go.Contour.
    """
    lower = X.min(axis=0) - MARGIN
    upper = X.max(axis=0) + MARGIN
    grid_x, grid_y = np.meshgrid(np.arange(lower[0], upper[0], GRANULARITY),
                                 np.arange(lower[1], upper[1], GRANULARITY))
    flat_x = grid_x.ravel()
    flat_y = grid_y.ravel()
    # Column 1 of predict_proba = probability of the positive class.
    proba = model.predict_proba(np.column_stack([flat_x, flat_y]))[:, 1]
    return flat_x, flat_y, proba
# =========================================================================
def create_plot(dataset, alpha, h1, h2, seed):
    """Train an MLP on the chosen toy dataset and return an animated plotly
    figure showing how its decision surface evolves during training.

    dataset : key into the module-level `datasets` dict
    alpha   : L2 regularization strength of the MLP
    h1, h2  : sizes of the two hidden layers
    seed    : random_state for the classifier's weight initialization
    """
    X, y = datasets[dataset]
    model = MLPClassifier(alpha=alpha, max_iter=2000, learning_rate_init=0.01,
                          hidden_layer_sizes=[h1, h2], random_state=seed)
    figure_dict = get_figure_dict()

    # One warm-up step so an initial surface can be drawn as the base trace.
    model.partial_fit(X, y, classes=[0, 1])
    xx, yy, zz = get_decision_surface(X, model)
    surface = go.Contour(x=xx, y=yy, z=zz, opacity=0.6, showscale=False)
    points = go.Scatter(x=X[:, 0], y=X[:, 1], mode="markers", marker_color=y,
                        marker={"colorscale": "jet", "size": 8})
    figure_dict["data"] = [surface, points]

    # Train in bursts of 3 iterations, recording one animation frame (and one
    # slider step) per burst, until the loss improvement falls below `tol`
    # or 100 bursts have run. Note: the convergence check happens BEFORE the
    # frame is recorded, so the final near-converged burst is not snapshotted.
    prev_loss = np.inf
    tol = 3e-4
    slider_steps = figure_dict["layout"]["sliders"][0]["steps"]
    for i in range(100):
        for _ in range(3):
            model.partial_fit(X, y, classes=[0, 1])
        if prev_loss - model.loss_ <= tol:
            break
        prev_loss = model.loss_
        xx, yy, zz = get_decision_surface(X, model)
        frame_surface = go.Contour(x=xx, y=yy, z=zz, opacity=0.6, showscale=False)
        figure_dict["frames"].append({"data": [frame_surface], "name": i})
        slider_steps.append({"args": [[i], {"mode": "immediate"}],
                             "method": "animate", "label": i})

    return go.Figure(figure_dict)
# Markdown blurb rendered at the top of the app (passed to gr.Markdown below).
info = '''
# Effect of Regularization Parameter of Multilayer Perceptron
This example demonstrates the effect of varying the regularization parameter (alpha) of a multilayer perceptron on the binary classification of toy datasets, as represented by the decision surface of the classifier.
Higher values of alpha encourages smaller weights, thus making the model less prone to overfitting, while lower values may help against underfitting. Use the slider below to control the amount of regularization and observe how the decision surface changes with higher values.
The neural network is trained until the loss stops decreasing below a specific tolerance. The color of the decision surface represents the probability of observing the corresponding class.
Created by [@huabdul](https://huggingface.co/huabdul) based on [scikit-learn docs](https://scikit-learn.org/stable/auto_examples/neural_networks/plot_mlp_alpha.html).
'''
# ---- Gradio UI: controls on the left, the animated plot on the right. ----
with gr.Blocks(analytics_enabled=False) as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown(info)
            # Dataset selector; choices are the keys of the module-level `datasets` dict.
            dd_dataset = gr.Dropdown(list(datasets.keys()), value="Moons", label="Dataset", interactive=True)
            with gr.Row():
                with gr.Column(min_width=100):
                    # Regularization strength and RNG seed for the MLP.
                    s_alpha = gr.Slider(0, 4, value=0.1, step=0.05, label="α (regularization parameter)")
                    s_seed = gr.Slider(1, 1000, value=1, step=1, label="Seed")
                with gr.Column(min_width=100):
                    # Architecture: two hidden layers, sizes chosen independently.
                    s_h1 = gr.Slider(2, 20, value=10, step=1, label="Hidden layer 1 size")
                    s_h2 = gr.Slider(2, 20, value=10, step=1, label="Hidden layer 2 size")
            submit = gr.Button("Submit")
        with gr.Column():
            plot = gr.Plot(show_label=False)
    # Re-train and redraw on click; also render once when the page first loads.
    submit.click(create_plot, inputs=[dd_dataset, s_alpha, s_h1, s_h2, s_seed], outputs=[plot])
    demo.load(create_plot, inputs=[dd_dataset, s_alpha, s_h1, s_h2, s_seed], outputs=[plot])
demo.launch()