caliex committed on
Commit
d92ff52
1 Parent(s): ff66f4a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -83,9 +83,9 @@ iface = gr.Interface(
83
  title="Gaussian Mixture Model Initialization Methods",
84
  description="GMM Initialization Methods is a visualization tool showcasing different initialization methods in Gaussian Mixture Models. The example demonstrates four initialization approaches: kmeans (default), random, random_from_data, and k-means++. The plot displays orange diamonds representing the initialization centers for each method, while crosses represent the data points with color-coded classifications after GMM convergence. The numbers in the subplots indicate the iteration count and relative initialization time. Alternative methods show lower initialization times but may require more iterations to converge. Notably, k-means++ achieves a good balance of fast initialization and convergence. See the original scikit-learn example here: https://scikit-learn.org/stable/auto_examples/mixture/plot_gmm_init.html",
85
  inputs=[
86
- gr.inputs.Dropdown(["kmeans", "random_from_data", "k-means++", "random"], label="Method", default="kmeans"),
87
- gr.inputs.Number(default=4, label="Number of Components"),
88
- gr.inputs.Number(default=2000, label="Max Iterations")
89
  ],
90
  outputs="plot",
91
  examples=[
@@ -94,6 +94,7 @@ iface = gr.Interface(
94
  ["k-means++", 8, 1000],
95
  ["random", 11, 1000],
96
  ],
 
97
  )
98
 
99
  iface.launch()
 
83
  title="Gaussian Mixture Model Initialization Methods",
84
  description="GMM Initialization Methods is a visualization tool showcasing different initialization methods in Gaussian Mixture Models. The example demonstrates four initialization approaches: kmeans (default), random, random_from_data, and k-means++. The plot displays orange diamonds representing the initialization centers for each method, while crosses represent the data points with color-coded classifications after GMM convergence. The numbers in the subplots indicate the iteration count and relative initialization time. Alternative methods show lower initialization times but may require more iterations to converge. Notably, k-means++ achieves a good balance of fast initialization and convergence. See the original scikit-learn example here: https://scikit-learn.org/stable/auto_examples/mixture/plot_gmm_init.html",
85
  inputs=[
86
+ gr.inputs.Radio(["kmeans", "random_from_data", "k-means++", "random"], label="Method", default="kmeans"),
87
+ gr.inputs.Slider(minimum=2, step=1, maximum=10, default=4, label="Number of Components"),
88
+ gr.inputs.Slider(minimum=1, maximum=9000, default=2000, label="Max Iterations")
89
  ],
90
  outputs="plot",
91
  examples=[
 
94
  ["k-means++", 8, 1000],
95
  ["random", 11, 1000],
96
  ],
97
+ live=True
98
  )
99
 
100
  iface.launch()