Dataset metadata

Modalities: Text
Formats: parquet
Size: < 1K rows
Libraries: Datasets, pandas
Column schema (from the dataset viewer)

repository           string, 11 distinct values
repo_id              string, length 1-3
target_module_path   string, length 16-72
prompt               string, length 407-21.7k
relavent_test_path   string, length 51-97
full_function        string, length 2.6k-33.8k
function_name        string, length 3-49
context-complexity   string, 3 distinct values
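Since the card lists the Datasets and pandas libraries, a minimal loading sketch follows; the Hub id "user/repo-eval-benchmark" is a placeholder, as this card does not state the dataset's actual repository id.

# Sketch of loading this dataset; "user/repo-eval-benchmark" is a placeholder
# Hub id, not the real one (it is not given on this card).
from datasets import load_dataset

ds = load_dataset("user/repo-eval-benchmark", split="train")
print(ds.column_names)            # repository, repo_id, prompt, full_function, ...
print(ds[0]["function_name"])     # inspect a single record

# The underlying parquet can also be handled as a pandas DataFrame.
df = ds.to_pandas()
print(df["context-complexity"].value_counts())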
repository: seaborn
repo_id: 6
target_module_path: seaborn/_base.py
prompt:
def iter_data( self, grouping_vars=None, *, reverse=False, from_comp_data=False, by_facet=True, allow_empty=False, dropna=True, ): """Generator for getting subsets of data defined by semantic variables. Also injects "col" and "row" into grouping semantics. Parameters ---------- grouping_vars : string or list of strings Semantic variables that define the subsets of data. reverse : bool If True, reverse the order of iteration. from_comp_data : bool If True, use self.comp_data rather than self.plot_data by_facet : bool If True, add faceting variables to the set of grouping variables. allow_empty : bool If True, yield an empty dataframe when no observations exist for combinations of grouping variables. dropna : bool If True, remove rows with missing data. Yields ------ sub_vars : dict Keys are semantic names, values are the level of that semantic. sub_data : :class:`pandas.DataFrame` Subset of ``plot_data`` for this combination of semantic values. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_VectorPlotter.iter_data.txt
full_function:
def iter_data( self, grouping_vars=None, *, reverse=False, from_comp_data=False, by_facet=True, allow_empty=False, dropna=True, ): """Generator for getting subsets of data defined by semantic variables. Also injects "col" and "row" into grouping semantics. Parameters ---------- grouping_vars : string or list of strings Semantic variables that define the subsets of data. reverse : bool If True, reverse the order of iteration. from_comp_data : bool If True, use self.comp_data rather than self.plot_data by_facet : bool If True, add faceting variables to the set of grouping variables. allow_empty : bool If True, yield an empty dataframe when no observations exist for combinations of grouping variables. dropna : bool If True, remove rows with missing data. Yields ------ sub_vars : dict Keys are semantic names, values are the level of that semantic. sub_data : :class:`pandas.DataFrame` Subset of ``plot_data`` for this combination of semantic values. """ # TODO should this default to using all (non x/y?) semantics? # or define grouping vars somewhere? if grouping_vars is None: grouping_vars = [] elif isinstance(grouping_vars, str): grouping_vars = [grouping_vars] elif isinstance(grouping_vars, tuple): grouping_vars = list(grouping_vars) # Always insert faceting variables if by_facet: facet_vars = {"col", "row"} grouping_vars.extend( facet_vars & set(self.variables) - set(grouping_vars) ) # Reduce to the semantics used in this plot grouping_vars = [var for var in grouping_vars if var in self.variables] if from_comp_data: data = self.comp_data else: data = self.plot_data if dropna: data = data.dropna() levels = self.var_levels.copy() if from_comp_data: for axis in {"x", "y"} & set(grouping_vars): converter = self.converters[axis].iloc[0] if self.var_types[axis] == "categorical": if self._var_ordered[axis]: # If the axis is ordered, then the axes in a possible # facet grid are by definition "shared", or there is a # single axis with a unique cat -> idx mapping. # So we can just take the first converter object. levels[axis] = converter.convert_units(levels[axis]) else: # Otherwise, the mappings may not be unique, but we can # use the unique set of index values in comp_data. levels[axis] = np.sort(data[axis].unique()) else: transform = converter.get_transform().transform levels[axis] = transform(converter.convert_units(levels[axis])) if grouping_vars: grouped_data = data.groupby( grouping_vars, sort=False, as_index=False, observed=False, ) grouping_keys = [] for var in grouping_vars: key = levels.get(var) grouping_keys.append([] if key is None else key) iter_keys = itertools.product(*grouping_keys) if reverse: iter_keys = reversed(list(iter_keys)) for key in iter_keys: pd_key = ( key[0] if len(key) == 1 and _version_predates(pd, "2.2.0") else key ) try: data_subset = grouped_data.get_group(pd_key) except KeyError: # XXX we are adding this to allow backwards compatibility # with the empty artists that old categorical plots would # add (before 0.12), which we may decide to break, in which # case this option could be removed data_subset = data.loc[[]] if data_subset.empty and not allow_empty: continue sub_vars = dict(zip(grouping_vars, key)) yield sub_vars, data_subset.copy() else: yield {}, data.copy()
function_name: VectorPlotter.iter_data
context-complexity: Self-Contained
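VectorPlotter.iter_data is internal seaborn machinery, so there is no public call to demonstrate; the sketch below only mirrors the documented behaviour (yielding a sub_vars dict and a data subset per combination of grouping variables) with plain pandas and itertools, and the toy DataFrame is illustrative.

import itertools
import pandas as pd

df = pd.DataFrame({
    "x": [1, 2, 3, 4], "y": [1, 4, 9, 16],
    "hue": ["a", "a", "b", "b"], "col": ["u", "v", "u", "v"],
})
grouping_vars = ["hue", "col"]
levels = {var: sorted(df[var].unique()) for var in grouping_vars}

# Yield every combination of levels with the matching (possibly empty) subset,
# analogous to iter_data's sub_vars / sub_data pairs.
for key in itertools.product(*levels.values()):
    sub_vars = dict(zip(grouping_vars, key))
    mask = (df[grouping_vars] == pd.Series(sub_vars)).all(axis=1)
    print(sub_vars, len(df[mask]))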
repository: seaborn
repo_id: 21
target_module_path: seaborn/axisgrid.py
prompt:
def add_legend(self, legend_data=None, title=None, label_order=None, adjust_subtitles=False, **kwargs): """Draw a legend, maybe placing it outside axes and resizing the figure. Parameters ---------- legend_data : dict Dictionary mapping label names (or two-element tuples where the second element is a label name) to matplotlib artist handles. The default reads from ``self._legend_data``. title : string Title for the legend. The default reads from ``self._hue_var``. label_order : list of labels The order that the legend entries should appear in. The default reads from ``self.hue_names``. adjust_subtitles : bool If True, modify entries with invisible artists to left-align the labels and set the font size to that of a title. kwargs : key, value pairings Other keyword arguments are passed to the underlying legend methods on the Figure or Axes object. Returns ------- self : Grid instance Returns self for easy chaining. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_axisgrid.Grid.add_legend.txt
full_function:
def add_legend(self, legend_data=None, title=None, label_order=None, adjust_subtitles=False, **kwargs): """Draw a legend, maybe placing it outside axes and resizing the figure. Parameters ---------- legend_data : dict Dictionary mapping label names (or two-element tuples where the second element is a label name) to matplotlib artist handles. The default reads from ``self._legend_data``. title : string Title for the legend. The default reads from ``self._hue_var``. label_order : list of labels The order that the legend entries should appear in. The default reads from ``self.hue_names``. adjust_subtitles : bool If True, modify entries with invisible artists to left-align the labels and set the font size to that of a title. kwargs : key, value pairings Other keyword arguments are passed to the underlying legend methods on the Figure or Axes object. Returns ------- self : Grid instance Returns self for easy chaining. """ # Find the data for the legend if legend_data is None: legend_data = self._legend_data if label_order is None: if self.hue_names is None: label_order = list(legend_data.keys()) else: label_order = list(map(utils.to_utf8, self.hue_names)) blank_handle = mpl.patches.Patch(alpha=0, linewidth=0) handles = [legend_data.get(lab, blank_handle) for lab in label_order] title = self._hue_var if title is None else title title_size = mpl.rcParams["legend.title_fontsize"] # Unpack nested labels from a hierarchical legend labels = [] for entry in label_order: if isinstance(entry, tuple): _, label = entry else: label = entry labels.append(label) # Set default legend kwargs kwargs.setdefault("scatterpoints", 1) if self._legend_out: kwargs.setdefault("frameon", False) kwargs.setdefault("loc", "center right") # Draw a full-figure legend outside the grid figlegend = self._figure.legend(handles, labels, **kwargs) self._legend = figlegend figlegend.set_title(title, prop={"size": title_size}) if adjust_subtitles: adjust_legend_subtitles(figlegend) # Draw the plot to set the bounding boxes correctly _draw_figure(self._figure) # Calculate and set the new width of the figure so the legend fits legend_width = figlegend.get_window_extent().width / self._figure.dpi fig_width, fig_height = self._figure.get_size_inches() self._figure.set_size_inches(fig_width + legend_width, fig_height) # Draw the plot again to get the new transformations _draw_figure(self._figure) # Now calculate how much space we need on the right side legend_width = figlegend.get_window_extent().width / self._figure.dpi space_needed = legend_width / (fig_width + legend_width) margin = .04 if self._margin_titles else .01 self._space_needed = margin + space_needed right = 1 - self._space_needed # Place the subplot axes to give space for the legend self._figure.subplots_adjust(right=right) self._tight_layout_rect[2] = right else: # Draw a legend in the first axis ax = self.axes.flat[0] kwargs.setdefault("loc", "best") leg = ax.legend(handles, labels, **kwargs) leg.set_title(title, prop={"size": title_size}) self._legend = leg if adjust_subtitles: adjust_legend_subtitles(leg) return self
function_name: axisgrid.Grid.add_legend
context-complexity: Repo-Level
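Grid.add_legend is usually reached through FacetGrid rather than called on Grid directly; a minimal sketch with a synthetic DataFrame (column names are illustrative):

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.DataFrame({
    "x": [1, 2, 3, 1, 2, 3],
    "y": [1, 2, 3, 3, 2, 1],
    "group": ["a", "a", "a", "b", "b", "b"],
})
g = sns.FacetGrid(df, hue="group")
g.map(plt.scatter, "x", "y")
g.add_legend(title="group")   # draws the legend outside the axes and widens the figure
plt.show()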
repository: seaborn
repo_id: 28
target_module_path: seaborn/axisgrid.py
prompt:
def __init__( self, data, *, hue=None, vars=None, x_vars=None, y_vars=None, hue_order=None, palette=None, hue_kws=None, corner=False, diag_sharey=True, height=2.5, aspect=1, layout_pad=.5, despine=True, dropna=False, ): """Initialize the plot figure and PairGrid object. Parameters ---------- data : DataFrame Tidy (long-form) dataframe where each column is a variable and each row is an observation. hue : string (variable name) Variable in ``data`` to map plot aspects to different colors. This variable will be excluded from the default x and y variables. vars : list of variable names Variables within ``data`` to use, otherwise use every column with a numeric datatype. {x, y}_vars : lists of variable names Variables within ``data`` to use separately for the rows and columns of the figure; i.e. to make a non-square plot. hue_order : list of strings Order for the levels of the hue variable in the palette palette : dict or seaborn color palette Set of colors for mapping the ``hue`` variable. If a dict, keys should be values in the ``hue`` variable. hue_kws : dictionary of param -> list of values mapping Other keyword arguments to insert into the plotting call to let other plot attributes vary across levels of the hue variable (e.g. the markers in a scatterplot). corner : bool If True, don't add axes to the upper (off-diagonal) triangle of the grid, making this a "corner" plot. height : scalar Height (in inches) of each facet. aspect : scalar Aspect * height gives the width (in inches) of each facet. layout_pad : scalar Padding between axes; passed to ``fig.tight_layout``. despine : boolean Remove the top and right spines from the plots. dropna : boolean Drop missing values from the data before plotting. See Also -------- pairplot : Easily drawing common uses of :class:`PairGrid`. FacetGrid : Subplot grid for plotting conditional relationships. Examples -------- .. include:: ../docstrings/PairGrid.rst """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_axisgrid.PairGrid.__init__.txt
full_function:
def __init__( self, data, *, hue=None, vars=None, x_vars=None, y_vars=None, hue_order=None, palette=None, hue_kws=None, corner=False, diag_sharey=True, height=2.5, aspect=1, layout_pad=.5, despine=True, dropna=False, ): """Initialize the plot figure and PairGrid object. Parameters ---------- data : DataFrame Tidy (long-form) dataframe where each column is a variable and each row is an observation. hue : string (variable name) Variable in ``data`` to map plot aspects to different colors. This variable will be excluded from the default x and y variables. vars : list of variable names Variables within ``data`` to use, otherwise use every column with a numeric datatype. {x, y}_vars : lists of variable names Variables within ``data`` to use separately for the rows and columns of the figure; i.e. to make a non-square plot. hue_order : list of strings Order for the levels of the hue variable in the palette palette : dict or seaborn color palette Set of colors for mapping the ``hue`` variable. If a dict, keys should be values in the ``hue`` variable. hue_kws : dictionary of param -> list of values mapping Other keyword arguments to insert into the plotting call to let other plot attributes vary across levels of the hue variable (e.g. the markers in a scatterplot). corner : bool If True, don't add axes to the upper (off-diagonal) triangle of the grid, making this a "corner" plot. height : scalar Height (in inches) of each facet. aspect : scalar Aspect * height gives the width (in inches) of each facet. layout_pad : scalar Padding between axes; passed to ``fig.tight_layout``. despine : boolean Remove the top and right spines from the plots. dropna : boolean Drop missing values from the data before plotting. See Also -------- pairplot : Easily drawing common uses of :class:`PairGrid`. FacetGrid : Subplot grid for plotting conditional relationships. Examples -------- .. include:: ../docstrings/PairGrid.rst """ super().__init__() data = handle_data_source(data) # Sort out the variables that define the grid numeric_cols = self._find_numeric_cols(data) if hue in numeric_cols: numeric_cols.remove(hue) if vars is not None: x_vars = list(vars) y_vars = list(vars) if x_vars is None: x_vars = numeric_cols if y_vars is None: y_vars = numeric_cols if np.isscalar(x_vars): x_vars = [x_vars] if np.isscalar(y_vars): y_vars = [y_vars] self.x_vars = x_vars = list(x_vars) self.y_vars = y_vars = list(y_vars) self.square_grid = self.x_vars == self.y_vars if not x_vars: raise ValueError("No variables found for grid columns.") if not y_vars: raise ValueError("No variables found for grid rows.") # Create the figure and the array of subplots figsize = len(x_vars) * height * aspect, len(y_vars) * height with _disable_autolayout(): fig = plt.figure(figsize=figsize) axes = fig.subplots(len(y_vars), len(x_vars), sharex="col", sharey="row", squeeze=False) # Possibly remove upper axes to make a corner grid # Note: setting up the axes is usually the most time-intensive part # of using the PairGrid. We are foregoing the speed improvement that # we would get by just not setting up the hidden axes so that we can # avoid implementing fig.subplots ourselves. But worth thinking about. 
self._corner = corner if corner: hide_indices = np.triu_indices_from(axes, 1) for i, j in zip(*hide_indices): axes[i, j].remove() axes[i, j] = None self._figure = fig self.axes = axes self.data = data # Save what we are going to do with the diagonal self.diag_sharey = diag_sharey self.diag_vars = None self.diag_axes = None self._dropna = dropna # Label the axes self._add_axis_labels() # Sort out the hue variable self._hue_var = hue if hue is None: self.hue_names = hue_order = ["_nolegend_"] self.hue_vals = pd.Series(["_nolegend_"] * len(data), index=data.index) else: # We need hue_order and hue_names because the former is used to control # the order of drawing and the latter is used to control the order of # the legend. hue_names can become string-typed while hue_order must # retain the type of the input data. This is messy but results from # the fact that PairGrid can implement the hue-mapping logic itself # (and was originally written exclusively that way) but now can delegate # to the axes-level functions, while always handling legend creation. # See GH2307 hue_names = hue_order = categorical_order(data[hue], hue_order) if dropna: # Filter NA from the list of unique hue names hue_names = list(filter(pd.notnull, hue_names)) self.hue_names = hue_names self.hue_vals = data[hue] # Additional dict of kwarg -> list of values for mapping the hue var self.hue_kws = hue_kws if hue_kws is not None else {} self._orig_palette = palette self._hue_order = hue_order self.palette = self._get_palette(data, hue, hue_order, palette) self._legend_data = {} # Make the plot look nice for ax in axes[:-1, :].flat: if ax is None: continue for label in ax.get_xticklabels(): label.set_visible(False) ax.xaxis.offsetText.set_visible(False) ax.xaxis.label.set_visible(False) for ax in axes[:, 1:].flat: if ax is None: continue for label in ax.get_yticklabels(): label.set_visible(False) ax.yaxis.offsetText.set_visible(False) ax.yaxis.label.set_visible(False) self._tight_layout_rect = [.01, .01, .99, .99] self._tight_layout_pad = layout_pad self._despine = despine if despine: utils.despine(fig=fig) self.tight_layout(pad=layout_pad)
function_name: axisgrid.PairGrid.__init__
context-complexity: Repo-Level
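A short PairGrid sketch on synthetic data, exercising the constructor documented above together with the usual map_diag/map_offdiag calls (all data and column names below are illustrative):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

rng = np.random.default_rng(0)
df = pd.DataFrame({
    "a": rng.normal(size=60),
    "b": rng.normal(size=60),
    "c": rng.normal(size=60),
    "kind": rng.choice(["x", "y"], size=60),
})
g = sns.PairGrid(df, hue="kind", corner=True, height=2)
g.map_diag(sns.histplot)        # univariate plots on the diagonal
g.map_offdiag(sns.scatterplot)  # bivariate plots elsewhere
g.add_legend()
plt.show()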
repository: seaborn
repo_id: 29
target_module_path: seaborn/axisgrid.py
prompt:
def pairplot( data, *, hue=None, hue_order=None, palette=None, vars=None, x_vars=None, y_vars=None, kind="scatter", diag_kind="auto", markers=None, height=2.5, aspect=1, corner=False, dropna=False, plot_kws=None, diag_kws=None, grid_kws=None, size=None, ): """Plot pairwise relationships in a dataset. By default, this function will create a grid of Axes such that each numeric variable in ``data`` will by shared across the y-axes across a single row and the x-axes across a single column. The diagonal plots are treated differently: a univariate distribution plot is drawn to show the marginal distribution of the data in each column. It is also possible to show a subset of variables or plot different variables on the rows and columns. This is a high-level interface for :class:`PairGrid` that is intended to make it easy to draw a few common styles. You should use :class:`PairGrid` directly if you need more flexibility. Parameters ---------- data : `pandas.DataFrame` Tidy (long-form) dataframe where each column is a variable and each row is an observation. hue : name of variable in ``data`` Variable in ``data`` to map plot aspects to different colors. hue_order : list of strings Order for the levels of the hue variable in the palette palette : dict or seaborn color palette Set of colors for mapping the ``hue`` variable. If a dict, keys should be values in the ``hue`` variable. vars : list of variable names Variables within ``data`` to use, otherwise use every column with a numeric datatype. {x, y}_vars : lists of variable names Variables within ``data`` to use separately for the rows and columns of the figure; i.e. to make a non-square plot. kind : {'scatter', 'kde', 'hist', 'reg'} Kind of plot to make. diag_kind : {'auto', 'hist', 'kde', None} Kind of plot for the diagonal subplots. If 'auto', choose based on whether or not ``hue`` is used. markers : single matplotlib marker code or list Either the marker to use for all scatterplot points or a list of markers with a length the same as the number of levels in the hue variable so that differently colored points will also have different scatterplot markers. height : scalar Height (in inches) of each facet. aspect : scalar Aspect * height gives the width (in inches) of each facet. corner : bool If True, don't add axes to the upper (off-diagonal) triangle of the grid, making this a "corner" plot. dropna : boolean Drop missing values from the data before plotting. {plot, diag, grid}_kws : dicts Dictionaries of keyword arguments. ``plot_kws`` are passed to the bivariate plotting function, ``diag_kws`` are passed to the univariate plotting function, and ``grid_kws`` are passed to the :class:`PairGrid` constructor. Returns ------- grid : :class:`PairGrid` Returns the underlying :class:`PairGrid` instance for further tweaking. See Also -------- PairGrid : Subplot grid for more flexible plotting of pairwise relationships. JointGrid : Grid for plotting joint and marginal distributions of two variables. Examples -------- .. include:: ../docstrings/pairplot.rst """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_axisgrid.pairplot.txt
full_function:
def pairplot( data, *, hue=None, hue_order=None, palette=None, vars=None, x_vars=None, y_vars=None, kind="scatter", diag_kind="auto", markers=None, height=2.5, aspect=1, corner=False, dropna=False, plot_kws=None, diag_kws=None, grid_kws=None, size=None, ): """Plot pairwise relationships in a dataset. By default, this function will create a grid of Axes such that each numeric variable in ``data`` will by shared across the y-axes across a single row and the x-axes across a single column. The diagonal plots are treated differently: a univariate distribution plot is drawn to show the marginal distribution of the data in each column. It is also possible to show a subset of variables or plot different variables on the rows and columns. This is a high-level interface for :class:`PairGrid` that is intended to make it easy to draw a few common styles. You should use :class:`PairGrid` directly if you need more flexibility. Parameters ---------- data : `pandas.DataFrame` Tidy (long-form) dataframe where each column is a variable and each row is an observation. hue : name of variable in ``data`` Variable in ``data`` to map plot aspects to different colors. hue_order : list of strings Order for the levels of the hue variable in the palette palette : dict or seaborn color palette Set of colors for mapping the ``hue`` variable. If a dict, keys should be values in the ``hue`` variable. vars : list of variable names Variables within ``data`` to use, otherwise use every column with a numeric datatype. {x, y}_vars : lists of variable names Variables within ``data`` to use separately for the rows and columns of the figure; i.e. to make a non-square plot. kind : {'scatter', 'kde', 'hist', 'reg'} Kind of plot to make. diag_kind : {'auto', 'hist', 'kde', None} Kind of plot for the diagonal subplots. If 'auto', choose based on whether or not ``hue`` is used. markers : single matplotlib marker code or list Either the marker to use for all scatterplot points or a list of markers with a length the same as the number of levels in the hue variable so that differently colored points will also have different scatterplot markers. height : scalar Height (in inches) of each facet. aspect : scalar Aspect * height gives the width (in inches) of each facet. corner : bool If True, don't add axes to the upper (off-diagonal) triangle of the grid, making this a "corner" plot. dropna : boolean Drop missing values from the data before plotting. {plot, diag, grid}_kws : dicts Dictionaries of keyword arguments. ``plot_kws`` are passed to the bivariate plotting function, ``diag_kws`` are passed to the univariate plotting function, and ``grid_kws`` are passed to the :class:`PairGrid` constructor. Returns ------- grid : :class:`PairGrid` Returns the underlying :class:`PairGrid` instance for further tweaking. See Also -------- PairGrid : Subplot grid for more flexible plotting of pairwise relationships. JointGrid : Grid for plotting joint and marginal distributions of two variables. Examples -------- .. 
include:: ../docstrings/pairplot.rst """ # Avoid circular import from .distributions import histplot, kdeplot # Handle deprecations if size is not None: height = size msg = ("The `size` parameter has been renamed to `height`; " "please update your code.") warnings.warn(msg, UserWarning) if not isinstance(data, pd.DataFrame): raise TypeError( f"'data' must be pandas DataFrame object, not: {type(data)}") plot_kws = {} if plot_kws is None else plot_kws.copy() diag_kws = {} if diag_kws is None else diag_kws.copy() grid_kws = {} if grid_kws is None else grid_kws.copy() # Resolve "auto" diag kind if diag_kind == "auto": if hue is None: diag_kind = "kde" if kind == "kde" else "hist" else: diag_kind = "hist" if kind == "hist" else "kde" # Set up the PairGrid grid_kws.setdefault("diag_sharey", diag_kind == "hist") grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue, hue_order=hue_order, palette=palette, corner=corner, height=height, aspect=aspect, dropna=dropna, **grid_kws) # Add the markers here as PairGrid has figured out how many levels of the # hue variable are needed and we don't want to duplicate that process if markers is not None: if kind == "reg": # Needed until regplot supports style if grid.hue_names is None: n_markers = 1 else: n_markers = len(grid.hue_names) if not isinstance(markers, list): markers = [markers] * n_markers if len(markers) != n_markers: raise ValueError("markers must be a singleton or a list of " "markers for each level of the hue variable") grid.hue_kws = {"marker": markers} elif kind == "scatter": if isinstance(markers, str): plot_kws["marker"] = markers elif hue is not None: plot_kws["style"] = data[hue] plot_kws["markers"] = markers # Draw the marginal plots on the diagonal diag_kws = diag_kws.copy() diag_kws.setdefault("legend", False) if diag_kind == "hist": grid.map_diag(histplot, **diag_kws) elif diag_kind == "kde": diag_kws.setdefault("fill", True) diag_kws.setdefault("warn_singular", False) grid.map_diag(kdeplot, **diag_kws) # Maybe plot on the off-diagonals if diag_kind is not None: plotter = grid.map_offdiag else: plotter = grid.map if kind == "scatter": from .relational import scatterplot # Avoid circular import plotter(scatterplot, **plot_kws) elif kind == "reg": from .regression import regplot # Avoid circular import plotter(regplot, **plot_kws) elif kind == "kde": from .distributions import kdeplot # Avoid circular import plot_kws.setdefault("warn_singular", False) plotter(kdeplot, **plot_kws) elif kind == "hist": from .distributions import histplot # Avoid circular import plotter(histplot, **plot_kws) # Add a legend if hue is not None: grid.add_legend() grid.tight_layout() return grid
function_name: axisgrid.pairplot
context-complexity: Self-Contained
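pairplot is the high-level wrapper around PairGrid; a minimal sketch with synthetic data:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

rng = np.random.default_rng(1)
df = pd.DataFrame({
    "u": rng.normal(size=80),
    "v": rng.normal(size=80),
    "w": rng.normal(size=80),
    "label": rng.choice(["a", "b"], size=80),
})
# One call sets up the grid, draws scatterplots off the diagonal, KDEs on the
# diagonal (because hue is set and kind is "scatter"), and adds a legend.
sns.pairplot(df, hue="label", corner=True)
plt.show()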
repository: seaborn
repo_id: 33
target_module_path: seaborn/palettes.py
prompt:
def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False): """Return a list of colors or continuous colormap defining a palette. Possible ``palette`` values include: - Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind) - Name of matplotlib colormap - 'husl' or 'hls' - 'ch:<cubehelix arguments>' - 'light:<color>', 'dark:<color>', 'blend:<color>,<color>', - A sequence of colors in any format matplotlib accepts Calling this function with ``palette=None`` will return the current matplotlib color cycle. This function can also be used in a ``with`` statement to temporarily set the color cycle for a plot or set of plots. See the :ref:`tutorial <palette_tutorial>` for more information. Parameters ---------- palette : None, string, or sequence, optional Name of palette or None to return current palette. If a sequence, input colors are used but possibly cycled and desaturated. n_colors : int, optional Number of colors in the palette. If ``None``, the default will depend on how ``palette`` is specified. Named palettes default to 6 colors, but grabbing the current palette or passing in a list of colors will not change the number of colors unless this is specified. Asking for more colors than exist in the palette will cause it to cycle. Ignored when ``as_cmap`` is True. desat : float, optional Proportion to desaturate each color by. as_cmap : bool If True, return a :class:`matplotlib.colors.ListedColormap`. Returns ------- list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- set_palette : Set the default color cycle for all plots. set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to colors from one of the seaborn palettes. Examples -------- .. include:: ../docstrings/color_palette.rst """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_color_palette.txt
full_function:
def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False): """Return a list of colors or continuous colormap defining a palette. Possible ``palette`` values include: - Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind) - Name of matplotlib colormap - 'husl' or 'hls' - 'ch:<cubehelix arguments>' - 'light:<color>', 'dark:<color>', 'blend:<color>,<color>', - A sequence of colors in any format matplotlib accepts Calling this function with ``palette=None`` will return the current matplotlib color cycle. This function can also be used in a ``with`` statement to temporarily set the color cycle for a plot or set of plots. See the :ref:`tutorial <palette_tutorial>` for more information. Parameters ---------- palette : None, string, or sequence, optional Name of palette or None to return current palette. If a sequence, input colors are used but possibly cycled and desaturated. n_colors : int, optional Number of colors in the palette. If ``None``, the default will depend on how ``palette`` is specified. Named palettes default to 6 colors, but grabbing the current palette or passing in a list of colors will not change the number of colors unless this is specified. Asking for more colors than exist in the palette will cause it to cycle. Ignored when ``as_cmap`` is True. desat : float, optional Proportion to desaturate each color by. as_cmap : bool If True, return a :class:`matplotlib.colors.ListedColormap`. Returns ------- list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- set_palette : Set the default color cycle for all plots. set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to colors from one of the seaborn palettes. Examples -------- .. include:: ../docstrings/color_palette.rst """ if palette is None: palette = get_color_cycle() if n_colors is None: n_colors = len(palette) elif not isinstance(palette, str): palette = palette if n_colors is None: n_colors = len(palette) else: if n_colors is None: # Use all colors in a qualitative palette or 6 of another kind n_colors = QUAL_PALETTE_SIZES.get(palette, 6) if palette in SEABORN_PALETTES: # Named "seaborn variant" of matplotlib default color cycle palette = SEABORN_PALETTES[palette] elif palette == "hls": # Evenly spaced colors in cylindrical RGB space palette = hls_palette(n_colors, as_cmap=as_cmap) elif palette == "husl": # Evenly spaced colors in cylindrical Lab space palette = husl_palette(n_colors, as_cmap=as_cmap) elif palette.lower() == "jet": # Paternalism raise ValueError("No.") elif palette.startswith("ch:"): # Cubehelix palette with params specified in string args, kwargs = _parse_cubehelix_args(palette) palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap) elif palette.startswith("light:"): # light palette to color specified in string _, color = palette.split(":") reverse = color.endswith("_r") if reverse: color = color[:-2] palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap) elif palette.startswith("dark:"): # light palette to color specified in string _, color = palette.split(":") reverse = color.endswith("_r") if reverse: color = color[:-2] palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap) elif palette.startswith("blend:"): # blend palette between colors specified in string _, colors = palette.split(":") colors = colors.split(",") palette = blend_palette(colors, n_colors, as_cmap=as_cmap) else: try: # Perhaps a named matplotlib colormap? 
palette = mpl_palette(palette, n_colors, as_cmap=as_cmap) except (ValueError, KeyError): # Error class changed in mpl36 raise ValueError(f"{palette!r} is not a valid palette name") if desat is not None: palette = [desaturate(c, desat) for c in palette] if not as_cmap: # Always return as many colors as we asked for pal_cycle = cycle(palette) palette = [next(pal_cycle) for _ in range(n_colors)] # Always return in r, g, b tuple format try: palette = map(mpl.colors.colorConverter.to_rgb, palette) palette = _ColorPalette(palette) except ValueError: raise ValueError(f"Could not generate a palette for {palette}") return palette
function_name: color_palette
context-complexity: Repo-Level
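A few representative color_palette calls covering the palette specifications listed in the docstring:

import seaborn as sns

print(sns.color_palette())                        # current color cycle as RGB tuples
print(sns.color_palette("husl", 8))               # 8 evenly spaced hues
cmap = sns.color_palette("light:seagreen", as_cmap=True)  # continuous colormap

# The returned palette also works as a context manager to set the
# color cycle temporarily.
with sns.color_palette("colorblind"):
    pass  # plots created here use the colorblind palette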
repository: seaborn
repo_id: 71
target_module_path: seaborn/utils.py
prompt:
def despine(fig=None, ax=None, top=True, right=True, left=False, bottom=False, offset=None, trim=False): """Remove the top and right spines from plot(s). fig : matplotlib figure, optional Figure to despine all axes of, defaults to the current figure. ax : matplotlib axes, optional Specific axes object to despine. Ignored if fig is provided. top, right, left, bottom : boolean, optional If True, remove that spine. offset : int or dict, optional Absolute distance, in points, spines should be moved away from the axes (negative values move spines inward). A single value applies to all spines; a dict can be used to set offset values per side. trim : bool, optional If True, limit spines to the smallest and largest major tick on each non-despined axis. Returns ------- None """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_utils.despine.txt
full_function:
def despine(fig=None, ax=None, top=True, right=True, left=False, bottom=False, offset=None, trim=False): """Remove the top and right spines from plot(s). fig : matplotlib figure, optional Figure to despine all axes of, defaults to the current figure. ax : matplotlib axes, optional Specific axes object to despine. Ignored if fig is provided. top, right, left, bottom : boolean, optional If True, remove that spine. offset : int or dict, optional Absolute distance, in points, spines should be moved away from the axes (negative values move spines inward). A single value applies to all spines; a dict can be used to set offset values per side. trim : bool, optional If True, limit spines to the smallest and largest major tick on each non-despined axis. Returns ------- None """ # Get references to the axes we want if fig is None and ax is None: axes = plt.gcf().axes elif fig is not None: axes = fig.axes elif ax is not None: axes = [ax] for ax_i in axes: for side in ["top", "right", "left", "bottom"]: # Toggle the spine objects is_visible = not locals()[side] ax_i.spines[side].set_visible(is_visible) if offset is not None and is_visible: try: val = offset.get(side, 0) except AttributeError: val = offset ax_i.spines[side].set_position(('outward', val)) # Potentially move the ticks if left and not right: maj_on = any( t.tick1line.get_visible() for t in ax_i.yaxis.majorTicks ) min_on = any( t.tick1line.get_visible() for t in ax_i.yaxis.minorTicks ) ax_i.yaxis.set_ticks_position("right") for t in ax_i.yaxis.majorTicks: t.tick2line.set_visible(maj_on) for t in ax_i.yaxis.minorTicks: t.tick2line.set_visible(min_on) if bottom and not top: maj_on = any( t.tick1line.get_visible() for t in ax_i.xaxis.majorTicks ) min_on = any( t.tick1line.get_visible() for t in ax_i.xaxis.minorTicks ) ax_i.xaxis.set_ticks_position("top") for t in ax_i.xaxis.majorTicks: t.tick2line.set_visible(maj_on) for t in ax_i.xaxis.minorTicks: t.tick2line.set_visible(min_on) if trim: # clip off the parts of the spines that extend past major ticks xticks = np.asarray(ax_i.get_xticks()) if xticks.size: firsttick = np.compress(xticks >= min(ax_i.get_xlim()), xticks)[0] lasttick = np.compress(xticks <= max(ax_i.get_xlim()), xticks)[-1] ax_i.spines['bottom'].set_bounds(firsttick, lasttick) ax_i.spines['top'].set_bounds(firsttick, lasttick) newticks = xticks.compress(xticks <= lasttick) newticks = newticks.compress(newticks >= firsttick) ax_i.set_xticks(newticks) yticks = np.asarray(ax_i.get_yticks()) if yticks.size: firsttick = np.compress(yticks >= min(ax_i.get_ylim()), yticks)[0] lasttick = np.compress(yticks <= max(ax_i.get_ylim()), yticks)[-1] ax_i.spines['left'].set_bounds(firsttick, lasttick) ax_i.spines['right'].set_bounds(firsttick, lasttick) newticks = yticks.compress(yticks <= lasttick) newticks = newticks.compress(newticks >= firsttick) ax_i.set_yticks(newticks)
function_name: utils.despine
context-complexity: Self-Contained
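A minimal despine sketch on a plain matplotlib Axes:

import matplotlib.pyplot as plt
import seaborn as sns

fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
# Remove the top/right spines, push the remaining spines outward by 10 points,
# and trim them to the outermost major ticks.
sns.despine(ax=ax, offset=10, trim=True)
plt.show()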
repository: seaborn
repo_id: 73
target_module_path: seaborn/utils.py
prompt:
def move_legend(obj, loc, **kwargs): """ Recreate a plot's legend at a new location. The name is a slight misnomer. Matplotlib legends do not expose public control over their position parameters. So this function creates a new legend, copying over the data from the original object, which is then removed. Parameters ---------- obj : the object with the plot This argument can be either a seaborn or matplotlib object: - :class:`seaborn.FacetGrid` or :class:`seaborn.PairGrid` - :class:`matplotlib.axes.Axes` or :class:`matplotlib.figure.Figure` loc : str or int Location argument, as in :meth:`matplotlib.axes.Axes.legend`. kwargs Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.legend`. Examples -------- .. include:: ../docstrings/move_legend.rst """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_utils.move_legend.txt
full_function:
def move_legend(obj, loc, **kwargs): """ Recreate a plot's legend at a new location. The name is a slight misnomer. Matplotlib legends do not expose public control over their position parameters. So this function creates a new legend, copying over the data from the original object, which is then removed. Parameters ---------- obj : the object with the plot This argument can be either a seaborn or matplotlib object: - :class:`seaborn.FacetGrid` or :class:`seaborn.PairGrid` - :class:`matplotlib.axes.Axes` or :class:`matplotlib.figure.Figure` loc : str or int Location argument, as in :meth:`matplotlib.axes.Axes.legend`. kwargs Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.legend`. Examples -------- .. include:: ../docstrings/move_legend.rst """ # This is a somewhat hackish solution that will hopefully be obviated by # upstream improvements to matplotlib legends that make them easier to # modify after creation. from seaborn.axisgrid import Grid # Avoid circular import # Locate the legend object and a method to recreate the legend if isinstance(obj, Grid): old_legend = obj.legend legend_func = obj.figure.legend elif isinstance(obj, mpl.axes.Axes): old_legend = obj.legend_ legend_func = obj.legend elif isinstance(obj, mpl.figure.Figure): if obj.legends: old_legend = obj.legends[-1] else: old_legend = None legend_func = obj.legend else: err = "`obj` must be a seaborn Grid or matplotlib Axes or Figure instance." raise TypeError(err) if old_legend is None: err = f"{obj} has no legend attached." raise ValueError(err) # Extract the components of the legend we need to reuse # Import here to avoid a circular import from seaborn._compat import get_legend_handles handles = get_legend_handles(old_legend) labels = [t.get_text() for t in old_legend.get_texts()] # Handle the case where the user is trying to override the labels if (new_labels := kwargs.pop("labels", None)) is not None: if len(new_labels) != len(labels): err = "Length of new labels does not match existing legend." raise ValueError(err) labels = new_labels # Extract legend properties that can be passed to the recreation method # (Vexingly, these don't all round-trip) legend_kws = inspect.signature(mpl.legend.Legend).parameters props = {k: v for k, v in old_legend.properties().items() if k in legend_kws} # Delegate default bbox_to_anchor rules to matplotlib props.pop("bbox_to_anchor") # Try to propagate the existing title and font properties; respect new ones too title = props.pop("title") if "title" in kwargs: title.set_text(kwargs.pop("title")) title_kwargs = {k: v for k, v in kwargs.items() if k.startswith("title_")} for key, val in title_kwargs.items(): title.set(**{key[6:]: val}) kwargs.pop(key) # Try to respect the frame visibility kwargs.setdefault("frameon", old_legend.legendPatch.get_visible()) # Remove the old legend and create the new one props.update(kwargs) old_legend.remove() new_legend = legend_func(handles, labels, loc=loc, **props) new_legend.set_title(title.get_text(), title.get_fontproperties()) # Let the Grid object continue to track the correct legend object if isinstance(obj, Grid): obj._legend = new_legend
function_name: utils.move_legend
context-complexity: Self-Contained
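A minimal move_legend sketch, relocating an Axes-level legend outside the plot (data and column names are illustrative):

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.DataFrame({
    "x": [1, 2, 3, 4], "y": [4, 3, 2, 1],
    "group": ["a", "a", "b", "b"],
})
ax = sns.scatterplot(data=df, x="x", y="y", hue="group")
# Recreate the legend in the upper-left corner, anchored outside the axes.
sns.move_legend(ax, "upper left", bbox_to_anchor=(1, 1), title="Group")
plt.tight_layout()
plt.show()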
repository: scikit-learn
repo_id: 0
target_module_path: sklearn/linear_model/_bayes.py
prompt:
def fit(self, X, y): """Fit the model according to the given training data and parameters. Iterative procedure to maximize the evidence Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values (integers). Will be cast to X's dtype if necessary. Returns ------- self : object Fitted estimator. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_ARDRegression.fit.txt
full_function:
def fit(self, X, y): """Fit the model according to the given training data and parameters. Iterative procedure to maximize the evidence Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values (integers). Will be cast to X's dtype if necessary. Returns ------- self : object Fitted estimator. """ X, y = validate_data( self, X, y, dtype=[np.float64, np.float32], force_writeable=True, y_numeric=True, ensure_min_samples=2, ) dtype = X.dtype n_samples, n_features = X.shape coef_ = np.zeros(n_features, dtype=dtype) X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data( X, y, fit_intercept=self.fit_intercept, copy=self.copy_X ) self.X_offset_ = X_offset_ self.X_scale_ = X_scale_ # Launch the convergence loop keep_lambda = np.ones(n_features, dtype=bool) lambda_1 = self.lambda_1 lambda_2 = self.lambda_2 alpha_1 = self.alpha_1 alpha_2 = self.alpha_2 verbose = self.verbose # Initialization of the values of the parameters eps = np.finfo(np.float64).eps # Add `eps` in the denominator to omit division by zero if `np.var(y)` # is zero. # Explicitly set dtype to avoid unintended type promotion with numpy 2. alpha_ = np.asarray(1.0 / (np.var(y) + eps), dtype=dtype) lambda_ = np.ones(n_features, dtype=dtype) self.scores_ = list() coef_old_ = None def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_): coef_[keep_lambda] = alpha_ * np.linalg.multi_dot( [sigma_, X[:, keep_lambda].T, y] ) return coef_ update_sigma = ( self._update_sigma if n_samples >= n_features else self._update_sigma_woodbury ) # Iterative procedure of ARDRegression for iter_ in range(self.max_iter): sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda) coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_) # Update alpha and lambda rmse_ = np.sum((y - np.dot(X, coef_)) ** 2) gamma_ = 1.0 - lambda_[keep_lambda] * np.diag(sigma_) lambda_[keep_lambda] = (gamma_ + 2.0 * lambda_1) / ( (coef_[keep_lambda]) ** 2 + 2.0 * lambda_2 ) alpha_ = (n_samples - gamma_.sum() + 2.0 * alpha_1) / ( rmse_ + 2.0 * alpha_2 ) # Prune the weights with a precision over a threshold keep_lambda = lambda_ < self.threshold_lambda coef_[~keep_lambda] = 0 # Compute the objective function if self.compute_score: s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum() s += alpha_1 * log(alpha_) - alpha_2 * alpha_ s += 0.5 * ( fast_logdet(sigma_) + n_samples * log(alpha_) + np.sum(np.log(lambda_)) ) s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_**2).sum()) self.scores_.append(s) # Check for convergence if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol: if verbose: print("Converged after %s iterations" % iter_) break coef_old_ = np.copy(coef_) if not keep_lambda.any(): break self.n_iter_ = iter_ + 1 if keep_lambda.any(): # update sigma and mu using updated params from the last iteration sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda) coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_) else: sigma_ = np.array([]).reshape(0, 0) self.coef_ = coef_ self.alpha_ = alpha_ self.sigma_ = sigma_ self.lambda_ = lambda_ self._set_intercept(X_offset_, y_offset_, X_scale_) return self
function_name: ARDRegression.fit
context-complexity: Repo-Level
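A small ARDRegression sketch on synthetic data with only a few informative features, which is the setting the automatic relevance determination prior targets:

import numpy as np
from sklearn.linear_model import ARDRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 10))
true_coef = np.zeros(10)
true_coef[:3] = [2.0, -1.0, 0.5]          # only 3 informative features
y = X @ true_coef + 0.1 * rng.normal(size=100)

model = ARDRegression().fit(X, y)
print(model.coef_.round(2))               # irrelevant weights are pruned toward 0
print(model.predict(X[:3]))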
repository: scikit-learn
repo_id: 9
target_module_path: sklearn/linear_model/_bayes.py
prompt:
def fit(self, X, y, sample_weight=None): """Fit the model. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. Will be cast to X's dtype if necessary. sample_weight : ndarray of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.20 parameter *sample_weight* support to BayesianRidge. Returns ------- self : object Returns the instance itself. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_BayesianRidge.fit.txt
full_function:
def fit(self, X, y, sample_weight=None): """Fit the model. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. Will be cast to X's dtype if necessary. sample_weight : ndarray of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.20 parameter *sample_weight* support to BayesianRidge. Returns ------- self : object Returns the instance itself. """ X, y = validate_data( self, X, y, dtype=[np.float64, np.float32], force_writeable=True, y_numeric=True, ) dtype = X.dtype if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X, dtype=dtype) X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data( X, y, fit_intercept=self.fit_intercept, copy=self.copy_X, sample_weight=sample_weight, ) if sample_weight is not None: # Sample weight can be implemented via a simple rescaling. X, y, _ = _rescale_data(X, y, sample_weight) self.X_offset_ = X_offset_ self.X_scale_ = X_scale_ n_samples, n_features = X.shape # Initialization of the values of the parameters eps = np.finfo(np.float64).eps # Add `eps` in the denominator to omit division by zero if `np.var(y)` # is zero alpha_ = self.alpha_init lambda_ = self.lambda_init if alpha_ is None: alpha_ = 1.0 / (np.var(y) + eps) if lambda_ is None: lambda_ = 1.0 # Avoid unintended type promotion to float64 with numpy 2 alpha_ = np.asarray(alpha_, dtype=dtype) lambda_ = np.asarray(lambda_, dtype=dtype) verbose = self.verbose lambda_1 = self.lambda_1 lambda_2 = self.lambda_2 alpha_1 = self.alpha_1 alpha_2 = self.alpha_2 self.scores_ = list() coef_old_ = None XT_y = np.dot(X.T, y) U, S, Vh = linalg.svd(X, full_matrices=False) eigen_vals_ = S**2 # Convergence loop of the bayesian ridge regression for iter_ in range(self.max_iter): # update posterior mean coef_ based on alpha_ and lambda_ and # compute corresponding rmse coef_, rmse_ = self._update_coef_( X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_ ) if self.compute_score: # compute the log marginal likelihood s = self._log_marginal_likelihood( n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_ ) self.scores_.append(s) # Update alpha and lambda according to (MacKay, 1992) gamma_ = np.sum((alpha_ * eigen_vals_) / (lambda_ + alpha_ * eigen_vals_)) lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_**2) + 2 * lambda_2) alpha_ = (n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2) # Check for convergence if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol: if verbose: print("Convergence after ", str(iter_), " iterations") break coef_old_ = np.copy(coef_) self.n_iter_ = iter_ + 1 # return regularization parameters and corresponding posterior mean, # log marginal likelihood and posterior covariance self.alpha_ = alpha_ self.lambda_ = lambda_ self.coef_, rmse_ = self._update_coef_( X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_ ) if self.compute_score: # compute the log marginal likelihood s = self._log_marginal_likelihood( n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_ ) self.scores_.append(s) self.scores_ = np.array(self.scores_) # posterior covariance is given by 1/alpha_ * scaled_sigma_ scaled_sigma_ = np.dot( Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis] ) self.sigma_ = (1.0 / alpha_) * scaled_sigma_ self._set_intercept(X_offset_, y_offset_, X_scale_) return self
function_name: BayesianRidge.fit
context-complexity: Repo-Level
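A small BayesianRidge sketch on synthetic data, also showing the predictive standard deviation that the Bayesian treatment provides:

import numpy as np
from sklearn.linear_model import BayesianRidge

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
y = X @ np.array([1.5, 0.0, -2.0, 0.0, 0.5]) + 0.2 * rng.normal(size=200)

model = BayesianRidge(compute_score=True).fit(X, y)
print(model.coef_)
print(model.alpha_, model.lambda_)                 # learned noise / weight precisions
mean, std = model.predict(X[:5], return_std=True)  # predictive uncertainty
print(mean, std)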
repository: scikit-learn
repo_id: 14
target_module_path: sklearn/cluster/_bisect_k_means.py
prompt:
def fit(self, X, y=None, sample_weight=None): """Compute bisecting k-means clustering. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training instances to cluster. .. note:: The data will be converted to C ordering, which will cause a memory copy if the given data is not C-contiguous. y : Ignored Not used, present here for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None The weights for each observation in X. If None, all observations are assigned equal weight. `sample_weight` is not used during initialization if `init` is a callable. Returns ------- self Fitted estimator. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_BisectingKMeans.fit.txt
full_function:
def fit(self, X, y=None, sample_weight=None): """Compute bisecting k-means clustering. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training instances to cluster. .. note:: The data will be converted to C ordering, which will cause a memory copy if the given data is not C-contiguous. y : Ignored Not used, present here for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None The weights for each observation in X. If None, all observations are assigned equal weight. `sample_weight` is not used during initialization if `init` is a callable. Returns ------- self Fitted estimator. """ X = validate_data( self, X, accept_sparse="csr", dtype=[np.float64, np.float32], order="C", copy=self.copy_x, accept_large_sparse=False, ) self._check_params_vs_input(X) self._random_state = check_random_state(self.random_state) sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) self._n_threads = _openmp_effective_n_threads() if self.algorithm == "lloyd" or self.n_clusters == 1: self._kmeans_single = _kmeans_single_lloyd self._check_mkl_vcomp(X, X.shape[0]) else: self._kmeans_single = _kmeans_single_elkan # Subtract of mean of X for more accurate distance computations if not sp.issparse(X): self._X_mean = X.mean(axis=0) X -= self._X_mean # Initialize the hierarchical clusters tree self._bisecting_tree = _BisectingTree( indices=np.arange(X.shape[0]), center=X.mean(axis=0), score=0, ) x_squared_norms = row_norms(X, squared=True) for _ in range(self.n_clusters - 1): # Chose cluster to bisect cluster_to_bisect = self._bisecting_tree.get_cluster_to_bisect() # Split this cluster into 2 subclusters self._bisect(X, x_squared_norms, sample_weight, cluster_to_bisect) # Aggregate final labels and centers from the bisecting tree self.labels_ = np.full(X.shape[0], -1, dtype=np.int32) self.cluster_centers_ = np.empty((self.n_clusters, X.shape[1]), dtype=X.dtype) for i, cluster_node in enumerate(self._bisecting_tree.iter_leaves()): self.labels_[cluster_node.indices] = i self.cluster_centers_[i] = cluster_node.center cluster_node.label = i # label final clusters for future prediction cluster_node.indices = None # release memory # Restore original data if not sp.issparse(X): X += self._X_mean self.cluster_centers_ += self._X_mean _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense self.inertia_ = _inertia( X, sample_weight, self.cluster_centers_, self.labels_, self._n_threads ) self._n_features_out = self.cluster_centers_.shape[0] return self
function_name: BisectingKMeans.fit
context-complexity: Repo-Level
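A small BisectingKMeans sketch on three synthetic Gaussian blobs:

import numpy as np
from sklearn.cluster import BisectingKMeans

rng = np.random.default_rng(0)
X = np.vstack([
    rng.normal(loc=0.0, size=(50, 2)),
    rng.normal(loc=5.0, size=(50, 2)),
    rng.normal(loc=(0.0, 5.0), size=(50, 2)),
])
model = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
print(model.labels_[:10])
print(model.cluster_centers_)
print(model.inertia_)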
repository: scikit-learn
repo_id: 15
target_module_path: sklearn/calibration.py
prompt:
def fit(self, X, y, sample_weight=None, **fit_params): """Fit the calibrated model. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. **fit_params : dict Parameters to pass to the `fit` method of the underlying classifier. Returns ------- self : object Returns an instance of self. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_CalibratedClassifierCV.fit.txt
full_function:
def fit(self, X, y, sample_weight=None, **fit_params): """Fit the calibrated model. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. **fit_params : dict Parameters to pass to the `fit` method of the underlying classifier. Returns ------- self : object Returns an instance of self. """ check_classification_targets(y) X, y = indexable(X, y) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X) estimator = self._get_estimator() self.calibrated_classifiers_ = [] if self.cv == "prefit": # `classes_` should be consistent with that of estimator check_is_fitted(self.estimator, attributes=["classes_"]) self.classes_ = self.estimator.classes_ predictions, _ = _get_response_values( estimator, X, response_method=["decision_function", "predict_proba"], ) if predictions.ndim == 1: # Reshape binary output from `(n_samples,)` to `(n_samples, 1)` predictions = predictions.reshape(-1, 1) calibrated_classifier = _fit_calibrator( estimator, predictions, y, self.classes_, self.method, sample_weight, ) self.calibrated_classifiers_.append(calibrated_classifier) else: # Set `classes_` using all `y` label_encoder_ = LabelEncoder().fit(y) self.classes_ = label_encoder_.classes_ if _routing_enabled(): routed_params = process_routing( self, "fit", sample_weight=sample_weight, **fit_params, ) else: # sample_weight checks fit_parameters = signature(estimator.fit).parameters supports_sw = "sample_weight" in fit_parameters if sample_weight is not None and not supports_sw: estimator_name = type(estimator).__name__ warnings.warn( f"Since {estimator_name} does not appear to accept" " sample_weight, sample weights will only be used for the" " calibration itself. This can be caused by a limitation of" " the current scikit-learn API. See the following issue for" " more details:" " https://github.com/scikit-learn/scikit-learn/issues/21134." " Be warned that the result of the calibration is likely to be" " incorrect." ) routed_params = Bunch() routed_params.splitter = Bunch(split={}) # no routing for splitter routed_params.estimator = Bunch(fit=fit_params) if sample_weight is not None and supports_sw: routed_params.estimator.fit["sample_weight"] = sample_weight # Check that each cross-validation fold can have at least one # example per class if isinstance(self.cv, int): n_folds = self.cv elif hasattr(self.cv, "n_splits"): n_folds = self.cv.n_splits else: n_folds = None if n_folds and np.any(np.unique(y, return_counts=True)[1] < n_folds): raise ValueError( f"Requesting {n_folds}-fold " "cross-validation but provided less than " f"{n_folds} examples for at least one class." ) if isinstance(self.cv, LeaveOneOut): raise ValueError( "LeaveOneOut cross-validation does not allow" "all classes to be present in test splits. " "Please use a cross-validation generator that allows " "all classes to appear in every test and train split." 
) cv = check_cv(self.cv, y, classifier=True) if self.ensemble: parallel = Parallel(n_jobs=self.n_jobs) self.calibrated_classifiers_ = parallel( delayed(_fit_classifier_calibrator_pair)( clone(estimator), X, y, train=train, test=test, method=self.method, classes=self.classes_, sample_weight=sample_weight, fit_params=routed_params.estimator.fit, ) for train, test in cv.split(X, y, **routed_params.splitter.split) ) else: this_estimator = clone(estimator) method_name = _check_response_method( this_estimator, ["decision_function", "predict_proba"], ).__name__ predictions = cross_val_predict( estimator=this_estimator, X=X, y=y, cv=cv, method=method_name, n_jobs=self.n_jobs, params=routed_params.estimator.fit, ) if len(self.classes_) == 2: # Ensure shape (n_samples, 1) in the binary case if method_name == "predict_proba": # Select the probability column of the postive class predictions = _process_predict_proba( y_pred=predictions, target_type="binary", classes=self.classes_, pos_label=self.classes_[1], ) predictions = predictions.reshape(-1, 1) this_estimator.fit(X, y, **routed_params.estimator.fit) # Note: Here we don't pass on fit_params because the supported # calibrators don't support fit_params anyway calibrated_classifier = _fit_calibrator( this_estimator, predictions, y, self.classes_, self.method, sample_weight, ) self.calibrated_classifiers_.append(calibrated_classifier) first_clf = self.calibrated_classifiers_[0].estimator if hasattr(first_clf, "n_features_in_"): self.n_features_in_ = first_clf.n_features_in_ if hasattr(first_clf, "feature_names_in_"): self.feature_names_in_ = first_clf.feature_names_in_ return self
function_name: CalibratedClassifierCV.fit
context-complexity: Repo-Level
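A minimal CalibratedClassifierCV sketch, calibrating a margin classifier that has no predict_proba of its own:

import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=300, n_features=10, random_state=0)

# Wrap a classifier that only exposes decision_function and calibrate its
# scores with 5-fold cross-validation (Platt / sigmoid scaling).
clf = CalibratedClassifierCV(LinearSVC(), method="sigmoid", cv=5)
clf.fit(X, y)
print(clf.predict_proba(X[:3]))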
repository: scikit-learn
repo_id: 38
target_module_path: sklearn/linear_model/_coordinate_descent.py
prompt:
def fit(self, X, y, sample_weight=None, check_input=True): """Fit model with coordinate descent. Parameters ---------- X : {ndarray, sparse matrix, sparse array} of (n_samples, n_features) Data. Note that large sparse matrices and arrays requiring `int64` indices are not accepted. y : ndarray of shape (n_samples,) or (n_samples, n_targets) Target. Will be cast to X's dtype if necessary. sample_weight : float or array-like of shape (n_samples,), default=None Sample weights. Internally, the `sample_weight` vector will be rescaled to sum to `n_samples`. .. versionadded:: 0.23 check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- self : object Fitted estimator. Notes ----- Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically convert the X input as a Fortran-contiguous numpy array if necessary. To avoid memory re-allocation it is advised to allocate the initial data in memory directly using that format. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_ElasticNet.fit.txt
full_function:
def fit(self, X, y, sample_weight=None, check_input=True): """Fit model with coordinate descent. Parameters ---------- X : {ndarray, sparse matrix, sparse array} of (n_samples, n_features) Data. Note that large sparse matrices and arrays requiring `int64` indices are not accepted. y : ndarray of shape (n_samples,) or (n_samples, n_targets) Target. Will be cast to X's dtype if necessary. sample_weight : float or array-like of shape (n_samples,), default=None Sample weights. Internally, the `sample_weight` vector will be rescaled to sum to `n_samples`. .. versionadded:: 0.23 check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- self : object Fitted estimator. Notes ----- Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically convert the X input as a Fortran-contiguous numpy array if necessary. To avoid memory re-allocation it is advised to allocate the initial data in memory directly using that format. """ if self.alpha == 0: warnings.warn( ( "With alpha=0, this algorithm does not converge " "well. You are advised to use the LinearRegression " "estimator" ), stacklevel=2, ) # Remember if X is copied X_copied = False # We expect X and y to be float64 or float32 Fortran ordered arrays # when bypassing checks if check_input: X_copied = self.copy_X and self.fit_intercept X, y = validate_data( self, X, y, accept_sparse="csc", order="F", dtype=[np.float64, np.float32], force_writeable=True, accept_large_sparse=False, copy=X_copied, multi_output=True, y_numeric=True, ) y = check_array( y, order="F", copy=False, dtype=X.dtype.type, ensure_2d=False ) n_samples, n_features = X.shape alpha = self.alpha if isinstance(sample_weight, numbers.Number): sample_weight = None if sample_weight is not None: if check_input: sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) # TLDR: Rescale sw to sum up to n_samples. # Long: The objective function of Enet # # 1/2 * np.average(squared error, weights=sw) # + alpha * penalty (1) # # is invariant under rescaling of sw. # But enet_path coordinate descent minimizes # # 1/2 * sum(squared error) + alpha' * penalty (2) # # and therefore sets # # alpha' = n_samples * alpha (3) # # inside its function body, which results in objective (2) being # equivalent to (1) in case of no sw. # With sw, however, enet_path should set # # alpha' = sum(sw) * alpha (4) # # Therefore, we use the freedom of Eq. (1) to rescale sw before # calling enet_path, i.e. # # sw *= n_samples / sum(sw) # # such that sum(sw) = n_samples. This way, (3) and (4) are the same. sample_weight = sample_weight * (n_samples / np.sum(sample_weight)) # Note: Alternatively, we could also have rescaled alpha instead # of sample_weight: # # alpha *= np.sum(sample_weight) / n_samples # Ensure copying happens only once, don't do it again if done above. # X and y will be rescaled if sample_weight is not None, order='F' # ensures that the returned X and y are still F-contiguous. 
should_copy = self.copy_X and not X_copied X, y, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit( X, y, None, self.precompute, fit_intercept=self.fit_intercept, copy=should_copy, check_input=check_input, sample_weight=sample_weight, ) # coordinate descent needs F-ordered arrays and _pre_fit might have # called _rescale_data if check_input or sample_weight is not None: X, y = _set_order(X, y, order="F") if y.ndim == 1: y = y[:, np.newaxis] if Xy is not None and Xy.ndim == 1: Xy = Xy[:, np.newaxis] n_targets = y.shape[1] if not self.warm_start or not hasattr(self, "coef_"): coef_ = np.zeros((n_targets, n_features), dtype=X.dtype, order="F") else: coef_ = self.coef_ if coef_.ndim == 1: coef_ = coef_[np.newaxis, :] dual_gaps_ = np.zeros(n_targets, dtype=X.dtype) self.n_iter_ = [] for k in range(n_targets): if Xy is not None: this_Xy = Xy[:, k] else: this_Xy = None _, this_coef, this_dual_gap, this_iter = self.path( X, y[:, k], l1_ratio=self.l1_ratio, eps=None, n_alphas=None, alphas=[alpha], precompute=precompute, Xy=this_Xy, copy_X=True, coef_init=coef_[k], verbose=False, return_n_iter=True, positive=self.positive, check_input=False, # from here on **params tol=self.tol, X_offset=X_offset, X_scale=X_scale, max_iter=self.max_iter, random_state=self.random_state, selection=self.selection, sample_weight=sample_weight, ) coef_[k] = this_coef[:, 0] dual_gaps_[k] = this_dual_gap[0] self.n_iter_.append(this_iter[0]) if n_targets == 1: self.n_iter_ = self.n_iter_[0] self.coef_ = coef_[0] self.dual_gap_ = dual_gaps_[0] else: self.coef_ = coef_ self.dual_gap_ = dual_gaps_ self._set_intercept(X_offset, y_offset, X_scale) # check for finiteness of coefficients if not all(np.isfinite(w).all() for w in [self.coef_, self.intercept_]): raise ValueError( "Coordinate descent iterations resulted in non-finite parameter" " values. The input data may contain large values and need to" " be preprocessed." ) # return self for chaining fit and predict calls return self
ElasticNet.fit
Repo-Level
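A minimal usage sketch for ElasticNet.fit, assuming only the public scikit-learn API; the synthetic data from make_regression and the chosen alpha/l1_ratio values are illustrative, not taken from the row above. It exercises the sample_weight path, which fit internally rescales to sum to n_samples.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import ElasticNet

# Small synthetic regression problem (illustrative values only)
X, y = make_regression(n_samples=50, n_features=10, noise=0.5, random_state=0)
sample_weight = np.full(50, 2.0)  # constant weights; rescaled internally to sum to n_samples

enet = ElasticNet(alpha=0.1, l1_ratio=0.5)
enet.fit(X, y, sample_weight=sample_weight)
print(enet.coef_.shape, enet.intercept_, enet.n_iter_, enet.dual_gap_)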
scikit-learn
55
sklearn/gaussian_process/_gpc.py
def log_marginal_likelihood( self, theta=None, eval_gradient=False, clone_kernel=True ): """Return log-marginal likelihood of theta for training data. In the case of multi-class classification, the mean log-marginal likelihood of the one-versus-rest classifiers are returned. Parameters ---------- theta : array-like of shape (n_kernel_params,), default=None Kernel hyperparameters for which the log-marginal likelihood is evaluated. In the case of multi-class classification, theta may be the hyperparameters of the compound kernel or of an individual kernel. In the latter case, all individual kernel get assigned the same theta values. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default=False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. Note that gradient computation is not supported for non-binary classification. If True, theta must not be None. clone_kernel : bool, default=True If True, the kernel attribute is copied. If False, the kernel attribute is modified, but may result in a performance improvement. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when `eval_gradient` is True. """
/usr/src/app/target_test_cases/failed_tests_GaussianProcessClassifier.log_marginal_likelihood.txt
def log_marginal_likelihood( self, theta=None, eval_gradient=False, clone_kernel=True ): """Return log-marginal likelihood of theta for training data. In the case of multi-class classification, the mean log-marginal likelihood of the one-versus-rest classifiers are returned. Parameters ---------- theta : array-like of shape (n_kernel_params,), default=None Kernel hyperparameters for which the log-marginal likelihood is evaluated. In the case of multi-class classification, theta may be the hyperparameters of the compound kernel or of an individual kernel. In the latter case, all individual kernel get assigned the same theta values. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default=False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. Note that gradient computation is not supported for non-binary classification. If True, theta must not be None. clone_kernel : bool, default=True If True, the kernel attribute is copied. If False, the kernel attribute is modified, but may result in a performance improvement. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when `eval_gradient` is True. """ check_is_fitted(self) if theta is None: if eval_gradient: raise ValueError("Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ theta = np.asarray(theta) if self.n_classes_ == 2: return self.base_estimator_.log_marginal_likelihood( theta, eval_gradient, clone_kernel=clone_kernel ) else: if eval_gradient: raise NotImplementedError( "Gradient of log-marginal-likelihood not implemented for " "multi-class GPC." ) estimators = self.base_estimator_.estimators_ n_dims = estimators[0].kernel_.n_dims if theta.shape[0] == n_dims: # use same theta for all sub-kernels return np.mean( [ estimator.log_marginal_likelihood( theta, clone_kernel=clone_kernel ) for i, estimator in enumerate(estimators) ] ) elif theta.shape[0] == n_dims * self.classes_.shape[0]: # theta for compound kernel return np.mean( [ estimator.log_marginal_likelihood( theta[n_dims * i : n_dims * (i + 1)], clone_kernel=clone_kernel, ) for i, estimator in enumerate(estimators) ] ) else: raise ValueError( "Shape of theta must be either %d or %d. " "Obtained theta with shape %d." % (n_dims, n_dims * self.classes_.shape[0], theta.shape[0]) )
GaussianProcessClassifier.log_marginal_likelihood
Repo-Level
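A sketch of calling log_marginal_likelihood on a fitted binary GaussianProcessClassifier; gradient evaluation is only available in the binary case, and the toy data from make_classification is purely illustrative.

from sklearn.datasets import make_classification
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X, y = make_classification(n_samples=60, n_features=4, random_state=0)
gpc = GaussianProcessClassifier(kernel=1.0 * RBF(1.0), random_state=0).fit(X, y)

# With theta=None the precomputed value at kernel_.theta is returned;
# passing theta explicitly allows gradient evaluation (binary case only).
print(gpc.log_marginal_likelihood())
lml, grad = gpc.log_marginal_likelihood(gpc.kernel_.theta, eval_gradient=True)
print(lml, grad.shape)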
scikit-learn
57
sklearn/gaussian_process/_gpr.py
def fit(self, X, y): """Fit Gaussian process regression model. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Feature vectors or other representations of training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Returns ------- self : object GaussianProcessRegressor class instance. """
/usr/src/app/target_test_cases/failed_tests_GaussianProcessRegressor.fit.txt
def fit(self, X, y): """Fit Gaussian process regression model. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Feature vectors or other representations of training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Returns ------- self : object GaussianProcessRegressor class instance. """ if self.kernel is None: # Use an RBF kernel as default self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF( 1.0, length_scale_bounds="fixed" ) else: self.kernel_ = clone(self.kernel) self._rng = check_random_state(self.random_state) if self.kernel_.requires_vector_input: dtype, ensure_2d = "numeric", True else: dtype, ensure_2d = None, False X, y = validate_data( self, X, y, multi_output=True, y_numeric=True, ensure_2d=ensure_2d, dtype=dtype, ) n_targets_seen = y.shape[1] if y.ndim > 1 else 1 if self.n_targets is not None and n_targets_seen != self.n_targets: raise ValueError( "The number of targets seen in `y` is different from the parameter " f"`n_targets`. Got {n_targets_seen} != {self.n_targets}." ) # Normalize target value if self.normalize_y: self._y_train_mean = np.mean(y, axis=0) self._y_train_std = _handle_zeros_in_scale(np.std(y, axis=0), copy=False) # Remove mean and make unit variance y = (y - self._y_train_mean) / self._y_train_std else: shape_y_stats = (y.shape[1],) if y.ndim == 2 else 1 self._y_train_mean = np.zeros(shape=shape_y_stats) self._y_train_std = np.ones(shape=shape_y_stats) if np.iterable(self.alpha) and self.alpha.shape[0] != y.shape[0]: if self.alpha.shape[0] == 1: self.alpha = self.alpha[0] else: raise ValueError( "alpha must be a scalar or an array with same number of " f"entries as y. ({self.alpha.shape[0]} != {y.shape[0]})" ) self.X_train_ = np.copy(X) if self.copy_X_train else X self.y_train_ = np.copy(y) if self.copy_X_train else y if self.optimizer is not None and self.kernel_.n_dims > 0: # Choose hyperparameters based on maximizing the log-marginal # likelihood (potentially starting from several initial values) def obj_func(theta, eval_gradient=True): if eval_gradient: lml, grad = self.log_marginal_likelihood( theta, eval_gradient=True, clone_kernel=False ) return -lml, -grad else: return -self.log_marginal_likelihood(theta, clone_kernel=False) # First optimize starting from theta specified in kernel optima = [ ( self._constrained_optimization( obj_func, self.kernel_.theta, self.kernel_.bounds ) ) ] # Additional runs are performed from log-uniform chosen initial # theta if self.n_restarts_optimizer > 0: if not np.isfinite(self.kernel_.bounds).all(): raise ValueError( "Multiple optimizer restarts (n_restarts_optimizer>0) " "requires that all bounds are finite." ) bounds = self.kernel_.bounds for iteration in range(self.n_restarts_optimizer): theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1]) optima.append( self._constrained_optimization(obj_func, theta_initial, bounds) ) # Select result from run with minimal (negative) log-marginal # likelihood lml_values = list(map(itemgetter(1), optima)) self.kernel_.theta = optima[np.argmin(lml_values)][0] self.kernel_._check_bounds_params() self.log_marginal_likelihood_value_ = -np.min(lml_values) else: self.log_marginal_likelihood_value_ = self.log_marginal_likelihood( self.kernel_.theta, clone_kernel=False ) # Precompute quantities required for predictions which are independent # of actual query points # Alg. 
2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I) K = self.kernel_(self.X_train_) K[np.diag_indices_from(K)] += self.alpha try: self.L_ = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False) except np.linalg.LinAlgError as exc: exc.args = ( ( f"The kernel, {self.kernel_}, is not returning a positive " "definite matrix. Try gradually increasing the 'alpha' " "parameter of your GaussianProcessRegressor estimator." ), ) + exc.args raise # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y) self.alpha_ = cho_solve( (self.L_, GPR_CHOLESKY_LOWER), self.y_train_, check_finite=False, ) return self
GaussianProcessRegressor.fit
Repo-Level
scikit-learn
58
sklearn/gaussian_process/_gpr.py
def log_marginal_likelihood( self, theta=None, eval_gradient=False, clone_kernel=True ): """Return log-marginal likelihood of theta for training data. Parameters ---------- theta : array-like of shape (n_kernel_params,) default=None Kernel hyperparameters for which the log-marginal likelihood is evaluated. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default=False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. clone_kernel : bool, default=True If True, the kernel attribute is copied. If False, the kernel attribute is modified, but may result in a performance improvement. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True. """
/usr/src/app/target_test_cases/failed_tests_GaussianProcessRegressor.log_marginal_likelihood.txt
def log_marginal_likelihood( self, theta=None, eval_gradient=False, clone_kernel=True ): """Return log-marginal likelihood of theta for training data. Parameters ---------- theta : array-like of shape (n_kernel_params,) default=None Kernel hyperparameters for which the log-marginal likelihood is evaluated. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default=False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. clone_kernel : bool, default=True If True, the kernel attribute is copied. If False, the kernel attribute is modified, but may result in a performance improvement. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True. """ if theta is None: if eval_gradient: raise ValueError("Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ if clone_kernel: kernel = self.kernel_.clone_with_theta(theta) else: kernel = self.kernel_ kernel.theta = theta if eval_gradient: K, K_gradient = kernel(self.X_train_, eval_gradient=True) else: K = kernel(self.X_train_) # Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I) K[np.diag_indices_from(K)] += self.alpha try: L = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False) except np.linalg.LinAlgError: return (-np.inf, np.zeros_like(theta)) if eval_gradient else -np.inf # Support multi-dimensional output of self.y_train_ y_train = self.y_train_ if y_train.ndim == 1: y_train = y_train[:, np.newaxis] # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y) alpha = cho_solve((L, GPR_CHOLESKY_LOWER), y_train, check_finite=False) # Alg 2.1, page 19, line 7 # -0.5 . y^T . alpha - sum(log(diag(L))) - n_samples / 2 log(2*pi) # y is originally thought to be a (1, n_samples) row vector. However, # in multioutputs, y is of shape (n_samples, 2) and we need to compute # y^T . alpha for each output, independently using einsum. Thus, it # is equivalent to: # for output_idx in range(n_outputs): # log_likelihood_dims[output_idx] = ( # y_train[:, [output_idx]] @ alpha[:, [output_idx]] # ) log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha) log_likelihood_dims -= np.log(np.diag(L)).sum() log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi) # the log likehood is sum-up across the outputs log_likelihood = log_likelihood_dims.sum(axis=-1) if eval_gradient: # Eq. 5.9, p. 114, and footnote 5 in p. 114 # 0.5 * trace((alpha . alpha^T - K^-1) . K_gradient) # alpha is supposed to be a vector of (n_samples,) elements. With # multioutputs, alpha is a matrix of size (n_samples, n_outputs). 
# Therefore, we want to construct a matrix of # (n_samples, n_samples, n_outputs) equivalent to # for output_idx in range(n_outputs): # output_alpha = alpha[:, [output_idx]] # inner_term[..., output_idx] = output_alpha @ output_alpha.T inner_term = np.einsum("ik,jk->ijk", alpha, alpha) # compute K^-1 of shape (n_samples, n_samples) K_inv = cho_solve( (L, GPR_CHOLESKY_LOWER), np.eye(K.shape[0]), check_finite=False ) # create a new axis to use broadcasting between inner_term and # K_inv inner_term -= K_inv[..., np.newaxis] # Since we are interested about the trace of # inner_term @ K_gradient, we don't explicitly compute the # matrix-by-matrix operation and instead use an einsum. Therefore # it is equivalent to: # for param_idx in range(n_kernel_params): # for output_idx in range(n_output): # log_likehood_gradient_dims[param_idx, output_idx] = ( # inner_term[..., output_idx] @ # K_gradient[..., param_idx] # ) log_likelihood_gradient_dims = 0.5 * np.einsum( "ijl,jik->kl", inner_term, K_gradient ) # the log likehood gradient is the sum-up across the outputs log_likelihood_gradient = log_likelihood_gradient_dims.sum(axis=-1) if eval_gradient: return log_likelihood, log_likelihood_gradient else: return log_likelihood
GaussianProcessRegressor.log_marginal_likelihood
Self-Contained
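A sketch of evaluating the log-marginal likelihood (and its gradient) of a fitted GaussianProcessRegressor at arbitrary kernel hyperparameters; note that theta lives in log-space. The data and the alpha value are illustrative.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

X = np.linspace(0, 5, 30).reshape(-1, 1)
y = np.sin(X).ravel()
gpr = GaussianProcessRegressor(kernel=RBF(1.0), alpha=1e-2, random_state=0).fit(X, y)

theta = np.log([0.5])                     # log-transformed RBF length scale
lml, grad = gpr.log_marginal_likelihood(theta, eval_gradient=True)
print(lml, grad)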
scikit-learn
59
sklearn/gaussian_process/_gpr.py
def predict(self, X, return_std=False, return_cov=False): """Predict using the Gaussian process regression model. We can also predict based on an unfitted model by using the GP prior. In addition to the mean of the predictive distribution, optionally also returns its standard deviation (`return_std=True`) or covariance (`return_cov=True`). Note that at most one of the two can be requested. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated. return_std : bool, default=False If True, the standard-deviation of the predictive distribution at the query points is returned along with the mean. return_cov : bool, default=False If True, the covariance of the joint predictive distribution at the query points is returned along with the mean. Returns ------- y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets) Mean of predictive distribution at query points. y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional Standard deviation of predictive distribution at query points. Only returned when `return_std` is True. y_cov : ndarray of shape (n_samples, n_samples) or \ (n_samples, n_samples, n_targets), optional Covariance of joint predictive distribution at query points. Only returned when `return_cov` is True. """
/usr/src/app/target_test_cases/failed_tests_GaussianProcessRegressor.predict.txt
def predict(self, X, return_std=False, return_cov=False): """Predict using the Gaussian process regression model. We can also predict based on an unfitted model by using the GP prior. In addition to the mean of the predictive distribution, optionally also returns its standard deviation (`return_std=True`) or covariance (`return_cov=True`). Note that at most one of the two can be requested. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated. return_std : bool, default=False If True, the standard-deviation of the predictive distribution at the query points is returned along with the mean. return_cov : bool, default=False If True, the covariance of the joint predictive distribution at the query points is returned along with the mean. Returns ------- y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets) Mean of predictive distribution at query points. y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional Standard deviation of predictive distribution at query points. Only returned when `return_std` is True. y_cov : ndarray of shape (n_samples, n_samples) or \ (n_samples, n_samples, n_targets), optional Covariance of joint predictive distribution at query points. Only returned when `return_cov` is True. """ if return_std and return_cov: raise RuntimeError( "At most one of return_std or return_cov can be requested." ) if self.kernel is None or self.kernel.requires_vector_input: dtype, ensure_2d = "numeric", True else: dtype, ensure_2d = None, False X = validate_data(self, X, ensure_2d=ensure_2d, dtype=dtype, reset=False) if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior if self.kernel is None: kernel = C(1.0, constant_value_bounds="fixed") * RBF( 1.0, length_scale_bounds="fixed" ) else: kernel = self.kernel n_targets = self.n_targets if self.n_targets is not None else 1 y_mean = np.zeros(shape=(X.shape[0], n_targets)).squeeze() if return_cov: y_cov = kernel(X) if n_targets > 1: y_cov = np.repeat( np.expand_dims(y_cov, -1), repeats=n_targets, axis=-1 ) return y_mean, y_cov elif return_std: y_var = kernel.diag(X) if n_targets > 1: y_var = np.repeat( np.expand_dims(y_var, -1), repeats=n_targets, axis=-1 ) return y_mean, np.sqrt(y_var) else: return y_mean else: # Predict based on GP posterior # Alg 2.1, page 19, line 4 -> f*_bar = K(X_test, X_train) . alpha K_trans = self.kernel_(X, self.X_train_) y_mean = K_trans @ self.alpha_ # undo normalisation y_mean = self._y_train_std * y_mean + self._y_train_mean # if y_mean has shape (n_samples, 1), reshape to (n_samples,) if y_mean.ndim > 1 and y_mean.shape[1] == 1: y_mean = np.squeeze(y_mean, axis=1) # Alg 2.1, page 19, line 5 -> v = L \ K(X_test, X_train)^T V = solve_triangular( self.L_, K_trans.T, lower=GPR_CHOLESKY_LOWER, check_finite=False ) if return_cov: # Alg 2.1, page 19, line 6 -> K(X_test, X_test) - v^T. v y_cov = self.kernel_(X) - V.T @ V # undo normalisation y_cov = np.outer(y_cov, self._y_train_std**2).reshape(*y_cov.shape, -1) # if y_cov has shape (n_samples, n_samples, 1), reshape to # (n_samples, n_samples) if y_cov.shape[2] == 1: y_cov = np.squeeze(y_cov, axis=2) return y_mean, y_cov elif return_std: # Compute variance of predictive distribution # Use einsum to avoid explicitly forming the large matrix # V^T @ V just to extract its diagonal afterward. y_var = self.kernel_.diag(X).copy() y_var -= np.einsum("ij,ji->i", V.T, V) # Check if any of the variances is negative because of # numerical issues. 
If yes: set the variance to 0. y_var_negative = y_var < 0 if np.any(y_var_negative): warnings.warn( "Predicted variances smaller than 0. " "Setting those variances to 0." ) y_var[y_var_negative] = 0.0 # undo normalisation y_var = np.outer(y_var, self._y_train_std**2).reshape(*y_var.shape, -1) # if y_var has shape (n_samples, 1), reshape to (n_samples,) if y_var.shape[1] == 1: y_var = np.squeeze(y_var, axis=1) return y_mean, np.sqrt(y_var) else: return y_mean
GaussianProcessRegressor.predict
Repo-Level
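A sketch of posterior prediction with GaussianProcessRegressor.predict; at most one of return_std / return_cov can be requested per call. Data and kernel are illustrative.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

X = np.linspace(0, 5, 25).reshape(-1, 1)
y = np.sin(X).ravel()
gpr = GaussianProcessRegressor(kernel=RBF(1.0) + WhiteKernel(0.1)).fit(X, y)

X_new = np.linspace(0, 5, 100).reshape(-1, 1)
y_mean, y_std = gpr.predict(X_new, return_std=True)
_, y_cov = gpr.predict(X_new, return_cov=True)
print(y_mean.shape, y_std.shape, y_cov.shape)   # (100,) (100,) (100, 100)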
scikit-learn
66
sklearn/decomposition/_incremental_pca.py
def partial_fit(self, X, y=None, check_input=True): """Incremental fit with X. All of X is processed as a single batch. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. check_input : bool, default=True Run check_array on X. Returns ------- self : object Returns the instance itself. """
/usr/src/app/target_test_cases/failed_tests_IncrementalPCA.partial_fit.txt
def partial_fit(self, X, y=None, check_input=True): """Incremental fit with X. All of X is processed as a single batch. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. check_input : bool, default=True Run check_array on X. Returns ------- self : object Returns the instance itself. """ first_pass = not hasattr(self, "components_") if check_input: if sparse.issparse(X): raise TypeError( "IncrementalPCA.partial_fit does not support " "sparse input. Either convert data to dense " "or use IncrementalPCA.fit to do so in batches." ) X = validate_data( self, X, copy=self.copy, dtype=[np.float64, np.float32], force_writeable=True, reset=first_pass, ) n_samples, n_features = X.shape if first_pass: self.components_ = None if self.n_components is None: if self.components_ is None: self.n_components_ = min(n_samples, n_features) else: self.n_components_ = self.components_.shape[0] elif not self.n_components <= n_features: raise ValueError( "n_components=%r invalid for n_features=%d, need " "more rows than columns for IncrementalPCA " "processing" % (self.n_components, n_features) ) elif not self.n_components <= n_samples: raise ValueError( "n_components=%r must be less or equal to " "the batch number of samples " "%d." % (self.n_components, n_samples) ) else: self.n_components_ = self.n_components if (self.components_ is not None) and ( self.components_.shape[0] != self.n_components_ ): raise ValueError( "Number of input features has changed from %i " "to %i between calls to partial_fit! Try " "setting n_components to a fixed value." % (self.components_.shape[0], self.n_components_) ) # This is the first partial_fit if not hasattr(self, "n_samples_seen_"): self.n_samples_seen_ = 0 self.mean_ = 0.0 self.var_ = 0.0 # Update stats - they are 0 if this is the first step col_mean, col_var, n_total_samples = _incremental_mean_and_var( X, last_mean=self.mean_, last_variance=self.var_, last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]), ) n_total_samples = n_total_samples[0] # Whitening if self.n_samples_seen_ == 0: # If it is the first step, simply whiten X X -= col_mean else: col_batch_mean = np.mean(X, axis=0) X -= col_batch_mean # Build matrix of combined previous basis and new data mean_correction = np.sqrt( (self.n_samples_seen_ / n_total_samples) * n_samples ) * (self.mean_ - col_batch_mean) X = np.vstack( ( self.singular_values_.reshape((-1, 1)) * self.components_, X, mean_correction, ) ) U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False) U, Vt = svd_flip(U, Vt, u_based_decision=False) explained_variance = S**2 / (n_total_samples - 1) explained_variance_ratio = S**2 / np.sum(col_var * n_total_samples) self.n_samples_seen_ = n_total_samples self.components_ = Vt[: self.n_components_] self.singular_values_ = S[: self.n_components_] self.mean_ = col_mean self.var_ = col_var self.explained_variance_ = explained_variance[: self.n_components_] self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_] # we already checked `self.n_components <= n_samples` above if self.n_components_ not in (n_samples, n_features): self.noise_variance_ = explained_variance[self.n_components_ :].mean() else: self.noise_variance_ = 0.0 return self
IncrementalPCA.partial_fit
Repo-Level
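A sketch of streaming PCA via repeated partial_fit calls on batches; the batch split and data are illustrative.

import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 8))

ipca = IncrementalPCA(n_components=3)
for batch in np.array_split(X, 4):        # four batches of 50 samples each
    ipca.partial_fit(batch)

print(ipca.components_.shape)             # (3, 8)
print(ipca.explained_variance_ratio_)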
scikit-learn
70
sklearn/impute/_iterative.py
def fit_transform(self, X, y=None, **params): """Fit the imputer on `X` and return the transformed `X`. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. **params : dict Parameters routed to the `fit` method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if `sklearn.set_config(enable_metadata_routing=True)` is set. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- Xt : array-like, shape (n_samples, n_features) The imputed input data. """
/usr/src/app/target_test_cases/failed_tests_IterativeImputer.fit_transform.txt
def fit_transform(self, X, y=None, **params): """Fit the imputer on `X` and return the transformed `X`. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. **params : dict Parameters routed to the `fit` method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if `sklearn.set_config(enable_metadata_routing=True)` is set. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- Xt : array-like, shape (n_samples, n_features) The imputed input data. """ _raise_for_params(params, self, "fit") routed_params = process_routing( self, "fit", **params, ) self.random_state_ = getattr( self, "random_state_", check_random_state(self.random_state) ) if self.estimator is None: from ..linear_model import BayesianRidge self._estimator = BayesianRidge() else: self._estimator = clone(self.estimator) self.imputation_sequence_ = [] self.initial_imputer_ = None X, Xt, mask_missing_values, complete_mask = self._initial_imputation( X, in_fit=True ) super()._fit_indicator(complete_mask) X_indicator = super()._transform_indicator(complete_mask) if self.max_iter == 0 or np.all(mask_missing_values): self.n_iter_ = 0 return super()._concatenate_indicator(Xt, X_indicator) # Edge case: a single feature, we return the initial imputation. if Xt.shape[1] == 1: self.n_iter_ = 0 return super()._concatenate_indicator(Xt, X_indicator) self._min_value = self._validate_limit(self.min_value, "min", X.shape[1]) self._max_value = self._validate_limit(self.max_value, "max", X.shape[1]) if not np.all(np.greater(self._max_value, self._min_value)): raise ValueError("One (or more) features have min_value >= max_value.") # order in which to impute # note this is probably too slow for large feature data (d > 100000) # and a better way would be good. 
# see: https://goo.gl/KyCNwj and subsequent comments ordered_idx = self._get_ordered_idx(mask_missing_values) self.n_features_with_missing_ = len(ordered_idx) abs_corr_mat = self._get_abs_corr_mat(Xt) n_samples, n_features = Xt.shape if self.verbose > 0: print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,)) start_t = time() if not self.sample_posterior: Xt_previous = Xt.copy() normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values])) for self.n_iter_ in range(1, self.max_iter + 1): if self.imputation_order == "random": ordered_idx = self._get_ordered_idx(mask_missing_values) for feat_idx in ordered_idx: neighbor_feat_idx = self._get_neighbor_feat_idx( n_features, feat_idx, abs_corr_mat ) Xt, estimator = self._impute_one_feature( Xt, mask_missing_values, feat_idx, neighbor_feat_idx, estimator=None, fit_mode=True, params=routed_params.estimator.fit, ) estimator_triplet = _ImputerTriplet( feat_idx, neighbor_feat_idx, estimator ) self.imputation_sequence_.append(estimator_triplet) if self.verbose > 1: print( "[IterativeImputer] Ending imputation round " "%d/%d, elapsed time %0.2f" % (self.n_iter_, self.max_iter, time() - start_t) ) if not self.sample_posterior: inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None) if self.verbose > 0: print( "[IterativeImputer] Change: {}, scaled tolerance: {} ".format( inf_norm, normalized_tol ) ) if inf_norm < normalized_tol: if self.verbose > 0: print("[IterativeImputer] Early stopping criterion reached.") break Xt_previous = Xt.copy() else: if not self.sample_posterior: warnings.warn( "[IterativeImputer] Early stopping criterion not reached.", ConvergenceWarning, ) _assign_where(Xt, X, cond=~mask_missing_values) return super()._concatenate_indicator(Xt, X_indicator)
IterativeImputer.fit_transform
Repo-Level
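A sketch of IterativeImputer.fit_transform on a tiny array with missing entries; the class is exposed behind an experimental flag, hence the enable_iterative_imputer import. The values are illustrative.

import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer

X = np.array([[1.0, 2.0], [3.0, 6.0], [4.0, 8.0], [np.nan, 3.0], [7.0, np.nan]])
imputer = IterativeImputer(max_iter=10, random_state=0)
print(imputer.fit_transform(X))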
scikit-learn
72
sklearn/preprocessing/_discretization.py
def fit(self, X, y=None, sample_weight=None): """ Fit the estimator. Parameters ---------- X : array-like of shape (n_samples, n_features) Data to be discretized. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. sample_weight : ndarray of shape (n_samples,) Contains weight values to be associated with each sample. Cannot be used when `strategy` is set to `"uniform"`. .. versionadded:: 1.3 Returns ------- self : object Returns the instance itself. """
/usr/src/app/target_test_cases/failed_tests_KBinsDiscretizer.fit.txt
def fit(self, X, y=None, sample_weight=None): """ Fit the estimator. Parameters ---------- X : array-like of shape (n_samples, n_features) Data to be discretized. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. sample_weight : ndarray of shape (n_samples,) Contains weight values to be associated with each sample. Cannot be used when `strategy` is set to `"uniform"`. .. versionadded:: 1.3 Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X, dtype="numeric") if self.dtype in (np.float64, np.float32): output_dtype = self.dtype else: # self.dtype is None output_dtype = X.dtype n_samples, n_features = X.shape if sample_weight is not None and self.strategy == "uniform": raise ValueError( "`sample_weight` was provided but it cannot be " "used with strategy='uniform'. Got strategy=" f"{self.strategy!r} instead." ) if self.subsample is not None and n_samples > self.subsample: # Take a subsample of `X` X = resample( X, replace=False, n_samples=self.subsample, random_state=self.random_state, ) n_features = X.shape[1] n_bins = self._validate_n_bins(n_features) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) bin_edges = np.zeros(n_features, dtype=object) for jj in range(n_features): column = X[:, jj] col_min, col_max = column.min(), column.max() if col_min == col_max: warnings.warn( "Feature %d is constant and will be replaced with 0." % jj ) n_bins[jj] = 1 bin_edges[jj] = np.array([-np.inf, np.inf]) continue if self.strategy == "uniform": bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1) elif self.strategy == "quantile": quantiles = np.linspace(0, 100, n_bins[jj] + 1) if sample_weight is None: bin_edges[jj] = np.asarray(np.percentile(column, quantiles)) else: bin_edges[jj] = np.asarray( [ _weighted_percentile(column, sample_weight, q) for q in quantiles ], dtype=np.float64, ) elif self.strategy == "kmeans": from ..cluster import KMeans # fixes import loops # Deterministic initialization with uniform spacing uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1) init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5 # 1D k-means procedure km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1) centers = km.fit( column[:, None], sample_weight=sample_weight ).cluster_centers_[:, 0] # Must sort, centers may be unsorted even with sorted init centers.sort() bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5 bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max] # Remove bins whose width are too small (i.e., <= 1e-8) if self.strategy in ("quantile", "kmeans"): mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8 bin_edges[jj] = bin_edges[jj][mask] if len(bin_edges[jj]) - 1 != n_bins[jj]: warnings.warn( "Bins whose width are too small (i.e., <= " "1e-8) in feature %d are removed. Consider " "decreasing the number of bins." % jj ) n_bins[jj] = len(bin_edges[jj]) - 1 self.bin_edges_ = bin_edges self.n_bins_ = n_bins if "onehot" in self.encode: self._encoder = OneHotEncoder( categories=[np.arange(i) for i in self.n_bins_], sparse_output=self.encode == "onehot", dtype=output_dtype, ) # Fit the OneHotEncoder with toy datasets # so that it's ready for use after the KBinsDiscretizer is fitted self._encoder.fit(np.zeros((1, len(self.n_bins_)))) return self
KBinsDiscretizer.fit
Repo-Level
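A sketch of fitting KBinsDiscretizer with quantile binning and ordinal encoding; the bin edges are learned per feature during fit. The toy matrix and n_bins are illustrative.

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

X = np.array([[-2.0, 1.0], [-1.0, 2.0], [0.0, 3.0], [1.0, 4.0], [2.0, 5.0]])
est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy="quantile")
est.fit(X)
print(est.bin_edges_)
print(est.transform(X))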
scikit-learn
75
sklearn/cluster/_kmeans.py
def fit(self, X, y=None, sample_weight=None): """Compute k-means clustering. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training instances to cluster. It must be noted that the data will be converted to C ordering, which will cause a memory copy if the given data is not C-contiguous. If a sparse matrix is passed, a copy will be made if it's not in CSR format. y : Ignored Not used, present here for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None The weights for each observation in X. If None, all observations are assigned equal weight. `sample_weight` is not used during initialization if `init` is a callable or a user provided array. .. versionadded:: 0.20 Returns ------- self : object Fitted estimator. """
/usr/src/app/target_test_cases/failed_tests_KMeans.fit.txt
def fit(self, X, y=None, sample_weight=None): """Compute k-means clustering. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training instances to cluster. It must be noted that the data will be converted to C ordering, which will cause a memory copy if the given data is not C-contiguous. If a sparse matrix is passed, a copy will be made if it's not in CSR format. y : Ignored Not used, present here for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None The weights for each observation in X. If None, all observations are assigned equal weight. `sample_weight` is not used during initialization if `init` is a callable or a user provided array. .. versionadded:: 0.20 Returns ------- self : object Fitted estimator. """ X = validate_data( self, X, accept_sparse="csr", dtype=[np.float64, np.float32], order="C", copy=self.copy_x, accept_large_sparse=False, ) self._check_params_vs_input(X) random_state = check_random_state(self.random_state) sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) self._n_threads = _openmp_effective_n_threads() # Validate init array init = self.init init_is_array_like = _is_arraylike_not_scalar(init) if init_is_array_like: init = check_array(init, dtype=X.dtype, copy=True, order="C") self._validate_center_shape(X, init) # subtract of mean of x for more accurate distance computations if not sp.issparse(X): X_mean = X.mean(axis=0) # The copy was already done above X -= X_mean if init_is_array_like: init -= X_mean # precompute squared norms of data points x_squared_norms = row_norms(X, squared=True) if self._algorithm == "elkan": kmeans_single = _kmeans_single_elkan else: kmeans_single = _kmeans_single_lloyd self._check_mkl_vcomp(X, X.shape[0]) best_inertia, best_labels = None, None for i in range(self._n_init): # Initialize centers centers_init = self._init_centroids( X, x_squared_norms=x_squared_norms, init=init, random_state=random_state, sample_weight=sample_weight, ) if self.verbose: print("Initialization complete") # run a k-means once labels, inertia, centers, n_iter_ = kmeans_single( X, sample_weight, centers_init, max_iter=self.max_iter, verbose=self.verbose, tol=self._tol, n_threads=self._n_threads, ) # determine if these results are the best so far # we chose a new run if it has a better inertia and the clustering is # different from the best so far (it's possible that the inertia is # slightly better even if the clustering is the same with potentially # permuted labels, due to rounding errors) if best_inertia is None or ( inertia < best_inertia and not _is_same_clustering(labels, best_labels, self.n_clusters) ): best_labels = labels best_centers = centers best_inertia = inertia best_n_iter = n_iter_ if not sp.issparse(X): if not self.copy_x: X += X_mean best_centers += X_mean distinct_clusters = len(set(best_labels)) if distinct_clusters < self.n_clusters: warnings.warn( "Number of distinct clusters ({}) found smaller than " "n_clusters ({}). Possibly due to duplicate points " "in X.".format(distinct_clusters, self.n_clusters), ConvergenceWarning, stacklevel=2, ) self.cluster_centers_ = best_centers self._n_features_out = self.cluster_centers_.shape[0] self.labels_ = best_labels self.inertia_ = best_inertia self.n_iter_ = best_n_iter return self
KMeans.fit
Repo-Level
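A sketch of KMeans.fit on blob data; the number of clusters and the blob parameters are illustrative.

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=4, cluster_std=0.7, random_state=0)
km = KMeans(n_clusters=4, n_init=10, random_state=0).fit(X)
print(km.cluster_centers_.shape)          # (4, 2)
print(km.inertia_, km.n_iter_)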
scikit-learn
77
sklearn/impute/_knn.py
def transform(self, X): """Impute all missing values in X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data to complete. Returns ------- X : array-like of shape (n_samples, n_output_features) The imputed dataset. `n_output_features` is the number of features that is not always missing during `fit`. """
/usr/src/app/target_test_cases/failed_tests_KNNImputer.transform.txt
def transform(self, X): """Impute all missing values in X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data to complete. Returns ------- X : array-like of shape (n_samples, n_output_features) The imputed dataset. `n_output_features` is the number of features that is not always missing during `fit`. """ check_is_fitted(self) if not is_scalar_nan(self.missing_values): ensure_all_finite = True else: ensure_all_finite = "allow-nan" X = validate_data( self, X, accept_sparse=False, dtype=FLOAT_DTYPES, force_writeable=True, ensure_all_finite=ensure_all_finite, copy=self.copy, reset=False, ) mask = _get_mask(X, self.missing_values) mask_fit_X = self._mask_fit_X valid_mask = self._valid_mask X_indicator = super()._transform_indicator(mask) # Removes columns where the training data is all nan if not np.any(mask[:, valid_mask]): # No missing values in X if self.keep_empty_features: Xc = X Xc[:, ~valid_mask] = 0 else: Xc = X[:, valid_mask] # Even if there are no missing values in X, we still concatenate Xc # with the missing value indicator matrix, X_indicator. # This is to ensure that the output maintains consistency in terms # of columns, regardless of whether missing values exist in X or not. return super()._concatenate_indicator(Xc, X_indicator) row_missing_idx = np.flatnonzero(mask[:, valid_mask].any(axis=1)) non_missing_fix_X = np.logical_not(mask_fit_X) # Maps from indices from X to indices in dist matrix dist_idx_map = np.zeros(X.shape[0], dtype=int) dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0]) def process_chunk(dist_chunk, start): row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)] # Find and impute missing by column for col in range(X.shape[1]): if not valid_mask[col]: # column was all missing during training continue col_mask = mask[row_missing_chunk, col] if not np.any(col_mask): # column has no missing values continue (potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col]) # receivers_idx are indices in X receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)] # distances for samples that needed imputation for column dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][ :, potential_donors_idx ] # receivers with all nan distances impute with mean all_nan_dist_mask = np.isnan(dist_subset).all(axis=1) all_nan_receivers_idx = receivers_idx[all_nan_dist_mask] if all_nan_receivers_idx.size: col_mean = np.ma.array( self._fit_X[:, col], mask=mask_fit_X[:, col] ).mean() X[all_nan_receivers_idx, col] = col_mean if len(all_nan_receivers_idx) == len(receivers_idx): # all receivers imputed with mean continue # receivers with at least one defined distance receivers_idx = receivers_idx[~all_nan_dist_mask] dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][ :, potential_donors_idx ] n_neighbors = min(self.n_neighbors, len(potential_donors_idx)) value = self._calc_impute( dist_subset, n_neighbors, self._fit_X[potential_donors_idx, col], mask_fit_X[potential_donors_idx, col], ) X[receivers_idx, col] = value # process in fixed-memory chunks gen = pairwise_distances_chunked( X[row_missing_idx, :], self._fit_X, metric=self.metric, missing_values=self.missing_values, ensure_all_finite=ensure_all_finite, reduce_func=process_chunk, ) for chunk in gen: # process_chunk modifies X in place. No return value. pass if self.keep_empty_features: Xc = X Xc[:, ~valid_mask] = 0 else: Xc = X[:, valid_mask] return super()._concatenate_indicator(Xc, X_indicator)
KNNImputer.transform
Repo-Level
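A sketch of KNNImputer.transform, where each missing entry is filled from the mean of the nearest neighbours that have an observed value in that column; the tiny train/test arrays are illustrative.

import numpy as np
from sklearn.impute import KNNImputer

X_train = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
X_test = np.array([[np.nan, 3.0], [6.0, np.nan]])

imputer = KNNImputer(n_neighbors=2).fit(X_train)
print(imputer.transform(X_test))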
scikit-learn
99
sklearn/linear_model/_linear_loss.py
def gradient_hessian( self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1, gradient_out=None, hessian_out=None, raw_prediction=None, ): """Computes gradient and hessian w.r.t. coef. Parameters ---------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Coefficients of a linear model. If shape (n_classes * n_dof,), the classes of one feature are contiguous, i.e. one reconstructs the 2d-array via coef.reshape((n_classes, -1), order="F"). X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : contiguous array of shape (n_samples,) Observed, true target values. sample_weight : None or contiguous array of shape (n_samples,), default=None Sample weights. l2_reg_strength : float, default=0.0 L2 regularization strength n_threads : int, default=1 Number of OpenMP threads to use. gradient_out : None or ndarray of shape coef.shape A location into which the gradient is stored. If None, a new array might be created. hessian_out : None or ndarray A location into which the hessian is stored. If None, a new array might be created. raw_prediction : C-contiguous array of shape (n_samples,) or array of \ shape (n_samples, n_classes) Raw prediction values (in link space). If provided, these are used. If None, then raw_prediction = X @ coef + intercept is calculated. Returns ------- gradient : ndarray of shape coef.shape The gradient of the loss. hessian : ndarray Hessian matrix. hessian_warning : bool True if pointwise hessian has more than half of its elements non-positive. """
/usr/src/app/target_test_cases/failed_tests_LinearModelLoss.gradient_hessian.txt
def gradient_hessian( self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1, gradient_out=None, hessian_out=None, raw_prediction=None, ): """Computes gradient and hessian w.r.t. coef. Parameters ---------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Coefficients of a linear model. If shape (n_classes * n_dof,), the classes of one feature are contiguous, i.e. one reconstructs the 2d-array via coef.reshape((n_classes, -1), order="F"). X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : contiguous array of shape (n_samples,) Observed, true target values. sample_weight : None or contiguous array of shape (n_samples,), default=None Sample weights. l2_reg_strength : float, default=0.0 L2 regularization strength n_threads : int, default=1 Number of OpenMP threads to use. gradient_out : None or ndarray of shape coef.shape A location into which the gradient is stored. If None, a new array might be created. hessian_out : None or ndarray A location into which the hessian is stored. If None, a new array might be created. raw_prediction : C-contiguous array of shape (n_samples,) or array of \ shape (n_samples, n_classes) Raw prediction values (in link space). If provided, these are used. If None, then raw_prediction = X @ coef + intercept is calculated. Returns ------- gradient : ndarray of shape coef.shape The gradient of the loss. hessian : ndarray Hessian matrix. hessian_warning : bool True if pointwise hessian has more than half of its elements non-positive. """ n_samples, n_features = X.shape n_dof = n_features + int(self.fit_intercept) if raw_prediction is None: weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) else: weights, intercept = self.weight_intercept(coef) grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian( y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=n_threads, ) sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) grad_pointwise /= sw_sum hess_pointwise /= sw_sum # For non-canonical link functions and far away from the optimum, the pointwise # hessian can be negative. We take care that 75% of the hessian entries are # positive. hessian_warning = np.mean(hess_pointwise <= 0) > 0.25 hess_pointwise = np.abs(hess_pointwise) if not self.base_loss.is_multiclass: # gradient if gradient_out is None: grad = np.empty_like(coef, dtype=weights.dtype) else: grad = gradient_out grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights if self.fit_intercept: grad[-1] = grad_pointwise.sum() # hessian if hessian_out is None: hess = np.empty(shape=(n_dof, n_dof), dtype=weights.dtype) else: hess = hessian_out if hessian_warning: # Exit early without computing the hessian. return grad, hess, hessian_warning # TODO: This "sandwich product", X' diag(W) X, is the main computational # bottleneck for solvers. A dedicated Cython routine might improve it # exploiting the symmetry (as opposed to, e.g., BLAS gemm). if sparse.issparse(X): hess[:n_features, :n_features] = ( X.T @ sparse.dia_matrix( (hess_pointwise, 0), shape=(n_samples, n_samples) ) @ X ).toarray() else: # np.einsum may use less memory but the following, using BLAS matrix # multiplication (gemm), is by far faster. WX = hess_pointwise[:, None] * X hess[:n_features, :n_features] = np.dot(X.T, WX) if l2_reg_strength > 0: # The L2 penalty enters the Hessian on the diagonal only. To add those # terms, we use a flattened view on the array. 
hess.reshape(-1)[ : (n_features * n_dof) : (n_dof + 1) ] += l2_reg_strength if self.fit_intercept: # With intercept included as added column to X, the hessian becomes # hess = (X, 1)' @ diag(h) @ (X, 1) # = (X' @ diag(h) @ X, X' @ h) # ( h @ X, sum(h)) # The left upper part has already been filled, it remains to compute # the last row and the last column. Xh = X.T @ hess_pointwise hess[:-1, -1] = Xh hess[-1, :-1] = Xh hess[-1, -1] = hess_pointwise.sum() else: # Here we may safely assume HalfMultinomialLoss aka categorical # cross-entropy. raise NotImplementedError return grad, hess, hessian_warning
LinearModelLoss.gradient_hessian
File-Level
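A sketch of calling gradient_hessian directly. LinearModelLoss and HalfBinomialLoss are private scikit-learn helpers (sklearn.linear_model._linear_loss, sklearn._loss.loss), so the import paths and constructor signatures should be treated as version-dependent assumptions; the data is synthetic.

import numpy as np
from sklearn._loss.loss import HalfBinomialLoss
from sklearn.linear_model._linear_loss import LinearModelLoss

rng = np.random.RandomState(0)
X = rng.normal(size=(20, 3))
y = rng.randint(0, 2, size=20).astype(np.float64)    # binary targets as floats

loss = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=True)
coef = np.zeros(X.shape[1] + 1)                      # last entry is the intercept
grad, hess, hess_warning = loss.gradient_hessian(coef, X, y, l2_reg_strength=1.0)
print(grad.shape, hess.shape, hess_warning)          # (4,) (4, 4) False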
scikit-learn
100
sklearn/linear_model/_linear_loss.py
def gradient_hessian_product( self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1 ): """Computes gradient and hessp (hessian product function) w.r.t. coef. Parameters ---------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Coefficients of a linear model. If shape (n_classes * n_dof,), the classes of one feature are contiguous, i.e. one reconstructs the 2d-array via coef.reshape((n_classes, -1), order="F"). X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : contiguous array of shape (n_samples,) Observed, true target values. sample_weight : None or contiguous array of shape (n_samples,), default=None Sample weights. l2_reg_strength : float, default=0.0 L2 regularization strength n_threads : int, default=1 Number of OpenMP threads to use. Returns ------- gradient : ndarray of shape coef.shape The gradient of the loss. hessp : callable Function that takes in a vector input of shape of gradient and and returns matrix-vector product with hessian. """
/usr/src/app/target_test_cases/failed_tests_LinearModelLoss.gradient_hessian_product.txt
def gradient_hessian_product( self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1 ): """Computes gradient and hessp (hessian product function) w.r.t. coef. Parameters ---------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Coefficients of a linear model. If shape (n_classes * n_dof,), the classes of one feature are contiguous, i.e. one reconstructs the 2d-array via coef.reshape((n_classes, -1), order="F"). X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : contiguous array of shape (n_samples,) Observed, true target values. sample_weight : None or contiguous array of shape (n_samples,), default=None Sample weights. l2_reg_strength : float, default=0.0 L2 regularization strength n_threads : int, default=1 Number of OpenMP threads to use. Returns ------- gradient : ndarray of shape coef.shape The gradient of the loss. hessp : callable Function that takes in a vector input of shape of gradient and and returns matrix-vector product with hessian. """ (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes n_dof = n_features + int(self.fit_intercept) weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) if not self.base_loss.is_multiclass: grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian( y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=n_threads, ) grad_pointwise /= sw_sum hess_pointwise /= sw_sum grad = np.empty_like(coef, dtype=weights.dtype) grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights if self.fit_intercept: grad[-1] = grad_pointwise.sum() # Precompute as much as possible: hX, hX_sum and hessian_sum hessian_sum = hess_pointwise.sum() if sparse.issparse(X): hX = ( sparse.dia_matrix((hess_pointwise, 0), shape=(n_samples, n_samples)) @ X ) else: hX = hess_pointwise[:, np.newaxis] * X if self.fit_intercept: # Calculate the double derivative with respect to intercept. # Note: In case hX is sparse, hX.sum is a matrix object. hX_sum = np.squeeze(np.asarray(hX.sum(axis=0))) # prevent squeezing to zero-dim array if n_features == 1 hX_sum = np.atleast_1d(hX_sum) # With intercept included and l2_reg_strength = 0, hessp returns # res = (X, 1)' @ diag(h) @ (X, 1) @ s # = (X, 1)' @ (hX @ s[:n_features], sum(h) * s[-1]) # res[:n_features] = X' @ hX @ s[:n_features] + sum(h) * s[-1] # res[-1] = 1' @ hX @ s[:n_features] + sum(h) * s[-1] def hessp(s): ret = np.empty_like(s) if sparse.issparse(X): ret[:n_features] = X.T @ (hX @ s[:n_features]) else: ret[:n_features] = np.linalg.multi_dot([X.T, hX, s[:n_features]]) ret[:n_features] += l2_reg_strength * s[:n_features] if self.fit_intercept: ret[:n_features] += s[-1] * hX_sum ret[-1] = hX_sum @ s[:n_features] + hessian_sum * s[-1] return ret else: # Here we may safely assume HalfMultinomialLoss aka categorical # cross-entropy. # HalfMultinomialLoss computes only the diagonal part of the hessian, i.e. # diagonal in the classes. Here, we want the matrix-vector product of the # full hessian. Therefore, we call gradient_proba. 
grad_pointwise, proba = self.base_loss.gradient_proba( y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=n_threads, ) grad_pointwise /= sw_sum grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights if self.fit_intercept: grad[:, -1] = grad_pointwise.sum(axis=0) # Full hessian-vector product, i.e. not only the diagonal part of the # hessian. Derivation with some index battle for input vector s: # - sample index i # - feature indices j, m # - class indices k, l # - 1_{k=l} is one if k=l else 0 # - p_i_k is the (predicted) probability that sample i belongs to class k # for all i: sum_k p_i_k = 1 # - s_l_m is input vector for class l and feature m # - X' = X transposed # # Note: Hessian with dropping most indices is just: # X' @ p_k (1(k=l) - p_l) @ X # # result_{k j} = sum_{i, l, m} Hessian_{i, k j, m l} * s_l_m # = sum_{i, l, m} (X')_{ji} * p_i_k * (1_{k=l} - p_i_l) # * X_{im} s_l_m # = sum_{i, m} (X')_{ji} * p_i_k # * (X_{im} * s_k_m - sum_l p_i_l * X_{im} * s_l_m) # # See also https://github.com/scikit-learn/scikit-learn/pull/3646#discussion_r17461411 # noqa def hessp(s): s = s.reshape((n_classes, -1), order="F") # shape = (n_classes, n_dof) if self.fit_intercept: s_intercept = s[:, -1] s = s[:, :-1] # shape = (n_classes, n_features) else: s_intercept = 0 tmp = X @ s.T + s_intercept # X_{im} * s_k_m tmp += (-proba * tmp).sum(axis=1)[:, np.newaxis] # - sum_l .. tmp *= proba # * p_i_k if sample_weight is not None: tmp *= sample_weight[:, np.newaxis] # hess_prod = empty_like(grad), but we ravel grad below and this # function is run after that. hess_prod = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") hess_prod[:, :n_features] = (tmp.T @ X) / sw_sum + l2_reg_strength * s if self.fit_intercept: hess_prod[:, -1] = tmp.sum(axis=0) / sw_sum if coef.ndim == 1: return hess_prod.ravel(order="F") else: return hess_prod if coef.ndim == 1: return grad.ravel(order="F"), hessp return grad, hessp
LinearModelLoss.gradient_hessian_product
File-Level
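A sketch of gradient_hessian_product in the multinomial case, returning the gradient and a hessian-vector-product callable of the kind used by the lbfgs/newton-cg solvers. Again this relies on private helpers (LinearModelLoss, HalfMultinomialLoss), so the exact signatures are version-dependent assumptions.

import numpy as np
from sklearn._loss.loss import HalfMultinomialLoss
from sklearn.linear_model._linear_loss import LinearModelLoss

rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 30, 4, 3
X = rng.normal(size=(n_samples, n_features))
y = rng.randint(0, n_classes, size=n_samples).astype(np.float64)

loss = LinearModelLoss(base_loss=HalfMultinomialLoss(n_classes=n_classes),
                       fit_intercept=True)
coef = np.zeros(n_classes * (n_features + 1))        # flattened (n_classes * n_dof,)
grad, hessp = loss.gradient_hessian_product(coef, X, y, l2_reg_strength=1.0)
print(grad.shape, hessp(np.ones_like(grad)).shape)   # (15,) (15,)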
scikit-learn
103
sklearn/linear_model/_base.py
def fit(self, X, y, sample_weight=None): """ Fit linear model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Will be cast to X's dtype if necessary. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.17 parameter *sample_weight* support to LinearRegression. Returns ------- self : object Fitted Estimator. """
/usr/src/app/target_test_cases/failed_tests_LinearRegression.fit.txt
def fit(self, X, y, sample_weight=None): """ Fit linear model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Will be cast to X's dtype if necessary. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.17 parameter *sample_weight* support to LinearRegression. Returns ------- self : object Fitted Estimator. """ n_jobs_ = self.n_jobs accept_sparse = False if self.positive else ["csr", "csc", "coo"] X, y = validate_data( self, X, y, accept_sparse=accept_sparse, y_numeric=True, multi_output=True, force_writeable=True, ) has_sw = sample_weight is not None if has_sw: sample_weight = _check_sample_weight( sample_weight, X, dtype=X.dtype, ensure_non_negative=True ) # Note that neither _rescale_data nor the rest of the fit method of # LinearRegression can benefit from in-place operations when X is a # sparse matrix. Therefore, let's not copy X when it is sparse. copy_X_in_preprocess_data = self.copy_X and not sp.issparse(X) X, y, X_offset, y_offset, X_scale = _preprocess_data( X, y, fit_intercept=self.fit_intercept, copy=copy_X_in_preprocess_data, sample_weight=sample_weight, ) if has_sw: # Sample weight can be implemented via a simple rescaling. Note # that we safely do inplace rescaling when _preprocess_data has # already made a copy if requested. X, y, sample_weight_sqrt = _rescale_data( X, y, sample_weight, inplace=copy_X_in_preprocess_data ) if self.positive: if y.ndim < 2: self.coef_ = optimize.nnls(X, y)[0] else: # scipy.optimize.nnls cannot handle y with shape (M, K) outs = Parallel(n_jobs=n_jobs_)( delayed(optimize.nnls)(X, y[:, j]) for j in range(y.shape[1]) ) self.coef_ = np.vstack([out[0] for out in outs]) elif sp.issparse(X): X_offset_scale = X_offset / X_scale if has_sw: def matvec(b): return X.dot(b) - sample_weight_sqrt * b.dot(X_offset_scale) def rmatvec(b): return X.T.dot(b) - X_offset_scale * b.dot(sample_weight_sqrt) else: def matvec(b): return X.dot(b) - b.dot(X_offset_scale) def rmatvec(b): return X.T.dot(b) - X_offset_scale * b.sum() X_centered = sparse.linalg.LinearOperator( shape=X.shape, matvec=matvec, rmatvec=rmatvec ) if y.ndim < 2: self.coef_ = lsqr(X_centered, y)[0] else: # sparse_lstsq cannot handle y with shape (M, K) outs = Parallel(n_jobs=n_jobs_)( delayed(lsqr)(X_centered, y[:, j].ravel()) for j in range(y.shape[1]) ) self.coef_ = np.vstack([out[0] for out in outs]) else: self.coef_, _, self.rank_, self.singular_ = linalg.lstsq(X, y) self.coef_ = self.coef_.T if y.ndim == 1: self.coef_ = np.ravel(self.coef_) self._set_intercept(X_offset, y_offset, X_scale) return self
LinearRegression.fit
Repo-Level
scikit-learn
114
sklearn/cluster/_kmeans.py
def fit(self, X, y=None, sample_weight=None): """Compute the centroids on X by chunking it into mini-batches. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training instances to cluster. It must be noted that the data will be converted to C ordering, which will cause a memory copy if the given data is not C-contiguous. If a sparse matrix is passed, a copy will be made if it's not in CSR format. y : Ignored Not used, present here for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None The weights for each observation in X. If None, all observations are assigned equal weight. `sample_weight` is not used during initialization if `init` is a callable or a user provided array. .. versionadded:: 0.20 Returns ------- self : object Fitted estimator. """
/usr/src/app/target_test_cases/failed_tests_MiniBatchKMeans.fit.txt
def fit(self, X, y=None, sample_weight=None): """Compute the centroids on X by chunking it into mini-batches. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training instances to cluster. It must be noted that the data will be converted to C ordering, which will cause a memory copy if the given data is not C-contiguous. If a sparse matrix is passed, a copy will be made if it's not in CSR format. y : Ignored Not used, present here for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None The weights for each observation in X. If None, all observations are assigned equal weight. `sample_weight` is not used during initialization if `init` is a callable or a user provided array. .. versionadded:: 0.20 Returns ------- self : object Fitted estimator. """ X = validate_data( self, X, accept_sparse="csr", dtype=[np.float64, np.float32], order="C", accept_large_sparse=False, ) self._check_params_vs_input(X) random_state = check_random_state(self.random_state) sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) self._n_threads = _openmp_effective_n_threads() n_samples, n_features = X.shape # Validate init array init = self.init if _is_arraylike_not_scalar(init): init = check_array(init, dtype=X.dtype, copy=True, order="C") self._validate_center_shape(X, init) self._check_mkl_vcomp(X, self._batch_size) # precompute squared norms of data points x_squared_norms = row_norms(X, squared=True) # Validation set for the init validation_indices = random_state.randint(0, n_samples, self._init_size) X_valid = X[validation_indices] sample_weight_valid = sample_weight[validation_indices] # perform several inits with random subsets best_inertia = None for init_idx in range(self._n_init): if self.verbose: print(f"Init {init_idx + 1}/{self._n_init} with method {init}") # Initialize the centers using only a fraction of the data as we # expect n_samples to be very large when using MiniBatchKMeans. cluster_centers = self._init_centroids( X, x_squared_norms=x_squared_norms, init=init, random_state=random_state, init_size=self._init_size, sample_weight=sample_weight, ) # Compute inertia on a validation set. 
_, inertia = _labels_inertia_threadpool_limit( X_valid, sample_weight_valid, cluster_centers, n_threads=self._n_threads, ) if self.verbose: print(f"Inertia for init {init_idx + 1}/{self._n_init}: {inertia}") if best_inertia is None or inertia < best_inertia: init_centers = cluster_centers best_inertia = inertia centers = init_centers centers_new = np.empty_like(centers) # Initialize counts self._counts = np.zeros(self.n_clusters, dtype=X.dtype) # Attributes to monitor the convergence self._ewa_inertia = None self._ewa_inertia_min = None self._no_improvement = 0 # Initialize number of samples seen since last reassignment self._n_since_last_reassign = 0 n_steps = (self.max_iter * n_samples) // self._batch_size with _get_threadpool_controller().limit(limits=1, user_api="blas"): # Perform the iterative optimization until convergence for i in range(n_steps): # Sample a minibatch from the full dataset minibatch_indices = random_state.randint(0, n_samples, self._batch_size) # Perform the actual update step on the minibatch data batch_inertia = _mini_batch_step( X=X[minibatch_indices], sample_weight=sample_weight[minibatch_indices], centers=centers, centers_new=centers_new, weight_sums=self._counts, random_state=random_state, random_reassign=self._random_reassign(), reassignment_ratio=self.reassignment_ratio, verbose=self.verbose, n_threads=self._n_threads, ) if self._tol > 0.0: centers_squared_diff = np.sum((centers_new - centers) ** 2) else: centers_squared_diff = 0 centers, centers_new = centers_new, centers # Monitor convergence and do early stopping if necessary if self._mini_batch_convergence( i, n_steps, n_samples, centers_squared_diff, batch_inertia ): break self.cluster_centers_ = centers self._n_features_out = self.cluster_centers_.shape[0] self.n_steps_ = i + 1 self.n_iter_ = int(np.ceil(((i + 1) * self._batch_size) / n_samples)) if self.compute_labels: self.labels_, self.inertia_ = _labels_inertia_threadpool_limit( X, sample_weight, self.cluster_centers_, n_threads=self._n_threads, ) else: self.inertia_ = self._ewa_inertia * n_samples return self
MiniBatchKMeans.fit
Repo-Level
scikit-learn
115
sklearn/cluster/_kmeans.py
def partial_fit(self, X, y=None, sample_weight=None): """Update k means estimate on a single mini-batch X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training instances to cluster. It must be noted that the data will be converted to C ordering, which will cause a memory copy if the given data is not C-contiguous. If a sparse matrix is passed, a copy will be made if it's not in CSR format. y : Ignored Not used, present here for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None The weights for each observation in X. If None, all observations are assigned equal weight. `sample_weight` is not used during initialization if `init` is a callable or a user provided array. Returns ------- self : object Return updated estimator. """
/usr/src/app/target_test_cases/failed_tests_MiniBatchKMeans.partial_fit.txt
def partial_fit(self, X, y=None, sample_weight=None): """Update k means estimate on a single mini-batch X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training instances to cluster. It must be noted that the data will be converted to C ordering, which will cause a memory copy if the given data is not C-contiguous. If a sparse matrix is passed, a copy will be made if it's not in CSR format. y : Ignored Not used, present here for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None The weights for each observation in X. If None, all observations are assigned equal weight. `sample_weight` is not used during initialization if `init` is a callable or a user provided array. Returns ------- self : object Return updated estimator. """ has_centers = hasattr(self, "cluster_centers_") X = validate_data( self, X, accept_sparse="csr", dtype=[np.float64, np.float32], order="C", accept_large_sparse=False, reset=not has_centers, ) self._random_state = getattr( self, "_random_state", check_random_state(self.random_state) ) sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) self.n_steps_ = getattr(self, "n_steps_", 0) # precompute squared norms of data points x_squared_norms = row_norms(X, squared=True) if not has_centers: # this instance has not been fitted yet (fit or partial_fit) self._check_params_vs_input(X) self._n_threads = _openmp_effective_n_threads() # Validate init array init = self.init if _is_arraylike_not_scalar(init): init = check_array(init, dtype=X.dtype, copy=True, order="C") self._validate_center_shape(X, init) self._check_mkl_vcomp(X, X.shape[0]) # initialize the cluster centers self.cluster_centers_ = self._init_centroids( X, x_squared_norms=x_squared_norms, init=init, random_state=self._random_state, init_size=self._init_size, sample_weight=sample_weight, ) # Initialize counts self._counts = np.zeros(self.n_clusters, dtype=X.dtype) # Initialize number of samples seen since last reassignment self._n_since_last_reassign = 0 with _get_threadpool_controller().limit(limits=1, user_api="blas"): _mini_batch_step( X, sample_weight=sample_weight, centers=self.cluster_centers_, centers_new=self.cluster_centers_, weight_sums=self._counts, random_state=self._random_state, random_reassign=self._random_reassign(), reassignment_ratio=self.reassignment_ratio, verbose=self.verbose, n_threads=self._n_threads, ) if self.compute_labels: self.labels_, self.inertia_ = _labels_inertia_threadpool_limit( X, sample_weight, self.cluster_centers_, n_threads=self._n_threads, ) self.n_steps_ += 1 self._n_features_out = self.cluster_centers_.shape[0] return self
MiniBatchKMeans.partial_fit
Repo-Level
scikit-learn
120
sklearn/linear_model/_coordinate_descent.py
def fit(self, X, y): """Fit MultiTaskElasticNet model with coordinate descent. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data. y : ndarray of shape (n_samples, n_targets) Target. Will be cast to X's dtype if necessary. Returns ------- self : object Fitted estimator. Notes ----- Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically convert the X input as a Fortran-contiguous numpy array if necessary. To avoid memory re-allocation it is advised to allocate the initial data in memory directly using that format. """
/usr/src/app/target_test_cases/failed_tests_MultiTaskElasticNet.fit.txt
def fit(self, X, y): """Fit MultiTaskElasticNet model with coordinate descent. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data. y : ndarray of shape (n_samples, n_targets) Target. Will be cast to X's dtype if necessary. Returns ------- self : object Fitted estimator. Notes ----- Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically convert the X input as a Fortran-contiguous numpy array if necessary. To avoid memory re-allocation it is advised to allocate the initial data in memory directly using that format. """ # Need to validate separately here. # We can't pass multi_output=True because that would allow y to be csr. check_X_params = dict( dtype=[np.float64, np.float32], order="F", force_writeable=True, copy=self.copy_X and self.fit_intercept, ) check_y_params = dict(ensure_2d=False, order="F") X, y = validate_data( self, X, y, validate_separately=(check_X_params, check_y_params) ) check_consistent_length(X, y) y = y.astype(X.dtype) if hasattr(self, "l1_ratio"): model_str = "ElasticNet" else: model_str = "Lasso" if y.ndim == 1: raise ValueError("For mono-task outputs, use %s" % model_str) n_samples, n_features = X.shape n_targets = y.shape[1] X, y, X_offset, y_offset, X_scale = _preprocess_data( X, y, fit_intercept=self.fit_intercept, copy=False ) if not self.warm_start or not hasattr(self, "coef_"): self.coef_ = np.zeros( (n_targets, n_features), dtype=X.dtype.type, order="F" ) l1_reg = self.alpha * self.l1_ratio * n_samples l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory random = self.selection == "random" ( self.coef_, self.dual_gap_, self.eps_, self.n_iter_, ) = cd_fast.enet_coordinate_descent_multi_task( self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol, check_random_state(self.random_state), random, ) # account for different objective scaling here and in cd_fast self.dual_gap_ /= n_samples self._set_intercept(X_offset, y_offset, X_scale) # return self for chaining fit and predict calls return self
MultiTaskElasticNet.fit
Repo-Level
scikit-learn
123
sklearn/neighbors/_nca.py
def fit(self, X, y): """Fit the model according to the given training data. Parameters ---------- X : array-like of shape (n_samples, n_features) The training samples. y : array-like of shape (n_samples,) The corresponding training labels. Returns ------- self : object Fitted estimator. """
/usr/src/app/target_test_cases/failed_tests_NeighborhoodComponentsAnalysis.fit.txt
def fit(self, X, y): """Fit the model according to the given training data. Parameters ---------- X : array-like of shape (n_samples, n_features) The training samples. y : array-like of shape (n_samples,) The corresponding training labels. Returns ------- self : object Fitted estimator. """ # Validate the inputs X and y, and converts y to numerical classes. X, y = validate_data(self, X, y, ensure_min_samples=2) check_classification_targets(y) y = LabelEncoder().fit_transform(y) # Check the preferred dimensionality of the projected space if self.n_components is not None and self.n_components > X.shape[1]: raise ValueError( "The preferred dimensionality of the " f"projected space `n_components` ({self.n_components}) cannot " "be greater than the given data " f"dimensionality ({X.shape[1]})!" ) # If warm_start is enabled, check that the inputs are consistent if ( self.warm_start and hasattr(self, "components_") and self.components_.shape[1] != X.shape[1] ): raise ValueError( f"The new inputs dimensionality ({X.shape[1]}) does not " "match the input dimensionality of the " f"previously learned transformation ({self.components_.shape[1]})." ) # Check how the linear transformation should be initialized init = self.init if isinstance(init, np.ndarray): init = check_array(init) # Assert that init.shape[1] = X.shape[1] if init.shape[1] != X.shape[1]: raise ValueError( f"The input dimensionality ({init.shape[1]}) of the given " "linear transformation `init` must match the " f"dimensionality of the given inputs `X` ({X.shape[1]})." ) # Assert that init.shape[0] <= init.shape[1] if init.shape[0] > init.shape[1]: raise ValueError( f"The output dimensionality ({init.shape[0]}) of the given " "linear transformation `init` cannot be " f"greater than its input dimensionality ({init.shape[1]})." ) # Assert that self.n_components = init.shape[0] if self.n_components is not None and self.n_components != init.shape[0]: raise ValueError( "The preferred dimensionality of the " f"projected space `n_components` ({self.n_components}) does" " not match the output dimensionality of " "the given linear transformation " f"`init` ({init.shape[0]})!" ) # Initialize the random generator self.random_state_ = check_random_state(self.random_state) # Measure the total training time t_train = time.time() # Compute a mask that stays fixed during optimization: same_class_mask = y[:, np.newaxis] == y[np.newaxis, :] # (n_samples, n_samples) # Initialize the transformation transformation = np.ravel(self._initialize(X, y, init)) # Create a dictionary of parameters to be passed to the optimizer disp = self.verbose - 2 if self.verbose > 1 else -1 optimizer_params = { "method": "L-BFGS-B", "fun": self._loss_grad_lbfgs, "args": (X, same_class_mask, -1.0), "jac": True, "x0": transformation, "tol": self.tol, "options": dict(maxiter=self.max_iter, disp=disp), "callback": self._callback, } # Call the optimizer self.n_iter_ = 0 opt_result = minimize(**optimizer_params) # Reshape the solution found by the optimizer self.components_ = opt_result.x.reshape(-1, X.shape[1]) # Stop timer t_train = time.time() - t_train if self.verbose: cls_name = self.__class__.__name__ # Warn the user if the algorithm did not converge if not opt_result.success: warn( "[{}] NCA did not converge: {}".format( cls_name, opt_result.message ), ConvergenceWarning, ) print("[{}] Training took {:8.2f}s.".format(cls_name, t_train)) return self
NeighborhoodComponentsAnalysis.fit
Repo-Level
scikit-learn
131
sklearn/multiclass.py
def partial_fit(self, X, y, classes=None, **partial_fit_params): """Partially fit underlying estimators. Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iteration, where the first call should have an array of all target variables. Parameters ---------- X : {array-like, sparse matrix) of shape (n_samples, n_features) Data. y : array-like of shape (n_samples,) Multi-class targets. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. **partial_fit_params : dict Parameters passed to the ``estimator.partial_fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object The partially fitted underlying estimator. """
/usr/src/app/target_test_cases/failed_tests_OneVsOneClassifier.partial_fit.txt
def partial_fit(self, X, y, classes=None, **partial_fit_params): """Partially fit underlying estimators. Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iteration, where the first call should have an array of all target variables. Parameters ---------- X : {array-like, sparse matrix) of shape (n_samples, n_features) Data. y : array-like of shape (n_samples,) Multi-class targets. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. **partial_fit_params : dict Parameters passed to the ``estimator.partial_fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object The partially fitted underlying estimator. """ _raise_for_params(partial_fit_params, self, "partial_fit") routed_params = process_routing( self, "partial_fit", **partial_fit_params, ) first_call = _check_partial_fit_first_call(self, classes) if first_call: self.estimators_ = [ clone(self.estimator) for _ in range(self.n_classes_ * (self.n_classes_ - 1) // 2) ] if len(np.setdiff1d(y, self.classes_)): raise ValueError( "Mini-batch contains {0} while it must be subset of {1}".format( np.unique(y), self.classes_ ) ) X, y = validate_data( self, X, y, accept_sparse=["csr", "csc"], ensure_all_finite=False, reset=first_call, ) check_classification_targets(y) combinations = itertools.combinations(range(self.n_classes_), 2) self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_partial_fit_ovo_binary)( estimator, X, y, self.classes_[i], self.classes_[j], partial_fit_params=routed_params.estimator.partial_fit, ) for estimator, (i, j) in zip(self.estimators_, (combinations)) ) self.pairwise_indices_ = None if hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ return self
OneVsOneClassifier.partial_fit
Repo-Level
scikit-learn
133
sklearn/multiclass.py
def partial_fit(self, X, y, classes=None, **partial_fit_params): """Partially fit underlying estimators. Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iterations. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Multi-class targets. An indicator matrix turns on multilabel classification. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. **partial_fit_params : dict Parameters passed to the ``estimator.partial_fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Instance of partially fitted estimator. """
/usr/src/app/target_test_cases/failed_tests_OneVsRestClassifier.partial_fit.txt
def partial_fit(self, X, y, classes=None, **partial_fit_params): """Partially fit underlying estimators. Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iterations. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Multi-class targets. An indicator matrix turns on multilabel classification. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. **partial_fit_params : dict Parameters passed to the ``estimator.partial_fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Instance of partially fitted estimator. """ _raise_for_params(partial_fit_params, self, "partial_fit") routed_params = process_routing( self, "partial_fit", **partial_fit_params, ) if _check_partial_fit_first_call(self, classes): self.estimators_ = [clone(self.estimator) for _ in range(self.n_classes_)] # A sparse LabelBinarizer, with sparse_output=True, has been # shown to outperform or match a dense label binarizer in all # cases and has also resulted in less or equal memory consumption # in the fit_ovr function overall. self.label_binarizer_ = LabelBinarizer(sparse_output=True) self.label_binarizer_.fit(self.classes_) if len(np.setdiff1d(y, self.classes_)): raise ValueError( ( "Mini-batch contains {0} while classes " + "must be subset of {1}" ).format(np.unique(y), self.classes_) ) Y = self.label_binarizer_.transform(y) Y = Y.tocsc() columns = (col.toarray().ravel() for col in Y.T) self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_partial_fit_binary)( estimator, X, column, partial_fit_params=routed_params.estimator.partial_fit, ) for estimator, column in zip(self.estimators_, columns) ) if hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ return self
OneVsRestClassifier.partial_fit
Repo-Level

Can Language Models Replace Programmers? REPOCOD Says 'Not Yet'

Large language models (LLMs) have achieved high accuracy, i.e., more than 90% pass@1, in solving Python coding problems in HumanEval and MBPP. This raises a natural question: do LLMs achieve code completion performance comparable to human developers? Unfortunately, one cannot answer this question using existing manually crafted or simple (e.g., single-line) code generation benchmarks, since such tasks fail to represent real-world software development. In addition, existing benchmarks often use weak code correctness metrics, leading to misleading conclusions.

To address these challenges, we create REPOCOD, a code generation benchmark with 980 problems collected from 11 popular real-world projects, more than 58% of which require file-level or repository-level context information. In addition, REPOCOD has the longest average canonical solution length (331.6 tokens) and the highest average cyclomatic complexity (9.00) among existing benchmarks. Each task in REPOCOD includes 313.5 developer-written test cases on average for more reliable correctness evaluation. In our evaluation of ten LLMs, none achieves more than 30% pass@1 on REPOCOD, revealing the need for stronger LLMs that can help developers in real-world software development.
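
For readers less familiar with the metric, pass@1 with a single generated solution per task reduces to the fraction of tasks whose generation passes all of that task's developer-written tests. A minimal sketch, using placeholder outcomes rather than actual REPOCOD results:

def pass_at_1(passed: dict[str, bool]) -> float:
    # Fraction of tasks whose single generated solution passed every test case.
    return sum(passed.values()) / len(passed) if passed else 0.0

# Placeholder outcomes (one boolean per task; not REPOCOD numbers):
outcomes = {"task_0": True, "task_1": False, "task_2": False}
print(f"pass@1 = {pass_at_1(outcomes):.2%}")   # -> pass@1 = 33.33%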

For easier evaluation, we sample 200 of the hardest problems in REPOCOD to create REPOCOD-Lite, using the product of the prompt length and the canonical solution length (measured in lines) as a difficulty score. From the three categories of problems (self-contained, file-level, and repo-level), we select 66, 67, and 67 samples respectively, in descending order of this score.
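
In spirit, this selection is easy to reproduce. The sketch below assumes a pandas DataFrame with the dataset's prompt and full_function columns plus a per-sample category label; the category column name and its values are placeholders and may not match the release exactly:

import pandas as pd

def select_lite(df: pd.DataFrame, quotas: dict[str, int]) -> pd.DataFrame:
    # Difficulty score: prompt line count * canonical solution line count.
    df = df.copy()
    df["difficulty"] = (
        df["prompt"].str.count("\n").add(1)
        * df["full_function"].str.count("\n").add(1)
    )
    # Take the hardest samples per category until each quota is filled.
    picks = [
        df[df["category"] == cat].nlargest(k, "difficulty")
        for cat, k in quotas.items()
    ]
    return pd.concat(picks, ignore_index=True)

# quotas = {"self-contained": 66, "file-level": 67, "repo-level": 67}
# repocod_lite = select_lite(repocod_df, quotas)  # repocod_df: hypothetical full-benchmark DataFrame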

  • For more details on data collection and evaluation results, please refer to our arXiv preprint.

  • Example code for downloading repositories, preparing repository snapshots, and running the test cases for evaluation is provided at code; a simplified sketch of that evaluation step appears after this list.

  • Check our Leaderboard for preliminary results using SOTA LLMs with RAG.
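
As a rough illustration of that evaluation workflow (not the official harness), one possible shape is to patch a generated function into a local repository snapshot and run the tests recorded for the sample. All paths, the textual patching, and the pytest invocation below are assumptions made for this sketch; use the released evaluation code for real results.

import subprocess
from pathlib import Path

def evaluate_candidate(repo_root: Path, sample: dict, generated_code: str) -> bool:
    # Patch the generated function into the repository snapshot.
    target = repo_root / sample["target_module_path"]
    original = target.read_text()
    try:
        # Crude textual replacement; the official harness locates the target
        # function much more carefully than this.
        target.write_text(original.replace(sample["full_function"], generated_code))
        # Run the tests recorded for this sample, assuming they are pytest-runnable.
        result = subprocess.run(
            ["python", "-m", "pytest", sample["relavent_test_path"], "-q"],
            cwd=repo_root, capture_output=True,
        )
        return result.returncode == 0
    finally:
        target.write_text(original)   # restore the snapshot for the next candidate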

Usage

from datasets import load_dataset
data = load_dataset('lt-asset/REPOCOD_Lite')
print(data)
DatasetDict({
    train: Dataset({
        features: ['repository', 'repo_id', 'target_module_path', 'prompt', 'relavent_test_path', 'full_function', 'function_name'],
        num_rows: 200
    })
})
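
Individual records behave like dictionaries, so the fields documented below can be inspected directly. A small follow-up example (the load is repeated so the snippet stands alone):

from collections import Counter
from datasets import load_dataset

data = load_dataset('lt-asset/REPOCOD_Lite')
train = data["train"]

sample = train[0]                     # one REPOCOD-Lite record as a dict
print(sample["repository"], sample["function_name"])
print(sample["prompt"][:200])         # function signature + docstring given to the model

# Distribution of samples across the source repositories
print(Counter(train["repository"]))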

Data Fields

  • repository: the source repository of the current sample
  • repo_id: the unique index of the sample in the corresponding source repository
  • target_module_path: the file path containing the current sample relative to the root of the source repository
  • prompt: the developer provided function signature and docstring
  • relavent_test_path: the path to the relevant test cases
  • full_function: the canonical solution of the current sample
  • function_name: the name of the target function (current sample)

Example

"repository": "seaborn",                          # collected from seaborn
"repo_id": "6",                                   # first sample from seaborn 
"target_module_path": "seaborn/_base.py",  # the target function is in this path
"prompt": "     def iter_data(
        self, grouping_vars=None, *,
        reverse=False, from_comp_data=False,
        by_facet=True, allow_empty=False, dropna=True,
    ):
        '''Generator for getting subsets of data defined by semantic variables.

        Also injects "col" and "row" into grouping semantics.

        Parameters
        ----------
        grouping_vars : string or list of strings
            Semantic variables that define the subsets of data.
        reverse : bool
            If True, reverse the order of iteration.
        from_comp_data : bool
            If True, use self.comp_data rather than self.plot_data
        by_facet : bool
            If True, add faceting variables to the set of grouping variables.
        allow_empty : bool
            If True, yield an empty dataframe when no observations exist for
            combinations of grouping variables.
        dropna : bool
            If True, remove rows with missing data.

        Yields
        ------
        sub_vars : dict
            Keys are semantic names, values are the level of that semantic.
        sub_data : :class:`pandas.DataFrame`
            Subset of ``plot_data`` for this combination of semantic values.

        '''",                            # the function signature and docstring for the target function
"relevant_test_path": "/usr/src/app/target_test_cases/failed_tests_Continuous.label.txt", # Path to relevant tests for the function
"full_function": "     def iter_data(
        self, grouping_vars=None, *,
        reverse=False, from_comp_data=False,
        by_facet=True, allow_empty=False, dropna=True,
    ):
        '''Generator for getting subsets of data defined by semantic variables.

        Also injects "col" and "row" into grouping semantics.

        Parameters
        ----------
        grouping_vars : string or list of strings
            Semantic variables that define the subsets of data.
        reverse : bool
            If True, reverse the order of iteration.
        from_comp_data : bool
            If True, use self.comp_data rather than self.plot_data
        by_facet : bool
            If True, add faceting variables to the set of grouping variables.
        allow_empty : bool
            If True, yield an empty dataframe when no observations exist for
            combinations of grouping variables.
        dropna : bool
            If True, remove rows with missing data.

        Yields
        ------
        sub_vars : dict
            Keys are semantic names, values are the level of that semantic.
        sub_data : :class:`pandas.DataFrame`
            Subset of ``plot_data`` for this combination of semantic values.

        '''
        if grouping_vars is None:
            grouping_vars = []
        ...",                            # the full snippet of the target function, including the function signature and docstring for the target function
"function_name": "VectorPlotter.iter_data"               # The name of the target function

Citation

@misc{liang2024repocod,
      title={Can Language Models Replace Programmers? REPOCOD Says 'Not Yet'}, 
      author={Shanchao Liang and Yiran Hu and Nan Jiang and Lin Tan},
      year={2024},
      eprint={2410.21647},
      archivePrefix={arXiv},
      primaryClass={cs.SE},
      url={https://arxiv.org/abs/2410.21647}, 
}