from __future__ import annotations

import sys
from typing import Iterable, Union

import numpy as np
import pandas as pd
import skops.io as sio
from huggingface_hub import hf_hub_download
from numpy import ndarray
from pandas import DataFrame
from sklearn.pipeline import Pipeline

PREDICTOR_SPLIT_TARGET = 0  # Represents 'not noise'.


class CascadedClassifier(Pipeline):

    def __init__(self, steps: list[tuple[str, Pipeline]], memory=None):
        """
        Initializes a cascaded classifier pipeline with two classification steps.

        Parameters
        ----------
        steps: list[tuple[str, Pipeline]]
            A list of (name, pipeline) tuples for the noise and SUA classifiers.
        memory: optional
            Used to cache the fitted transformers of the pipeline.
        """
        super().__init__(steps, memory=memory)
        assert len(steps) == 2, 'CascadedClassifier must have exactly 2 steps'
        self._steps = steps
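
    # Usage sketch (illustrative only; `noise_pipe`, `sua_pipe`, and `features_df`
    # are hypothetical objects, not defined in this module). The step names
    # "noise" and "sua" matter, because predict/predict_proba below look them up
    # via named_steps:
    #
    #     noise_pipe = Pipeline([...])  # binary: not-noise vs. noise
    #     sua_pipe = Pipeline([...])    # binary: MUA vs. SUA for non-noise units
    #     clf = CascadedClassifier([("noise", noise_pipe), ("sua", sua_pipe)])
    #     labels = clf.predict(features_df)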

    @property
    def feature_names_in_(self) -> list[str]:
        """
        Returns the feature names used in the noise classifier.

        Returns
        -------
        list[str]
            The input feature names.
        """
        return self.named_steps["noise"][0].feature_names_in_

    def predict(self, X: list[str] | ndarray | Iterable | DataFrame, **predict_params) -> ndarray:
        """
        Predicts labels for the input data using a cascading approach.

        Parameters
        ----------
        X: list[str] | ndarray | Iterable | DataFrame
            The input data.
        predict_params: dict
            Parameters for the predict method.

        Returns
        -------
        ndarray
            The predicted labels.
        """
        # Step 1: Get initial predictions from the noise classifier.
        y = self.named_steps["noise"][0].predict(X)
        # Identify rows where the prediction is 'not noise'.
        predict_rows = (y == PREDICTOR_SPLIT_TARGET)
        X_predict = X[predict_rows]
        # If no rows require further classification, return the initial predictions.
        if len(X_predict) == 0:
            return y
        # Step 2: Get predictions from the SUA classifier for the 'not noise' subset.
        y2 = self.named_steps["sua"][0].predict(X_predict)
        # Shift the SUA/MUA labels (0/1) by 2 so they do not overlap with the noise labels (0/1).
        y2 += 2
        # Update the initial predictions with the SUA classifier results.
        y[predict_rows] = y2
        return y
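
    # Worked example of the cascade (a sketch; assumes the noise step labels
    # units 0 = 'not noise' / 1 = 'noise' and the SUA step labels them 0 / 1
    # before the shift, as the column comments in predict_proba suggest):
    #
    #     noise step:  y  = [1, 0, 0, 1]   -> rows 1 and 2 are 'not noise'
    #     sua step:    y2 = [0, 1]         -> shifted by 2 to [2, 3]
    #     result:      y  = [1, 2, 3, 1]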

    def predict_proba(
        self,
        X: Union[list[str], ndarray, Iterable, pd.DataFrame],
    ) -> ndarray:
        """
        Predict class probabilities for the input data.

        Parameters
        ----------
        X : Union[list[str], ndarray, Iterable, pd.DataFrame]
            The input data.

        Returns
        -------
        ndarray
            The predicted probabilities for noise, MUA, and SUA.
            Shape: (n_samples, 3)

        Notes
        -----
        The output columns are ordered as [noise, MUA, SUA].
        Each row sums to 1 by construction, since it is filled from the
        probability distribution of exactly one of the two sub-classifiers.
        """
        if len(X) == 0:
            return np.array([], dtype=np.float64).reshape(0, 3)
        # Initialize the probabilities array with zeros.
        n_samples = len(X)
        out_proba = np.zeros((n_samples, 3), dtype=np.float64)
        try:
            # Probabilities from the noise classifier: column 0 = neural, column 1 = noise.
            y_proba_noise = self.named_steps["noise"][0].predict_proba(X)
            # Probabilities from the SUA classifier: column 0 = MUA, column 1 = SUA.
            y_proba_sua = self.named_steps["sua"][0].predict_proba(X)
            for i in range(n_samples):
                if y_proba_noise[i, 0] > y_proba_noise[i, 1]:  # neural > noise
                    out_proba[i, 0] = 0  # noise: zeroed, the unit is considered neural
                    out_proba[i, 1] = y_proba_sua[i, 0]  # MUA
                    out_proba[i, 2] = y_proba_sua[i, 1]  # SUA
                else:  # noise >= neural
                    out_proba[i, 0] = y_proba_noise[i, 1]  # noise
                    out_proba[i, 1] = y_proba_noise[i, 0]  # MUA (the remaining neural probability)
                    out_proba[i, 2] = 0  # SUA: zeroed, the unit is considered noise
            return out_proba
        except Exception as e:
            raise RuntimeError(
                f"Error during probability prediction: {str(e)}"
            ) from e
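
    # Worked example (a sketch; the specific numbers are made up). For one unit,
    # suppose the noise classifier returns [neural=0.8, noise=0.2] and the SUA
    # classifier returns [MUA=0.3, SUA=0.7]:
    #     neural > noise, so the row becomes [0.0, 0.3, 0.7]  (noise, MUA, SUA).
    # If instead the noise classifier returns [neural=0.3, noise=0.7]:
    #     noise >= neural, so the row becomes [0.7, 0.3, 0.0].
    # Either way the row sums to 1, because it copies one sub-classifier's distribution.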