Module dalex.fairness

from ._group_fairness.object import GroupFairnessClassification, GroupFairnessRegression
from ._group_fairness.mitigation import reweight, roc_pivot, resample
__all__ = [
    "GroupFairnessClassification",
    "GroupFairnessRegression",
    "reweight",
    "roc_pivot",
    "resample"
]

Functions

def resample(protected, y, type='uniform', probs=None, verbose=True)

Returns indices of observations for the data.

Method of bias mitigation. Similarly to 'reweight', this method computes the number of observations that would be expected if the protected variable were independent of y, and on this basis decides whether a given subgroup with a certain class (favorable or not) should be more or less numerous. It then performs oversampling or undersampling, depending on the case. If the sampling type is set to 'preferential' and probs are provided, preferential sampling is performed instead of uniform sampling. Depending on the case, preferential sampling selects observations close to or far from the decision border.

Parameters

protected : np.ndarray (1d)
Vector, preferably a 1-dimensional np.ndarray containing strings, which denotes membership in a subgroup. NOTE: List and pd.Series are also supported; however, if provided, they will be transformed into a np.ndarray (1d) with dtype 'U'.
y : pd.Series or pd.DataFrame or np.ndarray (1d)
Target variable with outputs / scores. It must have the same length as protected.
type : {'uniform', 'preferential'}
Type indicates what strategy to use when choosing the samples. (default is 'uniform')
probs : np.ndarray (1d)
Vector with probabilities for each sample. Note that these should be probabilities of the favourable outcome. For the best performance they should be consistent with 'y', but this is not required. This argument is required when using the 'preferential' strategy.
verbose : bool
Print messages about changes of types in 'y' and 'protected' (default is True).

Returns

numpy.ndarray (1d)
Array with indices for the data.

Notes

- <https://link.springer.com/content/pdf/10.1007/s10115-011-0463-8.pdf>
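
For orientation, here is a minimal usage sketch (not part of the dalex docs; the data, the column name and the LogisticRegression model are hypothetical): the returned indices select a resampled copy of the training data, which can then be used to refit a model.

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from dalex.fairness import resample

# hypothetical data: one feature, binary target, two subgroups
rng = np.random.default_rng(0)
X = pd.DataFrame({'feature': rng.random(200)})
y = rng.binomial(1, 0.5, 200)
protected = np.where(rng.random(200) < 0.5, 'group_a', 'group_b')

# indices of the over-/undersampled version of the data
indices = resample(protected, y, type='uniform', verbose=False)

# refit the model on the resampled data
clf = LogisticRegression()
clf.fit(X.iloc[indices], y[indices])
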
def resample(protected, y, type = 'uniform', probs = None,  verbose = True):
    """Returns indices of observations for data.

    Method of bias mitigation. Similarly to 'reweight',
    this method computes the number of observations that would
    be expected if the protected variable were independent of y and
    on this basis decides if a subgroup with a certain class
    (favorable or not) should be more or less numerous. Then it performs
    oversampling or undersampling depending on the case.
    If the type of sampling is set to 'preferential' and probs
    are provided, then preferential sampling is performed
    instead of uniform sampling. Depending on the case, preferential
    sampling selects observations close to or far from the decision border.

    Parameters
    -----------
    protected : np.ndarray (1d)
        Vector, preferably 1-dimensional np.ndarray containing strings,
        which denotes the membership to a subgroup.
        NOTE: List and pd.Series are also supported; however, if provided,
        they will be transformed into a np.ndarray (1d) with dtype 'U'.
    y : pd.Series or pd.DataFrame or np.ndarray (1d)
        Target variable with outputs / scores. It shall have the same length as `protected`
    type : {'uniform', 'preferential'}
        Type indicates what strategy to use when choosing the samples.
        (default is 'uniform')
    probs : np.ndarray (1d)
        Vector with probabilities for each sample. Note that this should be
        probabilities for favourable outcome. For the best performance they
        should be consistent with 'y' but it is not required. This argument
        is required when using strategy of type 'preferential'
    verbose : bool
        Print messages about changes of types in 'y' and 'protected' (default is `True`).

    Returns
    -----------
    numpy.ndarray (1d)
        Array with indices for the data.

    Notes
    -----------
        - https://link.springer.com/content/pdf/10.1007/s10115-011-0463-8.pdf
    """
    if type == 'preferential' and probs is None:
        raise ParameterCheckError("when using type 'preferential' probabilities (probs) must be provided")

    if type not in set(['uniform', 'preferential']):
        raise ParameterCheckError("type must be either 'uniform' or 'preferential'")


    protected = basic_checks.check_protected(protected, verbose)
    y = basic_checks.check_y(y, verbose)

    if type == 'preferential':
        try:
            probs = np.asarray(probs)
            helper.verbose_cat("converted 'probs' to numpy array", verbose=verbose)
        except Exception:
            raise ParameterCheckError("try converting 'probs' to 1D numpy array")

        if probs.ndim != 1 or len(probs) != len(y):
            raise ParameterCheckError("probs parameter must 1D numpy array with the same length as y")


    weights = reweight(protected, y, verbose=False)

    expected_size =  dict.fromkeys(np.unique(protected))
    for key in expected_size.keys():
        expected_size[key] = dict.fromkeys(np.unique(y))

    for subgroup in expected_size.keys():
        for value in np.unique(y):
            case_weights = weights[(subgroup == protected) & (value == y)]
            case_size = len(case_weights)
            weight = case_weights[0]
            expected_size[subgroup][value] = round(case_size * weight)

    indices = []

    for subgroup in expected_size.keys():
        for value in np.unique(y):
            current_case = np.arange(len(y))[(protected == subgroup) & (y == value)]
            expected = expected_size[subgroup][value]
            actual = np.sum((protected == subgroup) & (y == value))
            if expected == actual:
                indices += list(current_case)

            elif expected < actual:
                if type == 'uniform':
                    indices += list(np.random.choice(current_case, expected, replace=False))
                else:
                    sorted_current_case = current_case[np.argsort(probs[current_case])]
                    if value == 0:
                        indices += list(sorted_current_case[:expected])
                    if value == 1:
                        indices += list(sorted_current_case[-expected:])
            else:
                if type == 'uniform':
                    u_ind = list(np.repeat(current_case, expected // actual))
                    u_ind += list(np.random.choice(current_case, expected % actual))

                    indices += u_ind

                else:
                    sorted_current_case = current_case[np.argsort(probs[current_case])]
                    p_ind = list(np.repeat(current_case, expected // actual))

                    if expected % actual != 0:
                        if value == 0:
                            p_ind += list(sorted_current_case[-(expected % actual):])
                        if value == 1:
                            p_ind += list(sorted_current_case[:(expected % actual)])

                    indices += p_ind

    return np.array(indices)
def reweight(protected, y, verbose=True)

Obtain weights for model training and mitigate bias in Statistical Parity.

The method produces a weight for each subgroup and each class. First, it assumes that the protected variable and the class are independent and calculates the expected probability of the event that subgroup == a and class == c. Then it calculates the actual probability of this event based on the empirical data. Finally, the weight is the quotient of those probabilities.

Parameters

protected : np.ndarray (1d)
Vector, preferably a 1-dimensional np.ndarray containing strings, which denotes membership in a subgroup. NOTE: List and pd.Series are also supported; however, if provided, they will be transformed into a np.ndarray (1d) with dtype 'U'.
y : pd.Series or pd.DataFrame or np.ndarray (1d)
Target variable with outputs / scores. It must have the same length as protected.
verbose : bool
Print messages about changes of types in 'y' and 'protected' (default is True).

Returns

numpy.ndarray (1d)
Array with sample (case) weights

Notes

- <https://link.springer.com/content/pdf/10.1007/s10115-011-0463-8.pdf>
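
A minimal usage sketch (hypothetical data and model, not from the dalex docs): the returned weights can be passed as sample weights when training a model, for example a scikit-learn classifier.

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from dalex.fairness import reweight

# hypothetical data: one feature, binary target, two subgroups
rng = np.random.default_rng(0)
X = pd.DataFrame({'feature': rng.random(200)})
y = rng.binomial(1, 0.5, 200)
protected = np.where(rng.random(200) < 0.5, 'group_a', 'group_b')

weights = reweight(protected, y, verbose=False)

clf = LogisticRegression()
# reweight returns an object-dtype array, so cast it to float for scikit-learn
clf.fit(X, y, sample_weight=weights.astype(float))
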
def reweight(protected, y, verbose = True):
    """Obtain weights for model training and mitigate bias in Statistical Parity.

    Method produces a weight for each subgroup and each class.
    First, it assumes that the protected variable and the class are
    independent and calculates the expected probability of the
    event that subgroup == a and class == c.
    Then it calculates the actual probability of this event
    based on the empirical data. Finally, the weight is the
    quotient of those probabilities.

    Parameters
    -----------
    protected : np.ndarray (1d)
        Vector, preferably 1-dimensional np.ndarray containing strings,
        which denotes the membership to a subgroup.
        NOTE: List and pd.Series are also supported; however, if provided,
        they will be transformed into a np.ndarray (1d) with dtype 'U'.
    y : pd.Series or pd.DataFrame or np.ndarray (1d)
        Target variable with outputs / scores. It shall have the same length as `protected`
    verbose : bool
        Print messages about changes of types in 'y' and 'protected' (default is `True`).

    Returns
    -----------
    numpy.ndarray (1d)
        Array with sample (case) weights

    Notes
    -----------
    - https://link.springer.com/content/pdf/10.1007/s10115-011-0463-8.pdf
    """
    y = basic_checks.check_y(y, verbose)
    protected = basic_checks.check_protected(protected, verbose)

    if not len(y) == len(protected):
        raise ParameterCheckError("protected and target (y) must have the same length")

    weights = np.repeat(None, len(y))

    for subgroup in np.unique(protected):
        for c in np.unique(y):

            Xs = np.sum(protected == subgroup)
            Xc = np.sum(y == c)
            Xsc = np.sum((protected == subgroup) & (c == y))
            Wsc = (Xs * Xc) / (len(y) * Xsc)

            weights[(protected == subgroup) & (y == c)] = Wsc

    return weights
def roc_pivot(explainer, protected, privileged, cutoff=0.5, theta=0.05, verbose=True)

Reject Option based Classification pivot

Reject Option based Classification is a post-processing bias mitigation method. The method changes the model's predictions (probabilities) and returns a new explainer with a modified 'y_hat'. Predictions made for the privileged subgroup that are favorable and close to the cutoff are pivoted to the other side of the cutoff. The opposite happens for unprivileged observations (unfavorable predictions close to the cutoff are changed to favorable by pivoting probabilities from the left of the cutoff to the right). In this way, potentially wrongly labeled observations are assigned different labels. Note that 1 in y in the Explainer should indicate the favorable outcome.

Parameters

explainer : Explainer
Explainer made from classification model.
protected : np.ndarray (1d)
Vector, preferably a 1-dimensional np.ndarray containing strings, which denotes membership in a subgroup. NOTE: List and pd.Series are also supported; however, if provided, they will be transformed into a np.ndarray (1d) with dtype 'U'.
privileged : str
Subgroup that is suspected to have the most privilege. It needs to be a string present in protected.
cutoff : float
Threshold for probabilistic output of a classifier.
theta : float
Value that indicates the radius of the area where values are pivoted. The default is 0.05, which means that the probabilities of the privileged subgroup within (cutoff, cutoff + theta) will be pivoted to the other side of the cutoff. The opposite happens for the unprivileged subgroups.
verbose : bool
Print messages about changes of types in 'y' and 'protected' (default is True).

Returns

Explainer class object
Explainer with changed 'y_hat'

Notes

- <https://ieeexplore.ieee.org/document/6413831/>
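
A minimal usage sketch (hypothetical data and model, not from the dalex docs): roc_pivot takes a fitted dalex Explainer and returns a deep copy of it with the pivoted probabilities in 'y_hat'.

import numpy as np
import pandas as pd
import dalex as dx
from sklearn.linear_model import LogisticRegression
from dalex.fairness import roc_pivot

# hypothetical data: one feature, binary target, two subgroups
rng = np.random.default_rng(0)
X = pd.DataFrame({'feature': rng.random(300)})
y = rng.binomial(1, 0.5, 300)
protected = np.where(rng.random(300) < 0.5, 'group_a', 'group_b')

clf = LogisticRegression().fit(X, y)
exp = dx.Explainer(clf, X, y, verbose=False)

# predictions within theta of the cutoff are pivoted to its other side
exp_fixed = roc_pivot(exp, protected, privileged='group_b',
                      cutoff=0.5, theta=0.02, verbose=False)
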
def roc_pivot(explainer, protected, privileged, cutoff = 0.5, theta = 0.05, verbose = True):
    """Reject Option based Classification pivot

    Reject Option based Classification is a post-processing bias
    mitigation method. The method changes the model's predictions
    (probabilities) and returns a new explainer with a modified 'y_hat'.
    Predictions made for the privileged subgroup that are
    favorable and close to the cutoff are pivoted to the other side
    of the cutoff. The opposite happens for unprivileged observations
    (unfavorable predictions close to the cutoff are changed to favorable
    by pivoting probabilities from the left of the cutoff to the right).
    In this way, potentially wrongly labeled observations are
    assigned different labels. Note that 1 in y in the Explainer
    should indicate the favorable outcome.

    Parameters
    -----------
    explainer: Explainer
        Explainer made from classification model.

    protected : np.ndarray (1d)
        Vector, preferably 1-dimensional np.ndarray containing strings,
        which denotes the membership to a subgroup.
        NOTE: List and pd.Series are also supported; however, if provided,
        they will be transformed into a np.ndarray (1d) with dtype 'U'.
    privileged : str
        Subgroup that is suspected to have the most privilege.
        It needs to be a string present in `protected`.
    cutoff: float
        Threshold for probabilistic output of a classifier.
    theta: float
        Value that indicates the radius of the area where values
        are pivoted. The default is (0.05) which means that
        the probabilities of privileged class within
        (cutoff, cutoff+ theta)  will be pivoted to the other
        side of the cutoff. The opposite thing will happen for
        unprivileged subgroup.
    verbose : bool
        Print messages about changes of types in 'y' and 'protected' (default is `True`).

    Returns
    -----------
    Explainer class object
        Explainer with changed 'y_hat'

    Notes
    -----------
    - https://ieeexplore.ieee.org/document/6413831/
    """



    if not isinstance(explainer, dalex.Explainer):
        raise ParameterCheckError("explainer must be of type 'Explainer'")

    if explainer.model_type != 'classification':
        raise ParameterCheckError("model in explainer must be binary classification type")

    # same checking as in epsilon
    theta = checks.check_epsilon(theta, 'theta')
    cutoff = checks.check_epsilon(cutoff, 'cutoff')

    protected = basic_checks.check_protected(protected, verbose)
    privileged = basic_checks.check_privileged(privileged, protected, verbose)

    exp = copy.deepcopy(explainer)
    probs = exp.y_hat

    if not len(probs) == len(protected):
        raise ParameterCheckError("protected and target (y) must have the same length")

    is_close = np.abs(probs - cutoff) < theta
    is_privileged = privileged == protected
    is_favorable = probs > cutoff

    probs[is_close & is_privileged & is_favorable] = cutoff - (probs[is_close & is_privileged & is_favorable] - cutoff)
    probs[is_close & np.logical_not(is_privileged) & np.logical_not(is_favorable)] = cutoff + (cutoff - probs[is_close & np.logical_not(is_privileged) & np.logical_not(is_favorable)])

    probs[probs < 0] = 0
    probs[probs > 1] = 1

    return exp

Classes

class GroupFairnessClassification (y, y_hat, protected, privileged, label, verbose=False, cutoff=0.5, epsilon=0.8)
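
A minimal construction sketch (hypothetical labels and predictions, not from the dalex docs; in practice such objects are usually produced for a fitted model via the dalex Explainer): the constructor only needs true labels, predicted probabilities, the protected vector and the name of the privileged subgroup.

import numpy as np
from dalex.fairness import GroupFairnessClassification

rng = np.random.default_rng(0)
y = rng.binomial(1, 0.5, 300)           # true labels, 1 = favorable outcome
y_hat = rng.random(300)                 # predicted probabilities
protected = np.where(rng.random(300) < 0.5, 'group_a', 'group_b')

gf = GroupFairnessClassification(y=y, y_hat=y_hat,
                                 protected=protected,
                                 privileged='group_b',
                                 label='my_model')

gf.fairness_check()    # prints a verdict based on the default epsilon=0.8
gf.parity_loss         # parity loss per metric
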
class GroupFairnessClassification(_FairnessObject):

    def __init__(self, y, y_hat, protected, privileged, label, verbose=False, cutoff=0.5, epsilon=0.8):

        super().__init__(y, y_hat, protected, privileged, verbose)
        checks.check_classification_parameters(y, y_hat, protected, privileged, verbose)
        cutoff = checks.check_cutoff(self.protected, cutoff, verbose)
        self.cutoff = cutoff
        epsilon = checks.check_epsilon(epsilon)
        self.epsilon = epsilon

        sub_confusion_matrix = utils.SubgroupConfusionMatrix(
            y_true=self.y,
            y_pred=self.y_hat,
            protected=self.protected,
            cutoff=self.cutoff
        )

        sub_confusion_matrix_metrics = utils.SubgroupConfusionMatrixMetrics(sub_confusion_matrix)
        df_ratios = utils.calculate_ratio(sub_confusion_matrix_metrics, self.privileged)
        parity_loss = utils.calculate_parity_loss(sub_confusion_matrix_metrics, self.privileged)

        self._subgroup_confusion_matrix = sub_confusion_matrix
        self._subgroup_confusion_matrix_metrics_object = sub_confusion_matrix_metrics
        self.metric_scores = sub_confusion_matrix_metrics.to_horizontal_DataFrame()
        self.parity_loss = parity_loss
        self.result = df_ratios
        self.label = label

    def fairness_check(self, epsilon=None, verbose=True):
        """Check if classifier passes various fairness metrics

        Fairness check is an easy way to check if the model is fair.
        For that, this method uses 5 popular metrics of group fairness.
        Model is considered to be fair if confusion matrix metrics are
        close to each other. This arbitrary decision is based on epsilon,
        which defaults to `0.8` (it matches the four-fifths 80% rule).

        Methods in use: Equal opportunity, Predictive parity, Predictive equality,
        Statistical parity and Accuracy equality.

        Parameters
        -----------
        epsilon : float, optional
            Parameter defines acceptable fairness scores. The closer to `1` the
            more strict the verdict is. If the ratio of certain unprivileged
            and privileged subgroup is within the `(epsilon, 1/epsilon)` range,
            then there is no discrimination in this metric and for this subgroups
            (default is `0.8`, which is set during object initialization).
        verbose : bool
            Shows verbose text about potential problems 
            (e.g. `NaN` in model metrics that can cause misinterpretation).

        Returns
        -----------
        None (prints console output)

        """

        utils.universal_fairness_check(self,
                                       epsilon,
                                       verbose,
                                       num_for_not_fair=2,
                                       num_for_no_decision=1,
                                       metrics=utils.fairness_check_metrics())

    def plot(self,
             objects=None,
             type='fairness_check',
             title=None,
             show=True,
             **kwargs):
        """
        Parameters
        -----------
        objects : array_like of GroupFairnessClassification objects
            Additional objects to plot (default is `None`).
        type : str, optional
            Type of the plot. Default is `'fairness_check'`.
            When the type of plot is specified, user may provide additional
            keyword arguments (`**kwargs`) which will be used in creating
            plot of certain type. Below there is list of types:

            - fairness_check:
                fairness_check plot visualizes the fairness_check method
                for one or more GroupFairnessClassification objects.
                It accepts following keyword arguments:
                 'epsilon' - which denotes the decision
                             boundary (like in `fairness_check` method)
            - metric_scores:
                metric_scores plot shows real values of metrics.
                Each model displays values in each metric and each subgroup.
                Vertical lines show metric score for privileged
                subgroup and points connected with the lines
                show scores for unprivileged subgroups.
                This plot is simple and it does
                not have additional keyword arguments.
            - stacked:
                stacked plot shows cumulated parity loss from chosen
                metrics. It stacks metrics on top of each other.
                It accepts following keyword arguments:
                'metrics' - list of metrics to be plotted. The metrics are taken
                            from parity_loss attribute of the object.
                            Default is `["TPR", "ACC", "PPV", "FPR", "STP"]`.
            - radar:
                radar plot shows parity loss of provided metrics. It does it
                in form of radar (spider) chart. The smaller the field of
                figure the better.
                It accepts following keyword arguments:
                'metrics' - list of metrics to be plotted. The metrics are taken
                            from parity_loss attribute of the object.
                            Default is `["TPR", "ACC", "PPV", "FPR", "STP"]`.
            - performance_and_fairness:
                performance_and_fairness plot shows relation between chosen
                performance and fairness metrics. The fairness metric axis is
                reversed, because the higher the model the less bias it has.
                Thanks to that it is more intuitive to look at because
                the best models are in top right corner.
                It accepts following keyword arguments:
                'fairness_metric' - single fairness metric to be plotted on Y axis.
                                   The metric is taken from parity_loss attribute\
                                   of the object. The default is "TPR"
                'performance_metric' - single performance metric. One of `{'recall',
                                       'precision','accuracy','auc','f1'}`.
                                       Metrics apart from 'auc' are
                                       cutoff-sensitive. Default is "accuracy"
            - heatmap:
                heatmap shows parity loss of metrics in form of heatmap. The less
                parity loss model has, the more fair it is.
                It accepts following keyword arguments:
                'metrics' - list of metrics to be plotted. The metrics are taken
                            from parity_loss attribute of the object.
                            Default is 'all' which stands for all available metrics.
            - ceteris_paribus_cutoff:
                ceteris_paribus_cutoff plot shows what would happen if cutoff
                for only one subgroup would change with others cutoffs constant.
                The plot shows also a minimum, where sum of parity loss of metrics
                is the lowest. Minimum only works if at some interval all metrics
                have non-nan scores.
                It accepts following keyword arguments:
                'subgroup' - necessary argument. It is name of subgroup from
                             protected attribute. Cutoff for this subgroup will
                             be changed.

                'metrics' - list of metrics to be plotted. The metrics are taken
                            from parity_loss attribute of the object.
                            Default is `["TPR", "ACC", "PPV", "FPR", "STP"]`.

                'grid_points' - number of grid points (cutoff values) to calculate
                                metrics for. The points are distributed evenly.
                                Default is `101`.

        title : str, optional
            Title of the plot (default depends on the `type` attribute).
        show : bool, optional
            `True` shows the plot; `False` returns the plotly Figure object that can
            be edited or saved using the `write_image()` method (default is `True`).

        Returns
        -----------
        None or plotly.graph_objects.Figure
            Return figure that can be edited or saved. See `show` parameter.
        """
        other_objects = None
        if objects is not None:
            other_objects = []
            if not isinstance(objects, (list, tuple)):
                objects = [objects]
            for obj in objects:
                _global_checks.global_check_object_class(obj, self.__class__)
                other_objects.append(obj)
            basic_checks.check_other_fairness_objects(self, other_objects)

        if type == 'fairness_check':
            fig = plot.plot_fairness_check_clf(self,
                                               other_objects=other_objects,
                                               title=title, **kwargs)

        elif type == "metric_scores":
            fig = plot.plot_metric_scores(self,
                                          other_objects=other_objects,
                                          title=title,
                                          **kwargs)

        # names of plots may be changed
        elif type == 'stacked':
            fig = plot.plot_stacked(self,
                                    other_objects=other_objects,
                                    title=title,
                                    **kwargs)

        elif type == 'radar':
            fig = plot.plot_radar(self,
                                  other_objects=other_objects,
                                  title=title,
                                  **kwargs)

        elif type == 'performance_and_fairness':
            fig = plot.plot_performance_and_fairness(self,
                                                     other_objects=other_objects,
                                                     title=title,
                                                     **kwargs)

        elif type == 'heatmap':
            fig = plot.plot_heatmap(self,
                                    other_objects=other_objects,
                                    title=title,
                                    **kwargs)
        elif type == 'density':
            fig = plot.plot_density(self,
                                    other_objects=other_objects,
                                    title=title,
                                    **kwargs)

        elif type == 'ceteris_paribus_cutoff':
            fig = plot.plot_ceteris_paribus_cutoff(self,
                                                   other_objects=other_objects,
                                                   title=title,
                                                   **kwargs)

        else:
            raise ParameterCheckError(f"plot type {type} not supported, try other types.")

        if show:
            fig.show(config=_theme.get_default_config())
        else:
            return fig

Ancestors

  • dalex.fairness._basics._base_objects._FairnessObject
  • dalex.fairness._basics._base_objects._AbsObject

Methods

def fairness_check(self, epsilon=None, verbose=True)

Check if classifier passes various fairness metrics

Fairness check is an easy way to check if the model is fair. For that, this method uses 5 popular metrics of group fairness. The model is considered fair if the confusion matrix metrics are close to each other across subgroups. This arbitrary decision is based on epsilon, which defaults to 0.8 (it matches the four-fifths 80% rule).

Methods in use: Equal opportunity, Predictive parity, Predictive equality, Statistical parity and Accuracy equality.

Parameters

epsilon : float, optional
Parameter defines acceptable fairness scores. The closer to 1 the stricter the verdict is. If the ratio between a certain unprivileged subgroup and the privileged subgroup is within the (epsilon, 1/epsilon) range, then there is no discrimination in this metric for these subgroups (default is 0.8, which is set during object initialization).
verbose : bool
Shows verbose text about potential problems (e.g. NaN in model metrics that can cause misinterpretation).

Returns

None (prints console output)
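
To illustrate the decision rule only (this is not dalex code): a metric ratio passes when it falls inside the (epsilon, 1/epsilon) interval.

epsilon = 0.8
ratio = 0.85   # e.g. TPR of an unprivileged subgroup divided by TPR of the privileged one

passes = epsilon < ratio < 1 / epsilon
print(passes)  # True, because 0.85 lies within (0.8, 1.25)
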
 
def fairness_check(self, epsilon=None, verbose=True):
    """Check if classifier passes various fairness metrics

    Fairness check is an easy way to check if the model is fair.
    For that, this method uses 5 popular metrics of group fairness.
    Model is considered to be fair if confusion matrix metrics are
    close to each other. This arbitrary decision is based on epsilon,
    which defaults to `0.8` (it matches the four-fifths 80% rule).

    Methods in use: Equal opportunity, Predictive parity, Predictive equality,
    Statistical parity and Accuracy equality.

    Parameters
    -----------
    epsilon : float, optional
        Parameter defines acceptable fairness scores. The closer to `1` the
        more strict the verdict is. If the ratio of certain unprivileged
        and privileged subgroup is within the `(epsilon, 1/epsilon)` range,
        then there is no discrimination in this metric and for this subgroups
        (default is `0.8`, which is set during object initialization).
    verbose : bool
        Shows verbose text about potential problems 
        (e.g. `NaN` in model metrics that can cause misinterpretation).

    Returns
    -----------
    None (prints console output)

    """

    utils.universal_fairness_check(self,
                                   epsilon,
                                   verbose,
                                   num_for_not_fair=2,
                                   num_for_no_decision=1,
                                   metrics=utils.fairness_check_metrics())
def plot(self, objects=None, type='fairness_check', title=None, show=True, **kwargs)

Parameters

objects : array_like of GroupFairnessClassification objects
Additional objects to plot (default is None).
type : str, optional

Type of the plot. Default is 'fairness_check'. When the type of plot is specified, the user may provide additional keyword arguments (**kwargs) which will be used when creating a plot of that type. The available types are listed below:

  • fairness_check: the fairness_check plot visualizes the fairness_check method for one or more GroupFairnessClassification objects. It accepts the following keyword argument: 'epsilon' - denotes the decision boundary (as in the fairness_check method).
  • metric_scores: the metric_scores plot shows the real values of the metrics. Each model displays values for each metric and each subgroup. Vertical lines show the metric score for the privileged subgroup, and points connected with the lines show the scores for the unprivileged subgroups. This plot is simple and does not have additional keyword arguments.
  • stacked: the stacked plot shows the cumulative parity loss of the chosen metrics, stacking the metrics on top of each other. It accepts the following keyword argument: 'metrics' - list of metrics to be plotted. The metrics are taken from the parity_loss attribute of the object. Default is ["TPR", "ACC", "PPV", "FPR", "STP"].
  • radar: the radar plot shows the parity loss of the provided metrics in the form of a radar (spider) chart. The smaller the area of the figure, the better. It accepts the following keyword argument: 'metrics' - list of metrics to be plotted. The metrics are taken from the parity_loss attribute of the object. Default is ["TPR", "ACC", "PPV", "FPR", "STP"].
  • performance_and_fairness: the performance_and_fairness plot shows the relation between a chosen performance metric and a fairness metric. The fairness metric axis is reversed because lower parity loss means less bias, so the best models end up in the top right corner. It accepts the following keyword arguments: 'fairness_metric' - a single fairness metric to be plotted on the Y axis, taken from the parity_loss attribute of the object. The default is "TPR". 'performance_metric' - a single performance metric, one of {'recall', 'precision', 'accuracy', 'auc', 'f1'}. Metrics other than 'auc' are cutoff-sensitive. Default is "accuracy".
  • heatmap: the heatmap shows the parity loss of the metrics in the form of a heatmap. The less parity loss a model has, the more fair it is. It accepts the following keyword argument: 'metrics' - list of metrics to be plotted. The metrics are taken from the parity_loss attribute of the object. Default is 'all', which stands for all available metrics.
  • ceteris_paribus_cutoff: the ceteris_paribus_cutoff plot shows what would happen if the cutoff for a single subgroup changed while the other cutoffs stayed constant. The plot also shows a minimum, where the sum of the parity losses of the metrics is the lowest. The minimum is only shown if all metrics have non-NaN scores on some interval. It accepts the following keyword arguments: 'subgroup' - required argument; the name of a subgroup from the protected attribute whose cutoff will be changed.

    'metrics' - list of metrics to be plotted. The metrics are taken from parity_loss attribute of the object. Default is ["TPR", "ACC", "PPV", "FPR", "STP"].

    'grid_points' - number of grid points (cutoff values) to calculate metrics for. The points are distributed evenly. Default is 101.

title : str, optional
Title of the plot (default depends on the type attribute).
show : bool, optional
True shows the plot; False returns the plotly Figure object that can be edited or saved using the write_image() method (default is True).

Returns

None or plotly.graph_objects.Figure
Return figure that can be edited or saved. See show parameter.
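
A short usage sketch (hypothetical objects and data, not from the dalex docs): several fairness objects sharing the same protected vector and privileged subgroup can be plotted together, and show=False returns the plotly Figure instead of displaying it.

import numpy as np
from dalex.fairness import GroupFairnessClassification

rng = np.random.default_rng(0)
y = rng.binomial(1, 0.5, 300)
protected = np.where(rng.random(300) < 0.5, 'group_a', 'group_b')

gf1 = GroupFairnessClassification(y=y, y_hat=rng.random(300),
                                  protected=protected, privileged='group_b',
                                  label='model_1')
gf2 = GroupFairnessClassification(y=y, y_hat=rng.random(300),
                                  protected=protected, privileged='group_b',
                                  label='model_2')

gf1.plot(objects=[gf2])                # default fairness_check plot for both models

fig = gf1.plot(objects=[gf2], type='radar',
               metrics=['TPR', 'PPV', 'STP'], show=False)   # plotly Figure
fig.write_image('radar.png')           # saving requires an image export engine such as kaleido
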
def plot(self,
         objects=None,
         type='fairness_check',
         title=None,
         show=True,
         **kwargs):
    """
    Parameters
    -----------
    objects : array_like of GroupFairnessClassification objects
        Additional objects to plot (default is `None`).
    type : str, optional
        Type of the plot. Default is `'fairness_check'`.
        When the type of plot is specified, user may provide additional
        keyword arguments (`**kwargs`) which will be used in creating
        plot of certain type. Below there is list of types:

        - fairness_check:
            fairness_check plot visualizes the fairness_check method
            for one or more GroupFairnessClassification objects.
            It accepts following keyword arguments:
             'epsilon' - which denotes the decision
                         boundary (like in `fairness_check` method)
        - metric_scores:
            metric_scores plot shows real values of metrics.
            Each model displays values in each metric and each subgroup.
            Vertical lines show metric score for privileged
            subgroup and points connected with the lines
            show scores for unprivileged subgroups.
            This plot is simple and it does
            not have additional keyword arguments.
        - stacked:
            stacked plot shows cumulated parity loss from chosen
            metrics. It stacks metrics on top of each other.
            It accepts following keyword arguments:
            'metrics' - list of metrics to be plotted. The metrics are taken
                        from parity_loss attribute of the object.
                        Default is `["TPR", "ACC", "PPV", "FPR", "STP"]`.
        - radar:
            radar plot shows parity loss of provided metrics. It does it
            in form of radar (spider) chart. The smaller the field of
            figure the better.
            It accepts following keyword arguments:
            'metrics' - list of metrics to be plotted. The metrics are taken
                        from parity_loss attribute of the object.
                        Default is `["TPR", "ACC", "PPV", "FPR", "STP"]`.
        - performance_and_fairness:
            performance_and_fairness plot shows relation between chosen
            performance and fairness metrics. The fairness metric axis is
            reversed, because the higher the model the less bias it has.
            Thanks to that it is more intuitive to look at because
            the best models are in top right corner.
            It accepts following keyword arguments:
            'fairness_metric' - single fairness metric to be plotted on Y axis.
                               The metric is taken from parity_loss attribute\
                               of the object. The default is "TPR"
            'performance_metric' - single performance metric. One of `{'recall',
                                   'precision','accuracy','auc','f1'}`.
                                   Metrics apart from 'auc' are
                                   cutoff-sensitive. Default is "accuracy"
        - heatmap:
            heatmap shows parity loss of metrics in form of heatmap. The less
            parity loss model has, the more fair it is.
            It accepts following keyword arguments:
            'metrics' - list of metrics to be plotted. The metrics are taken
                        from parity_loss attribute of the object.
                        Default is 'all' which stands for all available metrics.
        - ceteris_paribus_cutoff:
            ceteris_paribus_cutoff plot shows what would happen if cutoff
            for only one subgroup would change with others cutoffs constant.
            The plot shows also a minimum, where sum of parity loss of metrics
            is the lowest. Minimum only works if at some interval all metrics
            have non-nan scores.
            It accepts following keyword arguments:
            'subgroup' - necessary argument. It is name of subgroup from
                         protected attribute. Cutoff for this subgroup will
                         be changed.

            'metrics' - list of metrics to be plotted. The metrics are taken
                        from parity_loss attribute of the object.
                        Default is `["TPR", "ACC", "PPV", "FPR", "STP"]`.

            'grid_points' - number of grid points (cutoff values) to calculate
                            metrics for. The points are distributed evenly.
                            Default is `101`.

    title : str, optional
        Title of the plot (default depends on the `type` attribute).
    show : bool, optional
        `True` shows the plot; `False` returns the plotly Figure object that can
        be edited or saved using the `write_image()` method (default is `True`).

    Returns
    -----------
    None or plotly.graph_objects.Figure
        Return figure that can be edited or saved. See `show` parameter.
    """
    other_objects = None
    if objects is not None:
        other_objects = []
        if not isinstance(objects, (list, tuple)):
            objects = [objects]
        for obj in objects:
            _global_checks.global_check_object_class(obj, self.__class__)
            other_objects.append(obj)
        basic_checks.check_other_fairness_objects(self, other_objects)

    if type == 'fairness_check':
        fig = plot.plot_fairness_check_clf(self,
                                           other_objects=other_objects,
                                           title=title, **kwargs)

    elif type == "metric_scores":
        fig = plot.plot_metric_scores(self,
                                      other_objects=other_objects,
                                      title=title,
                                      **kwargs)

    # names of plots may be changed
    elif type == 'stacked':
        fig = plot.plot_stacked(self,
                                other_objects=other_objects,
                                title=title,
                                **kwargs)

    elif type == 'radar':
        fig = plot.plot_radar(self,
                              other_objects=other_objects,
                              title=title,
                              **kwargs)

    elif type == 'performance_and_fairness':
        fig = plot.plot_performance_and_fairness(self,
                                                 other_objects=other_objects,
                                                 title=title,
                                                 **kwargs)

    elif type == 'heatmap':
        fig = plot.plot_heatmap(self,
                                other_objects=other_objects,
                                title=title,
                                **kwargs)
    elif type == 'density':
        fig = plot.plot_density(self,
                                other_objects=other_objects,
                                title=title,
                                **kwargs)

    elif type == 'ceteris_paribus_cutoff':
        fig = plot.plot_ceteris_paribus_cutoff(self,
                                               other_objects=other_objects,
                                               title=title,
                                               **kwargs)

    else:
        raise ParameterCheckError(f"plot type {type} not supported, try other types.")

    if show:
        fig.show(config=_theme.get_default_config())
    else:
        return fig
class GroupFairnessRegression (y, y_hat, protected, privileged, label, epsilon=0.8, verbose=False)
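
A minimal construction sketch (hypothetical data and predictions, not from the dalex docs): the regression variant compares the model output across subgroups through approximations of independence, separation and sufficiency.

import numpy as np
from dalex.fairness import GroupFairnessRegression

rng = np.random.default_rng(0)
y = rng.normal(size=300)                          # continuous target
y_hat = y + rng.normal(scale=0.5, size=300)       # imperfect predictions
protected = np.where(rng.random(300) < 0.5, 'group_a', 'group_b')

gf_reg = GroupFairnessRegression(y=y, y_hat=y_hat,
                                 protected=protected,
                                 privileged='group_b',
                                 label='regression_model')

gf_reg.fairness_check()        # verdict based on independence, separation, sufficiency
gf_reg.plot(type='density')    # distribution of the output for each subgroup
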
class GroupFairnessRegression(_FairnessObject):

    def __init__(self, y, y_hat, protected, privileged, label, epsilon=0.8, verbose=False):

        super().__init__(y, y_hat, protected, privileged, verbose)
        checks.check_epsilon(epsilon)

        df_ratios = utils.calculate_regression_measures(y, y_hat, protected, privileged)

        self.result = df_ratios
        self.label = label
        self.epsilon = epsilon

    def fairness_check(self, epsilon=None, verbose=True):
        """Check if classifier passes various fairness criteria

        Fairness check is an easy way to check if the model is fair.
        For that, this method uses 3 non-discrimination criteria.
        The approximations are made to check the conditional independence expressed
        in form of independence, separation and sufficiency.
        Model is considered to be fair if all criteria are met.
        This arbitrary decision is based on epsilon,
        which defaults to `0.8` (it matches the four-fifths 80% rule).

        Methods in use: Independence, Separation, Sufficiency.

        Parameters
        -----------
        epsilon : float, optional
            Parameter defines acceptable fairness scores. The closer to `1` the
            more strict the verdict is. If the ratio of certain unprivileged
            and privileged subgroup is within the `(epsilon, 1/epsilon)` range,
            then there is no discrimination in this metric and for this subgroups
            (default is `0.8`, which is set during object initialization).
        verbose : bool
            Shows verbose text about potential problems
            (e.g. `NaN` in model metrics that can cause misinterpretation).

        Returns
        -----------
        None (prints console output)

        """

        utils.universal_fairness_check(self,
                                       epsilon,
                                       verbose,
                                       num_for_not_fair=1,
                                       num_for_no_decision=None,
                                       metrics=['independence', 'separation', 'sufficiency'])

    def plot(self, objects=None, type='fairness_check', title=None, show=True, **kwargs):
        """
        Parameters
        -----------
        objects : array_like of GroupFairnessRegression objects
            Additional objects to plot (default is `None`).
        type : str, optional
            Type of the plot. Default is `'fairness_check'`.
            When the type of plot is specified, user may provide additional
            keyword arguments (`**kwargs`) which will be used in creating
            plot of certain type. Below there is list of types:

            - fairness_check:
                fairness_check plot visualizes the fairness_check method
                for one or more GroupFairnessClassification objects.
                It accepts following keyword arguments:
                 'epsilon' - which denotes the decision
                             boundary (like in `fairness_check` method)

            - density:
                density plot visualizes the output of the model for each
                subgroup in form of violin plots with boxplots on top of them.
                It does not accept additional keyword arguments.
        title : str, optional
            Title of the plot (default depends on the `type` attribute).

        """

        other_objects = None
        if objects is not None:
            other_objects = []
            if not isinstance(objects, (list, tuple)):
                objects = [objects]
            for obj in objects:
                _global_checks.global_check_object_class(obj, self.__class__)
                other_objects.append(obj)
            basic_checks.check_other_fairness_objects(self, other_objects)

        if type == 'density':
            fig = plot.plot_density(self,
                                    other_objects,
                                    title=title,
                                    **kwargs)

        elif type == 'fairness_check':
            fig = plot.plot_fairness_check_reg(self,
                                               other_objects=other_objects,
                                               title=title,
                                               **kwargs)

        else:
            raise ParameterCheckError(f"plot type {type} not supported, try other types.")

        if show:
            fig.show(config=_theme.get_default_config())
        else:
            return fig

Ancestors

  • dalex.fairness._basics._base_objects._FairnessObject
  • dalex.fairness._basics._base_objects._AbsObject

Methods

def fairness_check(self, epsilon=None, verbose=True)

Check if classifier passes various fairness criteria

Fairness check is an easy way to check if the model is fair. For that, this method uses 3 non-discrimination criteria. Approximations are made to check the conditional independence expressed in the form of independence, separation and sufficiency. The model is considered fair if all criteria are met. This arbitrary decision is based on epsilon, which defaults to 0.8 (it matches the four-fifths 80% rule).

Methods in use: Independence, Separation, Sufficiency.

Parameters

epsilon : float, optional
Parameter defines acceptable fairness scores. The closer to 1 the stricter the verdict is. If the ratio between a certain unprivileged subgroup and the privileged subgroup is within the (epsilon, 1/epsilon) range, then there is no discrimination in this metric for these subgroups (default is 0.8, which is set during object initialization).
verbose : bool
Shows verbose text about potential problems (e.g. NaN in model metrics that can cause misinterpretation).

Returns

None (prints console output)
 
def fairness_check(self, epsilon=None, verbose=True):
    """Check if classifier passes various fairness criteria

    Fairness check is an easy way to check if the model is fair.
    For that, this method uses 3 non-discrimination criteria.
    The approximations are made to check the conditional independence expressed
    in form of independence, separation and sufficiency.
    Model is considered to be fair if all criteria are met.
    This arbitrary decision is based on epsilon,
    which defaults to `0.8` (it matches the four-fifths 80% rule).

    Methods in use: Independence, Separation, Sufficiency.

    Parameters
    -----------
    epsilon : float, optional
        Parameter defines acceptable fairness scores. The closer to `1` the
        more strict the verdict is. If the ratio of certain unprivileged
        and privileged subgroup is within the `(epsilon, 1/epsilon)` range,
        then there is no discrimination in this metric and for this subgroups
        (default is `0.8`, which is set during object initialization).
    verbose : bool
        Shows verbose text about potential problems
        (e.g. `NaN` in model metrics that can cause misinterpretation).

    Returns
    -----------
    None (prints console output)

    """

    utils.universal_fairness_check(self,
                                   epsilon,
                                   verbose,
                                   num_for_not_fair=1,
                                   num_for_no_decision=None,
                                   metrics=['independence', 'separation', 'sufficiency'])
def plot(self, objects=None, type='fairness_check', title=None, show=True, **kwargs)

Parameters

objects : array_like of GroupFairnessRegression objects
Additional objects to plot (default is None).
type : str, optional

Type of the plot. Default is 'fairness_check'. When the type of plot is specified, the user may provide additional keyword arguments (**kwargs) which will be used when creating a plot of that type. The available types are listed below:

  • fairness_check: the fairness_check plot visualizes the fairness_check method for one or more GroupFairnessRegression objects. It accepts the following keyword argument: 'epsilon' - denotes the decision boundary (as in the fairness_check method).

  • density: the density plot visualizes the output of the model for each subgroup in the form of violin plots with box plots on top of them. It does not accept additional keyword arguments.

title : str, optional
Title of the plot (default depends on the type attribute).
show : bool, optional
True shows the plot; False returns the plotly Figure object that can be edited or saved using the write_image() method (default is True).
def plot(self, objects=None, type='fairness_check', title=None, show=True, **kwargs):
    """
    Parameters
    -----------
    objects : array_like of GroupFairnessRegression objects
        Additional objects to plot (default is `None`).
    type : str, optional
        Type of the plot. Default is `'fairness_check'`.
        When the type of plot is specified, user may provide additional
        keyword arguments (`**kwargs`) which will be used in creating
        plot of certain type. Below there is list of types:

        - fairness_check:
            fairness_check plot visualizes the fairness_check method
            for one or more GroupFairnessClassification objects.
            It accepts following keyword arguments:
             'epsilon' - which denotes the decision
                         boundary (like in `fairness_check` method)

        - density:
            density plot visualizes the output of the model for each
            subgroup in form of violin plots with boxplots on top of them.
            It does not accept additional keyword arguments.
    title : str, optional
        Title of the plot (default depends on the `type` attribute).

    """

    other_objects = None
    if objects is not None:
        other_objects = []
        if not isinstance(objects, (list, tuple)):
            objects = [objects]
        for obj in objects:
            _global_checks.global_check_object_class(obj, self.__class__)
            other_objects.append(obj)
        basic_checks.check_other_fairness_objects(self, other_objects)

    if type == 'density':
        fig = plot.plot_density(self,
                                other_objects,
                                title=title,
                                **kwargs)

    elif type == 'fairness_check':
        fig = plot.plot_fairness_check_reg(self,
                                           other_objects=other_objects,
                                           title=title,
                                           **kwargs)

    else:
        raise ParameterCheckError(f"plot type {type} not supported, try other types.")

    if show:
        fig.show(config=_theme.get_default_config())
    else:
        return fig