Print Fairness Object
# S3 method for fairness_object
print(
x,
...,
colorize = TRUE,
fairness_metrics = c("ACC", "TPR", "PPV", "FPR", "STP"),
fair_level = NULL,
border_width = 1,
loss_aggregating_function = NULL
)
x | fairness_object |
---|---|
... | other parameters |
colorize | logical, whether information about metrics should be in color or not |
fairness_metrics | character, vector of metrics. Subset of fairness metrics to be used. The full set is defined as c("ACC", "TPR", "PPV", "FPR", "STP"). |
fair_level | numerical, number of fairness metrics that need to be passed in order to call a model fair. Default is 5. |
border_width | numerical, width of border between fair and unfair models. If set to 0, no border is drawn. |
loss_aggregating_function | function, loss aggregating function that may be provided. It takes metric scores as vector and aggregates them to one value. The default is 'Total loss' that measures the total sum of distances to 1. It may be interpreted as sum of bar heights in fairness_check. |
data("german")
y_numeric <- as.numeric(german$Risk) - 1
lm_model <- glm(Risk ~ .,
data = german,
family = binomial(link = "logit")
)
rf_model <- ranger::ranger(Risk ~ .,
data = german,
probability = TRUE,
max.depth = 3,
num.trees = 100,
seed = 1,
num.threads = 1
)
explainer_lm <- DALEX::explain(lm_model, data = german[, -1], y = y_numeric)
#> Preparation of a new explainer is initiated
#> -> model label : lm ( default )
#> -> data : 1000 rows 9 cols
#> -> target variable : 1000 values
#> -> predict function : yhat.glm will be used ( default )
#> -> predicted values : No value for predict function target column. ( default )
#> -> model_info : package stats , ver. 4.1.1 , task classification ( default )
#> -> predicted values : numerical, min = 0.1369187 , mean = 0.7 , max = 0.9832426
#> -> residual function : difference between y and yhat ( default )
#> -> residuals : numerical, min = -0.9572803 , mean = 1.940006e-17 , max = 0.8283475
#> A new explainer has been created!
explainer_rf <- DALEX::explain(rf_model,
data = german[, -1],
y = y_numeric
)
#> Preparation of a new explainer is initiated
#> -> model label : ranger ( default )
#> -> data : 1000 rows 9 cols
#> -> target variable : 1000 values
#> -> predict function : yhat.ranger will be used ( default )
#> -> predicted values : No value for predict function target column. ( default )
#> -> model_info : package ranger , ver. 0.13.1 , task classification ( default )
#> -> predicted values : numerical, min = 0.2744313 , mean = 0.6991764 , max = 0.9021086
#> -> residual function : difference between y and yhat ( default )
#> -> residuals : numerical, min = -0.8758052 , mean = 0.0008235875 , max = 0.6119131
#> A new explainer has been created!
fobject <- fairness_check(explainer_lm, explainer_rf,
protected = german$Sex,
privileged = "male"
)
#> Creating fairness classification object
#> -> Privileged subgroup : character ( Ok )
#> -> Protected variable : factor ( Ok )
#> -> Cutoff values for explainers : 0.5 ( for all subgroups )
#> -> Fairness objects : 0 objects
#> -> Checking explainers : 2 in total ( compatible )
#> -> Metric calculation : 13/13 metrics calculated for all models
#> Fairness object created succesfully
print(fobject)
#>
#> Fairness check for models: lm, ranger
#>
#> lm passes 4/5 metrics
#> Total loss : 0.6153324
#>
#> ranger passes 5/5 metrics
#> Total loss : 0.2769801
#>
# custom print
print(fobject,
fairness_metrics = c("ACC", "TPR"), # amount of metrics to be printed
border_width = 0, # in our case 2/2 will be printed in green and 1/2 in red
loss_aggregating_function = function(x) sum(abs(x)) + 10
) # custom loss function - takes vector
#>
#> Fairness check for models: lm, ranger
#>
#> lm passes 2/2 metrics
#> Custom loss : 11.86746
#>
#> ranger passes 2/2 metrics
#> Custom loss : 11.92541
#>