Print fairness heatmap

Prints the data behind a fairness_heatmap object: the top rows of the heatmap data (parity-loss metric, model, score) and the matrix of parity-loss metric values for each model.
# S3 method for fairness_heatmap
print(x, ...)
x | fairness_heatmap object |
---|---|
... | other print parameters |
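The method is usually invoked implicitly by typing the object's name. A minimal sketch of explicit use, assuming a fairness_object called fobject has already been created (as in the full example below):

fh <- fairness_heatmap(fobject)
print(fh) # equivalent to typing fh at the console, via S3 dispatch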
data("german")
y_numeric <- as.numeric(german$Risk) - 1
lm_model <- glm(Risk ~ .,
data = german,
family = binomial(link = "logit")
)
rf_model <- ranger::ranger(Risk ~ .,
data = german,
probability = TRUE,
num.trees = 200,
num.threads = 1
)
explainer_lm <- DALEX::explain(lm_model, data = german[, -1], y = y_numeric)
#> Preparation of a new explainer is initiated
#> -> model label : lm ( default )
#> -> data : 1000 rows 9 cols
#> -> target variable : 1000 values
#> -> predict function : yhat.glm will be used ( default )
#> -> predicted values : No value for predict function target column. ( default )
#> -> model_info : package stats , ver. 4.1.1 , task classification ( default )
#> -> predicted values : numerical, min = 0.1369187 , mean = 0.7 , max = 0.9832426
#> -> residual function : difference between y and yhat ( default )
#> -> residuals : numerical, min = -0.9572803 , mean = 1.940006e-17 , max = 0.8283475
#> A new explainer has been created!
explainer_rf <- DALEX::explain(rf_model, data = german[, -1], y = y_numeric)
#> Preparation of a new explainer is initiated
#> -> model label : ranger ( default )
#> -> data : 1000 rows 9 cols
#> -> target variable : 1000 values
#> -> predict function : yhat.ranger will be used ( default )
#> -> predicted values : No value for predict function target column. ( default )
#> -> model_info : package ranger , ver. 0.13.1 , task classification ( default )
#> -> predicted values : numerical, min = 0.09571627 , mean = 0.6979016 , max = 0.9943929
#> -> residual function : difference between y and yhat ( default )
#> -> residuals : numerical, min = -0.7144797 , mean = 0.002098351 , max = 0.6671607
#> A new explainer has been created!
fobject <- fairness_check(explainer_lm, explainer_rf,
protected = german$Sex,
privileged = "male"
)
#> Creating fairness classification object
#> -> Privileged subgroup : character ( Ok )
#> -> Protected variable : factor ( Ok )
#> -> Cutoff values for explainers : 0.5 ( for all subgroups )
#> -> Fairness objects : 0 objects
#> -> Checking explainers : 2 in total ( compatible )
#> -> Metric calculation : 10/13 metrics calculated for all models ( 3 NA created )
#> Fairness object created succesfully
# same explainers with different cutoffs for female
fobject <- fairness_check(explainer_lm, explainer_rf, fobject,
protected = german$Sex,
privileged = "male",
cutoff = list(female = 0.4),
label = c("lm_2", "rf_2")
)
#> Creating fairness classification object
#> -> Privileged subgroup : character ( Ok )
#> -> Protected variable : factor ( Ok )
#> -> Cutoff values for explainers : female: 0.4, male: 0.5
#> -> Fairness objects : 1 object ( compatible )
#> -> Checking explainers : 4 in total ( compatible )
#> -> Metric calculation : 10/13 metrics calculated for all models ( 3 NA created )
#> Fairness object created succesfully
fh <- fairness_heatmap(fobject)
print(fh)
#> heatmap data top rows:
#> parity_loss_metric model score
#> 1 TPR lm_2 0.00
#> 2 TPR rf_2 0.01
#> 3 TPR lm 0.10
#> 4 TPR ranger 0.01
#> 5 TNR lm_2 0.09
#>
#> matrix model not scaled :
#> TPR TNR PPV NPV FNR FPR
#> lm_2 0.004844913 0.08841901 0.09565689 0.103226043 0.0625594 0.04470907
#> rf_2 0.006030169 0.13895567 0.07924426 0.019934215 NA 0.37860399
#> lm 0.096364988 0.43377037 0.02141812 0.006376217 0.7641192 0.34733301
#> ranger 0.006030169 0.11085613 0.01394951 0.019934215 NA 0.53768674
#> FDR FOR TS STP ACC F1
#> lm_2 0.2925829 0.21928998 0.08668085 0.008680082 0.06819175 0.05068590
#> rf_2 0.6417052 NA 0.07368354 0.023907455 0.05235992 0.03911701
#> lm 0.0759895 0.01165062 0.09657260 0.184128760 0.04152350 0.05658562
#> ranger 0.1813918 NA 0.01951023 0.117101232 0.02122844 0.01013093
#> NEW_METRIC
#> lm_2 0.06740431
#> rf_2 NA
#> lm 0.86048417
#> ranger NA
#>
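print() shows only the data behind the heatmap. To draw the heatmap itself, pass the object to the generic plot(); a minimal sketch, assuming the fh object created above and the plot method fairmodels provides for fairness_heatmap objects:

plot(fh)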