Unfold a fairness object into three columns (metric, model, score) to construct a better base for visualization.

expand_fairness_object(
  x,
  scale = FALSE,
  drop_metrics_with_na = FALSE,
  fairness_metrics = NULL
)

Arguments

x

object of class fairness_object

scale

logical; if TRUE, metric scores are standardized (see the sketch after these arguments).

drop_metrics_with_na

logical; if TRUE, metrics containing NA values will be omitted.

fairness_metrics

character vector of fairness metric names indicating which metrics to expand.
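
A hedged sketch of how these arguments combine, assuming a fairness_object named fobject (as created in the Examples below):

expand_fairness_object(fobject,
  scale = TRUE,
  drop_metrics_with_na = TRUE,
  fairness_metrics = c("TPR", "FPR", "STP")
)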

Value

Object of class expand_fairness_object: a data.frame with one score per metric and model pair.

Examples


data("german")

y_numeric <- as.numeric(german$Risk) - 1

lm_model <- glm(Risk ~ .,
  data = german,
  family = binomial(link = "logit")
)

explainer_lm <- DALEX::explain(lm_model, data = german[, -1], y = y_numeric)
#> Preparation of a new explainer is initiated
#>   -> model label       :  lm  (  default  )
#>   -> data              :  1000  rows  9  cols 
#>   -> target variable   :  1000  values 
#>   -> predict function  :  yhat.glm  will be used (  default  )
#>   -> predicted values  :  No value for predict function target column. (  default  )
#>   -> model_info        :  package stats , ver. 4.1.1 , task classification (  default  ) 
#>   -> predicted values  :  numerical, min =  0.1369187 , mean =  0.7 , max =  0.9832426  
#>   -> residual function :  difference between y and yhat (  default  )
#>   -> residuals         :  numerical, min =  -0.9572803 , mean =  1.940006e-17 , max =  0.8283475  
#>   A new explainer has been created!  

fobject <- fairness_check(explainer_lm,
  protected = german$Sex,
  privileged = "male"
)
#> Creating fairness classification object
#> -> Privileged subgroup		: character ( Ok  )
#> -> Protected variable		: factor ( Ok  ) 
#> -> Cutoff values for explainers	: 0.5 ( for all subgroups ) 
#> -> Fairness objects		: 0 objects 
#> -> Checking explainers		: 1 in total (  compatible  )
#> -> Metric calculation		: 13/13 metrics calculated for all models
#>  Fairness object created succesfully  
expand_fairness_object(fobject, drop_metrics_with_na = TRUE)
#>        metric model       score
#> 1         TPR    lm 0.096364988
#> 2         TNR    lm 0.433770370
#> 3         PPV    lm 0.021418116
#> 4         NPV    lm 0.006376217
#> 5         FNR    lm 0.764119178
#> 6         FPR    lm 0.347333014
#> 7         FDR    lm 0.075989500
#> 8         FOR    lm 0.011650617
#> 9          TS    lm 0.096572597
#> 10        STP    lm 0.184128760
#> 11        ACC    lm 0.041523503
#> 12         F1    lm 0.056585621
#> 13 NEW_METRIC    lm 0.860484166
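
# A hedged sketch: `fairness_metrics` restricts the expansion to the
# listed metrics (output not shown here):
# expand_fairness_object(fobject, fairness_metrics = c("TPR", "FPR"))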
# \donttest{
rf_model <- ranger::ranger(Risk ~ .,
  data = german,
  probability = TRUE,
  num.trees = 200
)


explainer_rf <- DALEX::explain(rf_model, data = german[, -1], y = y_numeric)
#> Preparation of a new explainer is initiated
#>   -> model label       :  ranger  (  default  )
#>   -> data              :  1000  rows  9  cols 
#>   -> target variable   :  1000  values 
#>   -> predict function  :  yhat.ranger  will be used (  default  )
#>   -> predicted values  :  No value for predict function target column. (  default  )
#>   -> model_info        :  package ranger , ver. 0.13.1 , task classification (  default  ) 
#>   -> predicted values  :  numerical, min =  0.04864286 , mean =  0.6963437 , max =  0.9955556  
#>   -> residual function :  difference between y and yhat (  default  )
#>   -> residuals         :  numerical, min =  -0.7585476 , mean =  0.003656338 , max =  0.6813942  
#>   A new explainer has been created!  

fobject <- fairness_check(explainer_rf, fobject)
#> Creating fairness classification object
#> -> Privileged subgroup		: character ( from first fairness object  ) 
#> -> Protected variable		: factor ( from first fairness object  ) 
#> -> Cutoff values for explainers	: 0.5 ( for all subgroups ) 
#> -> Fairness objects		: 1 object (  compatible  )
#> -> Checking explainers		: 2 in total (  compatible  )
#> -> Metric calculation		: 10/13 metrics calculated for all models ( 3 NA created )
#>  Fairness object created succesfully  

expand_fairness_object(fobject, drop_metrics_with_na = TRUE)
#> Warning: Found metric with NA: FNR, FOR, NEW_METRIC, omiting it
#>    metric  model        score
#> 1     TPR ranger 0.0080483332
#> 2     TPR     lm 0.0963649885
#> 3     TNR ranger 0.0545052155
#> 4     TNR     lm 0.4337703703
#> 5     PPV ranger 0.0080578374
#> 6     PPV     lm 0.0214181164
#> 7     NPV ranger 0.0261452801
#> 8     NPV     lm 0.0063762171
#> 9     FPR ranger 0.2375821504
#> 10    FPR     lm 0.3473330144
#> 11    FDR ranger 0.0946873209
#> 12    FDR     lm 0.0759894996
#> 13     TS ranger 0.0006090134
#> 14     TS     lm 0.0965725965
#> 15    STP ranger 0.0930757170
#> 16    STP     lm 0.1841287596
#> 17    ACC ranger 0.0060735986
#> 18    ACC     lm 0.0415235028
#> 19     F1 ranger 0.0003175107
#> 20     F1     lm 0.0565856215
# }
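
# The expanded data.frame is a convenient base for custom plots.
# A minimal sketch, assuming ggplot2 is available (the NA warning and
# the plot itself are not shown here):
df <- expand_fairness_object(fobject, drop_metrics_with_na = TRUE)
library(ggplot2)
ggplot(df, aes(x = score, y = metric, fill = model)) +
  geom_col(position = "dodge")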