Calculate all measures for sparse ROC
Details
Calculates the TP, FP, TN, FN, TPR, FPR, accuracy, PPV, FOR and F-measure from a prediction object
Examples
prediction <- data.frame(rowId = 1:100,
outcomeCount = stats::rbinom(100, 1, prob = 0.5),
value = runif(100),
evaluation = rep("Train", 100))
summary <- getThresholdSummary(prediction)
str(summary)
#> 'data.frame': 100 obs. of 24 variables:
#> $ evaluation : chr "Train" "Train" "Train" "Train" ...
#> $ predictionThreshold : num 0.995 0.988 0.986 0.975 0.969 ...
#> $ preferenceThreshold : num 0.994 0.986 0.983 0.969 0.962 ...
#> $ positiveCount : num 1 2 3 4 5 6 7 8 9 10 ...
#> $ negativeCount : num 99 98 97 96 95 94 93 92 91 90 ...
#> $ trueCount : num 55 55 55 55 55 55 55 55 55 55 ...
#> $ falseCount : num 45 45 45 45 45 45 45 45 45 45 ...
#> $ truePositiveCount : num 0 1 2 3 3 3 4 4 5 6 ...
#> $ trueNegativeCount : num 44 44 44 44 43 42 42 41 41 41 ...
#> $ falsePositiveCount : num 1 1 1 1 2 3 3 4 4 4 ...
#> $ falseNegativeCount : num 55 54 53 52 52 52 51 51 50 49 ...
#> $ f1Score : num NaN 0.0351 0.069 0.1017 0.1 ...
#> $ accuracy : num 0.44 0.45 0.46 0.47 0.46 0.45 0.46 0.45 0.46 0.47 ...
#> $ sensitivity : num 0 0.0182 0.0364 0.0545 0.0545 ...
#> $ falseNegativeRate : num 1 0.982 0.964 0.945 0.945 ...
#> $ falsePositiveRate : num 0.0222 0.0222 0.0222 0.0222 0.0444 ...
#> $ specificity : num 0.978 0.978 0.978 0.978 0.956 ...
#> $ positivePredictiveValue: num 0 0.5 0.667 0.75 0.6 ...
#> $ falseDiscoveryRate : num 1 0.5 0.333 0.25 0.4 ...
#> $ negativePredictiveValue: num 0.444 0.449 0.454 0.458 0.453 ...
#> $ falseOmissionRate : num 0.556 0.551 0.546 0.542 0.547 ...
#> $ positiveLikelihoodRatio: num 0 0.818 1.636 2.455 1.227 ...
#> $ negativeLikelihoodRatio: num 1.023 1.004 0.986 0.967 0.989 ...
#> $ diagnosticOddsRatio : num 0 0.815 1.66 2.538 1.24 ...