Calculate all measures for sparse ROC
Details
Calculates the TP, FP, TN, FN, TPR, FPR, accuracy, PPV, FOR and F-measure from a prediction object.
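These measures follow the standard confusion-matrix definitions. As an illustrative sketch only (not the package's internal code), the rate measures at a single threshold could be computed from a prediction data frame with `value` and `outcomeCount` columns (as in the example below):

# Illustrative only: standard confusion-matrix definitions at one threshold.
threshold <- 0.5
tp <- sum(prediction$value >= threshold & prediction$outcomeCount == 1)
fp <- sum(prediction$value >= threshold & prediction$outcomeCount == 0)
fn <- sum(prediction$value <  threshold & prediction$outcomeCount == 1)
tn <- sum(prediction$value <  threshold & prediction$outcomeCount == 0)
sensitivity <- tp / (tp + fn)           # true positive rate
specificity <- tn / (tn + fp)
ppv <- tp / (tp + fp)                   # positive predictive value
falseOmissionRate <- fn / (fn + tn)
f1Score <- 2 * ppv * sensitivity / (ppv + sensitivity)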
Examples
# simulate a prediction object for 100 subjects with a binary outcome
prediction <- data.frame(rowId = 1:100,
                         outcomeCount = stats::rbinom(n = 100, size = 1, prob = 0.5),
                         value = stats::runif(100),
                         evaluation = rep("Train", 100))
summary <- getThresholdSummary(prediction)
str(summary)
#> 'data.frame': 100 obs. of 24 variables:
#> $ evaluation : chr "Train" "Train" "Train" "Train" ...
#> $ predictionThreshold : num 0.993 0.97 0.965 0.953 0.935 ...
#> $ preferenceThreshold : num 0.992 0.967 0.961 0.947 0.927 ...
#> $ positiveCount : num 1 2 3 4 5 6 7 8 9 10 ...
#> $ negativeCount : num 99 98 97 96 95 94 93 92 91 90 ...
#> $ trueCount : num 53 53 53 53 53 53 53 53 53 53 ...
#> $ falseCount : num 47 47 47 47 47 47 47 47 47 47 ...
#> $ truePositiveCount : num 1 2 2 2 2 3 4 5 6 6 ...
#> $ trueNegativeCount : num 47 47 46 45 44 44 44 44 44 43 ...
#> $ falsePositiveCount : num 0 0 1 2 3 3 3 3 3 4 ...
#> $ falseNegativeCount : num 52 51 51 51 51 50 49 48 47 47 ...
#> $ f1Score : num 0.037 0.0727 0.0714 0.0702 0.069 ...
#> $ accuracy : num 0.48 0.49 0.48 0.47 0.46 0.47 0.48 0.49 0.5 0.49 ...
#> $ sensitivity : num 0.0189 0.0377 0.0377 0.0377 0.0377 ...
#> $ falseNegativeRate : num 0.981 0.962 0.962 0.962 0.962 ...
#> $ falsePositiveRate : num 0 0 0.0213 0.0426 0.0638 ...
#> $ specificity : num 1 1 0.979 0.957 0.936 ...
#> $ positivePredictiveValue: num 1 1 0.667 0.5 0.4 ...
#> $ falseDiscoveryRate : num 0 0 0.333 0.5 0.6 ...
#> $ negativePredictiveValue: num 0.475 0.48 0.474 0.469 0.463 ...
#> $ falseOmissionRate : num 0.525 0.52 0.526 0.531 0.537 ...
#> $ positiveLikelihoodRatio: num Inf Inf 1.774 0.887 0.591 ...
#> $ negativeLikelihoodRatio: num 0.981 0.962 0.983 1.005 1.028 ...
#> $ diagnosticOddsRatio : num Inf Inf 1.804 0.882 0.575 ...
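Each row of the summary corresponds to one prediction threshold, so the result can be used directly to trace an ROC curve. A minimal sketch with base graphics, assuming the column names shown above:

# each row is one threshold; plotting FPR against sensitivity traces the ROC
plot(summary$falsePositiveRate, summary$sensitivity, type = "l",
     xlab = "False positive rate (1 - specificity)",
     ylab = "Sensitivity")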