1 Summary

In this set of simulations, we consider settings that include both null and non-null tests, with the proportion of null tests varied across settings. An informative covariate is included as described in simulations-informative-sine.Rmd. The simulations are identical to those performed in simulations-varyingpi0.Rmd, except that the signal (the effect size of the non-null tests) is weaker.

2 Workspace Setup

library(dplyr)
library(ggplot2)
library(SummarizedBenchmark)
library(parallel)

## load helper functions
for (f in list.files("../R", "\\.(r|R)$", full.names = TRUE)) {
    source(f)
}

## project results folder
resdir <- "results"
dir.create(resdir, showWarnings = FALSE, recursive = TRUE)

## intermediary files we create below
null05_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop05.rds")
null10_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop10.rds")
null20_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop20.rds")
null30_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop30.rds")
null40_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop40.rds")
null50_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop50.rds")
null60_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop60.rds")
null70_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop70.rds")
null80_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop80.rds")
null90_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop90.rds")
null95_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop95.rds")
null99_file <- file.path(resdir, "varyingpi0-t-benchmark-nullprop99.rds")

## number of cores for parallelization
cores <- 20
## number of simulation replications
B <- 100

## define benchmarking design
bd <- initializeBenchDesign()

As described in simulations-null.Rmd, we include Scott’s FDR Regression in the analysis, again with both nulltype = "empirical" and nulltype = "theoretical". Since all settings in this series of simulations use test statistics simulated with t-distributed (df = 11) noise, which is approximately Gaussian, we include Scott’s FDR Regression in all of the comparisons.

bdplus <- bd
bdplus <- addBMethod(bdplus, "fdrreg-t",
                     FDRreg::FDRreg,
                     function(x) { x$FDR },
                     z = test_statistic,
                     features = model.matrix(~ splines::bs(ind_covariate, df = 3) - 1),
                     nulltype = "theoretical",
                     control = list(lambda = 0.01))
bdplus <- addBMethod(bdplus, "fdrreg-e",
                     FDRreg::FDRreg,
                     function(x) { x$FDR },
                     z = test_statistic,
                     features = model.matrix(~ splines::bs(ind_covariate, df = 3) - 1),
                     nulltype = "empirical",
                     control = list(lambda = 0.01))
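
The spline basis supplied via features allows the FDR regression model to estimate the prior probability that a test is non-null as a smooth function of the informative covariate.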

All simulation settings will share the following parameters.

m <- 20000                        # integer: number of hypothesis tests
es_dist <- rnorm_generator(2)     # functional: dist of alternative test stats
ts_dist <- rt_perturber(11)       # functional: sampling dist/noise for test stats
null_dist <- rt_2pvaluer(11)      # functional: dist to calc p-values
icovariate <- runif               # functional: independent covariate
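
The helper objects above are closures sourced from ../R. A minimal sketch of the closure pattern they are assumed to follow (the sourced definitions may differ in detail, and the *_sketch names are hypothetical):

## hedged sketches of the helper factories sourced from ../R; these
## illustrate the assumed closure pattern, not the exact definitions
rnorm_generator_sketch <- function(mean) {
    function(n) rnorm(n, mean = mean)          # non-null effect sizes
}
rt_perturber_sketch <- function(df) {
    function(n) rt(n, df = df)                 # t-distributed noise for test stats
}
rt_2pvaluer_sketch <- function(df) {
    function(ts) 2 * pt(abs(ts), df = df, lower.tail = FALSE)  # two-sided t p-values
}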

Simulation results will be presented with a subset of methods excluded, and for certain plots (upset plots) a single alpha cutoff will be used.

excludeSet <- c("unadjusted", "bl-df02", "bl-df04", "bl-df05")
ualpha <- 0.05

3 5% Null Setting

First, we consider the setting where 5% of tests are null.

3.1 Data Simulation

pi0 <- pi0_sine(0.05)             # functional: covariate-dependent null proportion
seed <- 1010
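
The pi0_sine() helper is also sourced from ../R; it encodes how the probability of a test being null varies with the informative covariate. A hedged sketch of the general idea (the amplitude and exact form here are hypothetical):

## hypothetical sketch of a sine-modulated null proportion: the null
## probability oscillates around the target level as a function of the
## covariate, with an amplitude chosen to stay inside [0, 1]
pi0_sine_sketch <- function(target) {
    amp <- min(target, 1 - target) / 2
    function(x) target + amp * sin(2 * pi * x)
}
## e.g., visualize the assumed covariate/null-probability relationship
curve(pi0_sine_sketch(0.05)(x), from = 0, to = 1,
      xlab = "ind_covariate", ylab = "Pr(null)")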

We next run the simulations.

## load cached results if available; otherwise run the B replications in parallel
if (file.exists(null05_file)) {
    res <- readRDS(null05_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m,
                    pi0 = pi0, es_dist = es_dist, icovariate = icovariate,
                    ts_dist = ts_dist, null_dist = null_dist,
                    seed = seed, mc.cores = cores)
    saveRDS(res, file = null05_file)
}
## split each replication into informative- and uninformative-covariate results
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")

3.2 Covariate Diagnostics

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

## generate a single simulated dataset without running the methods (execute = FALSE)
onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)
rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)

3.3 Benchmark Metrics

We plot the averaged results across 100 replications.

resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

We also take a look at the distribution of rejections for each method as a function of the effect size and the independent covariate.

covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")

covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")

Finally, if enough methods produce rejections at alpha = 0.05, we take a look at the overlap of rejections between methods.

if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, 
            "; skipping upset plot")
}

We also compare the simulation results with and without an informative covariate.

resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key",
                                   "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)
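
Since the joined metrics are differenced as value.info - value.uninfo, points above zero in the difference plots below indicate that the metric is higher when the informative covariate is used.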

plotsim_average(resdfiu, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

4 10% Null Setting

Next, we consider the setting where 10% of tests are null.

4.1 Data Simulation

pi0 <- pi0_sine(0.10)             # functional: covariate-dependent null proportion
seed <- 1201

We next run the simulations.

if (file.exists(null10_file)) {
    res <- readRDS(null10_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m,
                    pi0 = pi0, es_dist = es_dist, icovariate = icovariate,
                    ts_dist = ts_dist, null_dist = null_dist,
                    seed = seed, mc.cores = cores)
    saveRDS(res, file = null10_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")

4.2 Covariate Diagnostics

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)
rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)

4.3 Benchmark Metrics

We plot the averaged results across 100 replications.

resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

We also take a look at the distribution of rejections for each method as a function of the effect size and the independent covariate.

covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")

covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")

Finally, if enough methods produce rejections at alpha = 0.05, we take a look at the overlap of rejections between methods.

if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, 
            "; skipping upset plot")
}

We also compare the simulation results with and without an informative covariate.

resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key",
                                   "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

5 20% Null Setting

Next, we consider the setting where 20% of tests are null.

5.1 Data Simulation

pi0 <- pi0_sine(0.20)             # functional: covariate-dependent null proportion
seed <- 51

We next run the simulations.

if (file.exists(null20_file)) {
    res <- readRDS(null20_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m,
                    pi0 = pi0, es_dist = es_dist, icovariate = icovariate,
                    ts_dist = ts_dist, null_dist = null_dist,
                    seed = seed, mc.cores = cores)
    saveRDS(res, file = null20_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")

5.2 Covariate Diagnostics

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)
rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)

5.3 Benchmark Metrics

We plot the averaged results across 100 replications.

resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

We also take a look at the distribution of rejections for each method as a function of the effect size and the independent covariate.

covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")

covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")

Finally, if enough methods produce rejections at alpha = 0.05, we take a look at the overlap of rejections between methods.

if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, 
            "; skipping upset plot")
}

We also compare the simulation results with and without an informative covariate.

resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key",
                                   "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

6 30% Null Setting

Next, we consider the setting where 30% of tests are null.

6.1 Data Simulation

pi0 <- pi0_sine(0.30)             # functional: covariate-dependent null proportion
seed <- 511

We next run the simulations.

if (file.exists(null30_file)) {
    res <- readRDS(null30_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m,
                    pi0 = pi0, es_dist = es_dist, icovariate = icovariate,
                    ts_dist = ts_dist, null_dist = null_dist,
                    seed = seed, mc.cores = cores)
    saveRDS(res, file = null30_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")

6.2 Covariate Diagnostics

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)
rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)

6.3 Benchmark Metrics

We plot the averaged results across 100 replications.

resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

We also take a look at the distribution of rejections for each method as a function of the effect size and the independent covariate.

covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")

covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")

Finally, if enough methods produce rejections at alpha = 0.05, we take a look at the overlap of rejections between methods.

if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, 
            "; skipping upset plot")
}

We also compare the simulation results with and without an informative covariate.

resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key",
                                   "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

7 40% Null Setting

Next, we consider the setting where 40% of tests are null.

7.1 Data Simulation

pi0 <- pi0_sine(0.40)             # functional: covariate-dependent null proportion
seed <- 515

We next run the simulations.

if (file.exists(null40_file)) {
    res <- readRDS(null40_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m,
                    pi0 = pi0, es_dist = es_dist, icovariate = icovariate,
                    ts_dist = ts_dist, null_dist = null_dist,
                    seed = seed, mc.cores = cores)
    saveRDS(res, file = null40_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")

7.2 Covariate Diagnostics

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)
rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)

7.3 Benchmark Metrics

We plot the averaged results across 100 replications.

resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

We also take a look at the distribution of rejections for each method as a function of the effect size and the independent covariate.

covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")

covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")

Finally, if enough methods produce rejections at alpha = 0.05, we take a look at the overlap of rejections between methods.

if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, 
            "; skipping upset plot")
}

We also compare the simulation results with and without an informative covariate.

resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key",
                                   "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

8 50% Null Setting

Next, we consider the setting where 50% of tests are null.

8.1 Data Simulation

pi0 <- pi0_sine(0.50)             # functional: covariate-dependent null proportion
seed <- 500

We next run the simulations.

if (file.exists(null50_file)) {
    res <- readRDS(null50_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m,
                    pi0 = pi0, es_dist = es_dist, icovariate = icovariate,
                    ts_dist = ts_dist, null_dist = null_dist,
                    seed = seed, mc.cores = cores)
    saveRDS(res, file = null50_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")

8.2 Covariate Diagnostics

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)
rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)

8.3 Benchmark Metrics

We plot the averaged results across 100 replications.

resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

We also take a look at the distribution of rejections for each method as a function of the effect size and the independent covariate.

covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")

covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")

Finally, if enough methods produce rejections at alpha = 0.05, we take a look at the overlap of rejections between methods.

if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, 
            "; skipping upset plot")
}

We also compare the simulation results with and without an informative covariate.

resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key",
                                   "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

9 60% Null Setting

Next, we consider the setting where 60% of tests are null.

9.1 Data Simulation

pi0 <- pi0_sine(0.60)             # functional: covariate-dependent null proportion
seed <- 1502

We next run the simulations.

if (file.exists(null60_file)) {
    res <- readRDS(null60_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m,
                    pi0 = pi0, es_dist = es_dist, icovariate = icovariate,
                    ts_dist = ts_dist, null_dist = null_dist,
                    seed = seed, mc.cores = cores)
    saveRDS(res, file = null60_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")

9.2 Covariate Diagnostics

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)
rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)

9.3 Benchmark Metrics

We plot the averaged results across 100 replications.

resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

We also take a look at the distribution of rejections for each method as a function of the effect size and the independent covariate.

covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")

covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")

Finally, if enough methods produce rejections at alpha = 0.05, we take a look at the overlap of rejections between methods.

if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, 
            "; skipping upset plot")
}

We also compare the simulation results with and without an informative covariate.

resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key",
                                   "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

10 70% Null Setting

Next, we consider the setting where 70% of tests are null.

10.1 Data Simulation

pi0 <- pi0_sine(0.70)             # functional: covariate-dependent null proportion
seed <- 1722

We next run the simulations.

if (file.exists(null70_file)) {
    res <- readRDS(null70_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m,
                    pi0 = pi0, es_dist = es_dist, icovariate = icovariate,
                    ts_dist = ts_dist, null_dist = null_dist,
                    seed = seed, mc.cores = cores)
    saveRDS(res, file = null70_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")

10.2 Covariate Diagnostics

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)
rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)

10.3 Benchmark Metrics

We plot the averaged results across 100 replications.

resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

We also take a look at the distribution of rejections for each method as a function of the effect size and the independent covariate.

covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")

covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")

Finally, if enough methods produce rejections at alpha = 0.05, we take a look at the overlap of rejections between methods.

if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, 
            "; skipping upset plot")
}

We also compare the simulation results with and without an informative covariate.

resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key",
                                   "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

11 80% Null Setting

Next, we consider the setting where 80% of tests are null.

11.1 Data Simulation

pi0 <- pi0_sine(0.80)             # functional: covariate-dependent null proportion
seed <- 608

We next run the simulations.

if (file.exists(null80_file)) {
    res <- readRDS(null80_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m,
                    pi0 = pi0, es_dist = es_dist, icovariate = icovariate,
                    ts_dist = ts_dist, null_dist = null_dist,
                    seed = seed, mc.cores = cores)
    saveRDS(res, file = null80_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")

11.2 Covariate Diagnostics

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)
rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)

11.3 Benchmark Metrics

We plot the averaged results across 100 replications.

resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

We also take a look at the distribution of rejections for each method as a function of the effect size and the independent covariate.

covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")

covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")

Finally, if enough methods produce rejections at alpha = 0.05, we take a look at the overlap of rejections between methods.

if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, 
            "; skipping upset plot")
}

We also compare the simulation results with and without an informative covariate.

resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key",
                                   "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

12 90% Null Setting

Next, we consider the setting where 90% of tests are null.

12.1 Data Simulation

pi0 <- pi0_sine(0.90)             # functional: covariate-dependent null proportion
seed <- 808

We next run the simulations.

if (file.exists(null90_file)) {
    res <- readRDS(null90_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m,
                    pi0 = pi0, es_dist = es_dist, icovariate = icovariate,
                    ts_dist = ts_dist, null_dist = null_dist,
                    seed = seed, mc.cores = cores)
    saveRDS(res, file = null90_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")

12.2 Covariate Diagnostics

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)
rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)

12.3 Benchmark Metrics

We plot the averaged results across 100 replications.

resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 
## Warning: Removed 1 rows containing missing values (geom_errorbar).

plotsim_average(resdf, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

We also take a look at the distribution of rejections for each method as a function of the effect size and the independent covariate.

covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")

covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")

Finally, if enough methods produce rejections at alpha = 0.05, we take a look at the overlap of rejections between methods.

if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, 
            "; skipping upset plot")
}

We also compare the simulation results with and without an informative covariate.

resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key",
                                   "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
## Warning: Removed 1 rows containing missing values (geom_path).
## Warning: Removed 1 rows containing missing values (geom_errorbar).

plotsim_average(resdfiu, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

13 95% Null Setting

Next, we consider the setting where 95% of tests are null.

13.1 Data Simulation

pi0 <- pi0_sine(0.95)             # functional: covariate-dependent null proportion
seed <- 913

We next run the simulations.

if (file.exists(null95_file)) {
    res <- readRDS(null95_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m,
                    pi0 = pi0, es_dist = es_dist, icovariate = icovariate,
                    ts_dist = ts_dist, null_dist = null_dist,
                    seed = seed, mc.cores = cores)
    saveRDS(res, file = null95_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")

13.2 Covariate Diagnostics

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)
rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)

13.3 Benchmark Metrics

We plot the averaged results across 100 replications.

resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 
## Warning: Removed 2 rows containing missing values (geom_path).
## Warning: Removed 2 rows containing missing values (geom_errorbar).

plotsim_average(resdf, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

We also take a look at the distribution of rejections for each method as a function of the effect size and the independent covariate.

covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")

covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")

Finally, if enough methods produce rejections at alpha = 0.05, we take a look at the overlap of rejections between methods.

if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, 
            "; skipping upset plot")
}

We also compare the simulation results with and without an informative covariate.

resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key",
                                   "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
## Warning: Removed 3 rows containing missing values (geom_path).
## Warning: Removed 4 rows containing missing values (geom_errorbar).

plotsim_average(resdfiu, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

14 99% Null Setting

Next, we consider the setting where 99% of tests are null.

14.1 Data Simulation

pi0 <- pi0_sine(0.99)             # functional: covariate-dependent null proportion
seed <- 2015

We next run the simulations.

if (file.exists(null99_file)) {
    res <- readRDS(null99_file)
} else {
    res <- mclapply(X = 1:B, FUN = simIteration, bench = bdplus, m = m,
                    pi0 = pi0, es_dist = es_dist, icovariate = icovariate,
                    ts_dist = ts_dist, null_dist = null_dist,
                    seed = seed, mc.cores = cores)
    saveRDS(res, file = null99_file)
}
res_i <- lapply(res, `[[`, "informative")
res_u <- lapply(res, `[[`, "uninformative")

14.2 Covariate Diagnostics

Here, we show the relationship between the independent covariate and p-values for a single replication of the experiment.

onerun <- simIteration(bdplus, m = m, pi0 = pi0, es_dist = es_dist, ts_dist = ts_dist,
                       icovariate = icovariate, null_dist = null_dist, execute = FALSE)
rank_scatter(onerun, pvalue = "pval", covariate = "ind_covariate")

strat_hist(onerun, pvalue = "pval", covariate = "ind_covariate", maxy = 10, numQ = 3)

14.3 Benchmark Metrics

We plot the averaged results across 100 replications.

resdf <- plotsim_standardize(res_i, alpha = seq(0.01, 0.10, 0.01))

plotsim_average(resdf, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 
## Warning: Removed 7 rows containing missing values (geom_path).
## Warning: Removed 10 rows containing missing values (geom_errorbar).

plotsim_average(resdf, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

plotsim_average(resdf, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE) 

We also take a look at the distribution of rejections for each method as a function of the effect size and the independent covariate.

covariateLinePlot(res_i, alpha = ualpha, covname = "effect_size")

covariateLinePlot(res_i, alpha = ualpha, covname = "ind_covariate")

Finally, if enough methods produce rejections at alpha = 0.05, we take a look at the overlap of rejections between methods.

if (numberMethodsReject(resdf, alphacutoff = ualpha, filterSet = excludeSet) >= 3) {
    aggupset(res_i, alpha = ualpha, supplementary = FALSE, return_list = FALSE)
} else {
    message("Not enough methods found rejections at alpha ", ualpha, 
            "; skipping upset plot")
}

We also compare the simulation results with and without an informative covariate.

resdfu <- plotsim_standardize(res_u, alpha = seq(0.01, 0.10, 0.01))

resdfiu <- dplyr::full_join(select(resdf, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            select(resdfu, rep, blabel, param.alpha, key,
                                   performanceMetric, alpha, value),
                            by = c("rep", "blabel", "param.alpha", "key",
                                   "performanceMetric", "alpha"),
                            suffix = c(".info", ".uninfo"))
resdfiu <- dplyr::mutate(resdfiu, value = value.info - value.uninfo)

plotsim_average(resdfiu, met="rejections", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="FDR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)
## Warning: Removed 11 rows containing missing values (geom_path).
## Warning: Removed 11 rows containing missing values (geom_errorbar).

plotsim_average(resdfiu, met="TPR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

plotsim_average(resdfiu, met="TNR", filter_set = excludeSet,
                merge_ihw = TRUE, errorBars = TRUE, diffplot = TRUE)

15 Session Info

sessionInfo()
## R version 3.5.0 (2018-04-23)
## Platform: x86_64-pc-linux-gnu (64-bit)
## Running under: CentOS Linux 7 (Core)
## 
## Matrix products: default
## BLAS: /usr/lib64/libblas.so.3.4.2
## LAPACK: /usr/lib64/liblapack.so.3.4.2
## 
## locale:
##  [1] LC_CTYPE=en_US.UTF-8       LC_NUMERIC=C              
##  [3] LC_TIME=en_US.UTF-8        LC_COLLATE=en_US.UTF-8    
##  [5] LC_MONETARY=en_US.UTF-8    LC_MESSAGES=en_US.UTF-8   
##  [7] LC_PAPER=en_US.UTF-8       LC_NAME=C                 
##  [9] LC_ADDRESS=C               LC_TELEPHONE=C            
## [11] LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C       
## 
## attached base packages:
##  [1] splines   parallel  stats4    stats     graphics  grDevices utils    
##  [8] datasets  methods   base     
## 
## other attached packages:
##  [1] bindrcpp_0.2.2             cowplot_0.9.2             
##  [3] hexbin_1.27.2              adaptMT_1.0.0             
##  [5] FDRreg_0.2-1               mvtnorm_1.0-8             
##  [7] BayesLogit_0.6             fda_2.4.7                 
##  [9] Matrix_1.2-14              fdrtool_1.2.15            
## [11] swfdr_1.4.0                qvalue_2.10.0             
## [13] ashr_2.2-7                 IHW_1.6.0                 
## [15] magrittr_1.5               SummarizedBenchmark_0.99.2
## [17] mclust_5.4.1               BiocParallel_1.12.0       
## [19] stringr_1.3.1              rlang_0.2.1               
## [21] UpSetR_1.3.3               SummarizedExperiment_1.8.1
## [23] DelayedArray_0.4.1         matrixStats_0.53.1        
## [25] Biobase_2.38.0             GenomicRanges_1.30.3      
## [27] GenomeInfoDb_1.14.0        IRanges_2.12.0            
## [29] S4Vectors_0.16.0           BiocGenerics_0.24.0       
## [31] tidyr_0.8.1                ggplot2_3.0.0             
## [33] dplyr_0.7.6               
## 
## loaded via a namespace (and not attached):
##  [1] nlme_3.1-137           bitops_1.0-6           doParallel_1.0.11     
##  [4] rprojroot_1.3-2        tools_3.5.0            backports_1.1.2       
##  [7] R6_2.2.2               lazyeval_0.2.1         colorspace_1.3-2      
## [10] withr_2.1.2            tidyselect_0.2.4       gridExtra_2.3         
## [13] mnormt_1.5-5           compiler_3.5.0         ggdendro_0.1-20       
## [16] labeling_0.3           slam_0.1-43            mosaicCore_0.5.0      
## [19] scales_0.5.0           SQUAREM_2017.10-1      psych_1.8.4           
## [22] digest_0.6.15          ggformula_0.7.0        foreign_0.8-70        
## [25] rmarkdown_1.9          XVector_0.18.0         pscl_1.5.2            
## [28] pkgconfig_2.0.1        htmltools_0.3.6        lpsymphony_1.7.1      
## [31] bindr_0.1.1            RCurl_1.95-4.10        GenomeInfoDbData_1.0.0
## [34] mosaicData_0.16.0      Rcpp_0.12.18           munsell_0.4.3         
## [37] stringi_1.2.2          yaml_2.1.19            MASS_7.3-50           
## [40] zlibbioc_1.24.0        plyr_1.8.4             grid_3.5.0            
## [43] lattice_0.20-35        knitr_1.20             pillar_1.2.3          
## [46] reshape2_1.4.3         codetools_0.2-15       glue_1.3.0            
## [49] evaluate_0.10.1        foreach_1.4.4          gtable_0.2.0          
## [52] purrr_0.2.5            assertthat_0.2.0       broom_0.4.4           
## [55] truncnorm_1.0-8        tibble_1.4.2           iterators_1.0.9       
## [58] mosaic_1.2.0