diff --git a/NAMESPACE b/NAMESPACE
index 847ab70..dc09311 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -7,6 +7,7 @@ S3method(ci,listofperf)
 S3method(ci,mse)
 S3method(ci,recal)
 S3method(coef,metapred)
+S3method(coef,mp.1st.fit)
 S3method(coef,mp.cv.meta.fit)
 S3method(coef,mp.perf)
 S3method(coef,mp.stratified.fit)
diff --git a/R/metapred.R b/R/metapred.R
index 32bc58b..ba7e303 100644
--- a/R/metapred.R
+++ b/R/metapred.R
@@ -149,6 +149,9 @@
 #' only the first is used for model selection.
 #' @param selFUN Function for selecting the best method. Default: lowest value for \code{genFUN}. Should be set to
 #' "which.max" if high values for \code{genFUN} indicate a good model.
+#' @param gen.of.perf For which performance measures should generalizability measures be computed? \code{"first"} (default) for
+#' only the first. \code{"respective"} for matching the generalizability measure to the performance measure at the same position
+#' in the list. \code{"factorial"} for applying all generalizability measures to all performance estimates.
 #' @param ... To pass arguments to estFUN (e.g. family = "binomial"), or to other FUNctions.
 #'
 #' @return A list of class \code{metapred}, containing the final model in \code{global.model}, and the stepwise
@@ -211,7 +214,7 @@
 metapred <- function(data, strata, formula, estFUN = "glm", scope = NULL, retest = FALSE,
                      max.steps = 1000, center = FALSE, recal.int = FALSE, cvFUN = NULL, cv.k = NULL,
                      # tol = 0,
                      metaFUN = NULL, meta.method = NULL, predFUN = NULL, perfFUN = NULL, genFUN = NULL,
-                     selFUN = "which.min",
+                     selFUN = "which.min", gen.of.perf = "first",
                      ...) {
   call <- match.call()
@@ -259,10 +262,12 @@ metapred <- function(data, strata, formula, estFUN = "glm", scope = NULL, retest
     stop("At least 1 cluster must be used for development.")
 
   # Fitting
-  fit <- mp.fit(formula = formula, data = data, remaining.changes = updates, st.i = strata.i, st.u = strata.u, folds = folds,
+  fit <- mp.fit(formula = formula, data = data, remaining.changes = updates,
+                st.i = strata.i, st.u = strata.u, folds = folds,
                 recal.int = recal.int, retest = retest, max.steps = max.steps, tol = 0,
-                estFUN = estFUN, metaFUN = metaFUN, meta.method = meta.method, predFUN = predFUN, perfFUN = perfFUN,
-                genFUN = genFUN, selFUN = selFUN, ...)
+                estFUN = estFUN, metaFUN = metaFUN, meta.method = meta.method,
+                predFUN = predFUN, perfFUN = perfFUN,
+                genFUN = genFUN, selFUN = selFUN, gen.of.perf = gen.of.perf, ...)
 
   # mp.args <- c(list(formula = formula, data = data, remaining.changes = updates, st.i = strata.i, st.u = strata.u, folds = folds,
   #              recal.int = recal.int, retest = retest, max.steps = max.steps, tol = 0,
@@ -279,8 +284,10 @@ metapred <- function(data, strata, formula, estFUN = "glm", scope = NULL, retest
                        formula.changes = getFormulaDiffAsChar(formula.final, formula), # NOTE: formula.changes is currently unordered!
                        options = list(cv.k = cv.k, meta.method = meta.method, recal.int = recal.int,
-                                      center = center, max.steps = max.steps, retest = retest, two.stage = two.stage), # add: tol
-                       FUN = list(cvFUN = cvFUN, predFUN = predFUN, perfFUN = get.function(perfFUN), metaFUN = metaFUN, genFUN = genFUN,
+                                      center = center, max.steps = max.steps, retest = retest,
+                                      two.stage = two.stage, gen.of.perf = gen.of.perf), # add: tol
+                       FUN = list(cvFUN = cvFUN, predFUN = predFUN, perfFUN = get.functions(perfFUN),
+                                  metaFUN = metaFUN, genFUN = genFUN,
                                   selFUN = selFUN, estFUN = estFUN, estFUN.name = estFUN.name)))
   class(out) <- c("metapred")
   return(out)
@@ -556,7 +563,7 @@ mp.fit <- function(formula, data, remaining.changes, st.i, st.u, folds, recal.in
                    retest = FALSE, max.steps = 3, tol = 0,
                    estFUN = glm, metaFUN = urma, meta.method = "DL", predFUN = NULL,
                    perfFUN = mse, genFUN = abs.mean, selFUN = which.min,
-                   two.stage = TRUE, ...) {
+                   two.stage = TRUE, gen.of.perf = "first", ...) {
   out <- steps <- list()
 
   ## Step 0
@@ -566,7 +573,8 @@ mp.fit <- function(formula, data, remaining.changes, st.i, st.u, folds, recal.in
                                             st.i = st.i, st.u = st.u, folds = folds, recal.int = recal.int,
                                             retest = FALSE, two.stage = two.stage,
                                             estFUN = estFUN, metaFUN = metaFUN, meta.method = meta.method, predFUN = predFUN,
-                                            perfFUN = perfFUN, genFUN = genFUN, selFUN = selFUN, ...)
+                                            perfFUN = perfFUN, genFUN = genFUN, selFUN = selFUN,
+                                            gen.of.perf = gen.of.perf, ...)
   steps[[getStepName(step.count)]][["step.count"]] <- step.count
   out[["best.step"]] <- getStepName(step.count)
   out[["stop.reason"]] <- "no changes were requested."
@@ -597,7 +605,8 @@ mp.fit <- function(formula, data, remaining.changes, st.i, st.u, folds, recal.in
                                               st.i = st.i, st.u = st.u, folds = folds, recal.int = recal.int,
                                               retest = retest, two.stage = two.stage,
                                               estFUN = estFUN, metaFUN = metaFUN, meta.method = meta.method, predFUN = predFUN,
-                                              perfFUN = perfFUN, genFUN = genFUN, selFUN = selFUN, ...)
+                                              perfFUN = perfFUN, genFUN = genFUN, selFUN = selFUN,
+                                              gen.of.perf = gen.of.perf, ...)
     steps[[getStepName(step.count)]][["step.count"]] <- step.count
     ## Model selection
     # This step
@@ -699,7 +708,8 @@ mp.step.get.change <- function(step, ...)
 
 mp.step <- function(formula, data, remaining.changes, st.i, st.u, folds, recal.int = FALSE, two.stage = TRUE,
                     retest = FALSE, estFUN = glm, metaFUN = urma, meta.method = "DL", predFUN = NULL,
-                    perfFUN = mse, genFUN = abs.mean, selFUN = which.min, ...) {
+                    perfFUN = mse, genFUN = abs.mean, selFUN = which.min, gen.of.perf = "first",
+                    ...) {
   cv <- out <- list()
 
   out[["start.formula"]] <- formula
@@ -721,7 +731,8 @@ mp.step <- function(formula, data, remaining.changes, st.i, st.u, folds, recal.i
     cv[[name]] <- mp.cv(formula = new.formula, data = data, st.i = st.i, st.u = st.u, folds = folds,
                         recal.int = recal.int, two.stage = two.stage,
                         estFUN = estFUN, metaFUN = metaFUN, meta.method = meta.method,
-                        predFUN = predFUN, perfFUN = perfFUN, genFUN = genFUN, change = change, ...)
+                        predFUN = predFUN, perfFUN = perfFUN, genFUN = genFUN,
+                        change = change, gen.of.perf = gen.of.perf, ...)
     # Save changes
     cv[[name]][["remaining.changes"]] <- if (retest) remaining.changes else remaining.changes[-fc]
     # cv[[name]][["changed"]] <- change
@@ -855,12 +866,13 @@ summary.mp.global <- function(object, ...) {
 # and a validated on val folds
 mp.cv <- function(formula, data, st.i, st.u, folds, recal.int = FALSE, two.stage = TRUE,
                   estFUN = glm, metaFUN = urma, meta.method = "DL", predFUN = NULL,
-                  perfFUN = mse, genFUN = abs.mean, change = NULL, ...) {
+                  perfFUN = mse, genFUN = abs.mean, change = NULL, gen.of.perf = "first", ...) {
   out <- mp.cv.dev(formula = formula, data = data, st.i = st.i, st.u = st.u, folds = folds,
                    two.stage = two.stage, estFUN = estFUN, metaFUN = metaFUN, meta.method = meta.method,
                    change = change, ...)
 
   out <- mp.cv.val(cv.dev = out, data = data, st.i = st.i, folds = folds, recal.int = recal.int, two.stage = two.stage,
-                   estFUN = estFUN, predFUN = predFUN, perfFUN = perfFUN, genFUN = genFUN, ...)
+                   estFUN = estFUN, predFUN = predFUN, perfFUN = perfFUN, genFUN = genFUN,
+                   gen.of.perf = gen.of.perf, ...)
   class(out) <- c("mp.cv", class(out))
   out
@@ -913,7 +925,7 @@ print.mp.cv <- function(x, ...) {
 # Returns object of class mp.cv.val, which is a validated mp.cv.dev
 mp.cv.val <- function(cv.dev, data, st.i, folds, recal.int = FALSE, two.stage = TRUE,
                       estFUN = glm, predFUN = NULL, perfFUN = mse,
-                      genFUN = abs.mean, plot = F, ...) {
+                      genFUN = abs.mean, plot = F, gen.of.perf = "first", ...) {
   dots <- list(...)
   pfn <- if (is.character(perfFUN)) perfFUN else "Performance"
   cv.dev[["perf.name"]] <- pfn # To be removed!??!!?
@@ -987,10 +999,15 @@ mp.cv.val <- function(cv.dev, data, st.i, folds, recal.int = FALSE, two.stage =
   cv.dev[["perf.all"]] <- perf.all # Future compatibility
   cv.dev[["perf"]] <- perf.all[[1]] # Backwards compatibility
 
-  # Generalizibility
+  # Generalizability
   if (!is.list(genFUN)) genFUN <- list(genFUN)
 
+  if (identical(gen.of.perf, "factorial")) {
+    which.perf <- rep(seq_along(perfFUN), each = length(genFUN))
+    genFUN <- rep(genFUN, times = length(perfFUN))
+  }
+
   # Names of generalizability measures
   if (identical(length(names(genFUN)), length(genFUN))) {
     gen.names <- names(genFUN)
@@ -1003,8 +1020,10 @@ mp.cv.val <- function(cv.dev, data, st.i, folds, recal.int = FALSE, two.stage =
   gen.all <- rep(NA, length(genFUN))
 
   for (fun.id in seq_along(genFUN)) { # Single brackets intended!
+    cv.dev.selection <- if (identical(gen.of.perf, "first")) 1 else
+      if (identical(gen.of.perf, "factorial")) which.perf[fun.id] else fun.id # add which_perf somehow
     genfun <- match.fun(genFUN[[fun.id]])
-    args <- c(list(object = cv.dev[["perf"]],
+    args <- c(list(object = cv.dev[["perf.all"]][[cv.dev.selection]],
                    coef = coef(cv.dev[["stratified.fit"]]),
                    title = paste("Model change: ~", cv.dev[["changed"]]),
                    xlab = as.character(pfn)
@@ -1136,7 +1155,7 @@ fitted.mp.cv.dev <- function(object, data, folds, st.i, predFUN = NULL, two.stag
 family.mp.cv.dev <- function(object, ...)
   object$family
 
-# Estimate a one-stage or non-stratified model on the develoment (!) strata.
+# Estimate a one-stage or non-stratified model on the development (!) strata.
 mp.1st.fit <- function(formula, data, st.i, st.u, folds, estFUN, ...) {
   out <- list()
 
@@ -1206,6 +1225,13 @@ mp.cv.meta.fit <- function(stratified.fit, folds, metaFUN = urma, meta.method =
 coef.mp.cv.meta.fit <- function(object, ...)
   t(as.data.frame(lapply(object, `[[`, "coefficients"), check.names = FALSE))
 
+#' @author Valentijn de Jong
+#' @method coef mp.1st.fit
+#' @export
+coef.mp.1st.fit <- function(object, ...) {
+  coef.mp.cv.meta.fit(object, ...)
+}
+
 #' @author Valentijn de Jong
 #' @method print mp.cv.meta.fit
 #' @export
@@ -1220,6 +1246,7 @@ print.mp.cv.meta.fit <- function(x, ...) {
   }
 }
 
+
 # Make new meta model (i.e. model fitted on multiple clusters) for ?? for metapred
 # stratified.fit mp.stratified.fit
 # metaFUN function for estimating meta-analytic models, e.g. urma (this file)
@@ -1536,6 +1563,10 @@ ci.mse <- function(object, conf = .95, ...) {
 #' to \link{subset.metapred} such that the generalizability estimates of other steps/models may be
 #' returned..
 #'
+#' @details
+#' With named values or indices for parameter \code{genFUN} one or more estimates
+#' of generalizability can be selected. Use \code{genFUN = 0} to select all.
+#'
 #' @export
 gen <- function(object, ...)
   UseMethod("gen")
@@ -1548,9 +1579,11 @@ gen.metapred <- function(object, genFUN = 1, ...)
   gen(subset(object, ...), genFUN = genFUN, ...)
 
 #' @export
-gen.mp.cv.val <- function(object, genFUN = 1, ...)
-  object$gen.all[[genFUN]]
-
+gen.mp.cv.val <- function(object, genFUN = 1, ...) {
+  if (is.numeric(genFUN) && genFUN == 0)
+    return(object$gen.all)
+  object$gen.all[genFUN]
+}
 
 #' Performance estimates
 #'
@@ -1566,7 +1599,7 @@ gen.mp.cv.val <- function(object, genFUN = 1, ...)
 #' @param object A model fit object, either a \link{metapred} or \code{subset(metapred)} object.
 #' @param ... By default, the final model is selected. This parameter allows other arguments to be
 #' passed to \link{subset.metapred} such that the performance estimates of other steps/models may be
-#' returned..
+#' returned. Use \code{perfFUN = 0} to select all.
 #'
 #' @export
 perf <- function(object, ...)
@@ -1580,5 +1613,10 @@ perf.metapred <- function(object, perfFUN = 1, ...)
   perf(subset(object, ...), perfFUN = perfFUN, ...)
 
 #' @export
-perf.mp.cv.val <- function(object, perfFUN = 1, ...)
-  object[["perf.all"]][[perfFUN]]
\ No newline at end of file
+perf.mp.cv.val <- function(object, perfFUN = 1, ...) {
+  if (is.numeric(perfFUN) && perfFUN == 0)
+    return(object[["perf.all"]])
+  object[["perf.all"]][[perfFUN]]
+}
+
+
diff --git a/R/metapred_measures.R b/R/metapred_measures.R
index c1e72d7..927d017 100644
--- a/R/metapred_measures.R
+++ b/R/metapred_measures.R
@@ -71,17 +71,11 @@ calibration.intercept <- cal.int <- function(p, y, estFUN, family, ...)
 bin.cal.int <- function(p, y, ...)
   pred.recal(p = p, y = y, estFUN = "glm", family = binomial, which = "intercept")
 
-# Slope.only is a trick to make this functin work for metapred.
+# Slope.only is a trick to make this function work for metapred.
 # Slope.only should otherwise always be false! Also: this messes up the variances,
 # making meta-analysis impossible!
 # multiplicative slope!
 calibration.slope <- cal.slope <- function(p, y, estFUN, family, slope.only = TRUE, ...) {
-  # refit <- pred.recal(p = p, y = y, estFUN = estFUN, family = family, which = "slope")
-  # if (slope.only) {
-  #   refit[[1]] <- refit[[1]][[2]]
-  # }
-  # refit
-
   refit <- pred.recal(p = p, y = y, estFUN = estFUN, family = family, which = "slope")
   if (slope.only) {
     refit$estimate <- refit[[1]] <- refit[[1]][2]
diff --git a/R/metapred_utils.R b/R/metapred_utils.R
index f4c2365..f7ea019 100644
--- a/R/metapred_utils.R
+++ b/R/metapred_utils.R
@@ -209,10 +209,10 @@ getPredictMethod <- function(fit, two.stage = TRUE, predFUN = NULL, ...) {
 # f formula used for selecting relevant variables from newdata. Overrides object
 # ... For compatibility only.
 # Returns vector of predicted values.
-predictGLM <- function(object, newdata, b = NULL, f = NULL, type = "response", ...) {
+predictGLM <- function(object, newdata, b = NULL, f = NULL, type = "response", X = NULL, ...) {
   if (is.null(b)) b <- coef(object)
   if (is.null(f)) f <- formula(object)
-  X <- model.matrix(f2rhsf(stats::as.formula(f)), data = newdata)
+  if (is.null(X)) X <- model.matrix(f2rhsf(stats::as.formula(f)), data = newdata)
 
   lp <- X %*% b
@@ -225,8 +229,12 @@ predictGLM <- function(object, newdata, b = NULL, f = NULL, type = "response", .
   return(lp)
 }
 
-predictglmer <- function(object, newdata, b = NULL, f = NULL, type = "response", ...)
+predictglmer <- function(object, newdata, b = NULL, f = NULL, type = "response", ...) {
+  if (is.null(f)) f <- formula(object)
+
+  f <- lme4::nobars(f)
   predictGLM(object = object, newdata = newdata, b = b, f = f, type = type, ...)
+}
 
 # Prediction function for logistf from the logisf package
 # Args same as those of predictGLM()
@@ -449,6 +453,11 @@ get.function <- function(x, ...) {
   return(get(as.character(x), mode = "function"))
 }
 
+# x list of functions or list of names of functions, or a combination of both
+get.functions <- function(x, ...) {
+  lapply(x, get.function, ...)
+}
+
 # Convert factor to binary
 # In case the factor has more than 2 levels, by default the same occurs as in
 # glm: the first level is assumed to be the failure, and all others successes.
diff --git a/R/plot_utils.r b/R/plot_utils.r
index c84888d..31fb4ec 100644
--- a/R/plot_utils.r
+++ b/R/plot_utils.r
@@ -219,7 +219,7 @@ forest.default <- function(theta,
 
     # Add confidence interval of the summary estimate
-    p <- p + with(g2, geom_errorbar(data = g2, aes(ymin = ci.lower, ymax = ci.upper, x = order), width = 0.5, size=1.0))
+    p <- p + with(g2, geom_errorbar(data = g2, aes(ymin = ci.lower, ymax = ci.upper, x = order), width = 0.5, linewidth=1.0))
 
     # Add summary estimate
     p <- p + with(g2, geom_point(data = g2, aes(x = order, y = mean), shape=23, size=3, fill = col.diamond))
@@ -237,7 +237,7 @@ forest.default <- function(theta,
 
     p <- p + with(g3, geom_errorbar(data = g3,
                                     aes(ymin = pi.lower, ymax = pi.upper, x = order),
-                                    size = size.predint,
+                                    linewidth = size.predint,
                                     width = 0.5,
                                     color = col.predint,
                                     linetype = predint.linetype))
diff --git a/man/Tzoulaki.Rd b/man/Tzoulaki.Rd
index 115954c..b8a1207 100755
--- a/man/Tzoulaki.Rd
+++ b/man/Tzoulaki.Rd
@@ -30,16 +30,10 @@ Tzoulaki et al. (2009) reviewed studies that evaluated various candidate prognos
     \item{\code{sign.AUCdiff}}{a boolean vector indicating whether the difference between \code{AUC.orig} and \code{AUC.modif} is below 0.05}
   }
 }
-\details{
-%% ~~ If necessary, more details than the __description__ above ~~
-}
 \source{
 %% ~~ reference to a publication or URL from which the data were obtained ~~
 Tzoulaki I, Liberopoulos G, Ioannidis JPA. Assessment of claims of improved prediction beyond the Framingham risk score. \emph{JAMA}. 2009 Dec 2;302(21):2345–52.
-}
-\references{
-%% ~~ possibly secondary sources and usages ~~
 }
 \examples{
 data(Tzoulaki)
diff --git a/man/gen.Rd b/man/gen.Rd
index 62118d3..244c0bd 100644
--- a/man/gen.Rd
+++ b/man/gen.Rd
@@ -18,6 +18,10 @@ returned..}
 \description{
 Obtain generalizability estimates from a model fit.
 }
+\details{
+With named values or indices for parameter \code{genFUN} one or more estimates
+of generalizability can be selected. Use \code{genFUN = 0} to select all.
+}
 \author{
 Valentijn de Jong
 }
diff --git a/man/metapred.Rd b/man/metapred.Rd
index 4c385de..4dc9d6c 100644
--- a/man/metapred.Rd
+++ b/man/metapred.Rd
@@ -22,6 +22,7 @@ metapred(
   perfFUN = NULL,
   genFUN = NULL,
   selFUN = "which.min",
+  gen.of.perf = "first",
   ...
 )
 }
@@ -81,6 +82,10 @@ only the first is used for model selection.}
 \item{selFUN}{Function for selecting the best method. Default: lowest value for \code{genFUN}. Should be set to
 "which.max" if high values for \code{genFUN} indicate a good model.}
 
+\item{gen.of.perf}{For which performance measures should generalizability measures be computed? \code{"first"} (default) for
+only the first. \code{"respective"} for matching the generalizability measure to the performance measure at the same position
+in the list. \code{"factorial"} for applying all generalizability measures to all performance estimates.}
+
 \item{...}{To pass arguments to estFUN (e.g. family = "binomial"), or to other FUNctions.}
 }
 \value{
diff --git a/man/perf.Rd b/man/perf.Rd
index fe97445..12cafab 100644
--- a/man/perf.Rd
+++ b/man/perf.Rd
@@ -13,7 +13,7 @@ performance(object, ...)
 \item{...}{By default, the final model is selected. This parameter allows other arguments to be
 passed to \link{subset.metapred} such that the performance estimates of other steps/models may be
-returned..}
+returned. Use \code{perfFUN = 0} to select all.}
 }
 \description{
 Obtain performance estimates from a model fit.
 }
diff --git a/tests/testthat/Rplots.pdf b/tests/testthat/Rplots.pdf
index 103d740..1b36e96 100644
Binary files a/tests/testthat/Rplots.pdf and b/tests/testthat/Rplots.pdf differ
diff --git a/tests/testthat/test_metapred_1_utils.R b/tests/testthat/test_metapred_1_utils.R
index 59bd842..f2f21de 100644
--- a/tests/testthat/test_metapred_1_utils.R
+++ b/tests/testthat/test_metapred_1_utils.R
@@ -5,12 +5,12 @@ test_that("Generating a block diagonal matrix works", {
   m2 <- matrix(c(5,6,7,8), nrow = 2, ncol = 2)
   m3 <- matrix(c(9,10,11,12), nrow = 2, ncol = 2)
 
-  m_block <- blockMatrixDiagonal(m1, m2, m3)
+  m_block <- metamisc:::blockMatrixDiagonal(m1, m2, m3)
   m_eval <- matrix(c(1,2,0,0,0,0,3,4,0,0,0,0,0,0,5,6,0,0,0,0,7,8,0,0,0,0,0,0,9,10,0,0,0,0,11,12), nrow = 6, ncol = 6)
   expect_identical(m_block, m_eval)
 
   m_list <- list(m1, m2, m3)
-  m_block2 <- blockMatrixDiagonal(m_list)
+  m_block2 <- metamisc:::blockMatrixDiagonal(m_list)
   expect_identical(m_block2, m_eval)
 })
@@ -32,15 +32,15 @@ test_that("Multivariate meta-analysis works", {
   y <- data.frame(PD = c(0.47, 0.20, 0.40, 0.26, 0.56),
                   AL = c(-0.32, -0.60, -0.12, -0.31, -0.39))
 
-  m_block <- blockMatrixDiagonal(m1, m2, m3, m4, m5)
+  m_block <- metamisc:::blockMatrixDiagonal(m1, m2, m3, m4, m5)
 
   m_dat <- data.frame(y = c(0.47, -0.32, 0.20, -0.60, 0.40, -0.12, 0.26, -0.31, 0.56, -0.39),
                       group = c("PD", "AL", "PD", "AL", "PD", "AL", "PD", "AL", "PD", "AL"),
                       study = c(1, 1, 2, 2, 3, 3, 4, 4, 5, 5))
 
   m_fit <- metafor::rma.mv(yi = y, V = m_block, mods = ~ -1+group, random = ~ group|study, struct = "UN", data = m_dat)
-  mrma_fit <- mrma(coefficients = y, vcov = m_full)
-  expect_equal(as.numeric(coefficients(m_fit)["groupAL"]), as.numeric(mrma_fit$coefficients["groupAL"])) # Was expect_identical
+  mrma_fit <- metamisc:::mrma(coefficients = y, vcov = m_full)
+  expect_equal(as.numeric(coefficients(m_fit)["groupAL"]), as.numeric(mrma_fit$coefficients["groupAL"]), tolerance = 1e-7) # Was expect_identical
   expect_equal(as.numeric(coefficients(m_fit)["groupPD"]), as.numeric(mrma_fit$coefficients["groupPD"])) # Was expect_identical
 
   # Compare results with mvmeta
@@ -51,7 +51,7 @@ test_that("Multivariate meta-analysis works", {
   # Test whether mrma works when we only have one dimension
   y_uv <- y[,1]
   vcov_uv <- c(m1[1,1], m2[1,1], m3[1,1], m4[1,1], m5[1,1])
-  mrma_fit_uv <- mrma(coefficients = y_uv, vcov = vcov_uv)
+  mrma_fit_uv <- metamisc:::mrma(coefficients = y_uv, vcov = vcov_uv)
 })
@@ -172,7 +172,7 @@ y[1:3] <- NA
 # so the behaviour of metapred is the same as that of glm.
 test_that("factor_as_binary does not affect glm", {
   glm_factor_default <- coef(glm(y ~ x + z, family = binomial))
-  expect_true(is.numeric(y <- factor_as_binary(y)))
+  expect_true(is.numeric(y_temp <- metamisc:::factor_as_binary(y)))
   glm_factor_as_binary <- coef(glm(y ~ x + z, family = binomial))
   expect_identical(glm_factor_default, glm_factor_as_binary)
 })
diff --git a/tests/testthat/test_metapred_2_fit.R b/tests/testthat/test_metapred_2_fit.R
index 4f23684..156f3f5 100644
--- a/tests/testthat/test_metapred_2_fit.R
+++ b/tests/testthat/test_metapred_2_fit.R
@@ -138,4 +138,3 @@ test_that("factor_as_binary / metapred can handle factors", {
 
 })
 
-
diff --git a/tests/testthat/test_metapred_3.R b/tests/testthat/test_metapred_3.R
index 653a89c..bd540af 100644
--- a/tests/testthat/test_metapred_3.R
+++ b/tests/testthat/test_metapred_3.R
@@ -57,6 +57,12 @@ test_that("metapred can handle different perfFUN", {
   expect_is(mp <- metamisc:::metapred(td, strata = "X4", scope = f, formula = f, family = binomial,
                                       perfFUN = "auc" , selFUN = "which.max", meta.method = "FE")
             , "metapred")
+
+  expect_is(mp <- metamisc:::metapred(td, strata = "X4", scope = f, formula = f, family = binomial,
+                                      perfFUN = list("mse", "auc"),
+                                      selFUN = "which.max", meta.method = "FE")
+            , "metapred")
+
 })
 
 test_that("metapred can handle multiple genFUN.", {
@@ -79,6 +85,56 @@ test_that("metapred can handle multiple genFUN.", {
   #           , "metapred")
 })
 
+test_that("metapred can handle multiple genFUN and perfFUN.", {
+  genFUN <- list(abs.mean = "abs.mean", coef.var.mean = "coef.var.mean")
+  perfFUN = list("mse", "auc")
+
+  expect_is(mp <- metamisc:::metapred(data = td,
+                                      strata = "X4",
+                                      scope = f,
+                                      formula = f,
+                                      family = binomial,
+                                      genFUN = genFUN,
+                                      perfFUN = perfFUN,
+                                      meta.method = "FE",
+                                      gen.of.perf = "first") # default
+            , "metapred")
+
+  expect_s3_class(perf(mp), "data.frame")
+  expect_type(mp$FUN$perfFUN[[2]], "closure")
+  expect_length(gen(mp, 0), 2)
+
+  expect_is(mp <- metamisc:::metapred(data = td,
+                                      strata = "X4",
+                                      scope = f,
+                                      formula = f,
+                                      family = binomial,
+                                      genFUN = genFUN,
+                                      perfFUN = perfFUN,
+                                      meta.method = "FE",
+                                      gen.of.perf = "factorial")
+            , "metapred")
+
+  expect_s3_class(perf(mp), "data.frame")
+  expect_type(mp$FUN$perfFUN[[2]], "closure")
+  expect_length(gen(mp, 0), 4)
+
+  expect_is(mp <- metamisc:::metapred(data = td,
+                                      strata = "X4",
+                                      scope = f,
+                                      formula = f,
+                                      family = binomial,
+                                      genFUN = genFUN,
+                                      perfFUN = perfFUN,
+                                      meta.method = "FE",
+                                      gen.of.perf = "respective")
+            , "metapred")
+  expect_length(gen(mp, 0), 2)
+
+  expect_s3_class(perf(mp), "data.frame")
+  expect_type(mp$FUN$perfFUN[[2]], "closure")
+})
+
 test_that("metapred can handle different distributions.", {
   expect_true(is.list(mp <- metapred(data = td, strata = "X4", family = binomial, max.steps = 0, meta.method = "FE")
   )) # binomial
@@ -273,3 +329,4 @@ test_that("metapred estimates models accurately.", {
   expect_equal(as.data.frame(coef(mp.urma.1st.stage)), as.data.frame(t(cff.1st.stage)), check.attributes = FALSE)
 })
 
+
diff --git a/tests/testthat/test_metapred_6_one_stage.R b/tests/testthat/test_metapred_6_one_stage.R
new file mode 100644
index 0000000..5e1abd0
--- /dev/null
+++ b/tests/testthat/test_metapred_6_one_stage.R
@@ -0,0 +1,56 @@
+context("metapred 6. One-stage.")
+
+set.seed(7318)
+n <- 100
+y <- c(rep(0, 2 * n), rep(1, 2 * n))
+x <- rnorm(length(y), y, 1)
+y <- factor(y)
+k <- rep(1:8, 5* n)
+d <- data.frame(y, x, k)
+
+test_that("metapred can estimate one-stage fixed effect models", {
+  skip_on_cran()
+  f <- y ~ x
+  mp_fe <- metapred(d, "k", formula = f, scope = f, family = binomial, estFUN = glm, two.stage = F)
+  expect_is(mp_fe, "metapred")
+})
+
+test_that("calibration of metapred one-stage fixed effect models is ok", {
+  skip_on_cran()
+  f <- y ~ x
+  mp_fe <- metapred(d, "k", formula = f, scope = f, family = binomial, estFUN = glm, two.stage = F,
+                    perfFUN = list("bin.cal.int", "cal.slope"),
+                    genFUN = list("rema"),
+                    gen.of.perf = "factorial")
+  expect_gte(gen(mp_fe, 1), 0)
+  expect_lte(gen(mp_fe, 1), .01)
+
+  expect_gte(gen(mp_fe, 2), 1)
+  expect_lte(gen(mp_fe, 2), 1.1)
+})
+
+test_that("metapred can estimate one-stage random effects models", {
+  skip_on_cran()
+
+  f_ri <- y ~ x + (1|k)
+
+  cal_slope_glmer <- function(p, y, family = binomial, ...) {
+    metamisc:::cal.slope(p, y, estFUN = "glm", family = family)
+  }
+
+  mp_ri <- metapred(d, "k", formula = f_ri, scope = f_ri, family = binomial, estFUN = lme4::glmer, two.stage = F,
+                    perfFUN = list("bin.cal.int", cal_slope_glmer),
+                    genFUN = list("rema"),
+                    gen.of.perf = "factorial")
+  expect_is(mp_ri, "metapred")
+
+  expect_gte(gen(mp_ri, 1), 0)
+  expect_lte(gen(mp_ri, 1), .01)
+
+  expect_gte(gen(mp_ri, 2), 1)
+  expect_lte(gen(mp_ri, 2), 1.1)
+
+  f_re <- y ~ x + (1 + x | k)
+  mp_re <- metapred(d, "k", formula = f_re, scope = f_re, family = binomial, estFUN = lme4::glmer, two.stage = F)
+  expect_is(mp_re, "metapred")
+})
diff --git a/tests/testthat/test_valmeta_0.R b/tests/testthat/test_valmeta_0.R
index bd421fd..164d800 100644
--- a/tests/testthat/test_valmeta_0.R
+++ b/tests/testthat/test_valmeta_0.R
@@ -9,7 +9,9 @@ test_that("Transformation of the c-statistic", {
   logitc1 <- log(EuroSCORE$c.index/(1-EuroSCORE$c.index))
   logitc2 <- logit(EuroSCORE$c.index) #built-in function
   logitc3 <- (ccalc(cstat=c.index, cstat.se=se.c.index, data=EuroSCORE, g="log(cstat/(1-cstat))"))$theta
-  expect_equal(logitc1, logitc2, logitc3)
+  expect_equal(logitc1, logitc2)
+  expect_equal(logitc1, logitc3)
+  expect_equal(logitc2, logitc3)
 
   # Back-transformation
   ilogitc1 <- 1/(1+exp(-logitc1))
diff --git a/tests/testthat/test_valmeta_1.R b/tests/testthat/test_valmeta_1.R
index 31ce9c7..bd7a0c5 100644
--- a/tests/testthat/test_valmeta_1.R
+++ b/tests/testthat/test_valmeta_1.R
@@ -1,7 +1,7 @@
 context("valmeta 1. calculation of total O:E ratio")
 skip_on_cran()
 
-library(lme4)
+# library(lme4)
 data(EuroSCORE)
 
 test_that ("Coversion between CITL and log O:E ratio", {
@@ -114,13 +114,17 @@ test_that("Standard error of log O:E ratio", {
   logoese1 <- sqrt(1/EuroSCORE$n.events - 1/EuroSCORE$n)
   logoese2 <- sqrt((resoe.O.E.N(O=EuroSCORE$n.events, E=EuroSCORE$e.events, N=EuroSCORE$n, correction = 1/2, g="log(OE)"))[,2])
   logoese3 <- (oecalc(O=n.events, E=e.events, N=n, data=EuroSCORE, g="log(OE)"))$theta.se
-  expect_equal(logoese1, logoese2, logoese3)
+  expect_equal(logoese1, logoese2)
+  expect_equal(logoese1, logoese3)
+  expect_equal(logoese2, logoese3)
 
   # Binomial distribution for total number of observed events
   logoese4 <- sqrt(1/EuroSCORE$n.events)
   logoese5 <- sqrt((resoe.O.E(O=EuroSCORE$n.events, E=EuroSCORE$e.events, correction=1/2, g="log(OE)"))[,2])
   logoese6 <- (oecalc(O=EuroSCORE$n.events, E=EuroSCORE$e.events, g="log(OE)"))$theta.se
-  expect_equal(logoese4, logoese5, logoese6)
+  expect_equal(logoese4, logoese5)
+  expect_equal(logoese4, logoese6)
+  expect_equal(logoese5, logoese6)
 
   # Mixture of Poisson and Binomial
   n.total <- EuroSCORE$n
@@ -130,7 +134,9 @@ test_that("Standard error of log O:E ratio", {
   logoese8 <- sqrt((resoe.O.E.N(O=EuroSCORE$n.events, E=EuroSCORE$e.events, N=EuroSCORE$n, correction = 1/2, g="log(OE)"))[,2])
   logoese8[1:10] <- sqrt((resoe.O.E(O=EuroSCORE$n.events, E=EuroSCORE$e.events, correction=1/2, g="log(OE)"))[,2])[1:10]
   logoese9 <- (oecalc(O=EuroSCORE$n.events, E=EuroSCORE$e.events, n=n.total, g="log(OE)"))$theta.se
-  expect_equal(logoese7, logoese8, logoese8)
+  expect_equal(logoese7, logoese8)
+  expect_equal(logoese7, logoese9, tolerance = 1e-2)
+  expect_equal(logoese8, logoese9, tolerance = 1e-2)
 
   # Derive from the 95% confidence interval
   OE <- EuroSCORE$n.events / EuroSCORE$e.events
diff --git a/tests/testthat/test_valmeta_4_oe_freq.R b/tests/testthat/test_valmeta_4_oe_freq.R
index bfbe0a0..37902ed 100644
--- a/tests/testthat/test_valmeta_4_oe_freq.R
+++ b/tests/testthat/test_valmeta_4_oe_freq.R
@@ -152,7 +152,8 @@ test_that("Test storage of results", {
   fit1 <- valmeta(measure = "OE", N=n, O=n.events, E = e.events,
                   slab=Study, data=EuroSCORE,
                   method = "ML",
-                  pars = list(model.oe = "poisson/log"))
+                  pars = list(model.oe = "poisson/log"),
+                  test = "t")
 
   expect_true("data.frame" %in% class(fit1$data))
   expect_true(all(c("theta", "theta.se", "theta.cilb", "theta.ciub") %in% colnames(fit1$data)))
diff --git a/tests/testthat/test_valmeta_6_plots.R b/tests/testthat/test_valmeta_6_plots.R
index 7f1eca2..8804668 100644
--- a/tests/testthat/test_valmeta_6_plots.R
+++ b/tests/testthat/test_valmeta_6_plots.R
@@ -38,11 +38,12 @@ test_that("Class of forest plot", {
 
 test_that("Class of forest plot", {
   fit <- valmeta(measure = "OE", N = n, O = n.events, E = e.events,
-                 slab = Study, data = EuroSCORE,
-                 method = "ML",
-                 pars = list(model.oe = "poisson/log"))
-  plot(fit)
-
+                 slab = Study,
+                 data = EuroSCORE,
+                 method = "ML",
+                 pars = list(model.oe = "poisson/log"),
+                 test = "t")
+  expect_is(plot(fit), "ggplot")
 })
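Usage sketch (not part of the patch): the snippet below illustrates the new gen.of.perf argument together with the extended gen() and perf() extractors, mirroring the test cases added above. The simulated data set, its column names, and the chosen predictors are assumptions made for this example only; the perfFUN/genFUN names ("mse", "auc", "abs.mean", "coef.var.mean") are the ones exercised in the new tests.

# Illustrative sketch only -- simulated data; mirrors the new gen.of.perf tests.
library(metamisc)

set.seed(1234)
n  <- 400
x1 <- rnorm(n)
x2 <- rnorm(n)
k  <- rep(1:4, each = n / 4)                     # four clusters (validation strata)
y  <- rbinom(n, 1, plogis(-1 + x1 + 0.5 * x2))   # binary outcome
d  <- data.frame(y, x1, x2, k)
f  <- y ~ x1 + x2

# Two performance measures and two generalizability measures.
# gen.of.perf = "factorial" applies every genFUN to every perfFUN,
# so gen(mp, 0) returns 2 x 2 = 4 generalizability estimates.
mp <- metapred(d, strata = "k", formula = f, scope = f, family = binomial,
               perfFUN = list("mse", "auc"),
               genFUN = list(abs.mean = "abs.mean", coef.var.mean = "coef.var.mean"),
               gen.of.perf = "factorial",
               meta.method = "FE")

perf(mp)      # performance estimates of the first perfFUN (default)
perf(mp, 0)   # all performance estimates (0 now selects all)
gen(mp)       # first generalizability estimate (default)
gen(mp, 0)    # all generalizability estimates (0 now selects all)

With gen.of.perf = "respective", each genFUN would instead be paired only with the perfFUN at the same position in the list, giving one generalizability estimate per pair.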