diff --git a/articles/Workflow_Continuous_Exposure.html b/articles/Workflow_Continuous_Exposure.html index 31d438da..5ff9454c 100644 --- a/articles/Workflow_Continuous_Exposure.html +++ b/articles/Workflow_Continuous_Exposure.html @@ -287,7 +287,7 @@

St #> HomeOwnd + KFASTScr + peri_health + PmAge2 + PmBlac2 + PmEd2 + #> PmMrSt2 + RHealth + RMomAgeU + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 -#> <environment: 0x564d86fa7558> +#> <environment: 0x56519c546bc8> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 15 is: #> ESETA1.15 ~ B18Raw.6 + BioDadInHH2 + caregiv_health + CORTB.6 + @@ -296,7 +296,7 @@

St #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.6 + RHealth + #> RMomAgeU + SAAmylase.6 + SmokTotl + state + SurpPreg + SWghtLB + #> TcBlac2 + WndNbrhood.6 -#> <environment: 0x564d86fa7558> +#> <environment: 0x56519c546bc8> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 24 is: #> ESETA1.24 ~ B18Raw.15 + B18Raw.6 + BioDadInHH2 + caregiv_health + @@ -306,7 +306,7 @@

St #> peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.15 + #> RHasSO.6 + RHealth + RMomAgeU + SAAmylase.15 + SAAmylase.6 + #> SmokTotl + state + SurpPreg + SWghtLB + TcBlac2 + WndNbrhood.6 -#> <environment: 0x564d86fa7558> +#> <environment: 0x56519c546bc8> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 35 is: #> ESETA1.35 ~ B18Raw.15 + B18Raw.24 + B18Raw.6 + BioDadInHH2 + @@ -319,7 +319,7 @@

St #> RHasSO.15 + RHasSO.24 + RHasSO.6 + RHealth + RMomAgeU + SAAmylase.15 + #> SAAmylase.24 + SAAmylase.6 + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 + WndNbrhood.24 + WndNbrhood.6 -#> <environment: 0x564d86fa7558> +#> <environment: 0x56519c546bc8> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 58 is: #> ESETA1.58 ~ B18Raw.15 + B18Raw.24 + B18Raw.6 + BioDadInHH2 + @@ -334,7 +334,7 @@

St #> RHealth + RMomAgeU + SAAmylase.15 + SAAmylase.24 + SAAmylase.6 + #> SmokTotl + state + StrDif_Tot.35 + SurpPreg + SWghtLB + TcBlac2 + #> WndNbrhood.24 + WndNbrhood.35 + WndNbrhood.6 -#> <environment: 0x564d86fa7558> +#> <environment: 0x56519c546bc8>

As shown above, createFormulas() creates a balancing formula for each exposure time point. Each full formula contains all time-invariant confounders as well as all lagged time-varying confounders. @@ -557,7 +557,7 @@
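For orientation, below is a minimal sketch of a call that could produce full formulas like those printed above. The exposure name, outcome name, and time points are taken from this vignette; the type argument and the commented-out confounder arguments are assumptions and may differ from the actual createFormulas() signature (see its reference page).

full_formulas <- createFormulas(exposure = "ESETA1",
                                exposure_time_pts = c(6, 15, 24, 35, 58),
                                outcome = "StrDif_Tot.58",
                                type = "full",   # assumed; "short" would restrict time-varying confounders to t-1
                                # ti_confounders = ..., tv_confounders = ...,  # confounder lists not shown in this hunk
                                save.out = FALSE)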

2a. Create Simplified Balancing #> HomeOwnd + KFASTScr + peri_health + PmAge2 + PmBlac2 + PmEd2 + #> PmMrSt2 + RHealth + RMomAgeU + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 -#> <environment: 0x564d84a86128> +#> <environment: 0x56519a02ae48> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 15 is: #> ESETA1.15 ~ B18Raw.6 + BioDadInHH2 + caregiv_health + CORTB.6 + @@ -566,7 +566,7 @@

2a. Create Simplified Balancing #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.6 + RHealth + #> RMomAgeU + SAAmylase.6 + SmokTotl + state + SurpPreg + SWghtLB + #> TcBlac2 + WndNbrhood.6 -#> <environment: 0x564d84a86128> +#> <environment: 0x56519a02ae48> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 24 is: #> ESETA1.24 ~ B18Raw.15 + BioDadInHH2 + caregiv_health + CORTB.15 + @@ -575,7 +575,7 @@

2a. Create Simplified Balancing #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.15 + RHealth + #> RMomAgeU + SAAmylase.15 + SmokTotl + state + SurpPreg + SWghtLB + #> TcBlac2 -#> <environment: 0x564d84a86128> +#> <environment: 0x56519a02ae48> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 35 is: #> ESETA1.35 ~ B18Raw.24 + BioDadInHH2 + caregiv_health + CORTB.24 + @@ -584,7 +584,7 @@

2a. Create Simplified Balancing #> LESMnPos.24 + peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + #> RHasSO.24 + RHealth + RMomAgeU + SAAmylase.24 + SmokTotl + #> state + SurpPreg + SWghtLB + TcBlac2 + WndNbrhood.24 -#> <environment: 0x564d84a86128> +#> <environment: 0x56519a02ae48> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 58 is: #> ESETA1.58 ~ BioDadInHH2 + caregiv_health + DrnkFreq + EARS_TJo.35 + @@ -593,7 +593,7 @@

2a. Create Simplified Balancing #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.35 + RHealth + #> RMomAgeU + SmokTotl + state + StrDif_Tot.35 + SurpPreg + #> SWghtLB + TcBlac2 + WndNbrhood.35 -#> <environment: 0x564d84a86128> +#> <environment: 0x56519a02ae48>

Above, we inspect the shortened balancing formula at each exposure time point. These formulas are considerably shorter than the full formulas. For instance, at the 58-month exposure time point, the formula @@ -702,7 +702,7 @@

2b. C weights.bart <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional -#> For the bart weighting method, the median weight value is 0.71 (SD = 1.36; range = 0.03-25). +#> For the bart weighting method, the median weight value is 0.71 (SD = 1.38; range = 0.03-24).


@@ -886,39 +886,37 @@ 

2c. Assess home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas.

-
#> As shown below, 11 out of 131 (8%) covariates across time points, corresponding to 4 out of 32 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.11 (range= -0.13-0.23):
+
#> As shown below, 9 out of 131 (7%) covariates across time points, corresponding to 3 out of 32 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.11 (range= -0.11-0.22):
 #> 
 #> Table: Imbalanced covariates using bart and short formulas
 #> 
 #> | exp_time| balanced_n| imbalanced_n|  n|
 #> |--------:|----------:|------------:|--:|
-#> |        6|         17|            1| 18|
+#> |        6|         18|            0| 18|
 #> |       15|         25|            3| 28|
-#> |       24|         24|            3| 27|
+#> |       24|         25|            2| 27|
 #> |       35|         28|            2| 30|
 #> |       58|         26|            2| 28|
 #> 
 #> 
 #> USER ALERT: For exposure ESETA1 using the short formulas and bart :
-#> The median absolute value relation between exposure and confounder is 0.02 (range = -0.13 -0.23). 
-#> As shown below, the following 11 covariates across time points out of 131 total (8.4%) spanning 4 domains out of 33 (12.12%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.11 (range=-0.13-0.23) :
+#> The median absolute value relation between exposure and confounder is 0.02 (range = -0.11 -0.22). 
+#> As shown below, the following 9 covariates across time points out of 131 total (6.87%) spanning 3 domains out of 33 (9.09%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.11 (range=-0.11-0.22) :
 #> 
 #> 
 #> Table: Imbalanced Covariates
 #> 
 #> |    |exposure | exp_time| covar_time|covariate     |    avg_bal| bal_thresh| balanced|
 #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:|
-#> |6   |ESETA1   |        6|          0|PmEd2         | -0.0595407|       0.05|        0|
-#> |19  |ESETA1   |       15|          6|B18Raw.6      |  0.1179861|       0.10|        0|
-#> |22  |ESETA1   |       15|          6|ESETA1.6      |  0.2253647|       0.10|        0|
-#> |26  |ESETA1   |       15|          6|InRatioCor.6  | -0.1001925|       0.05|        0|
-#> |47  |ESETA1   |       24|         15|B18Raw.15     |  0.1113215|       0.10|        0|
-#> |50  |ESETA1   |       24|         15|ESETA1.15     |  0.1913959|       0.10|        0|
-#> |54  |ESETA1   |       24|         15|InRatioCor.15 | -0.1310088|       0.05|        0|
-#> |78  |ESETA1   |       35|         24|ESETA1.24     |  0.1236928|       0.10|        0|
-#> |82  |ESETA1   |       35|         24|InRatioCor.24 | -0.0681214|       0.05|        0|
-#> |106 |ESETA1   |       58|         35|ESETA1.35     |  0.1106226|       0.10|        0|
-#> |110 |ESETA1   |       58|         35|InRatioCor.35 | -0.0514414|       0.05|        0|
+#> |19 |ESETA1 | 15| 6|B18Raw.6 | 0.1255524| 0.10| 0| +#> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.2208960| 0.10| 0| +#> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0968499| 0.05| 0| +#> |50 |ESETA1 | 24| 15|ESETA1.15 | 0.1825734| 0.10| 0| +#> |54 |ESETA1 | 24| 15|InRatioCor.15 | -0.1122523| 0.05| 0| +#> |78 |ESETA1 | 35| 24|ESETA1.24 | 0.1263206| 0.10| 0| +#> |82 |ESETA1 | 35| 24|InRatioCor.24 | -0.0684189| 0.05| 0| +#> |106 |ESETA1 | 58| 35|ESETA1.35 | 0.1078711| 0.10| 0| +#> |110 |ESETA1 | 58| 35|InRatioCor.35 | -0.0513619| 0.05| 0|

For the BART weighting method, the median absolute value correlation between exposure and confounder is 0.02, with 9 confounders remaining imbalanced.
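A sketch of how these remaining imbalanced confounders could be carried into the formula-updating step that follows (Step 3b): the type = "update" value and the bal_stats argument name are assumptions, and balance_stats stands in for the object returned by the balance-assessment call above.

updated_formulas <- createFormulas(exposure = "ESETA1",
                                   exposure_time_pts = c(6, 15, 24, 35, 58),
                                   outcome = "StrDif_Tot.58",
                                   type = "update",            # assumed value for adding imbalanced lagged confounders
                                   bal_stats = balance_stats,  # assumed argument; placeholder for the balance results above
                                   save.out = FALSE)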

@@ -1119,7 +1117,7 @@

Step 3b. Update simplified formulas #> HomeOwnd + KFASTScr + peri_health + PmAge2 + PmBlac2 + PmEd2 + #> PmMrSt2 + RHealth + RMomAgeU + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 -#> <environment: 0x564d83f8b2c0> +#> <environment: 0x56519cdee710> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For ESETA1 at exposure time point 15 no time-varying confounders at additional lags were added. #> @@ -1130,7 +1128,7 @@

Step 3b. Update simplified formulas #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.6 + RHealth + #> RMomAgeU + SAAmylase.6 + SmokTotl + state + SurpPreg + SWghtLB + #> TcBlac2 + WndNbrhood.6 -#> <environment: 0x564d83f8b2c0> +#> <environment: 0x56519cdee710> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For ESETA1 at exposure time point 24 the following covariate(s) will be added to the short balancing formula: #> ESETA1.6, InRatioCor.6 @@ -1142,7 +1140,7 @@

Step 3b. Update simplified formulas #> MDI.15 + peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + #> RHasSO.15 + RHealth + RMomAgeU + SAAmylase.15 + SmokTotl + #> state + SurpPreg + SWghtLB + TcBlac2 -#> <environment: 0x564d83f8b2c0> +#> <environment: 0x56519cdee710> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For ESETA1 at exposure time point 35 the following covariate(s) will be added to the short balancing formula: #> ESETA1.15, ESETA1.6, IBRAttn.6, InRatioCor.6 @@ -1155,7 +1153,7 @@

Step 3b. Update simplified formulas #> peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.24 + #> RHealth + RMomAgeU + SAAmylase.24 + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 + WndNbrhood.24 -#> <environment: 0x564d83f8b2c0> +#> <environment: 0x56519cdee710> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For ESETA1 at exposure time point 58 the following covariate(s) will be added to the short balancing formula: #> ESETA1.15, InRatioCor.15 @@ -1167,7 +1165,7 @@

Step 3b. Update simplified formulas #> LESMnPos.35 + peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + #> RHasSO.35 + RHealth + RMomAgeU + SmokTotl + state + StrDif_Tot.35 + #> SurpPreg + SWghtLB + TcBlac2 + WndNbrhood.35 -#> <environment: 0x564d83f8b2c0> +#> <environment: 0x56519cdee710>

As shown above, several imbalanced confounders at lags greater than t-1 were added to the short formulas at exposure time points 35-58. For instance, at the 35-month time point, economic strain at 6 diff --git a/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-17-1.png b/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-17-1.png index 70f3dc89..bd0a9d12 100644 Binary files a/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-17-1.png and b/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-17-1.png differ diff --git a/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-1.png b/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-1.png index 5eb0bc56..9f7ae3d1 100644 Binary files a/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-1.png and b/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-1.png differ diff --git a/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-2.png b/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-2.png index f23131e5..1bd007e3 100644 Binary files a/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-2.png and b/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-2.png differ diff --git a/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-3.png b/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-3.png index 6f1e7ba3..4b5a8e85 100644 Binary files a/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-3.png and b/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-3.png differ diff --git a/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-4.png b/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-4.png index 7a186244..f89d75d0 100644 Binary files a/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-4.png and b/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-4.png differ diff --git a/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-5.png b/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-5.png index e18a3e08..c943793d 100644 Binary files a/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-5.png and b/articles/Workflow_Continuous_Exposure_files/figure-html/unnamed-chunk-22-5.png differ diff --git a/index.html b/index.html index 600f7421..44426748 100644 --- a/index.html +++ b/index.html @@ -70,7 +70,7 @@

-


Scientists who study humans are fundamentally interested in questions of causation, yet conceptual, methodological, and practical barriers have historically prevented their use of methods for causal inference developed in other fields. More specifically, scientists, clinicians, educators, and policymakers alike are often interested in causal processes involving questions about when (timing) and to what extent (dose) different factors influence human functioning and development, in order to inform our scientific understanding and improve people’s lives.

Marginal structural models (MSMs; Robins et al., 2000), originating in epidemiology and public health, represent one under-utilized tool for improving causal inference with longitudinal observational data, given certain assumptions. In brief, MSMs leverage inverse-probability-of-treatment weights (IPTW) and the potential outcomes framework. MSMs first focus on the problem of confounding, using IPTW to attenuate associations between measured confounders and an exposure (e.g., experience, characteristic, event – from biology to the broader environment) over time. A weighted model relating a time-varying exposure to a future outcome can then be fitted. Finally, the model-predicted effects of different exposure histories, which vary in dose and timing, can be evaluated and compared as counterfactuals to reveal putative causal effects.


devMSMs is an R package accompanying our tutorial paper, Investigating Causal Questions in Human Development using Marginal Structural Models: A Tutorial Introduction to the devMSMs Package in R (insert preprint link here), for implementing MSMs with longitudinal data to answer causal questions about the dose and timing effects of a given exposure on a future outcome.

+


Scientists who study humans are fundamentally interested in questions of causation, yet conceptual, methodological, and practical barriers have historically prevented their use of methods for causal inference developed in other fields. More specifically, scientists, clinicians, educators, and policymakers alike are often interested in causal processes involving questions about when (timing) and to what extent (dose) different factors influence human functioning and development, in order to inform our scientific understanding and improve people’s lives.

Marginal structural models (MSMs; Robins et al., 2000), originating in epidemiology and public health, represent one under-utilized tool for improving causal inference with longitudinal observational data, given certain assumptions. In brief, MSMs leverage inverse-probability-of-treatment weights (IPTW) and the potential outcomes framework. MSMs first focus on the problem of confounding, using IPTW to attenuate associations between measured confounders and an exposure (e.g., experience, characteristic, event – from biology to the broader environment) over time. A weighted model relating a time-varying exposure to a future outcome can then be fitted. Finally, the model-predicted effects of different exposure histories, which vary in dose and timing, can be evaluated and compared as counterfactuals to reveal putative causal effects.

devMSMs is an R package accompanying our tutorial paper, Investigating Causal Questions in Human Development using Marginal Structural Models: A Tutorial Introduction to the devMSMs Package in R (insert preprint link here), for implementing MSMs with longitudinal data to answer causal questions about the dose and timing effects of a given exposure on a future outcome.
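To make that workflow concrete, here is a compact sketch using the function names that appear elsewhere on this site, with argument sets abbreviated to those visible in the reference examples; the balance-assessment and model-fitting steps are indicated only as comments because their exact signatures are not shown in this diff.

library(devMSMs)

# 1. Create balancing formulas at each exposure time point
f <- createFormulas(exposure = "A", exposure_time_pts = c(1, 2, 3),
                    outcome = "D.3", save.out = FALSE)

# 2. Create IPTW balancing weights from those formulas
w <- createWeights(data = test, exposure = "A", outcome = "D.3",
                   formulas = f, save.out = FALSE)

# 3. Assess confounder balance and re-specify formulas/weights as needed
#    (see the calcBalStats reference page for balance statistics)

# 4. Fit the weighted marginal outcome model and compare exposure histories
#    (see the compareHistories reference page)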

Core features of this package include:

diff --git a/reference/calcBalStats-1.png b/reference/calcBalStats-1.png index 17269756..4f1dc67f 100644 Binary files a/reference/calcBalStats-1.png and b/reference/calcBalStats-1.png differ diff --git a/reference/calcBalStats-10.png b/reference/calcBalStats-10.png index 3dd51753..bc52502d 100644 Binary files a/reference/calcBalStats-10.png and b/reference/calcBalStats-10.png differ diff --git a/reference/calcBalStats-2.png b/reference/calcBalStats-2.png index 76d5f05e..c7678b89 100644 Binary files a/reference/calcBalStats-2.png and b/reference/calcBalStats-2.png differ diff --git a/reference/calcBalStats-3.png b/reference/calcBalStats-3.png index 0c386703..2cf4131d 100644 Binary files a/reference/calcBalStats-3.png and b/reference/calcBalStats-3.png differ diff --git a/reference/calcBalStats-4.png b/reference/calcBalStats-4.png index 3eb9c99c..e4ecfcd6 100644 Binary files a/reference/calcBalStats-4.png and b/reference/calcBalStats-4.png differ diff --git a/reference/calcBalStats-5.png b/reference/calcBalStats-5.png index 0b99a2a4..5e030531 100644 Binary files a/reference/calcBalStats-5.png and b/reference/calcBalStats-5.png differ diff --git a/reference/calcBalStats-6.png b/reference/calcBalStats-6.png index e1af9ad9..fa2d3f70 100644 Binary files a/reference/calcBalStats-6.png and b/reference/calcBalStats-6.png differ diff --git a/reference/calcBalStats-7.png b/reference/calcBalStats-7.png index ce59a89e..92592009 100644 Binary files a/reference/calcBalStats-7.png and b/reference/calcBalStats-7.png differ diff --git a/reference/calcBalStats-8.png b/reference/calcBalStats-8.png index 2bb0de66..068037ab 100644 Binary files a/reference/calcBalStats-8.png and b/reference/calcBalStats-8.png differ diff --git a/reference/calcBalStats-9.png b/reference/calcBalStats-9.png index e3cfde52..17a3ea31 100644 Binary files a/reference/calcBalStats-9.png and b/reference/calcBalStats-9.png differ diff --git a/reference/calcBalStats.html b/reference/calcBalStats.html index bc70e652..b9148bd9 100644 --- a/reference/calcBalStats.html +++ b/reference/calcBalStats.html @@ -172,17 +172,17 @@

Examples#> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55dddb81f160> +#> <environment: 0x55b45556ac40> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C -#> <environment: 0x55dddb81f160> +#> <environment: 0x55b45556ac40> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C -#> <environment: 0x55dddb81f160> +#> <environment: 0x55b45556ac40> #> w <- createWeights(data = test, @@ -190,7 +190,7 @@

Examples outcome = "D.3", formulas = f, save.out = FALSE) -#> For the cbps weighting method, the median weight value is 0.99 (SD = 1.44; range = 0.23-9). +#> For the cbps weighting method, the median weight value is 1.09 (SD = 0.7; range = 0.11-4). #> @@ -204,14 +204,14 @@

Examples -#> As shown below, 4 out of 9 (44%) covariates across time points, corresponding to 3 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.23 (range= -0.36-0.2): +#> As shown below, 3 out of 9 (33%) covariates across time points, corresponding to 3 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.16 (range= 0.16-0.27): #> #> Table: Imbalanced covariates using no weights and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| -#> | 2| 2| 1| 3| +#> | 2| 3| 0| 3| #> | 3| 2| 3| 5| #> #> @@ -226,14 +226,14 @@

Examples -#> As shown below, 4 out of 9 (44%) covariates across time points, corresponding to 3 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.23 (range= -0.36-0.2): +#> As shown below, 3 out of 9 (33%) covariates across time points, corresponding to 3 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.16 (range= 0.16-0.27): #> #> Table: Imbalanced covariates using no weights and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| -#> | 2| 2| 1| 3| +#> | 2| 3| 0| 3| #> | 3| 2| 3| 5| #> #> @@ -248,15 +248,15 @@

Examples -#> As shown below, 2 out of 9 (22%) covariates across time points, corresponding to 2 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.12 (range= 0.11-0.13): +#> As shown below, 2 out of 9 (22%) covariates across time points, corresponding to 2 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.12 (range= -0.11-0.13): #> #> Table: Imbalanced covariates using cbps and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| -#> | 2| 1| 2| 3| -#> | 3| 5| 0| 5| +#> | 2| 2| 1| 3| +#> | 3| 4| 1| 5| #> #> diff --git a/reference/compareHistories-1.png b/reference/compareHistories-1.png index be1875d0..2724d9e2 100644 Binary files a/reference/compareHistories-1.png and b/reference/compareHistories-1.png differ diff --git a/reference/compareHistories-2.png b/reference/compareHistories-2.png index f7b594fa..73384711 100644 Binary files a/reference/compareHistories-2.png and b/reference/compareHistories-2.png differ diff --git a/reference/compareHistories-3.png b/reference/compareHistories-3.png index 1579499c..aafc96fd 100644 Binary files a/reference/compareHistories-3.png and b/reference/compareHistories-3.png differ diff --git a/reference/compareHistories-4.png b/reference/compareHistories-4.png index 683eba57..02725b86 100644 Binary files a/reference/compareHistories-4.png and b/reference/compareHistories-4.png differ diff --git a/reference/compareHistories-5.png b/reference/compareHistories-5.png index 24fce0a2..707512ac 100644 Binary files a/reference/compareHistories-5.png and b/reference/compareHistories-5.png differ diff --git a/reference/compareHistories-6.png b/reference/compareHistories-6.png index 00cda86d..3dff9e5c 100644 Binary files a/reference/compareHistories-6.png and b/reference/compareHistories-6.png differ diff --git a/reference/compareHistories-7.png b/reference/compareHistories-7.png index a0e6b9ac..3b23e117 100644 Binary files a/reference/compareHistories-7.png and b/reference/compareHistories-7.png differ diff --git a/reference/compareHistories.html b/reference/compareHistories.html index 79751ce3..2cdd8d5d 100644 --- a/reference/compareHistories.html +++ b/reference/compareHistories.html @@ -196,17 +196,17 @@

Examples#> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55ddd6b56540> +#> <environment: 0x55b453c90d08> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C -#> <environment: 0x55ddd6b56540> +#> <environment: 0x55b453c90d08> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C -#> <environment: 0x55ddd6b56540> +#> <environment: 0x55b453c90d08> #> test <- data.frame(ID = 1:50, @@ -225,7 +225,7 @@

Examples outcome = "D.3", formulas = f, save.out = FALSE) -#> For the cbps weighting method, the median weight value is 1.04 (SD = 0.45; range = 0.25-3). +#> For the cbps weighting method, the median weight value is 1.12 (SD = 0.84; range = 0.23-5). #> @@ -242,8 +242,8 @@

Examples#> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) -#> Working 2logLR = 1.664444 p= 0.64366 -#> (scale factors: 1.2 0.99 0.79 ); denominator df= 46 +#> Working 2logLR = 0.9440798 p= 0.7907 +#> (scale factors: 1.5 1.1 0.43 ); denominator df= 46 #> #> The marginal model, m0, is summarized below: @@ -263,61 +263,61 @@

Examples#> |:-------|--:| #> |h-h-h | 6| #> |h-h-l | 5| -#> |h-l-h | 6| -#> |h-l-l | 8| -#> |l-h-h | 8| -#> |l-h-l | 6| -#> |l-l-h | 5| -#> |l-l-l | 6| +#> |h-l-h | 7| +#> |h-l-l | 7| +#> |l-h-h | 6| +#> |l-h-l | 8| +#> |l-l-h | 6| +#> |l-l-l | 5| #> #> #> Below are the average predictions by user-specified history: -#> | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| -#> |-------:|------:|-----:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| -#> | -0.0851| 0.1197| 0.205| -0.0529| 0.1168| -0.4526| 0.6508| 0.6197| -0.2818| 0.1761|l-l-l | 0| -#> | -0.0851| 0.1197| 0.207| -0.0528| 0.1168| -0.4518| 0.6514| 0.6184| -0.2817| 0.1762|l-l-h | 1| -#> | -0.0851| 0.1217| 0.205| -0.0531| 0.1169| -0.4548| 0.6493| 0.6231| -0.2822| 0.1759|l-h-l | 1| -#> | -0.0851| 0.1217| 0.207| -0.0531| 0.1169| -0.4540| 0.6498| 0.6218| -0.2821| 0.1760|l-h-h | 2| -#> | -0.0831| 0.1197| 0.205| -0.0530| 0.1168| -0.4538| 0.6500| 0.6215| -0.2819| 0.1759|h-l-l | 1| -#> | -0.0831| 0.1197| 0.207| -0.0529| 0.1168| -0.4530| 0.6506| 0.6202| -0.2818| 0.1760|h-l-h | 2| -#> | -0.0831| 0.1217| 0.205| -0.0533| 0.1169| -0.4559| 0.6485| 0.6249| -0.2823| 0.1757|h-h-l | 2| -#> | -0.0831| 0.1217| 0.207| -0.0532| 0.1168| -0.4551| 0.6490| 0.6237| -0.2822| 0.1758|h-h-h | 3| +#> | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| +#> |------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| +#> | 0.1484| -0.2923| -0.0858| -0.2479| 0.1614| -1.5360| 0.1245| 3.0054| -0.5642| 0.0684|l-l-l | 0| +#> | 0.1484| -0.2923| -0.0838| -0.2480| 0.1613| -1.5373| 0.1242| 3.0090| -0.5642| 0.0682|l-l-h | 1| +#> | 0.1484| -0.2903| -0.0858| -0.2476| 0.1614| -1.5337| 0.1251| 2.9988| -0.5640| 0.0688|l-h-l | 1| +#> | 0.1484| -0.2903| -0.0838| -0.2477| 0.1614| -1.5349| 0.1248| 3.0023| -0.5640| 0.0686|l-h-h | 2| +#> | 0.1504| -0.2923| -0.0858| -0.2480| 0.1614| -1.5366| 0.1244| 3.0069| -0.5644| 0.0683|h-l-l | 1| +#> | 0.1504| -0.2923| -0.0838| -0.2481| 0.1614| -1.5378| 0.1241| 3.0105| -0.5644| 0.0681|h-l-h | 2| +#> | 0.1504| -0.2903| -0.0858| -0.2477| 0.1615| -1.5342| 0.1250| 3.0003| -0.5642| 0.0687|h-h-l | 2| +#> | 0.1504| -0.2903| -0.0838| -0.2478| 0.1614| -1.5355| 0.1247| 3.0039| -0.5642| 0.0685|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. 
#> #> #> USER ALERT: please inspect the following comparisons: -#> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| -#> |:---------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| -#> |(-0.0851, 0.1197, 0.205) - (-0.0851, 0.1197, 0.207) | 0| 0| -0.37| 0.72| 0.48| 0| 0|l-l-l vs l-l-h |0 vs 1 | 0.80| -#> |(-0.0851, 0.1197, 0.205) - (-0.0851, 0.1217, 0.205) | 0| 0| 1.03| 0.30| 1.72| 0| 0|l-l-l vs l-h-l |0 vs 1 | 0.80| -#> |(-0.0851, 0.1197, 0.205) - (-0.0851, 0.1217, 0.207) | 0| 0| 0.55| 0.58| 0.77| 0| 0|l-l-l vs l-h-h |0 vs 2 | 0.80| -#> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1197, 0.205) | 0| 0| 0.57| 0.57| 0.81| 0| 0|l-l-l vs h-l-l |0 vs 1 | 0.80| -#> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1197, 0.207) | 0| 0| 0.08| 0.94| 0.09| 0| 0|l-l-l vs h-l-h |0 vs 2 | 0.94| -#> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1217, 0.205) | 0| 0| 1.29| 0.20| 2.35| 0| 0|l-l-l vs h-h-l |0 vs 2 | 0.80| -#> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| 0.70| 0.48| 1.05| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.80| -#> |(-0.0851, 0.1197, 0.207) - (-0.0851, 0.1217, 0.205) | 0| 0| 0.90| 0.37| 1.45| 0| 0|l-l-h vs l-h-l |1 vs 1 | 0.80| -#> |(-0.0851, 0.1197, 0.207) - (-0.0851, 0.1217, 0.207) | 0| 0| 1.03| 0.30| 1.72| 0| 0|l-l-h vs l-h-h |1 vs 2 | 0.80| -#> |(-0.0851, 0.1197, 0.207) - (-0.0831, 0.1197, 0.205) | 0| 0| 0.90| 0.37| 1.44| 0| 0|l-l-h vs h-l-l |1 vs 1 | 0.80| -#> |(-0.0851, 0.1197, 0.207) - (-0.0831, 0.1197, 0.207) | 0| 0| 0.57| 0.57| 0.81| 0| 0|l-l-h vs h-l-h |1 vs 2 | 0.80| -#> |(-0.0851, 0.1197, 0.207) - (-0.0831, 0.1217, 0.205) | 0| 0| 1.34| 0.18| 2.48| 0| 0|l-l-h vs h-h-l |1 vs 2 | 0.80| -#> |(-0.0851, 0.1197, 0.207) - (-0.0831, 0.1217, 0.207) | 0| 0| 1.29| 0.20| 2.35| 0| 0|l-l-h vs h-h-h |1 vs 3 | 0.80| -#> |(-0.0851, 0.1217, 0.205) - (-0.0851, 0.1217, 0.207) | 0| 0| -0.37| 0.72| 0.48| 0| 0|l-h-l vs l-h-h |1 vs 2 | 0.80| -#> |(-0.0851, 0.1217, 0.205) - (-0.0831, 0.1197, 0.205) | 0| 0| -0.39| 0.69| 0.53| 0| 0|l-h-l vs h-l-l |1 vs 1 | 0.80| -#> |(-0.0851, 0.1217, 0.205) - (-0.0831, 0.1197, 0.207) | 0| 0| -0.45| 0.65| 0.61| 0| 0|l-h-l vs h-l-h |1 vs 2 | 0.80| -#> |(-0.0851, 0.1217, 0.205) - (-0.0831, 0.1217, 0.205) | 0| 0| 0.57| 0.57| 0.81| 0| 0|l-h-l vs h-h-l |1 vs 2 | 0.80| -#> |(-0.0851, 0.1217, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| 0.08| 0.94| 0.09| 0| 0|l-h-l vs h-h-h |1 vs 3 | 0.94| -#> |(-0.0851, 0.1217, 0.207) - (-0.0831, 0.1197, 0.205) | 0| 0| -0.15| 0.88| 0.19| 0| 0|l-h-h vs h-l-l |2 vs 1 | 0.94| -#> |(-0.0851, 0.1217, 0.207) - (-0.0831, 0.1197, 0.207) | 0| 0| -0.39| 0.69| 0.53| 0| 0|l-h-h vs h-l-h |2 vs 2 | 0.80| -#> |(-0.0851, 0.1217, 0.207) - (-0.0831, 0.1217, 0.205) | 0| 0| 0.90| 0.37| 1.44| 0| 0|l-h-h vs h-h-l |2 vs 2 | 0.80| -#> |(-0.0851, 0.1217, 0.207) - (-0.0831, 0.1217, 0.207) | 0| 0| 0.57| 0.57| 0.81| 0| 0|l-h-h vs h-h-h |2 vs 3 | 0.80| -#> |(-0.0831, 0.1197, 0.205) - (-0.0831, 0.1197, 0.207) | 0| 0| -0.37| 0.72| 0.48| 0| 0|h-l-l vs h-l-h |1 vs 2 | 0.80| -#> |(-0.0831, 0.1197, 0.205) - (-0.0831, 0.1217, 0.205) | 0| 0| 1.03| 0.30| 1.72| 0| 0|h-l-l vs h-h-l |1 vs 2 | 0.80| -#> |(-0.0831, 0.1197, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| 0.55| 0.58| 0.77| 0| 0|h-l-l vs h-h-h |1 vs 3 | 0.80| -#> |(-0.0831, 0.1197, 0.207) - (-0.0831, 0.1217, 0.205) | 0| 0| 0.90| 0.37| 1.45| 0| 0|h-l-h vs h-h-l |2 vs 2 | 0.80| -#> |(-0.0831, 0.1197, 0.207) - (-0.0831, 0.1217, 0.207) | 0| 0| 1.03| 0.30| 1.72| 0| 0|h-l-h vs h-h-h |2 vs 3 
| 0.80| -#> |(-0.0831, 0.1217, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| -0.37| 0.72| 0.48| 0| 0|h-h-l vs h-h-h |2 vs 3 | 0.80| +#> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| +#> |:-------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| +#> |(0.1484, -0.2923, -0.0858) - (0.1484, -0.2923, -0.0838) | 0| 0| 0.23| 0.81| 0.30| 0| 0|l-l-l vs l-l-h |0 vs 1 | 0.91| +#> |(0.1484, -0.2923, -0.0858) - (0.1484, -0.2903, -0.0858) | 0| 0| -1.05| 0.29| 1.77| 0| 0|l-l-l vs l-h-l |0 vs 1 | 0.91| +#> |(0.1484, -0.2923, -0.0858) - (0.1484, -0.2903, -0.0838) | 0| 0| -0.39| 0.70| 0.52| 0| 0|l-l-l vs l-h-h |0 vs 2 | 0.91| +#> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2923, -0.0858) | 0| 0| 0.37| 0.71| 0.50| 0| 0|l-l-l vs h-l-l |0 vs 1 | 0.91| +#> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2923, -0.0838) | 0| 0| 0.40| 0.69| 0.53| 0| 0|l-l-l vs h-l-h |0 vs 2 | 0.91| +#> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0858) | 0| 0| -0.30| 0.76| 0.39| 0| 0|l-l-l vs h-h-l |0 vs 2 | 0.91| +#> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| -0.09| 0.93| 0.11| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.94| +#> |(0.1484, -0.2923, -0.0838) - (0.1484, -0.2903, -0.0858) | 0| 0| -0.82| 0.41| 1.27| 0| 0|l-l-h vs l-h-l |1 vs 1 | 0.91| +#> |(0.1484, -0.2923, -0.0838) - (0.1484, -0.2903, -0.0838) | 0| 0| -1.05| 0.29| 1.77| 0| 0|l-l-h vs l-h-h |1 vs 2 | 0.91| +#> |(0.1484, -0.2923, -0.0838) - (0.1504, -0.2923, -0.0858) | 0| 0| 0.08| 0.94| 0.09| 0| 0|l-l-h vs h-l-l |1 vs 1 | 0.94| +#> |(0.1484, -0.2923, -0.0838) - (0.1504, -0.2923, -0.0838) | 0| 0| 0.37| 0.71| 0.50| 0| 0|l-l-h vs h-l-h |1 vs 2 | 0.91| +#> |(0.1484, -0.2923, -0.0838) - (0.1504, -0.2903, -0.0858) | 0| 0| -0.41| 0.68| 0.55| 0| 0|l-l-h vs h-h-l |1 vs 2 | 0.91| +#> |(0.1484, -0.2923, -0.0838) - (0.1504, -0.2903, -0.0838) | 0| 0| -0.30| 0.76| 0.39| 0| 0|l-l-h vs h-h-h |1 vs 3 | 0.91| +#> |(0.1484, -0.2903, -0.0858) - (0.1484, -0.2903, -0.0838) | 0| 0| 0.23| 0.81| 0.30| 0| 0|l-h-l vs l-h-h |1 vs 2 | 0.91| +#> |(0.1484, -0.2903, -0.0858) - (0.1504, -0.2923, -0.0858) | 0| 0| 1.16| 0.24| 2.03| 0| 0|l-h-l vs h-l-l |1 vs 1 | 0.91| +#> |(0.1484, -0.2903, -0.0858) - (0.1504, -0.2923, -0.0838) | 0| 0| 0.92| 0.36| 1.48| 0| 0|l-h-l vs h-l-h |1 vs 2 | 0.91| +#> |(0.1484, -0.2903, -0.0858) - (0.1504, -0.2903, -0.0858) | 0| 0| 0.37| 0.71| 0.50| 0| 0|l-h-l vs h-h-l |1 vs 2 | 0.91| +#> |(0.1484, -0.2903, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.40| 0.69| 0.53| 0| 0|l-h-l vs h-h-h |1 vs 3 | 0.91| +#> |(0.1484, -0.2903, -0.0838) - (0.1504, -0.2923, -0.0858) | 0| 0| 0.63| 0.53| 0.93| 0| 0|l-h-h vs h-l-l |2 vs 1 | 0.91| +#> |(0.1484, -0.2903, -0.0838) - (0.1504, -0.2923, -0.0838) | 0| 0| 1.16| 0.24| 2.03| 0| 0|l-h-h vs h-l-h |2 vs 2 | 0.91| +#> |(0.1484, -0.2903, -0.0838) - (0.1504, -0.2903, -0.0858) | 0| 0| 0.08| 0.94| 0.09| 0| 0|l-h-h vs h-h-l |2 vs 2 | 0.94| +#> |(0.1484, -0.2903, -0.0838) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.37| 0.71| 0.50| 0| 0|l-h-h vs h-h-h |2 vs 3 | 0.91| +#> |(0.1504, -0.2923, -0.0858) - (0.1504, -0.2923, -0.0838) | 0| 0| 0.23| 0.81| 0.30| 0| 0|h-l-l vs h-l-h |1 vs 2 | 0.91| +#> |(0.1504, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0858) | 0| 0| -1.05| 0.29| 1.77| 0| 0|h-l-l vs h-h-l |1 vs 2 | 0.91| +#> |(0.1504, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| -0.39| 0.70| 0.52| 0| 0|h-l-l vs h-h-h |1 vs 3 | 0.91| +#> |(0.1504, -0.2923, -0.0838) - (0.1504, -0.2903, 
-0.0858) | 0| 0| -0.82| 0.41| 1.27| 0| 0|h-l-h vs h-h-l |2 vs 2 | 0.91| +#> |(0.1504, -0.2923, -0.0838) - (0.1504, -0.2903, -0.0838) | 0| 0| -1.05| 0.29| 1.77| 0| 0|h-l-h vs h-h-h |2 vs 3 | 0.91| +#> |(0.1504, -0.2903, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.23| 0.81| 0.30| 0| 0|h-h-l vs h-h-h |2 vs 3 | 0.91| #> #> @@ -330,7 +330,7 @@

Examples save.out = FALSE) #> Summary of Exposure Main Effects: #> -#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 12 (24%) individuals that fall into 2 out of the 2 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. +#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 11 (22%) individuals that fall into 2 out of the 2 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: @@ -338,23 +338,23 @@

Examples#> |history | n| #> |:-------|--:| #> |h-h-h | 6| -#> |l-l-l | 6| +#> |l-l-l | 5| #> #> #> Below are the average predictions by user-specified history: -#> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| -#> |:--|-------:|------:|-----:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| -#> |1 | -0.0851| 0.1197| 0.205| -0.0529| 0.1168| -0.4526| 0.6508| 0.6197| -0.2818| 0.1761|l-l-l | 0| -#> |8 | -0.0831| 0.1217| 0.207| -0.0532| 0.1168| -0.4551| 0.6490| 0.6237| -0.2822| 0.1758|h-h-h | 3| +#> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| +#> |:--|------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| +#> |1 | 0.1484| -0.2923| -0.0858| -0.2479| 0.1614| -1.5360| 0.1245| 3.0054| -0.5642| 0.0684|l-l-l | 0| +#> |8 | 0.1504| -0.2903| -0.0838| -0.2478| 0.1614| -1.5355| 0.1247| 3.0039| -0.5642| 0.0685|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: -#> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| -#> |:---------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| -#> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| -0.7| 0.48| 1.05| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.48| +#> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| +#> |:-------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| +#> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.09| 0.93| 0.11| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.93| #> #> @@ -367,7 +367,7 @@

Examples save.out = FALSE) #> Summary of Exposure Main Effects: #> -#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 20 (40%) individuals that fall into 3 out of the 3 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. +#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 18 (36%) individuals that fall into 3 out of the 3 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: @@ -375,26 +375,26 @@

Examples#> |history | n| #> |:-------|--:| #> |h-h-h | 6| -#> |h-l-l | 8| -#> |l-l-l | 6| +#> |h-l-l | 7| +#> |l-l-l | 5| #> #> #> Below are the average predictions by user-specified history: -#> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| -#> |:--|-------:|------:|-----:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| -#> |1 | -0.0851| 0.1197| 0.205| -0.0529| 0.1168| -0.4526| 0.6508| 0.6197| -0.2818| 0.1761|l-l-l | 0| -#> |5 | -0.0831| 0.1197| 0.205| -0.0530| 0.1168| -0.4538| 0.6500| 0.6215| -0.2819| 0.1759|h-l-l | 1| -#> |8 | -0.0831| 0.1217| 0.207| -0.0532| 0.1168| -0.4551| 0.6490| 0.6237| -0.2822| 0.1758|h-h-h | 3| +#> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| +#> |:--|------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| +#> |1 | 0.1484| -0.2923| -0.0858| -0.2479| 0.1614| -1.5360| 0.1245| 3.0054| -0.5642| 0.0684|l-l-l | 0| +#> |5 | 0.1504| -0.2923| -0.0858| -0.2480| 0.1614| -1.5366| 0.1244| 3.0069| -0.5644| 0.0683|h-l-l | 1| +#> |8 | 0.1504| -0.2903| -0.0838| -0.2478| 0.1614| -1.5355| 0.1247| 3.0039| -0.5642| 0.0685|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: -#> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| -#> |:---------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| -#> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| -0.70| 0.48| 1.05| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.57| -#> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1197, 0.205) | 0| 0| -0.57| 0.57| 0.81| 0| 0|l-l-l vs h-l-l |0 vs 1 | 0.57| +#> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| +#> |:-------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| +#> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.09| 0.93| 0.11| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.93| +#> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2923, -0.0858) | 0| 0| -0.37| 0.71| 0.50| 0| 0|l-l-l vs h-l-l |0 vs 1 | 0.93| #> #> @@ -407,7 +407,7 @@

Examples save.out = FALSE) #> Summary of Exposure Main Effects: #> -#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 20 (40%) individuals that fall into 3 out of the 3 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. +#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 17 (34%) individuals that fall into 3 out of the 3 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: @@ -415,25 +415,25 @@

Examples#> |history | n| #> |:-------|--:| #> |h-h-h | 6| -#> |l-h-h | 8| -#> |l-l-l | 6| +#> |l-h-h | 6| +#> |l-l-l | 5| #> #> #> Below are the average predictions by user-specified history: -#> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| -#> |:--|-------:|------:|-----:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| -#> |1 | -0.0851| 0.1197| 0.205| -0.0529| 0.1168| -0.4526| 0.6508| 0.6197| -0.2818| 0.1761|l-l-l | 0| -#> |4 | -0.0851| 0.1217| 0.207| -0.0531| 0.1169| -0.4540| 0.6498| 0.6218| -0.2821| 0.1760|l-h-h | 2| -#> |8 | -0.0831| 0.1217| 0.207| -0.0532| 0.1168| -0.4551| 0.6490| 0.6237| -0.2822| 0.1758|h-h-h | 3| +#> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| +#> |:--|------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| +#> |1 | 0.1484| -0.2923| -0.0858| -0.2479| 0.1614| -1.5360| 0.1245| 3.0054| -0.5642| 0.0684|l-l-l | 0| +#> |4 | 0.1484| -0.2903| -0.0838| -0.2477| 0.1614| -1.5349| 0.1248| 3.0023| -0.5640| 0.0686|l-h-h | 2| +#> |8 | 0.1504| -0.2903| -0.0838| -0.2478| 0.1614| -1.5355| 0.1247| 3.0039| -0.5642| 0.0685|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: -#> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|term.1 | estimate.1| std.error.1| statistic.1| p.value.1| s.value.1| conf.low.1| conf.high.1|history |dose | p.value_corr| -#> |:---------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:---------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-----------:|---------:|---------:|----------:|-----------:|:--------------|:------|------------:| -#> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| -0.7| 0.48| 1.05| 0| 0|(-0.0851499446362294,0.121681511944702,0.207011843149082) - (-0.0831499446362294,0.121681511944702,0.207011843149082) | 0| 0| -0.57| 0.57| 0.81| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.48| +#> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|term.1 | estimate.1| std.error.1| statistic.1| p.value.1| s.value.1| conf.low.1| conf.high.1|history |dose | p.value_corr| +#> |:-------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-----------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-----------:|---------:|---------:|----------:|-----------:|:--------------|:------|------------:| +#> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.09| 0.93| 0.11| 0| 0|(0.148357910991221,-0.290250944106764,-0.0837970022925556) - (0.150357910991221,-0.290250944106764,-0.0837970022925556) | 0| 0| -0.37| 0.71| 0.5| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.93| #> #> @@ -446,7 +446,7 @@

Examples save.out = FALSE) #> Summary of Exposure Main Effects: #> -#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 25 (50%) individuals that fall into 4 out of the 4 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. +#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 23 (46%) individuals that fall into 4 out of the 4 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: @@ -454,28 +454,28 @@

Examples#> |history | n| #> |:-------|--:| #> |h-h-h | 6| -#> |l-h-h | 8| -#> |l-l-h | 5| -#> |l-l-l | 6| +#> |l-h-h | 6| +#> |l-l-h | 6| +#> |l-l-l | 5| #> #> #> Below are the average predictions by user-specified history: -#> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| -#> |:--|-------:|------:|-----:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| -#> |1 | -0.0851| 0.1197| 0.205| -0.0529| 0.1168| -0.4526| 0.6508| 0.6197| -0.2818| 0.1761|l-l-l | 0| -#> |2 | -0.0851| 0.1197| 0.207| -0.0528| 0.1168| -0.4518| 0.6514| 0.6184| -0.2817| 0.1762|l-l-h | 1| -#> |4 | -0.0851| 0.1217| 0.207| -0.0531| 0.1169| -0.4540| 0.6498| 0.6218| -0.2821| 0.1760|l-h-h | 2| -#> |8 | -0.0831| 0.1217| 0.207| -0.0532| 0.1168| -0.4551| 0.6490| 0.6237| -0.2822| 0.1758|h-h-h | 3| +#> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| +#> |:--|------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| +#> |1 | 0.1484| -0.2923| -0.0858| -0.2479| 0.1614| -1.5360| 0.1245| 3.0054| -0.5642| 0.0684|l-l-l | 0| +#> |2 | 0.1484| -0.2923| -0.0838| -0.2480| 0.1613| -1.5373| 0.1242| 3.0090| -0.5642| 0.0682|l-l-h | 1| +#> |4 | 0.1484| -0.2903| -0.0838| -0.2477| 0.1614| -1.5349| 0.1248| 3.0023| -0.5640| 0.0686|l-h-h | 2| +#> |8 | 0.1504| -0.2903| -0.0838| -0.2478| 0.1614| -1.5355| 0.1247| 3.0039| -0.5642| 0.0685|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: -#> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|term.1 | estimate.1| std.error.1| statistic.1| p.value.1| s.value.1| conf.low.1| conf.high.1|history |dose | p.value_corr| -#> |:---------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:---------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-----------:|---------:|---------:|----------:|-----------:|:--------------|:------|------------:| -#> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| -0.70| 0.48| 1.05| 0| 0|(-0.0851499446362294,0.121681511944702,0.207011843149082) - (-0.0831499446362294,0.121681511944702,0.207011843149082) | 0| 0| -0.57| 0.57| 0.81| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.72| -#> |(-0.0851, 0.1197, 0.205) - (-0.0851, 0.1197, 0.207) | 0| 0| 0.37| 0.72| 0.48| 0| 0|(-0.0851499446362294,0.121681511944702,0.207011843149082) - (-0.0851499446362294,0.119681511944702,0.207011843149082) | 0| 0| 1.03| 0.30| 1.72| 0| 0|l-l-l vs l-l-h |0 vs 1 | 0.72| +#> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|term.1 | estimate.1| std.error.1| statistic.1| p.value.1| s.value.1| conf.low.1| conf.high.1|history |dose | p.value_corr| +#> |:-------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-----------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-----------:|---------:|---------:|----------:|-----------:|:--------------|:------|------------:| +#> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.09| 0.93| 0.11| 0| 
0|(0.148357910991221,-0.290250944106764,-0.0837970022925556) - (0.150357910991221,-0.290250944106764,-0.0837970022925556) | 0| 0| -0.37| 0.71| 0.50| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.93| +#> |(0.1484, -0.2923, -0.0858) - (0.1484, -0.2923, -0.0838) | 0| 0| -0.23| 0.81| 0.30| 0| 0|(0.148357910991221,-0.290250944106764,-0.0837970022925556) - (0.148357910991221,-0.292250944106764,-0.0837970022925556) | 0| 0| -1.05| 0.29| 1.77| 0| 0|l-l-l vs l-l-h |0 vs 1 | 0.93| #> #> @@ -494,36 +494,36 @@

Examples save.out = FALSE) #> Summary of Exposure Main Effects: #> -#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 10 (20%) individuals that fall into 4 out of the 4 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure A, respectively, across 1, 2, 3. +#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 7 (14%) individuals that fall into 4 out of the 4 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| -#> |h-h-h | 4| +#> |h-h-h | 1| #> |l-h-h | 3| -#> |l-l-h | 1| -#> |l-l-l | 2| +#> |l-l-h | 2| +#> |l-l-l | 1| #> #> #> Below are the average predictions by user-specified history: #> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|-------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| -#> |1 | -0.6672| -0.3898| -0.3736| 0.0267| 0.1649| 0.1617| 0.8715| 0.1984| -0.2966| 0.3499|l-l-l | 3| -#> |2 | -0.6672| -0.3898| 0.2944| 0.0583| 0.1349| 0.4320| 0.6658| 0.5869| -0.2061| 0.3226|l-l-h | 2| -#> |4 | -0.6672| 0.1975| 0.2944| -0.0227| 0.1384| -0.1642| 0.8696| 0.2016| -0.2941| 0.2486|l-h-h | 1| -#> |8 | 0.1803| 0.1975| 0.2944| -0.0761| 0.1218| -0.6249| 0.5320| 0.9104| -0.3147| 0.1626|h-h-h | 0| +#> |1 | -0.5238| -0.7820| -0.6330| -0.2486| 0.2640| -0.9417| 0.3463| 1.5298| -0.7661| 0.2688|l-l-l | 3| +#> |2 | -0.5238| -0.7820| 0.1213| -0.2855| 0.2082| -1.3711| 0.1703| 2.5535| -0.6937| 0.1226|l-l-h | 2| +#> |4 | -0.5238| 0.0294| 0.1213| -0.1630| 0.1920| -0.8489| 0.3959| 1.3367| -0.5393| 0.2133|l-h-h | 1| +#> |8 | 0.4388| 0.0294| 0.1213| -0.2295| 0.1933| -1.1877| 0.2349| 2.0897| -0.6083| 0.1492|h-h-h | 0| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. 
#> #> #> USER ALERT: please inspect the following comparisons: -#> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|term.1 | estimate.1| std.error.1| statistic.1| p.value.1| s.value.1| conf.low.1| conf.high.1|history |dose | p.value_corr| -#> |:--------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-----------:|---------:|---------:|----------:|-----------:|:--------------|:------|------------:| -#> |(-0.6672, -0.3898, -0.3736) - (0.1803, 0.1975, 0.2944) | -0.10| 0.16| -0.65| 0.51| 0.96| -0.41| 0.2|(-0.66724255804337,0.197543560074282,0.294433897893948) - (0.180252575049252,0.197543560074282,0.294433897893948) | -0.05| 0.09| -0.57| 0.57| 0.81| -0.24| 0.13|l-l-l vs h-h-h |3 vs 0 | 0.72| -#> |(-0.6672, -0.3898, -0.3736) - (-0.6672, -0.3898, 0.2944) | 0.03| 0.09| 0.37| 0.72| 0.48| -0.14| 0.2|(-0.66724255804337,0.197543560074282,0.294433897893948) - (-0.66724255804337,-0.389779823228518,0.294433897893948) | 0.08| 0.08| 1.03| 0.30| 1.72| -0.07| 0.24|l-l-l vs l-l-h |3 vs 2 | 0.72| +#> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|term.1 | estimate.1| std.error.1| statistic.1| p.value.1| s.value.1| conf.low.1| conf.high.1|history |dose | p.value_corr| +#> |:-----------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-----------:|---------:|---------:|----------:|-----------:|:--------------|:------|------------:| +#> |(-0.5238, -0.782, -0.633) - (0.4388, 0.0294, 0.1213) | 0.02| 0.31| 0.06| 0.95| 0.07| -0.59| 0.62|(-0.523818390762229,0.0293884543900781,0.12132001986401) - (0.438836555281291,0.0293884543900781,0.12132001986401) | -0.07| 0.18| -0.37| 0.71| 0.50| -0.42| 0.28|l-l-l vs h-h-h |3 vs 0 | 0.95| +#> |(-0.5238, -0.782, -0.633) - (-0.5238, -0.782, 0.1213) | -0.04| 0.16| -0.23| 0.81| 0.30| -0.35| 0.27|(-0.523818390762229,0.0293884543900781,0.12132001986401) - (-0.523818390762229,-0.781977904815324,0.12132001986401) | -0.12| 0.12| -1.05| 0.29| 1.77| -0.35| 0.11|l-l-l vs l-l-h |3 vs 2 | 0.95| #> #> diff --git a/reference/createFormulas-1.png b/reference/createFormulas-1.png index a9533e1c..fad5dd1a 100644 Binary files a/reference/createFormulas-1.png and b/reference/createFormulas-1.png differ diff --git a/reference/createFormulas-2.png b/reference/createFormulas-2.png index ff8f1c24..9e84ce5a 100644 Binary files a/reference/createFormulas-2.png and b/reference/createFormulas-2.png differ diff --git a/reference/createFormulas-3.png b/reference/createFormulas-3.png index d78d7c2b..ed3e3887 100644 Binary files a/reference/createFormulas-3.png and b/reference/createFormulas-3.png differ diff --git a/reference/createFormulas-4.png b/reference/createFormulas-4.png index d882fbc2..a66871a8 100644 Binary files a/reference/createFormulas-4.png and b/reference/createFormulas-4.png differ diff --git a/reference/createFormulas.html b/reference/createFormulas.html index 43858881..160a5863 100644 --- a/reference/createFormulas.html +++ b/reference/createFormulas.html @@ -168,17 +168,17 @@

Examples#> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55ddd61683d0> +#> <environment: 0x55b452c4fa08> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + C -#> <environment: 0x55ddd61683d0> +#> <environment: 0x55b452c4fa08> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + C -#> <environment: 0x55ddd61683d0> +#> <environment: 0x55b452c4fa08> #> f <- createFormulas(exposure = "A", @@ -191,17 +191,17 @@

Examples#> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55ddd56c2b70> +#> <environment: 0x55b452b64c00> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C -#> <environment: 0x55ddd56c2b70> +#> <environment: 0x55b452b64c00> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C -#> <environment: 0x55ddd56c2b70> +#> <environment: 0x55b452b64c00> #> #Short Formulas @@ -215,17 +215,17 @@

Examples#> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55ddd82f3058> +#> <environment: 0x55b452975290> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + C -#> <environment: 0x55ddd82f3058> +#> <environment: 0x55b452975290> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + C -#> <environment: 0x55ddd82f3058> +#> <environment: 0x55b452975290> #> f <- createFormulas(exposure = "A", exposure_time_pts = c(1, 2, 3), @@ -237,17 +237,17 @@

Examples#> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55ddd8063b08> +#> <environment: 0x55b4527cf0d8> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C -#> <environment: 0x55ddd8063b08> +#> <environment: 0x55b4527cf0d8> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C -#> <environment: 0x55ddd8063b08> +#> <environment: 0x55b4527cf0d8> #> c <- list("short_form-1" = as.formula(A.1 ~ C), @@ -264,11 +264,11 @@

Examples save.out = FALSE) #> The user-supplied custom balancing formula for each exposure time point are below: #> A.1 ~ C -#> <environment: 0x55ddd6224850> +#> <environment: 0x55b452bfd6b8> #> A.2 ~ A.1 + B.1 + C -#> <environment: 0x55ddd6224850> +#> <environment: 0x55b452bfd6b8> #> A.3 ~ A.2 + B.2 + C -#> <environment: 0x55ddd6224850> +#> <environment: 0x55b452bfd6b8> #Update Formulas test <- data.frame(ID = 1:50, @@ -287,7 +287,7 @@

Examples outcome = "D.3", formulas = f, save.out = FALSE) -#> For the cbps weighting method, the median weight value is 1 (SD = 0.37; range = 0.29-2). +#> For the cbps weighting method, the median weight value is 0.87 (SD = 0.38; range = 0.43-2). #> @@ -304,27 +304,28 @@

Examples -#> As shown below, 1 out of 7 (14%) covariates across time points, corresponding to 1 out of 2 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.19 (range= -0.19--0.19): +#> As shown below, 2 out of 7 (29%) covariates across time points, corresponding to 2 out of 2 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.12 (range= -0.13-0.1): #> #> Table: Imbalanced covariates using cbps and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| -#> | 1| 1| 0| 1| -#> | 2| 2| 1| 3| -#> | 3| 3| 0| 3| +#> | 1| 0| 1| 1| +#> | 2| 3| 0| 3| +#> | 3| 2| 1| 3| #> #> #> USER ALERT: For exposure A using the short formulas and cbps : -#> The median absolute value relation between exposure and confounder is 0.03 (range = -0.19 -0.07). -#> As shown below, the following 1 covariates across time points out of 7 total (14.29%) spanning 1 domains out of 3 (33.33%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.19 (range=-0.19--0.19) : +#> The median absolute value relation between exposure and confounder is 0.04 (range = -0.13 -0.1). +#> As shown below, the following 2 covariates across time points out of 7 total (28.57%) spanning 2 domains out of 3 (66.67%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.12 (range=-0.13-0.1) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:---------|----------:|----------:|--------:| -#> |2 |A | 2| 1|A.1 | -0.1863923| 0.1| 0| +#> |1 |A | 1| 0|C | 0.1000057| 0.1| 0| +#> |6 |A | 3| 2|B.2 | -0.1325060| 0.1| 0| #> f <- createFormulas(exposure = "A", exposure_time_pts = c(1, 2, 3), @@ -337,21 +338,21 @@

Examples#> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> The update formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55ddd6c50880> +#> <environment: 0x55b4577e59e0> #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For A at exposure time point 2 no time-varying confounders at additional lags were added. #> #> The update formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + C -#> <environment: 0x55ddd6c50880> +#> <environment: 0x55b4577e59e0> #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For A at exposure time point 3 no time-varying confounders at additional lags were added. #> #> The update formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + C -#> <environment: 0x55ddd6c50880> +#> <environment: 0x55b4577e59e0> #> f <- createFormulas(exposure = "A", exposure_time_pts = c(1, 2, 3), @@ -364,21 +365,21 @@

Examples#> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> The update formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55ddd69b1c30> +#> <environment: 0x55b4573f3520> #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For A at exposure time point 2 no time-varying confounders at additional lags were added. #> #> The update formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C -#> <environment: 0x55ddd69b1c30> +#> <environment: 0x55b4573f3520> #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For A at exposure time point 3 no time-varying confounders at additional lags were added. #> #> The update formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C -#> <environment: 0x55ddd69b1c30> +#> <environment: 0x55b4573f3520> #> diff --git a/reference/createWeights-1.png b/reference/createWeights-1.png index f371c6ed..9c477111 100644 Binary files a/reference/createWeights-1.png and b/reference/createWeights-1.png differ diff --git a/reference/createWeights-2.png b/reference/createWeights-2.png index f2e8677a..754feb60 100644 Binary files a/reference/createWeights-2.png and b/reference/createWeights-2.png differ diff --git a/reference/createWeights-3.png b/reference/createWeights-3.png index f2e8677a..754feb60 100644 Binary files a/reference/createWeights-3.png and b/reference/createWeights-3.png differ diff --git a/reference/createWeights-4.png b/reference/createWeights-4.png index f2e8677a..754feb60 100644 Binary files a/reference/createWeights-4.png and b/reference/createWeights-4.png differ diff --git a/reference/createWeights-5.png b/reference/createWeights-5.png index f927711f..bd85ee33 100644 Binary files a/reference/createWeights-5.png and b/reference/createWeights-5.png differ diff --git a/reference/createWeights-6.png b/reference/createWeights-6.png index 456fbe92..c2d27012 100644 Binary files a/reference/createWeights-6.png and b/reference/createWeights-6.png differ diff --git a/reference/createWeights-7.png b/reference/createWeights-7.png index 035a4a5a..6a253adb 100644 Binary files a/reference/createWeights-7.png and b/reference/createWeights-7.png differ diff --git a/reference/createWeights.html b/reference/createWeights.html index aaba6a5c..5b698fee 100644 --- a/reference/createWeights.html +++ b/reference/createWeights.html @@ -174,24 +174,24 @@

Examples#> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55dddb3dc0d0> +#> <environment: 0x55b44acc6940> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C -#> <environment: 0x55dddb3dc0d0> +#> <environment: 0x55b44acc6940> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C -#> <environment: 0x55dddb3dc0d0> +#> <environment: 0x55b44acc6940> #> w <- createWeights(data = test, exposure = "A", outcome = "D.3", formulas = f, save.out = FALSE) -#> For the cbps weighting method, the median weight value is 1.12 (SD = 0.46; range = 0.36-2). +#> For the cbps weighting method, the median weight value is 1.02 (SD = 0.42; range = 0.3-3). #> @@ -205,24 +205,24 @@

Examples#> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55ddde924a10> +#> <environment: 0x55b4530050a8> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + A.1:B.1 + B.1 + C -#> <environment: 0x55ddde924a10> +#> <environment: 0x55b4530050a8> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C -#> <environment: 0x55ddde924a10> +#> <environment: 0x55b4530050a8> #> w <- createWeights(data = test, exposure = "A", outcome = "D.3", formulas = f, save.out = FALSE) -#> For the cbps weighting method, the median weight value is 1.13 (SD = 0.46; range = 0.35-2). +#> For the cbps weighting method, the median weight value is 1.05 (SD = 0.55; range = 0.25-3). #> @@ -232,7 +232,7 @@

Examples formulas = f, method = "cbps", save.out = FALSE) -#> For the cbps weighting method, the median weight value is 1.13 (SD = 0.46; range = 0.35-2). +#> For the cbps weighting method, the median weight value is 1.05 (SD = 0.55; range = 0.25-3). #> w <- createWeights(data = test, @@ -241,7 +241,7 @@

Examples formulas = f, method = "cbps", save.out = FALSE) -#> For the cbps weighting method, the median weight value is 1.13 (SD = 0.46; range = 0.35-2). +#> For the cbps weighting method, the median weight value is 1.05 (SD = 0.55; range = 0.25-3). #> w <- createWeights(data = test, @@ -250,7 +250,7 @@

Examples formulas = f, method = "gbm", save.out = FALSE) -#> For the gbm weighting method, the median weight value is 0.73 (SD = 0.35; range = 0.26-2). +#> For the gbm weighting method, the median weight value is 0.58 (SD = 0.3; range = 0.27-2). #> w <- createWeights(data = test, @@ -259,7 +259,7 @@

Examples formulas = f, method = "bart", save.out = FALSE) -#> For the bart weighting method, the median weight value is 0.71 (SD = 0.27; range = 0.34-2). +#> For the bart weighting method, the median weight value is 0.69 (SD = 0.2; range = 0.12-1). #> w <- createWeights(data = test, @@ -271,11 +271,7 @@

Examples#> Loading required package: nnls #> Warning: All algorithms have zero weight #> Warning: All metalearner coefficients are zero, predictions will all be equal to 0 -#> Warning: All algorithms have zero weight -#> Warning: All metalearner coefficients are zero, predictions will all be equal to 0 -#> Warning: All algorithms have zero weight -#> Warning: All metalearner coefficients are zero, predictions will all be equal to 0 -#> For the super weighting method, the median weight value is 1.01 (SD = 0.28; range = 0.62-3). +#> For the super weighting method, the median weight value is 0.99 (SD = 0.41; range = 0.44-2). #> diff --git a/reference/eval_hist.html b/reference/eval_hist.html index f5c53ddd..df57ef0c 100644 --- a/reference/eval_hist.html +++ b/reference/eval_hist.html @@ -134,21 +134,21 @@

Examples time_pts = c(1, 2, 3)) #> Summary of Exposure Main Effects: #> -#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 48 (96%) individuals that fall into 8 out of the 8 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. +#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 50 (100%) individuals that fall into 8 out of the 8 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| -#> |h-h-h | 6| +#> |h-h-h | 4| #> |h-h-l | 8| -#> |h-l-h | 6| -#> |h-l-l | 3| -#> |l-h-h | 5| +#> |h-l-h | 7| +#> |h-l-l | 6| +#> |l-h-h | 7| #> |l-h-l | 6| #> |l-l-h | 7| -#> |l-l-l | 7| +#> |l-l-l | 5| #> h <- eval_hist(data = test, exposure = "A", @@ -157,17 +157,17 @@

Examples values = I(list(c(1, 2), c(3))))) #> Summary of Exposure Main Effects: #> -#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 48 (96%) individuals that fall into 4 out of the 4 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across Infancy, Toddlerhood. +#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 50 (100%) individuals that fall into 4 out of the 4 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across Infancy, Toddlerhood. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects Infancy, Toddlerhood containing time points c(1, 2), 3: #> #> |history | n| #> |:-------|--:| -#> |h-h | 14| -#> |h-l | 11| -#> |l-h | 10| -#> |l-l | 13| +#> |h-h | 10| +#> |h-l | 15| +#> |l-h | 15| +#> |l-l | 10| #> h <- eval_hist(data = test, exposure = "A", @@ -175,22 +175,21 @@

Examples hi_lo_cut = c(0.6, 0.3)) #> Summary of Exposure Main Effects: #> -#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 17 (34%) individuals that fall into 7 out of the 8 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure A, respectively, across 1, 2, 3. +#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 12 (24%) individuals that fall into 6 out of the 8 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: -#> Warning: USER ALERT: There are no individuals in your sample that fall into l-h-h exposure history/histories. You may wish to consider different high/low cutoffs (for continuous exposures), alternative epochs, or choose a different measure to avoid extrapolation. +#> Warning: USER ALERT: There are no individuals in your sample that fall into l-h-l & l-l-l exposure history/histories. You may wish to consider different high/low cutoffs (for continuous exposures), alternative epochs, or choose a different measure to avoid extrapolation. #> #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| -#> |h-h-h | 4| -#> |h-h-l | 3| +#> |h-h-h | 2| +#> |h-h-l | 2| #> |h-l-h | 1| -#> |h-l-l | 3| -#> |l-h-l | 2| -#> |l-l-h | 2| -#> |l-l-l | 2| +#> |h-l-l | 2| +#> |l-h-h | 2| +#> |l-l-h | 3| #> h <- eval_hist(data = test, exposure = "A", @@ -200,15 +199,16 @@

Examples comps = "h-h-h") #> Summary of Exposure Main Effects: #> -#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 6 (12%) individuals that fall into 2 out of the 2 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure A, respectively, across 1, 2, 3. +#> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 2 (4%) individuals that fall into 1 out of the 2 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: +#> Warning: USER ALERT: There are no individuals in your sample that fall into l-l-l exposure history/histories. You may wish to consider different high/low cutoffs (for continuous exposures), alternative epochs, or choose a different measure to avoid extrapolation. +#> #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| -#> |h-h-h | 4| -#> |l-l-l | 2| +#> |h-h-h | 2| #> diff --git a/reference/fitModel-1.png b/reference/fitModel-1.png index 39e59445..f1fa2951 100644 Binary files a/reference/fitModel-1.png and b/reference/fitModel-1.png differ diff --git a/reference/fitModel.html b/reference/fitModel.html index 95b0f4c7..969a12fd 100644 --- a/reference/fitModel.html +++ b/reference/fitModel.html @@ -173,17 +173,17 @@

Examples#> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55dde8f49f20> +#> <environment: 0x55b463b17870> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C -#> <environment: 0x55dde8f49f20> +#> <environment: 0x55b463b17870> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C -#> <environment: 0x55dde8f49f20> +#> <environment: 0x55b463b17870> #> test <- data.frame(ID = 1:50, @@ -202,7 +202,7 @@

Examples outcome = "D.3", formulas = f, save.out = FALSE) -#> For the cbps weighting method, the median weight value is 0.98 (SD = 0.31; range = 0.3-2). +#> For the cbps weighting method, the median weight value is 0.94 (SD = 0.28; range = 0.55-2). #> @@ -219,8 +219,8 @@

Examples#> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) -#> Working 2logLR = 1.739392 p= 0.62288 -#> (scale factors: 1.2 1.1 0.67 ); denominator df= 46 +#> Working 2logLR = 3.967973 p= 0.27411 +#> (scale factors: 1.3 1.1 0.62 ); denominator df= 46 #> #> The marginal model, m0, is summarized below: m <- fitModel(data = test, @@ -240,8 +240,8 @@

Examples#> #> Working (Rao-Scott+F) LRT for A.Infancy A.Toddlerhood #> in svyglm(formula = as.formula(f), design = s, family = fam) -#> Working 2logLR = 0.8130111 p= 0.65766 -#> (scale factors: 1.3 0.72 ); denominator df= 47 +#> Working 2logLR = 3.737787 p= 0.16375 +#> (scale factors: 1.2 0.82 ); denominator df= 47 #> #> The marginal model, m0, is summarized below: m <- fitModel(data = test, @@ -258,8 +258,8 @@

Examples#> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) -#> Working 2logLR = 1.835843 p= 0.6022 -#> (scale factors: 1.3 1 0.69 ); denominator df= 45 +#> Working 2logLR = 3.881045 p= 0.28405 +#> (scale factors: 1.3 1.1 0.64 ); denominator df= 45 #> #> The marginal model, m1, is summarized below: m <- fitModel(data = test, @@ -276,8 +276,8 @@

Examples#> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 A.1:A.2 A.1:A.3 A.2:A.3 A.1:A.2:A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) -#> Working 2logLR = 2.836186 p= 0.82934 -#> (scale factors: 2.7 1.6 1.2 0.85 0.34 0.22 0.2 ); denominator df= 42 +#> Working 2logLR = 20.56726 p= 0.02578 +#> (scale factors: 2.1 1.9 1 0.89 0.5 0.38 0.19 ); denominator df= 42 #> #> The marginal model, m2, is summarized below: m <- fitModel(data = test, @@ -295,8 +295,8 @@

Examples#> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 A.1:A.2 A.1:A.3 A.2:A.3 A.1:A.2:A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) -#> Working 2logLR = 4.331592 p= 0.66448 -#> (scale factors: 2.7 1.4 0.99 0.72 0.44 0.37 0.31 ); denominator df= 41 +#> Working 2logLR = 19.69042 p= 0.031433 +#> (scale factors: 2.2 1.9 0.98 0.83 0.51 0.4 0.21 ); denominator df= 41 #> #> The marginal model, m3, is summarized below: diff --git a/reference/getModel-1.png b/reference/getModel-1.png index 743a9d8b..0e80ba4d 100644 Binary files a/reference/getModel-1.png and b/reference/getModel-1.png differ diff --git a/reference/getModel.html b/reference/getModel.html index bda95d67..2a68c33a 100644 --- a/reference/getModel.html +++ b/reference/getModel.html @@ -158,17 +158,17 @@

Examples#> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55dddde612c0> +#> <environment: 0x55b45826bfa0> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C -#> <environment: 0x55dddde612c0> +#> <environment: 0x55b45826bfa0> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C -#> <environment: 0x55dddde612c0> +#> <environment: 0x55b45826bfa0> #> w <- createWeights(data = test, @@ -176,7 +176,7 @@

Examples outcome = "D.3", formulas = f, save.out = FALSE) -#> For the cbps weighting method, the median weight value is 1.08 (SD = 0.85; range = 0.37-6). +#> For the cbps weighting method, the median weight value is 1.29 (SD = 0.62; range = 0.2-3). #> diff --git a/reference/make_love_plot-1.png b/reference/make_love_plot-1.png index 2aee64d1..6964b245 100644 Binary files a/reference/make_love_plot-1.png and b/reference/make_love_plot-1.png differ diff --git a/reference/make_love_plot-2.png b/reference/make_love_plot-2.png index 58bc8cec..07936161 100644 Binary files a/reference/make_love_plot-2.png and b/reference/make_love_plot-2.png differ diff --git a/reference/make_love_plot-3.png b/reference/make_love_plot-3.png index b23206b8..d5285246 100644 Binary files a/reference/make_love_plot-3.png and b/reference/make_love_plot-3.png differ diff --git a/reference/make_love_plot-4.png b/reference/make_love_plot-4.png index 563da8fe..44b88706 100644 Binary files a/reference/make_love_plot-4.png and b/reference/make_love_plot-4.png differ diff --git a/reference/make_love_plot-5.png b/reference/make_love_plot-5.png index ab7b710d..478d77d9 100644 Binary files a/reference/make_love_plot-5.png and b/reference/make_love_plot-5.png differ diff --git a/reference/make_love_plot-6.png b/reference/make_love_plot-6.png index ab7b710d..478d77d9 100644 Binary files a/reference/make_love_plot-6.png and b/reference/make_love_plot-6.png differ diff --git a/reference/make_love_plot-7.png b/reference/make_love_plot-7.png index dbe97bfc..cda6ab9d 100644 Binary files a/reference/make_love_plot-7.png and b/reference/make_love_plot-7.png differ diff --git a/reference/make_love_plot-8.png b/reference/make_love_plot-8.png index dbe97bfc..cda6ab9d 100644 Binary files a/reference/make_love_plot-8.png and b/reference/make_love_plot-8.png differ diff --git a/reference/make_love_plot.html b/reference/make_love_plot.html index 77d6e419..3872fe73 100644 --- a/reference/make_love_plot.html +++ b/reference/make_love_plot.html @@ -158,17 +158,17 @@

Examples#> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55dddc12d368> +#> <environment: 0x55b44632e810> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C -#> <environment: 0x55dddc12d368> +#> <environment: 0x55b44632e810> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C -#> <environment: 0x55dddc12d368> +#> <environment: 0x55b44632e810> #> test <- data.frame(ID = 1:50, @@ -187,7 +187,7 @@

Examples outcome = "D.3", formulas = f, save.out = FALSE) -#> For the cbps weighting method, the median weight value is 1.12 (SD = 1.48; range = 0.32-10). +#> For the cbps weighting method, the median weight value is 1.01 (SD = 0.68; range = 0.23-4). #> @@ -204,29 +204,29 @@

Examples -#> As shown below, 3 out of 9 (33%) covariates across time points, corresponding to 3 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.34 (range= -0.38-0.11): +#> As shown below, 3 out of 9 (33%) covariates across time points, corresponding to 2 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.11 (range= -0.12-0.11): #> #> Table: Imbalanced covariates using cbps and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| -#> | 2| 1| 2| 3| -#> | 3| 4| 1| 5| +#> | 2| 3| 0| 3| +#> | 3| 2| 3| 5| #> #> #> USER ALERT: For exposure A using the full formulas and cbps : -#> The median absolute value relation between exposure and confounder is 0.09 (range = -0.38 -0.11). -#> As shown below, the following 3 covariates across time points out of 9 total (33.33%) spanning 3 domains out of 3 (100%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.34 (range=-0.38-0.11) : +#> The median absolute value relation between exposure and confounder is 0.04 (range = -0.12 -0.11). +#> As shown below, the following 3 covariates across time points out of 9 total (33.33%) spanning 2 domains out of 3 (66.67%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.11 (range=-0.12-0.11) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:---------|----------:|----------:|--------:| -#> |3 |A | 2| 1|B.1 | -0.3829012| 0.1| 0| -#> |4 |A | 2| 0|C | -0.3395794| 0.1| 0| -#> |5 |A | 3| 1|A.1 | 0.1100347| 0.1| 0| +#> |6 |A | 3| 2|A.2 | 0.1094876| 0.1| 0| +#> |7 |A | 3| 1|B.1 | -0.1090365| 0.1| 0| +#> |8 |A | 3| 2|B.2 | -0.1163024| 0.1| 0| #> make_love_plot(balance_stats = b, diff --git a/reference/trimWeights-1.png b/reference/trimWeights-1.png index c579c31f..e820c54c 100644 Binary files a/reference/trimWeights-1.png and b/reference/trimWeights-1.png differ diff --git a/reference/trimWeights-2.png b/reference/trimWeights-2.png index 7bdf31ef..a076bf7b 100644 Binary files a/reference/trimWeights-2.png and b/reference/trimWeights-2.png differ diff --git a/reference/trimWeights-3.png b/reference/trimWeights-3.png index 65b3349b..c0cdef8c 100644 Binary files a/reference/trimWeights-3.png and b/reference/trimWeights-3.png differ diff --git a/reference/trimWeights.html b/reference/trimWeights.html index 98e6a0b2..130e06b4 100644 --- a/reference/trimWeights.html +++ b/reference/trimWeights.html @@ -130,17 +130,17 @@

Examples#> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C -#> <environment: 0x55ddd33de190> +#> <environment: 0x55b453188968> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C -#> <environment: 0x55ddd33de190> +#> <environment: 0x55b453188968> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C -#> <environment: 0x55ddd33de190> +#> <environment: 0x55b453188968> #> test <- data.frame(ID = 1:50, @@ -158,7 +158,7 @@

Examples outcome = "D.3", formulas = f, save.out = FALSE) -#> For the cbps weighting method, the median weight value is 1.08 (SD = 0.56; range = 0.45-4). +#> For the cbps weighting method, the median weight value is 0.99 (SD = 0.43; range = 0.46-3). #> @@ -168,7 +168,7 @@

Examples save.out = FALSE) #> Trimming weights to 95%. #> -#> For the A-D.3 relation, following trimming at the 0.95 quantile, the median weight value is 1.08 (SD= 0.41; range= 0.45-2). +#> For the A-D.3 relation, following trimming at the 0.95 quantile, the median weight value is 0.99 (SD= 0.36; range= 0.46-2). #> t <- trimWeights(exposure = "A", @@ -178,7 +178,7 @@

Examples save.out = FALSE) #> Trimming weights to 75%. #> -#> For the A-D.3 relation, following trimming at the 0.75 quantile, the median weight value is 1.08 (SD= 0.23; range= 0.45-1). +#> For the A-D.3 relation, following trimming at the 0.75 quantile, the median weight value is 0.99 (SD= 0.22; range= 0.46-1). #> diff --git a/search.json b/search.json index 2ff404eb..d942a097 100644 --- a/search.json +++ b/search.json @@ -1 +1 @@ -[{"path":"https://istallworthy.github.io/devMSMs/articles/Data_Requirements.html","id":"references","dir":"Articles","previous_headings":"","what":"References","title":"Data Requirements","text":"Burchinal, M., Howes, C., Pianta, R., Bryant, D., Early, D., Clifford, R., & Barbarin, O. (2008). Predicting Child Outcomes End Kindergarten Quality Pre-Kindergarten Teacher–Child Interactions Instruction. Applied Developmental Science, 12(3), 140–153. https://doi.org/10.1080/10888690802199418 Kainz, K., Greifer, N., Givens, ., Swietek, K., Lombardi, B. M., Zietz, S., & Kohn, J. L. (2017). Improving Causal Inference: Recommendations Covariate Selection Balance Propensity Score Methods. Journal Society Social Work Research, 8(2), 279–303. https://doi.org/10.1086/691464 Vernon-Feagans, L., Cox, M., Willoughby, M., Burchinal, M., Garrett-Peters, P., Mills-Koonce, R., Garrett-Peiers, P., Conger, R. D., & Bauer, P. J. (2013). Family Life Project: Epidemiological Developmental Study Young Children Living Poor Rural Communities. Monographs Society Research Child Development, 78(5), –150.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"core-inputs","dir":"Articles","previous_headings":"","what":"Core inputs","title":"Preliminary Steps","text":"Please see Specifying Core Inputs vignette detail following core inputs. 
, use ESETA1, measure economic strain experienced family, exposure StrDif_Tot, behavior problems measured SDQ, outcome.","code":"set.seed(1234) home_dir <- NA # home_dir <- '/Users/isabella/Library/CloudStorage/Box-Box/BSL General/MSMs/testing/isa' #note: no / after exposure <- \"ESETA1\" exposure_time_pts <- c(6, 15, 24, 35, 58) outcome <- \"StrDif_Tot.58\" tv_confounders <- c(\"SAAmylase.6\",\"SAAmylase.15\", \"SAAmylase.24\", \"MDI.6\", \"MDI.15\", \"RHasSO.6\", \"RHasSO.15\", \"RHasSO.24\",\"RHasSO.35\", \"RHasSO.58\", \"WndNbrhood.6\",\"WndNbrhood.24\", \"WndNbrhood.35\", \"WndNbrhood.58\", \"IBRAttn.6\", \"IBRAttn.15\", \"IBRAttn.24\", \"B18Raw.6\", \"B18Raw.15\", \"B18Raw.24\", \"B18Raw.58\", \"HOMEETA1.6\", \"HOMEETA1.15\", \"HOMEETA1.24\", \"HOMEETA1.35\", \"HOMEETA1.58\", \"InRatioCor.6\", \"InRatioCor.15\", \"InRatioCor.24\", \"InRatioCor.35\", \"InRatioCor.58\", \"ESETA1.6\", \"ESETA1.15\", \"ESETA1.24\", \"ESETA1.35\", \"ESETA1.58\", \"CORTB.6\", \"CORTB.15\", \"CORTB.24\", \"EARS_TJo.24\", \"EARS_TJo.35\", \"LESMnPos.24\", \"LESMnPos.35\", \"LESMnNeg.24\", \"LESMnNeg.35\", \"StrDif_Tot.35\", \"StrDif_Tot.58\", \"fscore.35\", \"fscore.58\" ) #required ti_confounders <- c(\"state\", \"BioDadInHH2\", \"PmAge2\", \"PmBlac2\", \"TcBlac2\", \"PmMrSt2\", \"PmEd2\", \"KFASTScr\", \"RMomAgeU\", \"RHealth\", \"HomeOwnd\", \"SWghtLB\", \"SurpPreg\", \"SmokTotl\", \"DrnkFreq\", \"peri_health\", \"caregiv_health\", \"gov_assist\" #, \"state:SmokTotl\", \"PmAge2:PmBlac2\", \"PmAge2:PmEd2\" #testing interaction terms )"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"load-data","dir":"Articles","previous_headings":"","what":"Load data","title":"Preliminary Steps","text":"Users several options reading data. can begin workflow following options: long data (without missing data), can converted wide data wide data (without missing data), can imputed using imputeData() helper function, needed imputed wide data sets saved locally, can read list , demonstrate use starting options. First load simulated longitudinal data long format (missingness) accompanies devMSMs. data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008).","code":"data(\"sim_data_long_miss\", package = \"devMSMs\") data_long <- sim_data_long_miss"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p1--format-data","dir":"Articles","previous_headings":"","what":"P1. Format Data","title":"Preliminary Steps","text":"data must wide format contain “ID” column subject identifier exposure, outcome, confounders separate columns (shown Figure 1). Column names can include underscore special characters time-varying variables suffix consists period followed time point (e.g., “variable.6”). variables classed integer, numeric, factor (character). Auxiliary nuisance covariates confounders (e.g, assessment version) can included dataset use specification final modeling step (Workflow vignettes Step 5). insert Figure 1 .","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p1a--format-single-data-frame-of-long-data","dir":"Articles","previous_headings":"P1. Format Data","what":"P1a. 
Format single data frame of long data","title":"Preliminary Steps","text":"Users beginning single data frame long format (without missingness) can utilize helper function formatLongData() summarize exposure outcome data convert required variable names. function takes dataset long format variables time (time_var), ID (id_var), missing data (missing) alternative variables re-labels according required package. also classes factor confounders (factor_confounders) factors data others numeric. get descriptive statistics summary exposure, ESETA1, outcome, StrDif_Tot.58, visual inspections.","code":"data_long_f <- formatLongData(data = data_long, exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, time_var = \"WAVE\", id_var = \"ID\", missing = NA, factor_confounders = c(\"state\", \"TcBlac2\",\"BioDadInHH2\",\"HomeOwnd\", \"PmBlac2\", \"PmMrSt2\", \"SurpPreg\", \"RHealth\", \"SmokTotl\", \"DrnkFreq\", \"RHasSO\"), home_dir = home_dir, save.out = save.out) #> Table: Summary of ESETA1 Exposure Information #> #> |WAVE | mean| sd| min| max| #> |:----|---------:|---------:|------:|-----:| #> |15 | 0.2983433| 0.9261390| -2.699| 3.474| #> |24 | 0.1585387| 0.9575181| -2.858| 3.284| #> |35 | 0.1388395| 0.9475905| -3.046| 3.014| #> |58 | 0.0996006| 0.9924516| -2.478| 3.173| #> |6 | 0.3337979| 0.9298080| -2.809| 4.035| #> #> Table: Summary of Outcome StrDif_Tot.58 Information #> #> |WAVE | mean| sd| min| max| #> |:----|---------:|---------:|------:|-----:| #> |35 | 0.6009797| 0.2830620| -0.230| 1.536| #> |58 | 0.5029778| 0.2931918| -0.281| 1.448|"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p1b--convert-single-long-data-frame-to-wide-format","dir":"Articles","previous_headings":"P1. Format Data","what":"P1b. Convert single long data frame to wide format","title":"Preliminary Steps","text":"Users correctly formatted variables long format option using following code transform data wide format, proceed using package (missing data) imputing (< 20% missing data MAR). transform newly formatted long data wide format. Alternatively, start wide data missingness already formatted.","code":"require(\"stats\") v <- sapply(strsplit(tv_confounders[!grepl(\"\\\\:\", tv_confounders)], \"\\\\.\"), \"[\", 1) v <- v[!duplicated(v)] data_wide <- stats::reshape(data = data_long_f, idvar = \"ID\", #list ID variable in your dataset v.names = v, timevar = \"WAVE\", # list time variable in your long dataset times = c(6, 15, 24, 35, 58), # list all time points in your dataset direction = \"wide\") data_wide <- data_wide[, colSums(is.na(data_wide)) < nrow(data_wide)] data(\"sim_data_wide_miss\", package = \"devMSMs\") data_wide <- sim_data_wide_miss"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p2--impute-data-to-account-for-missingness","dir":"Articles","previous_headings":"","what":"P2. Impute Data to Account for Missingness","title":"Preliminary Steps","text":"functions devMSMs package accept data form single data frame missing values m imputed datasets form either mids object (output mice package via imputeData()) list imputed datasets. developmental data humans amount missing data. Given creation IPTW balancing weights requires complete data, recommend imputing data. Imputation assumes missing data mechanism missing random (MAR) 20% missing data total (Leyrat et al., 2021). 
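Before imputing, it can help to confirm that the amount of missingness really is modest. The following is a minimal base-R sketch (not a devMSMs function), assuming data_wide is the wide-formatted data frame created above:

# proportion of missing values per column, largest first
miss_by_col <- colMeans(is.na(data_wide))
head(round(sort(miss_by_col, decreasing = TRUE), 2), 10)
# overall proportion of missing cells in the dataset
mean(is.na(data_wide))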
Given existing work demonstrating superiority, devMSMS implements ‘within’ approach imputed data, conducting steps imputed dataset pooling estimates using Rubin’s rules create final average predictions contrast comparisons Worfklows vignettes Step 5 (Leyrat et al, 2021; Granger et al., 2019).","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p2a--multiply-impute-single-wide-formatted-data-frame-using-mice","dir":"Articles","previous_headings":"P2. Impute Data to Account for Missingness","what":"P2a. Multiply impute single wide, formatted data frame using mice","title":"Preliminary Steps","text":"Users option using helper imputeData() function impute correctly formatted wide data. step can take run. user can specify many imputed datasets create (default m = 5). imputeData() draws mice() function mice package (van Buuren & Oudshoorn, 2011) conduct multiple imputation chained equations (mice). variables present dataset used impute missing data column. user can specify imputation method method field drawing following list: “pmm” (predictive mean matching), “midastouch” (weighted predictive mean matching), “sample” (random sample observed values), “rf” (random forest) “cart” (classification regression trees). Random forest imputation default given evidence efficiency superior performance (Shah et al., 2014). Please review mice documentation details. Additionally, users can specify integer value seed order offset random number generator mice() make reproducible imputations. parameter read_imps_from_file allow read already imputed data local storage (TRUE) re-run imputation code multiple times (FALSE; default). Users may use parameter supply mids object imputed data mice package (title ‘all_imp.rds’). sure inspect console warnings well resulting imputed datasets. variables missing data following imputation may need removed due high collinearity /low variability. required inputs function data frame wide format (formatted according pre-requirements listed ), m number imputed datasets create, path home directory (save.= TRUE), exposure (e.g., “variable”), outcome (e.g., “variable.t”). home directory path, exposure, outcome already defined user completed Specifying Core Inputs vignette. optional inputs follows. user can specify imputation method compatible mice() (see ). Additionally, user can specify maxit number interactions mice::mice() conduct (default 5). user can also specify para_proc, logical indicator indicating whether speed imputing using parallel processing (default = TRUE). draws 2 cores using functions parallel, doRNG, doParallel packages. user may also specify additional inputs accepted mice::mice() advise consulting [mice documentation] information. user can also indicate already created imputed datasets function wish read (read_imps_from_file = TRUE rather recreate (default). example, create 5 imputed datasets using default random forest method 0 iterations (just illustrative purposes), set seed reproducibility, assign output data use devMSMs. code takes time run. inspect output console warnings mice(). mice object can now assigned data use deveMSMs package (see Workflows vignettes).","code":"s <- 1234 m <- 5 method <- \"rf\" maxit <- 0 imputed_data <- imputeData(data = data_wide, exposure = exposure, outcome = outcome, m = m, method = method, maxit = maxit, para_proc = FALSE, seed = s, read_imps_from_file = FALSE, home_dir = home_dir, save.out = save.out) #> Creating 5 imputed datasets using the rf imputation method in mice::mice(). 
This may take some time to run. #> #> #> USER ALERT: Please view any logged events from the imputation below: #> Table: Logged Events from mice::mice data <- imputed_data"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p2b--read-in-as-a-list-wide-imputed-data-saved-locally","dir":"Articles","previous_headings":"P2. Impute Data to Account for Missingness","what":"P2b. Read in as a list wide imputed data saved locally","title":"Preliminary Steps","text":"Alternatively, user imputed datasets already created using program mice, can read , list, files saved locally .csv files (labeled “1”:m) single folder. list can assigned data use deveMSMs package (see Workflows vignettes). , load imputed data simulated FLP, example. Alternatively, case missing data, user can read formatted, wide dataset assigned data use deveMSMs package (see Workflows vignettes). , load single wide data frame simulated FLP example.","code":"# folder <- \"/Users/isabella/Library/CloudStorage/Box-Box/BSL General/MSMs/testing/testing data/continuous outcome/continuous exposure/imputations/\" # # files <- list.files(folder, full.names = TRUE, pattern = \"\\\\.csv\") #make sure pattern matches suffix of your data # # data <- lapply(files, function(file) { # imp_data <- read.csv(file) # imp_data # }) data(\"sim_data_imp_list\", package = \"devMSMs\") data <- sim_data_imp_list data(\"sim_data_wide\", package = \"devMSMs\") data_wide <- sim_data_wide data <- data_wide"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p3--optional-identify-exposure-epochs","dir":"Articles","previous_headings":"","what":"P3. Optional: Identify Exposure Epochs","title":"Preliminary Steps","text":"Users option specify exposure epochs, meaningful periods developmental time many encompass time points exposure measured. user option draw theory structure data specify developmental epochs exposure differ time points exposure collected. specify epochs, users utilize optional epochs argument providing data frame contains two variables: epochs: provide, quotations, list user-created names epoch; values: list, named epoch, provide single integer list integers (exposure time points) constitute epoch. named epoch must corresponding value (values epoch can differ number entries, shown ). user ensure epoch values included exposure_time_pts field. exposure epochs arguments fitModel() compareHistories() devMSMs functions (see Workflows vignettes) specification kept consistent throughout use package vignettes. constitute main effects variables modeling relation exposure outcome (Workflows vignettes Step 5a) form basis estimating comparing exposure histories (Workflows vignettes Step 5b). epochs specified, exposure time points used aforementioned steps. , specify Infancy, Toddlerhood, Childhood epochs correspond 6 15; 24 35; 58 months, respectively.","code":"epochs <- data.frame(epochs = c(\"Infancy\", \"Toddlerhood\", \"Childhood\"), values = I(list(c(6, 15), c(24, 35), c(58) )))"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p4--recommended-specify-inspect-exposure-histories","dir":"Articles","previous_headings":"","what":"P4. Recommended: Specify & Inspect Exposure Histories","title":"Preliminary Steps","text":"Exposure histories units users test substantive hypotheses, construction determined theoretical practical reasoning. 
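For the local-file option described in P2b, a minimal sketch of reading previously saved imputations back in as a list is shown below; the folder path and the "1.csv"-"m.csv" file naming are hypothetical and should match wherever the m imputed datasets were written:

folder <- "imputations/"  # hypothetical path to the saved imputed datasets
files <- list.files(folder, full.names = TRUE, pattern = "\\.csv$")
data <- lapply(files, read.csv)  # list of imputed data frames accepted by devMSMs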
strongly recommend users verify inspect exposure histories priori relation data hypotheses.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p4a--create-high-and-low-cutoff-values-for-continuous-exposures","dir":"Articles","previous_headings":"","what":"P4a. Create high and low cutoff values for continuous exposures","title":"Preliminary Steps","text":"First, continuously distributed exposures (regardless whether exposure epochs specified), recommend users indicate high low cutoff values optional input compareHistories()) devMSMs function (see Workflows vignettes). , specify hi_lo_cut, list, quantile value (0-1) considered high levels exposure, followed quantile value (0-1) considered low levels exposure (default median split). values may revised following inspection sample distribution across resulting exposure histories subsequent steps. final values used creating exposure histories Step 5 Workflows vignettes. , specify 60th 30th percentiles demarcate high low levels economic strain exposure, respectively.","code":"hi_lo_cut <- c(0.6, 0.3)"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p4b--specify-hypotheses-relevant-exposure-histories","dir":"Articles","previous_headings":"","what":"P4b. Specify hypotheses-relevant exposure histories","title":"Preliminary Steps","text":"strongly recommend users selective histories, developmental sequences high low exposure (exposure time points epochs), vital testing hypotheses. recommend user estimates compares subset possible exposure histories using reference comparison fields (rather comparing possible exposure histories). user can specify custom subset exposure histories using reference comparison fields optional inputs compareHistories() devMSMs function (see Workflows vignettes). conduct customized comparisons, users must provide least one unique valid history (e.g., “l-l-l”) reference , quotations, provide string (list strings) lowercase l’s h’s (separated -), corresponding exposure epoch (time point), signify sequence exposure levels (“low” “high”, respectively). supply reference history, comparisons provide least one unique valid history comparison , quotations, providing string (list strings) l’s h’s (separated “-”), corresponding exposure epoch, signify sequence exposure levels (“low” “high”, respectively) constitutes comparison exposure history/histories compared reference Step 5b Workflows vignettes. supply one comparisons, least one reference must specified. reference exposure history compared comparison history comparisons supplied multiple comparison correction. reference comparison specified, histories compared Step 5b Workflows vignettes. final reference comparison values established step used estimating comparing exposure histories Step 5b Workflows vignettes. 4 exposure main effects (either epochs exposure time points), user required select subset history comparisons (Step 5b Workflows vignettes), given base code (see hypotheses() function marginaleffects package) accommodate pairwise history comparisons 5 time points). 
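Because hi_lo_cut values are quantiles rather than raw scores, it can be useful to see what they correspond to on the exposure's original scale before settling on them. An illustrative base-R check (not a package function), assuming data_wide is the wide-formatted data frame and ESETA1 is measured at the time points listed above:

sapply(c(6, 15, 24, 35, 58), function(t) {
  quantile(data_wide[[paste0("ESETA1.", t)]], probs = c(0.3, 0.6), na.rm = TRUE)
})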
, specify low economic strain epochs (“l-l-l”) reference event comparison high levels epochs (“h-h-h”) well histories contain 1 dose exposure high economic strain different epochs.","code":"reference <- c(\"l-l-l\") comparison <- c(\"h-h-h\", \"l-l-h\", \"h-l-l\", \"l-h-l\")"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p4c--inspect-exposure-histories-and-data","dir":"Articles","previous_headings":"","what":"P4c. Inspect exposure histories and data","title":"Preliminary Steps","text":"users, highly recommend use helper inspectData() function (complete dataset long wide format imputed data case missingness) summarize exposure, outcome, confounders inspect sample distribution among exposure histories. Based user-specified exposure epochs high low quantile values (continuous exposures), function outputs table showing sample distribution across histories. strongly suggest visually inspecting table revising designation epochs /high low quantile values (continuous exposures) history contains reasonable number participants. gold standard required number per history cell, users guard extrapolation beyond scope data. example, data, using 75th 25th percentile cutoffs, histories represented less two cases thus re-evaluated cutoffs. Users may wish revise epoch designation high low cutoff values, applicable. function conducts summaries history distribution inspection imputed dataset imputed data supplied. insert Table 2 required inputs inspectData() : complete data (data frame wide long format, list imputed data frames wide format, mids object), exposure (e.g., “variable”), outcome (e.g., “variable.t”). Optional inputs home directory (save.= TRUE), epochs, high/low cutoff values continuous exposures, specification reference comparison histories. helper inspectData() function outputs following files home directory: correlation plot variables dataset, tables exposure outcome descriptive statistics, two summary tables confounders considered time point. , see summaries data types well reasonable cell counts specified histories, imputed dataset.","code":"inspectData(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, # required input ti_confounders = ti_confounders, tv_confounders = tv_confounders, # required input epochs = epochs, hi_lo_cut = hi_lo_cut, reference = reference, comparison = comparison, #optional input home_dir = home_dir, verbose = verbose, save.out = save.out) #optional input #> Using github PAT from envvar GITHUB_PAT #> Skipping install of 'devMSMs' from a github remote, the SHA1 (9a8a731a) has not changed since last install. #> Use `force = TRUE` to force installation #> USER ALERT: Below are the 67 variables spanning 33 unique domains that will be treated as confounding variables for the relation between ESETA1 and StrDif_Tot.58. #> Please inspect this list carefully. It should include all time-varying covariates, time invariant covariates, as well as lagged levels of exposure and outcome variables if they were collected at time points earlier than the outcome time point. 
#> [1] \"B18Raw.15\" \"B18Raw.24\" \"B18Raw.58\" \"B18Raw.6\" #> [5] \"BioDadInHH2\" \"caregiv_health\" \"CORTB.15\" \"CORTB.24\" #> [9] \"CORTB.6\" \"DrnkFreq\" \"EARS_TJo.24\" \"EARS_TJo.35\" #> [13] \"ESETA1.15\" \"ESETA1.24\" \"ESETA1.35\" \"ESETA1.58\" #> [17] \"ESETA1.6\" \"fscore.35\" \"fscore.58\" \"gov_assist\" #> [21] \"HOMEETA1.15\" \"HOMEETA1.24\" \"HOMEETA1.35\" \"HOMEETA1.58\" #> [25] \"HOMEETA1.6\" \"HomeOwnd\" \"IBRAttn.15\" \"IBRAttn.24\" #> [29] \"IBRAttn.6\" \"InRatioCor.15\" \"InRatioCor.24\" \"InRatioCor.35\" #> [33] \"InRatioCor.58\" \"InRatioCor.6\" \"KFASTScr\" \"LESMnNeg.24\" #> [37] \"LESMnNeg.35\" \"LESMnPos.24\" \"LESMnPos.35\" \"MDI.15\" #> [41] \"MDI.6\" \"peri_health\" \"PmAge2\" \"PmBlac2\" #> [45] \"PmEd2\" \"PmMrSt2\" \"RHasSO.15\" \"RHasSO.24\" #> [49] \"RHasSO.35\" \"RHasSO.58\" \"RHasSO.6\" \"RHealth\" #> [53] \"RMomAgeU\" \"SAAmylase.15\" \"SAAmylase.24\" \"SAAmylase.6\" #> [57] \"SmokTotl\" \"state\" \"StrDif_Tot.35\" \"StrDif_Tot.58\" #> [61] \"SurpPreg\" \"SWghtLB\" \"TcBlac2\" \"WndNbrhood.24\" #> [65] \"WndNbrhood.35\" \"WndNbrhood.58\" \"WndNbrhood.6\" #> #> The following variables are designated as numeric: #> [1] \"PmAge2, ALI_Le.35, CORTB.15, CORTB.24, CORTB.6, ESETA1.15, ESETA1.24, ESETA1.35, ESETA1.58, ESETA1.6, fscore.35, fscore.58, HOMEETA1.15, HOMEETA1.24, HOMEETA1.35, HOMEETA1.58, HOMEETA1.6, IBRAttn.15, IBRAttn.24, IBRAttn.6, InRatioCor.15, InRatioCor.24, InRatioCor.35, InRatioCor.58, InRatioCor.6, LESMnNeg.24, LESMnNeg.35, LESMnPos.24, LESMnPos.35, SAAmylase.15, SAAmylase.24, SAAmylase.6, StrDif_Tot.35, StrDif_Tot.58, WndNbrhood.24, WndNbrhood.35, WndNbrhood.58, WndNbrhood.6\" #> #> The following variables are designated as factors: #> [1] \"ID, state, TcBlac2, BioDadInHH2, HomeOwnd, PmBlac2, PmMrSt2, SurpPreg, DrnkFreq, RHealth, SmokTotl, RHasSO.15, RHasSO.24, RHasSO.35, RHasSO.58, RHasSO.6\" #> #> Table: Other variable types #> #> | |variable |type | #> |:--------------|:--------------|:-------| #> |KFASTScr |KFASTScr |integer | #> |PmEd2 |PmEd2 |integer | #> |RMomAgeU |RMomAgeU |integer | #> |SWghtLB |SWghtLB |integer | #> |peri_health |peri_health |integer | #> |caregiv_health |caregiv_health |integer | #> |gov_assist |gov_assist |integer | #> |B18Raw.15 |B18Raw.15 |integer | #> |B18Raw.24 |B18Raw.24 |integer | #> |B18Raw.58 |B18Raw.58 |integer | #> |B18Raw.6 |B18Raw.6 |integer | #> |EARS_TJo.24 |EARS_TJo.24 |integer | #> |EARS_TJo.35 |EARS_TJo.35 |integer | #> |MDI.15 |MDI.15 |integer | #> |MDI.6 |MDI.6 |integer | #> #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 1292 individuals in the sample, below is the distribution of the 437 (33.82%) individuals that fall into 5 out of the 5 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure ESETA1, respectively, across Infancy, Toddlerhood, Childhood. 
#> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure ESETA1 histories based on exposure main effects Infancy, Toddlerhood, Childhood containing time points c(6, 15), c(24, 35), 58: #> #> |history | n| #> |:-------|---:| #> |h-h-h | 229| #> |h-l-l | 16| #> |l-h-l | 10| #> |l-l-h | 31| #> |l-l-l | 151| #> #> Your outcome variable(s) have the following type(s): data.frame"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"references","dir":"Articles","previous_headings":"","what":"References","title":"Preliminary Steps","text":"Arel-Bundock, Vincent. 2023. marginaleffects: Predictions, Comparisons, Slopes, Marginal Means, Hypothesis Tests. https://CRAN.R-project.org/package=marginaleffects. Burchinal, M., Howes, C., Pianta, R., Bryant, D., Early, D., Clifford, R., & Barbarin, O. (2008). Predicting Child Outcomes End Kindergarten Quality Pre-Kindergarten Teacher–Child Interactions Instruction. Applied Developmental Science, 12(3), 140–153. https://doi.org/10.1080/10888690802199418 Granger, E., Sergeant, J. C., & Lunt, M. (2019). Avoiding pitfalls combining multiple imputation propensity scores. Statistics Medicine, 38(26), 5120–5132. https://doi.org/10.1002/sim.8355 Leyrat, C., Carpenter, J. R., Bailly, S., & Williamson, E. J. (2021). Common Methods Handling Missing Data Marginal Structural Models: Works . American Journal Epidemiology, 190(4), 663–672. https://doi.org/10.1093/aje/kwaa225 Shah, . D., Bartlett, J. W., Carpenter, J., Nicholas, O., & Hemingway, H. (2014). Comparison Random Forest Parametric Imputation Models Imputing Missing Data Using MICE: CALIBER Study. American Journal Epidemiology, 179(6), 764–774. https://doi.org/10.1093/aje/kwt312 Vernon-Feagans, L., Cox, M., Willoughby, M., Burchinal, M., Garrett-Peters, P., Mills-Koonce, R., Garrett-Peiers, P., Conger, R. D., & Bauer, P. J. (2013). Family Life Project: Epidemiological Developmental Study Young Children Living Poor Rural Communities. Monographs Society Research Child Development, 78(5), –150. van Buuren, Stef, Karin Groothuis-Oudshoorn. 2011. “mice: Multivariate Imputation Chained Equations r.” Journal Statistical Software 45 (3): 1–67. https://doi.org/10.18637/jss.v045.i03.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"home-directory","dir":"Articles","previous_headings":"","what":"Home Directory","title":"Specify Core Inputs","text":"Users required specify home directory, quotations, path designated folder output package, plan save intermediary final outputs package (default) setting save.= TRUE functions. sub directories created within home directory devMSMs functions automatically save.’ = TRUE.","code":"home_dir <- NA home_dir <- '/Users/isabella/Library/CloudStorage/Box-Box/BSL General/MSMs/testing/isa' #note: no / after"},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"exposure-variable","dir":"Articles","previous_headings":"","what":"Exposure Variable","title":"Specify Core Inputs","text":"Users required specify exposure variable input functions devMSMs. user must specify exposure, variable name exposure quotations, without time information appended (e.g., “variable”). 
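As an illustrative check (not required by the package), users can confirm that the wide data contain one ".t"-suffixed column per assessment wave of the exposure; data_wide is assumed to be the wide-formatted data frame:

exposure <- "ESETA1"
grep(paste0("^", exposure, "\\."), names(data_wide), value = TRUE)
# e.g., "ESETA1.6" "ESETA1.15" "ESETA1.24" "ESETA1.35" "ESETA1.58"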
Note dataset, exposure variables wide format labeled “.time” suffix (e.g., “variable.t”).","code":"exposure <- \"ESETA1\""},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"exposure-time-points","dir":"Articles","previous_headings":"","what":"Exposure Time Points","title":"Specify Core Inputs","text":"Next, users required provide information time points exposure assessed exposure_time_pts, required input createFormulas(), assessBalance(), fitModel(), compareHistories() devMSMs functions (see Workflows vignettes). user two options specifying exposure time points select option best serves theory regarding developmental timing practical constraints data modeling process. First, may specify time points exposure measured data. means balancing formulas created (Steps 1a, 2a, 3b Workflows vignettes) IPTW weights created (Steps 2b, 3c Workflows vignettes) assessed (Steps 2c, 3a, 4 Workflows vignettes) time points. case, epochs specified, time points included exposure main effects final substantive model history comparison (Step 5 Workflows vignettes). Second, may specify subset theoretically important time points exposure measured data. means balancing formulas created IPTW weights created assessed time points. , epochs specified, subsetted time points included exposure main effects final substantive models. Importantly, exposure variables time points exposure assessed included time-varying confounders balancing purposes . specification exposure epochs kept consistent throughout use devMSMs package. user intends specify exposure epochs (Preliminary Steps vignette Step P3), user include time points encompassed epochs exposure_time_pts. user intend specify exposure epochs (Preliminary Steps vignette Step P3), exposure_time_pts constitute exposure main effects final outcome model form basis histories used history comparison. case, user specifies 4 exposure time points, required conduct subset history comparisons (Step 5b Workflows vignettes), given base code (see hypotheses() function marginaleffects package) accommodate pairwise history comparisons 5 time points. elected create epochs infancy (6 15 months), toddlerhood (24 35 months), early childhood (58 months). Thus, input 6, 15, 24, 35, 58 exposure_time_pts.","code":"exposure_time_pts <- c(6, 15, 24, 35, 58)"},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"outcome-variable","dir":"Articles","previous_headings":"","what":"Outcome Variable","title":"Specify Core Inputs","text":"Users also required specify outcome variable designated final time point, required input functions devMSMs package. final time point equal (, ideally greater ) final exposure time point. Note instances outcome variable measured prior time points included time-varying confounders balancing purposes. Specifying outcome, variable name outcome time point collected appended following period (e.g., “variable.t”) corresponding variable name wide data, required package. Outcome variables dataset wide format labeled “.time” suffix.","code":"outcome <- \"StrDif_Tot.58\""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"time-invariant-confounders","dir":"Articles","previous_headings":"Confounders","what":"Time invariant confounders","title":"Specify Core Inputs","text":"Specifying least one time invariant confounder required use package required input createFormulas() function. 
Time invariant confounders include core demographic birth characteristics (e.g., sex, racial group membership, birth complications) might cause either exposure outcome, either directly proxy, suggested theory /evidenced strong associations existing literature. , user can also include interaction terms time invariant variables (e.g., “variable:variable”) inclusion balancing formula. Keep mind interactions include factor variables decomposed interactions factor level. ti_confounders, list, quotations, provide names confounders (e.g., “variable”, “variable:variable”) dataset time invariant.","code":"ti_confounders <- c(\"state\", \"BioDadInHH2\", \"PmAge2\", \"PmBlac2\", \"TcBlac2\", \"PmMrSt2\", \"PmEd2\", \"KFASTScr\", \"RMomAgeU\", \"RHealth\", \"HomeOwnd\", \"SWghtLB\", \"SurpPreg\", \"SmokTotl\", \"DrnkFreq\", \"peri_health\", \"caregiv_health\", \"gov_assist\" )"},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"time-varying-confounders","dir":"Articles","previous_headings":"Confounders","what":"Time-varying confounders","title":"Specify Core Inputs","text":"Specifying least time-varying exposures time-varying confounders required use package required input createFormulas() devMSMs function (see Workflows vignettes). tv_confounders list, quotations, provide names variables wide format (e.g., “variable.t”) dataset time-varying (including time-varying confounders, exposures, outcomes). include time-varying exposure variables outcome variables present dataset (e.g., “variable.t”). Note time-varying confounders also include confounders measured repeatedly time points (e.g., InRatioCor) collected one several specific time points, missing time points, time invariant. , user can also include interaction terms time-varying variables (e.g., “variable.t:variable.t”) time invariant time-varying variables (e.g., “variable.t:variable”) inclusion balancing formula. Keep mind interactions include factor variables decomposed interactions factor level.","code":"tv_confounders <- c(\"SAAmylase.6\",\"SAAmylase.15\", \"SAAmylase.24\", \"MDI.6\", \"MDI.15\", \"RHasSO.6\", \"RHasSO.15\", \"RHasSO.24\",\"RHasSO.35\", \"RHasSO.58\", \"WndNbrhood.6\",\"WndNbrhood.24\", \"WndNbrhood.35\", \"WndNbrhood.58\", \"IBRAttn.6\", \"IBRAttn.15\", \"IBRAttn.24\", \"B18Raw.6\", \"B18Raw.15\", \"B18Raw.24\", \"B18Raw.58\", \"HOMEETA1.6\", \"HOMEETA1.15\", \"HOMEETA1.24\", \"HOMEETA1.35\", \"HOMEETA1.58\", \"InRatioCor.6\", \"InRatioCor.15\", \"InRatioCor.24\", \"InRatioCor.35\", \"InRatioCor.58\", \"ESETA1.6\", \"ESETA1.15\", \"ESETA1.24\", \"ESETA1.35\", \"ESETA1.58\", #exposure variables required \"CORTB.6\", \"CORTB.15\", \"CORTB.24\", \"EARS_TJo.24\", \"EARS_TJo.35\", \"LESMnPos.24\", \"LESMnPos.35\", \"LESMnNeg.24\", \"LESMnNeg.35\", \"StrDif_Tot.35\", \"StrDif_Tot.58\", \"fscore.35\", \"fscore.58\" )"},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"references","dir":"Articles","previous_headings":"","what":"References","title":"Specify Core Inputs","text":"Arel-Bundock, Vincent. 2023. marginaleffects: Predictions, Comparisons, Slopes, Marginal Means, Hypothesis Tests. https://CRAN.R-project.org/package=marginaleffects.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"load-data","dir":"Articles","previous_headings":"","what":"Load data","title":"Workflow: Continuous Exposure","text":"first load data frame complete data. 
data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). (See Preliminary Steps vignette beginning data types, including missing data).","code":"data(\"sim_data_wide\", package = \"devMSMs\") data <- sim_data_wide"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"core-inputs","dir":"Articles","previous_headings":"","what":"Core inputs","title":"Workflow: Continuous Exposure","text":"Please see Specifying Core Inputs vignette detail following core inputs.","code":"set.seed(1234) home_dir <- NA # home_dir <- '/Users/isabella/Library/CloudStorage/Box-Box/BSL General/MSMs/testing/isa' #note: no / after exposure <- \"ESETA1\" exposure_time_pts <- c(6, 15, 24, 35, 58) outcome <- \"StrDif_Tot.58\" tv_confounders <- c(\"SAAmylase.6\",\"SAAmylase.15\", \"SAAmylase.24\", \"MDI.6\", \"MDI.15\", \"RHasSO.6\", \"RHasSO.15\", \"RHasSO.24\",\"RHasSO.35\", \"RHasSO.58\", \"WndNbrhood.6\",\"WndNbrhood.24\", \"WndNbrhood.35\", \"WndNbrhood.58\", \"IBRAttn.6\", \"IBRAttn.15\", \"IBRAttn.24\", \"B18Raw.6\", \"B18Raw.15\", \"B18Raw.24\", \"B18Raw.58\", \"HOMEETA1.6\", \"HOMEETA1.15\", \"HOMEETA1.24\", \"HOMEETA1.35\", \"HOMEETA1.58\", \"InRatioCor.6\", \"InRatioCor.15\", \"InRatioCor.24\", \"InRatioCor.35\", \"InRatioCor.58\", \"ESETA1.6\", \"ESETA1.15\", \"ESETA1.24\", \"ESETA1.35\", \"ESETA1.58\", \"CORTB.6\", \"CORTB.15\", \"CORTB.24\", \"EARS_TJo.24\", \"EARS_TJo.35\", \"LESMnPos.24\", \"LESMnPos.35\", \"LESMnNeg.24\", \"LESMnNeg.35\", \"StrDif_Tot.35\", \"StrDif_Tot.58\", \"fscore.35\", \"fscore.58\" ) ti_confounders <- c(\"state\", \"BioDadInHH2\", \"PmAge2\", \"PmBlac2\", \"TcBlac2\", \"PmMrSt2\", \"PmEd2\", \"KFASTScr\", \"RMomAgeU\", \"RHealth\", \"HomeOwnd\", \"SWghtLB\", \"SurpPreg\", \"SmokTotl\", \"DrnkFreq\", \"peri_health\", \"caregiv_health\", \"gov_assist\" )"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"phase-1-confounder-adjustment","dir":"Articles","previous_headings":"","what":"Phase 1: Confounder Adjustment","title":"Workflow: Continuous Exposure","text":"goal first phase minimize associations confounders exposure using IPTW balancing weights. strongly advise user carefully inspect balancing formula ensure weights created evaluated appropriately step.","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-1a--create-full-balancing-formulas-conduct-pre-balance-checking","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 1. Create Full Balancing Formulas & Conduct Pre-Balance Checking","what":"Step 1a. Create Full Balancing Formulas & Conduct Pre-Balance Checking","title":"Workflow: Continuous Exposure","text":"first create comprehensive, full balancing formulas relating exposure confounders time point using createFormulas() function (type = “full”). step creates full formulas containing measured confounding variables exposure time point, including time-invariant confounders, lagged time-varying confounders, well past levels exposure outcome (make sure listed time-varying confounders). 
code automatically excludes time-varying confounders contemporaneous time point given decisively differentiated mediators balanced (Thoemmes & Ong, 2016), although can modified user strong reason believe concurrent variable mediator (see ). include interactions covariates balancing formulas, please list composed time invariant covariates (e.g., “variable:variable” “variable.t:variable.t”) time invariant confounders, composed time-varying covariates (e.g., “variable.t:variable” “variable.t:variable.t”) time-varying confounders list. Interactions containing time-varying covariates treated time-varying confounders measured highest measurement time point constituent time points. note, interactions factor variables multiple levels can produce large number additional variables balancing formulas. required input create full balancing formulas using createFormulas() function : exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), list time-varying confounders (e.g. “variable.time”), list time invariant confounders (e.g., “variable”), setting type = “full”. Optional inputs create full balancing formulas using createFormulas() function follows. concur_conf: list, provide names time-varying confounders (e.g., “variable.time”) wish included concurrently balancing formulas (overriding default include lagged confounders). choose specify concurrent confounders, reliably distinguish mediators. user may also specify list custom formulas specifying custom list formulas, one exposure time point (e.g., “exposure.time ~ variable.time + variable +…”) formula format, entry named formula type exposure time point (e.g., “full_form-6”). abridged example shown . createFormulas() function automatically check custom formulas ensure correctly formatted formula exposure time point exposure dependent variable. However, user responsible ensuring custom formulas contain appropriate confounders formula type generating. chose create custom formulas use createFormulas() make . createFormulas function saves .csv .rds files containing balancing formulas exposure time point specified type (“full”) ‘formulas/full/’ folder. function returns list formulas labeled type, exposure, outcome, exposure time point. shown , createFormulas() creates balancing formula exposure time point. full formula contains time invariant confounders well lagged time-varying confounders time point. 
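Once the createFormulas() call below has been run, the returned list can also be inspected programmatically rather than only via the console messages; a minimal sketch, assuming the returned elements are named by formula type and exposure time point in the same way as the custom formulas described above (the element name used here is illustrative):

names(full_formulas)                 # one entry per exposure time point
full_formulas[["full_form-58"]]      # hypothetical element name for the 58-month formula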
inspect formulas ensure accurate creating IPTW balancing weights.","code":"# concur_conf <- \"B18Raw.15\" concur_conf <- NULL # custom <- list(\"full_form-6\" = as.formula(\"ESETA1.6 ~ BioDadInHH2 + DrnkFreq + gov_assist\"), # \"full_form-15\" = as.formula(\"ESETA1.15 ~ BioDadInHH2 + DrnkFreq + gov_assist\") # ) custom <- NULL type <- \"full\" full_formulas <- createFormulas(exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, #required type = type, ti_confounders = ti_confounders, tv_confounders = tv_confounders, #required concur_conf = concur_conf, custom = custom, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 6 is: #> ESETA1.6 ~ BioDadInHH2 + caregiv_health + DrnkFreq + gov_assist + #> HomeOwnd + KFASTScr + peri_health + PmAge2 + PmBlac2 + PmEd2 + #> PmMrSt2 + RHealth + RMomAgeU + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 15 is: #> ESETA1.15 ~ B18Raw.6 + BioDadInHH2 + caregiv_health + CORTB.6 + #> DrnkFreq + ESETA1.6 + gov_assist + HOMEETA1.6 + HomeOwnd + #> IBRAttn.6 + InRatioCor.6 + KFASTScr + MDI.6 + peri_health + #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.6 + RHealth + #> RMomAgeU + SAAmylase.6 + SmokTotl + state + SurpPreg + SWghtLB + #> TcBlac2 + WndNbrhood.6 #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 24 is: #> ESETA1.24 ~ B18Raw.15 + B18Raw.6 + BioDadInHH2 + caregiv_health + #> CORTB.15 + CORTB.6 + DrnkFreq + ESETA1.15 + ESETA1.6 + gov_assist + #> HOMEETA1.15 + HOMEETA1.6 + HomeOwnd + IBRAttn.15 + IBRAttn.6 + #> InRatioCor.15 + InRatioCor.6 + KFASTScr + MDI.15 + MDI.6 + #> peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.15 + #> RHasSO.6 + RHealth + RMomAgeU + SAAmylase.15 + SAAmylase.6 + #> SmokTotl + state + SurpPreg + SWghtLB + TcBlac2 + WndNbrhood.6 #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 35 is: #> ESETA1.35 ~ B18Raw.15 + B18Raw.24 + B18Raw.6 + BioDadInHH2 + #> caregiv_health + CORTB.15 + CORTB.24 + CORTB.6 + DrnkFreq + #> EARS_TJo.24 + ESETA1.15 + ESETA1.24 + ESETA1.6 + gov_assist + #> HOMEETA1.15 + HOMEETA1.24 + HOMEETA1.6 + HomeOwnd + IBRAttn.15 + #> IBRAttn.24 + IBRAttn.6 + InRatioCor.15 + InRatioCor.24 + #> InRatioCor.6 + KFASTScr + LESMnNeg.24 + LESMnPos.24 + MDI.15 + #> MDI.6 + peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + #> RHasSO.15 + RHasSO.24 + RHasSO.6 + RHealth + RMomAgeU + SAAmylase.15 + #> SAAmylase.24 + SAAmylase.6 + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 + WndNbrhood.24 + WndNbrhood.6 #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 58 is: #> ESETA1.58 ~ B18Raw.15 + B18Raw.24 + B18Raw.6 + BioDadInHH2 + #> caregiv_health + CORTB.15 + CORTB.24 + CORTB.6 + DrnkFreq + #> EARS_TJo.24 + EARS_TJo.35 + ESETA1.15 + ESETA1.24 + ESETA1.35 + #> ESETA1.6 + fscore.35 + gov_assist + HOMEETA1.15 + HOMEETA1.24 + #> HOMEETA1.35 + HOMEETA1.6 + HomeOwnd + IBRAttn.15 + IBRAttn.24 + #> IBRAttn.6 + InRatioCor.15 + InRatioCor.24 + InRatioCor.35 + #> InRatioCor.6 + KFASTScr + LESMnNeg.24 + LESMnNeg.35 + 
LESMnPos.24 + #> LESMnPos.35 + MDI.15 + MDI.6 + peri_health + PmAge2 + PmBlac2 + #> PmEd2 + PmMrSt2 + RHasSO.15 + RHasSO.24 + RHasSO.35 + RHasSO.6 + #> RHealth + RMomAgeU + SAAmylase.15 + SAAmylase.24 + SAAmylase.6 + #> SmokTotl + state + StrDif_Tot.35 + SurpPreg + SWghtLB + TcBlac2 + #> WndNbrhood.24 + WndNbrhood.35 + WndNbrhood.6 #> "},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-1b--conduct-exploratory-pre-balance-assessment","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 1. Create Full Balancing Formulas & Conduct Pre-Balance Checking","what":"Step 1b. Conduct Exploratory Pre-Balance Assessment","title":"Workflow: Continuous Exposure","text":"next step examines initial imbalance, strongly exposure relates confounder time point, measured confounders prior weighting using assessBalance() function (type = “prebalance”). function draws calcBalStats() function (see Assessing Balance Time-Varying Exposure section accompanying manuscript). assessBalance() function outputs balance statistics (correlations continuous exposures standardized mean differences binary exposures) relating exposure time point confounders table well plots. function also provides summary balance statistics averaging across time points (imputed datasets supplied). required inputs using assessBalance() function conduct pre-balance testing : complete data (data frame, mids object, list imputed datasets dataframes wide format), exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), full formulas (see Step 1a), setting type = “prebalance”. optional inputs follows. user may specify balance_thresh, threshold(s) determining confounder balance, one two ways. First, can provide single number value (0-1) absolute value standardized balance statistic (either correlation continuous exposures standardized group mean difference binary exposures) exposure confounders confounders considered balanced considered imbalanced (default 0.1; Stuart, 2010). Second, users may make priori assertion confounders important others based theory existing research. case, can provide two numbers represent balance thresholds important less important confounders, respectively. user supplies two balance thresholds provided, must also supply list important confounders (time-varying: “variable.t”, time invariant: “variable”) imp_conf field. balance threshold specification kept consistent throughout use workflow. recommended, provide two balancing thresholds identify income parent education important confounders relation economic strain behavior problems. assessBalance() function saves following .csv .html files ‘balance/prebalance/’ folder: tables balance statistics confounders, tables balance statistics covariates imbalanced (respect respective balance thresholds), overall balance summary table (averaged across imputed datasets). Within ‘balance/prebalance/plots/’ folder, function outputs .jpeg files summary love plots depicting confounder balance exposure time point. function returns data frame (list) balance statistics, balance thresholds, binary balanced tag confounder relevant exposure time point. output shows initial imbalance confounders exposure tables plots. 55 confounders imbalanced (labeled red font love plots) respect economic strain exposure respective balance threshold. love plots depict standardized associations confounder exposure exposure time point, vertical red dashed lines indicating balance thresholds. 
.","code":"balance_thresh <- c(0.05, 0.1) imp_conf <- c(\"InRatioCor.6\", \"InRatioCor.15\", \"InRatioCor.24\", \"InRatioCor.35\", \"InRatioCor.58\", \"PmEd2\") type <- \"prebalance\" formulas <- full_formulas prebalance_stats <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, type = type, formulas = formulas, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point prior to weighting, using full formulas. #> As shown below, 55 out of 191 (29%) covariates across time points, corresponding to 18 out of 33 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.13 (range= -0.24-0.32): #> #> Table: Imbalanced covariates using no weights and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 7| 11| 18| #> | 15| 16| 12| 28| #> | 24| 28| 9| 37| #> | 35| 38| 11| 49| #> | 58| 47| 12| 59| #> #> #> USER ALERT: For exposure ESETA1 using the full formulas and no weights : #> The median absolute value relation between exposure and confounder is 0.06 (range = -0.24 -0.32). #> As shown below, the following 55 covariates across time points out of 191 total (28.8%) spanning 18 domains out of 33 (54.55%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.13 (range=-0.24-0.32) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:--------------|----------:|----------:|--------:| #> |1 |ESETA1 | 6| 0|caregiv_health | 0.1750682| 0.10| 0| #> |2 |ESETA1 | 6| 0|gov_assist | 0.3162628| 0.10| 0| #> |3 |ESETA1 | 6| 0|KFASTScr | -0.1741547| 0.10| 0| #> |6 |ESETA1 | 6| 0|PmEd2 | -0.2440498| 0.05| 0| #> |7 |ESETA1 | 6| 0|RMomAgeU | -0.1308954| 0.10| 0| #> |9 |ESETA1 | 6| 0|BioDadInHH2 | -0.1245621| 0.10| 0| #> |12 |ESETA1 | 6| 0|PmBlac2 | 0.1802582| 0.10| 0| #> |13 |ESETA1 | 6| 0|PmMrSt2 | -0.1017754| 0.10| 0| #> |16 |ESETA1 | 6| 0|state | 0.1448002| 0.10| 0| #> |17 |ESETA1 | 6| 0|SurpPreg | 0.1045888| 0.10| 0| #> |18 |ESETA1 | 6| 0|TcBlac2 | 0.2058312| 0.10| 0| #> |19 |ESETA1 | 15| 6|B18Raw.6 | 0.1439834| 0.10| 0| #> |20 |ESETA1 | 15| 0|caregiv_health | 0.1104491| 0.10| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.1860003| 0.10| 0| #> |23 |ESETA1 | 15| 0|gov_assist | 0.1581273| 0.10| 0| #> |24 |ESETA1 | 15| 6|HOMEETA1.6 | -0.1583774| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.2275248| 0.05| 0| #> |31 |ESETA1 | 15| 0|PmEd2 | -0.1311598| 0.05| 0| #> |35 |ESETA1 | 15| 6|WndNbrhood.6 | -0.1109797| 0.10| 0| #> |36 |ESETA1 | 15| 0|BioDadInHH2 | -0.1020164| 0.10| 0| #> |39 |ESETA1 | 15| 0|PmBlac2 | 0.1344400| 0.10| 0| #> |44 |ESETA1 | 15| 0|state | 0.1006198| 0.10| 0| #> |46 |ESETA1 | 15| 0|TcBlac2 | 0.1327607| 0.10| 0| #> |47 |ESETA1 | 24| 15|B18Raw.15 | 0.1421575| 0.10| 0| #> |48 |ESETA1 | 24| 6|B18Raw.6 | 0.1557763| 0.10| 0| #> |52 |ESETA1 | 24| 15|ESETA1.15 | 0.1655498| 0.10| 0| #> |53 |ESETA1 | 24| 6|ESETA1.6 | 0.1403452| 0.10| 0| #> |54 |ESETA1 | 24| 0|gov_assist | 0.1547692| 0.10| 0| #> |55 |ESETA1 | 24| 15|HOMEETA1.15 | -0.1085277| 0.10| 0| #> |59 |ESETA1 | 24| 15|InRatioCor.15 | -0.1195160| 0.05| 0| #> |60 |ESETA1 | 24| 6|InRatioCor.6 | -0.1351805| 0.05| 0| #> |66 |ESETA1 | 24| 0|PmEd2 | -0.0903395| 
0.05| 0| #> |84 |ESETA1 | 35| 15|B18Raw.15 | 0.1170191| 0.10| 0| #> |92 |ESETA1 | 35| 15|ESETA1.15 | 0.1513316| 0.10| 0| #> |93 |ESETA1 | 35| 24|ESETA1.24 | 0.1342892| 0.10| 0| #> |94 |ESETA1 | 35| 6|ESETA1.6 | 0.1168198| 0.10| 0| #> |95 |ESETA1 | 35| 0|gov_assist | 0.1260387| 0.10| 0| #> |97 |ESETA1 | 35| 24|HOMEETA1.24 | -0.1089467| 0.10| 0| #> |102 |ESETA1 | 35| 15|InRatioCor.15 | -0.1218204| 0.05| 0| #> |103 |ESETA1 | 35| 24|InRatioCor.24 | -0.1488017| 0.05| 0| #> |104 |ESETA1 | 35| 6|InRatioCor.6 | -0.1428961| 0.05| 0| #> |112 |ESETA1 | 35| 0|PmEd2 | -0.1065020| 0.05| 0| #> |127 |ESETA1 | 35| 6|RHasSO.6 | 0.1215992| 0.10| 0| #> |133 |ESETA1 | 58| 15|B18Raw.15 | 0.1032047| 0.10| 0| #> |144 |ESETA1 | 58| 35|ESETA1.35 | 0.1203630| 0.10| 0| #> |146 |ESETA1 | 58| 35|fscore.35 | -0.1074297| 0.10| 0| #> |147 |ESETA1 | 58| 0|gov_assist | 0.1254127| 0.10| 0| #> |150 |ESETA1 | 58| 35|HOMEETA1.35 | -0.1017226| 0.10| 0| #> |151 |ESETA1 | 58| 6|HOMEETA1.6 | -0.1287940| 0.10| 0| #> |155 |ESETA1 | 58| 15|InRatioCor.15 | -0.1439390| 0.05| 0| #> |156 |ESETA1 | 58| 24|InRatioCor.24 | -0.1611115| 0.05| 0| #> |157 |ESETA1 | 58| 35|InRatioCor.35 | -0.1323394| 0.05| 0| #> |158 |ESETA1 | 58| 6|InRatioCor.6 | -0.1575428| 0.05| 0| #> |168 |ESETA1 | 58| 0|PmEd2 | -0.1407238| 0.05| 0| #> |191 |ESETA1 | 58| 0|TcBlac2 | 0.1000105| 0.10| 0|"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-2--create-simplified-balancing-formulas-determine-optimal-weighting-method","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment","what":"Step 2. Create Simplified Balancing Formulas & Determine Optimal Weighting Method","title":"Workflow: Continuous Exposure","text":"goal second step create shortened, parsimonious balancing formulas determining optimal IPTW weighting method successfully reduces imbalance.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"a--create-simplified-balancing-formulas","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 2. Create Simplified Balancing Formulas & Determine Optimal Weighting Method","what":"2a. Create Simplified Balancing Formulas","title":"Workflow: Continuous Exposure","text":"First, create shorter, parsimonious balancing formulas relating exposure confounders time point using createFormulas() function (type = ”short”). exposure time point, formulas contain time invariant confounders well time-varying confounders t-1 lag. logic balancing confounders recent prior time point (t-1 ) may achieve balance levels distal time points, given stability many confounders time. Importantly, empirically assess relax assumption needed subsequent steps (Steps 3a-b). See Step 1a instructions include confounder interactions. required input create shortened balancing formulas using createFormulas() function : exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), list time-varying confounders (e.g., “variable.time”), list time invariant confounders, setting type = “short”. addition optional input outlined Step 1a, user also option specify keep_conf, list time-varying confounders (e.g., “variable.t”) always retain lagged confounders shortened formulas. user may use argument retain specific time-varying confounders otherwise excluded step occur lags greater t-1 formula. , choose specify confounders always retain step. 
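After the createFormulas() call below has produced the short formulas, one rough way to see how much they shrink relative to the full formulas is to count right-hand-side terms; a sketch, assuming both objects are plain lists of formulas (one per exposure time point) as described earlier:

# approximate number of right-hand-side variables per exposure time point
cbind(full  = sapply(full_formulas,  function(f) length(all.vars(f)) - 1),
      short = sapply(short_formulas, function(f) length(all.vars(f)) - 1))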
createFormulas() function saves .csv .rds files containing balancing formulas exposure time point (e.g., see ) specified type (case, “short”) ‘formulas/short/’ folder. function returns list balancing formulas labeled type, exposure, outcome, exposure time point. , inspect shortened balancing formula exposure time point. formulas considerably shorter full formulas. instance, 58-month exposure time point, formula contains time invariant confounders time-varying confounders 35-month time point.","code":"keep_conf <- \"InRatioCor.6\" keep_conf <- NULL type <- \"short\" short_formulas <- createFormulas(exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, type = type, ti_confounders = ti_confounders, tv_confounders = tv_confounders, concur_conf = concur_conf, keep_conf = keep_conf, custom = custom, home_dir = home_dir, verbose = verbose, save.out = save.out) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 6 is: #> ESETA1.6 ~ BioDadInHH2 + caregiv_health + DrnkFreq + gov_assist + #> HomeOwnd + KFASTScr + peri_health + PmAge2 + PmBlac2 + PmEd2 + #> PmMrSt2 + RHealth + RMomAgeU + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 15 is: #> ESETA1.15 ~ B18Raw.6 + BioDadInHH2 + caregiv_health + CORTB.6 + #> DrnkFreq + ESETA1.6 + gov_assist + HOMEETA1.6 + HomeOwnd + #> IBRAttn.6 + InRatioCor.6 + KFASTScr + MDI.6 + peri_health + #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.6 + RHealth + #> RMomAgeU + SAAmylase.6 + SmokTotl + state + SurpPreg + SWghtLB + #> TcBlac2 + WndNbrhood.6 #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 24 is: #> ESETA1.24 ~ B18Raw.15 + BioDadInHH2 + caregiv_health + CORTB.15 + #> DrnkFreq + ESETA1.15 + gov_assist + HOMEETA1.15 + HomeOwnd + #> IBRAttn.15 + InRatioCor.15 + KFASTScr + MDI.15 + peri_health + #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.15 + RHealth + #> RMomAgeU + SAAmylase.15 + SmokTotl + state + SurpPreg + SWghtLB + #> TcBlac2 #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 35 is: #> ESETA1.35 ~ B18Raw.24 + BioDadInHH2 + caregiv_health + CORTB.24 + #> DrnkFreq + EARS_TJo.24 + ESETA1.24 + gov_assist + HOMEETA1.24 + #> HomeOwnd + IBRAttn.24 + InRatioCor.24 + KFASTScr + LESMnNeg.24 + #> LESMnPos.24 + peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + #> RHasSO.24 + RHealth + RMomAgeU + SAAmylase.24 + SmokTotl + #> state + SurpPreg + SWghtLB + TcBlac2 + WndNbrhood.24 #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 58 is: #> ESETA1.58 ~ BioDadInHH2 + caregiv_health + DrnkFreq + EARS_TJo.35 + #> ESETA1.35 + fscore.35 + gov_assist + HOMEETA1.35 + HomeOwnd + #> InRatioCor.35 + KFASTScr + LESMnNeg.35 + LESMnPos.35 + peri_health + #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.35 + RHealth + #> RMomAgeU + SmokTotl + state + StrDif_Tot.35 + SurpPreg + #> SWghtLB + 
TcBlac2 + WndNbrhood.35 #> "},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"b--create-iptw-balancing-weights-using-multiple-weighting-methods","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 2. Create Simplified Balancing Formulas & Determine Optimal Weighting Method","what":"2b. Create IPTW Balancing Weights using Multiple Weighting Methods","title":"Workflow: Continuous Exposure","text":"created shorter, simplified balancing formulas, now create first round IPTW balancing weights (Thoemmes & Ong, 2016) using createWeights() function, shortened balancing formulas, available weighting methods. function calls weightitMSM() function WeightIt package (Greifer, 2023) uses time-specific formulas create weights time point automatically multiplying together create one weight per person. Weights stabilized, recommended (Cole & Hernan, 2008; Thoemmes & Ong, 2016), distributions saved inspection. required inputs using createWeights() function create initial around IPTW balancing weights : complete data (data frame, mids object, list imputed datasets dataframes wide format), exposure (e.g., “variable”), outcome (e.g., “variable.time”), short formulas (see Step 2a). optional inputs follows. method, provide one following methods calculating balancing weights using weightitMSM() methods validated longitudinal exposures: “cbps” (Covariate Balancing Propensity Score weighting; default), “gbm” (generalized boosted model), “glm” (generalized linear model), “super” (SuperLearner via SuperLearner package; Polley et al., 2013). information can found WeightIt documentation. createWeights() function can also take number additional arguments passed weightitMSM () function (e.g., ‘criterion’, distribution’, ‘SL.library’). user selects SuperLearner (“super”) method, default super learner library (‘SL.library’) xx alternative library can entered input createWeights function. binary exposures, “cbps” method allows specify estimand either ATE, ATT, ATC. “glm”, “super”, “bart” can specify ATE, ATT, ATC, ATO, ATM, ATOS. “gbm”, can specify ATE, ATT, ATC, ATO, ATM. default estimand binary exposures ATE. advise interested user review WeightIt documentation information additional optional arguments available weighting methods. user can also specify read_in_from_file = TRUEif user previously created weights specific data, formula, weight type using function wishes read local file instead recreating . createWeights() function automatically conducts basic checks saved weights match data type, weights method, number formulas provided. user responsible making sure weights created appropriately. createWeights() function saves .rds file weights ‘weights’ folder, histogram weights distribution ‘weights/histograms/’ folder, .csv file data weights appended ‘weights/values/’ folder. function returns list weights objects form WeightItMSM output single nested list (labeled “0” data data frame format) nested lists imputed dataset (data imputed). , create IPTW weights using default “CBPS” method. shown , distribution heavy right tail (typical real-world data), median value 1.21. right tail distribution represents individuals experienced statistically unexpected levels exposure given levels confounders. create IPTW balancing weights using available methods. 
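Because only the method argument changes across these calls, the same set of weights could also be created in a single loop; a compact sketch, assuming the same inputs as the individual createWeights() calls shown below:

methods <- c("cbps", "glm", "gbm", "bart", "super")
all_weights <- lapply(methods, function(m)
  createWeights(data = data, exposure = exposure, outcome = outcome,
                formulas = short_formulas, method = m,
                home_dir = home_dir, verbose = verbose, save.out = save.out))
names(all_weights) <- methods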
shown , “glm” method produces similar distribution weights.","code":"formulas <- short_formulas method <- \"cbps\" weights.cbps <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> For the cbps weighting method, the median weight value is 1.21 (SD = 3.68; range = 0-60). method <- \"glm\" weights.glm <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> For the glm weighting method, the median weight value is 0.9 (SD = 1.73; range = 0.03-26). method <- \"gbm\" weights.gbm <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> For the gbm weighting method, the median weight value is 0.48 (SD = 1.02; range = 0-17). method <- \"bart\" weights.bart <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> For the bart weighting method, the median weight value is 0.71 (SD = 1.36; range = 0.03-25). method <- \"super\" weights.super <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Loading required package: nnls #> For the super weighting method, the median weight value is 0.99 (SD = 2.21; range = 0.03-40)."},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"c--assess-all-weighting-methods-to-determine-optimal-method","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 2. Create Simplified Balancing Formulas & Determine Optimal Weighting Method","what":"2c. Assess All Weighting Methods to Determine Optimal Method","title":"Workflow: Continuous Exposure","text":"Next, evaluate well weights created using different weighting methods reduced imbalance confounders provided short balancing formula, using assessBalance() function (type = “weighted”). function calls calcBalStats() function using short formulas specifies balance statistics calculated using IPTW weights supplied. assessBalance() function outputs balance statistics (correlations continuous exposures standardized mean differences binary exposures) relating exposure time point confounders table well plots. function also provides summary balance statistics averaging across time points (imputed datasets supplied). required inputs using assessBalance() function assess balance first round IPTW weights : complete data (data frame, mids object, list imputed datasets dataframes wide format), exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), providing short formulas (see Step 2a), setting type = “weighted”, providing weights just created. optional inputs described Step 1b. 
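Once balance has been assessed for each weighting method using the calls that follow, the methods can be compared side by side; a hedged sketch, assuming each balance_stats object returned for a single data frame is itself a data frame with the avg_bal and balanced columns shown in the tables below:

stats <- list(cbps = balance_stats.cbps, glm = balance_stats.glm,
              gbm  = balance_stats.gbm,  bart = balance_stats.bart,
              super = balance_stats.super)
sapply(stats, function(x)
  c(median_abs_cor = median(abs(x$avg_bal), na.rm = TRUE),
    n_imbalanced   = sum(x$balanced == 0, na.rm = TRUE)))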
assessBalance() function saves following .csv .html files ‘balance/weighted/’ folder: tables balance statistics confounders, tables balance statistics covariates imbalanced, overall balance summary table (averaged across imputed datasets). Within ‘balance/weighted/plots/’ folder, function outputs .jpeg files summary love plots depicting confounder balance exposure time point. function returns data frame (list) balance statistics, balance thresholds, binary balanced tag (1 = balanced, 0 = imbalanced) confounder relevant exposure time point. first assess balance “CBPS” weighting method. shown , “CBPS” weighting method short formulas, median absolute value correlation confounder exposure 0.03 8 confounders remain imbalanced. GLM weighting method, median absolute value correlation exposure confounders 0.02, 6 confounders remaining imbalanced. GBM weighting method, absolute median correlation exposure confounders 0.02, 9 confounders remaining imbalanced. BART weighting method, median absolute value correlation exposure confounder 0.02, 11 confounders remaining imbalanced. SuperLearner weighting method, median absolute value correlation exposure confounder 0.02, 5 confounders remaining imbalanced. optimal weighting method dataset method yields best confounder balance. iterations, identify best performing weighting method reduces imbalance exposure confounder (indicated lowest absolute value median correlation) fewest number confounders left imbalanced. example, identify SuperLearner optimal weighting method.","code":"type <- \"weighted\" formulas <- short_formulas weights <- weights.cbps balance_stats.cbps <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> As shown below, 5 out of 131 (4%) covariates across time points, corresponding to 5 out of 32 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.1 (range= -0.1-0.14): #> #> Table: Imbalanced covariates using cbps and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 17| 1| 18| #> | 15| 27| 1| 28| #> | 24| 27| 0| 27| #> | 35| 28| 2| 30| #> | 58| 27| 1| 28| #> #> #> USER ALERT: For exposure ESETA1 using the short formulas and cbps : #> The median absolute value relation between exposure and confounder is 0.03 (range = -0.1 -0.14). 
#> As shown below, the following 5 covariates across time points out of 131 total (3.82%) spanning 5 domains out of 33 (15.15%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.1 (range=-0.1-0.14) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |8 |ESETA1 | 6| 0|SWghtLB | -0.1039386| 0.10| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.1272972| 0.10| 0| #> |82 |ESETA1 | 35| 24|InRatioCor.24 | -0.0729568| 0.05| 0| #> |88 |ESETA1 | 35| 0|PmEd2 | -0.0686910| 0.05| 0| #> |120 |ESETA1 | 58| 35|WndNbrhood.35 | 0.1433257| 0.10| 0| weights <- weights.glm balance_stats.glm <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> As shown below, 6 out of 131 (5%) covariates across time points, corresponding to 3 out of 32 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.1 (range= -0.07-0.21): #> #> Table: Imbalanced covariates using glm and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 18| 0| 18| #> | 15| 25| 3| 28| #> | 24| 26| 1| 27| #> | 35| 28| 2| 30| #> | 58| 28| 0| 28| #> #> #> USER ALERT: For exposure ESETA1 using the short formulas and glm : #> The median absolute value relation between exposure and confounder is 0.02 (range = -0.07 -0.21). #> As shown below, the following 6 covariates across time points out of 131 total (4.58%) spanning 3 domains out of 33 (9.09%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.1 (range=-0.07-0.21) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |19 |ESETA1 | 15| 6|B18Raw.6 | 0.1072812| 0.10| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.2062601| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0620487| 0.05| 0| #> |50 |ESETA1 | 24| 15|ESETA1.15 | 0.1621505| 0.10| 0| #> |78 |ESETA1 | 35| 24|ESETA1.24 | 0.1025166| 0.10| 0| #> |82 |ESETA1 | 35| 24|InRatioCor.24 | -0.0689534| 0.05| 0| weights <- weights.gbm balance_stats.gbm <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. 
#> As shown below, 5 out of 131 (4%) covariates across time points, corresponding to 4 out of 32 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.1 (range= -0.1-0.19): #> #> Table: Imbalanced covariates using gbm and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 18| 0| 18| #> | 15| 27| 1| 28| #> | 24| 23| 4| 27| #> | 35| 30| 0| 30| #> | 58| 28| 0| 28| #> #> #> USER ALERT: For exposure ESETA1 using the short formulas and gbm : #> The median absolute value relation between exposure and confounder is 0.02 (range = -0.1 -0.19). #> As shown below, the following 5 covariates across time points out of 131 total (3.82%) spanning 4 domains out of 33 (12.12%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.1 (range=-0.1-0.19) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.1898210| 0.10| 0| #> |50 |ESETA1 | 24| 15|ESETA1.15 | 0.1510709| 0.10| 0| #> |52 |ESETA1 | 24| 15|HOMEETA1.15 | -0.1037627| 0.10| 0| #> |54 |ESETA1 | 24| 15|InRatioCor.15 | -0.0891084| 0.05| 0| #> |59 |ESETA1 | 24| 0|PmEd2 | -0.0530682| 0.05| 0| weights <- weights.bart balance_stats.bart <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> As shown below, 11 out of 131 (8%) covariates across time points, corresponding to 4 out of 32 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.11 (range= -0.13-0.23): #> #> Table: Imbalanced covariates using bart and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 17| 1| 18| #> | 15| 25| 3| 28| #> | 24| 24| 3| 27| #> | 35| 28| 2| 30| #> | 58| 26| 2| 28| #> #> #> USER ALERT: For exposure ESETA1 using the short formulas and bart : #> The median absolute value relation between exposure and confounder is 0.02 (range = -0.13 -0.23). 
#> As shown below, the following 11 covariates across time points out of 131 total (8.4%) spanning 4 domains out of 33 (12.12%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.11 (range=-0.13-0.23) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |6 |ESETA1 | 6| 0|PmEd2 | -0.0595407| 0.05| 0| #> |19 |ESETA1 | 15| 6|B18Raw.6 | 0.1179861| 0.10| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.2253647| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.1001925| 0.05| 0| #> |47 |ESETA1 | 24| 15|B18Raw.15 | 0.1113215| 0.10| 0| #> |50 |ESETA1 | 24| 15|ESETA1.15 | 0.1913959| 0.10| 0| #> |54 |ESETA1 | 24| 15|InRatioCor.15 | -0.1310088| 0.05| 0| #> |78 |ESETA1 | 35| 24|ESETA1.24 | 0.1236928| 0.10| 0| #> |82 |ESETA1 | 35| 24|InRatioCor.24 | -0.0681214| 0.05| 0| #> |106 |ESETA1 | 58| 35|ESETA1.35 | 0.1106226| 0.10| 0| #> |110 |ESETA1 | 58| 35|InRatioCor.35 | -0.0514414| 0.05| 0| weights <- weights.super balance_stats.super <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> As shown below, 5 out of 131 (4%) covariates across time points, corresponding to 3 out of 32 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.11 (range= -0.07-0.23): #> #> Table: Imbalanced covariates using super and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 18| 0| 18| #> | 15| 25| 3| 28| #> | 24| 26| 1| 27| #> | 35| 29| 1| 30| #> | 58| 28| 0| 28| #> #> #> USER ALERT: For exposure ESETA1 using the short formulas and super : #> The median absolute value relation between exposure and confounder is 0.02 (range = -0.08 -0.23). 
#> As shown below, the following 5 covariates across time points out of 131 total (3.82%) spanning 3 domains out of 33 (9.09%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.11 (range=-0.07-0.23) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:------------|----------:|----------:|--------:| #> |19 |ESETA1 | 15| 6|B18Raw.6 | 0.1007296| 0.10| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.2280065| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0674695| 0.05| 0| #> |50 |ESETA1 | 24| 15|ESETA1.15 | 0.1599982| 0.10| 0| #> |78 |ESETA1 | 35| 24|ESETA1.24 | 0.1056005| 0.10| 0|"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-3-create-updated-formulas-re-specify-weights-using-optimal-weighting-method","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment","what":"Step 3: Create Updated Formulas & Re-Specify Weights Using Optimal Weighting Method","title":"Workflow: Continuous Exposure","text":"goal next step assess best-performing weights created shortened balancing formulas (containing time-varying confounders t-1) relative full balancing formulas, add shortened formulas time-varying confounders lags > t-1 successfully balanced, create final round weights.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-3a--assess-balance-with-full-balancing-formulas","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 3: Create Updated Formulas & Re-Specify Weights Using Optimal Weighting Method","what":"Step 3a. Assess balance with full balancing formulas","title":"Workflow: Continuous Exposure","text":"next assess whether weights created previous step best-performing weights method (.e., using SuperLearner method) using simplified balancing formulas also achieve balance full formulas. , revisit assumption balancing proximal time-varying confounders (t-1) confers balance confounders distal prior time points (t-1+). assessing time point well weights just created using short formulas successfully balance confounders (including time-varying confounders time points prior) original, full formulas. use assessBalance() function (type = “weighted”) full balancing formulas. required inputs using assessBalance() function assess best weights achieve balance full formulas : complete data (data frame, mids object, list imputed datasets dataframes wide format), exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), providing full formulas (see Step 1a), setting type = “weighted”, providing best weights (see Step 2c). optional inputs detailed Step 1b. assessBalance() function saves following .csv .html files ‘balance/weighted’ folder: tables balance statistics confounders, tables balance statistics covariates imbalanced, overall balance summary table (averaged across imputed datasets). Within ‘balance/type/plots’ folder, function outputs .jpeg files summary love plots depicting confounder balance exposure time point. function returns data frame (list) balance statistics, balance thresholds, binary balanced tag confounder relevant exposure time point. , assess weights created using SuperLearner method relative full balancing formulas. 
shown , using SuperLearner weighting method full formulas, find median absolute value correlation exposure confounder 0.03 total 13 confounders remaining imbalanced.","code":"type <- \"weighted\" formulas <- full_formulas weights <- weights.super balance_stats <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using full formulas. #> As shown below, 13 out of 191 (7%) covariates across time points, corresponding to 4 out of 33 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.11 (range= -0.1-0.23): #> #> Table: Imbalanced covariates using super and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 18| 0| 18| #> | 15| 25| 3| 28| #> | 24| 34| 3| 37| #> | 35| 44| 5| 49| #> | 58| 57| 2| 59| #> #> #> USER ALERT: For exposure ESETA1 using the full formulas and super : #> The median absolute value relation between exposure and confounder is 0.03 (range = -0.1 -0.23). #> As shown below, the following 13 covariates across time points out of 191 total (6.81%) spanning 4 domains out of 33 (12.12%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.11 (range=-0.1-0.23) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |19 |ESETA1 | 15| 6|B18Raw.6 | 0.1007296| 0.10| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.2280065| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0674695| 0.05| 0| #> |52 |ESETA1 | 24| 15|ESETA1.15 | 0.1599982| 0.10| 0| #> |53 |ESETA1 | 24| 6|ESETA1.6 | 0.1995292| 0.10| 0| #> |60 |ESETA1 | 24| 6|InRatioCor.6 | -0.0672391| 0.05| 0| #> |92 |ESETA1 | 35| 15|ESETA1.15 | 0.2130827| 0.10| 0| #> |93 |ESETA1 | 35| 24|ESETA1.24 | 0.1056005| 0.10| 0| #> |94 |ESETA1 | 35| 6|ESETA1.6 | 0.1770972| 0.10| 0| #> |101 |ESETA1 | 35| 6|IBRAttn.6 | -0.1028477| 0.10| 0| #> |104 |ESETA1 | 35| 6|InRatioCor.6 | -0.0775183| 0.05| 0| #> |142 |ESETA1 | 58| 15|ESETA1.15 | 0.1059600| 0.10| 0| #> |155 |ESETA1 | 58| 15|InRatioCor.15 | -0.0567426| 0.05| 0|"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-3b--update-simplified-formulas","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 3: Create Updated Formulas & Re-Specify Weights Using Optimal Weighting Method","what":"Step 3b. Update simplified formulas","title":"Workflow: Continuous Exposure","text":"Subsequently, update shortened formulas include time-varying confounders (t-1 +) successfully balanced full formulas, hown . , create final round balancing formulas using createFormulas() function (setting type = “update\" providing balance statistics bal_stats field). createFormulas() function draws user-provided balance statistics automatically identify add formulas exposure time point time-varying confounders lags greater 1 remain imbalanced weighting. function displays balancing formula console message user time-varying confounders added. 
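To preview which covariates the update is likely to add, the imbalanced time-varying confounders can be pulled directly from the balance statistics created in Step 3a; a rough sketch, assuming balance_stats is a single data frame with the exp_time, covar_time, covariate, and balanced columns shown above (createFormulas() itself restricts additions to confounders at lags greater than t-1):

imbal_tv <- subset(balance_stats, balanced == 0 & covar_time > 0)
split(imbal_tv$covariate, imbal_tv$exp_time)   # imbalanced time-varying confounders by exposure time point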
required input update shortened balancing formulas using createFormulas() function : exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), list time-varying confounders (e.g., “variable.time”), list time invariant confounders (e.g., “variable”), setting type = “update”, providing bal_stats balance statistics just created Step 3a. optional input detailed Step 1a. createFormulas() function saves .csv .rds files containing balancing formulas exposure time point specified type ‘formulas/update/’ folder. function returns list balancing formulas labeled type, exposure, outcome, exposure time point. shown , several imbalanced confounders lags greater t-1 added short formulas exposure time points 35-58. instance, 35-month time point, economic strain 6 15 months well attention problems income 6 months added balancing formula.","code":"type <- \"update\" bal_stats <- balance_stats updated_formulas <- createFormulas(exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, #required type = type, ti_confounders = ti_confounders, tv_confounders = tv_confounders, bal_stats = bal_stats, #required concur_conf = concur_conf, keep_conf = keep_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> The update formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 6 is: #> ESETA1.6 ~ BioDadInHH2 + caregiv_health + DrnkFreq + gov_assist + #> HomeOwnd + KFASTScr + peri_health + PmAge2 + PmBlac2 + PmEd2 + #> PmMrSt2 + RHealth + RMomAgeU + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For ESETA1 at exposure time point 15 no time-varying confounders at additional lags were added. 
#> #> The update formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 15 is: #> ESETA1.15 ~ B18Raw.6 + BioDadInHH2 + caregiv_health + CORTB.6 + #> DrnkFreq + ESETA1.6 + gov_assist + HOMEETA1.6 + HomeOwnd + #> IBRAttn.6 + InRatioCor.6 + KFASTScr + MDI.6 + peri_health + #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.6 + RHealth + #> RMomAgeU + SAAmylase.6 + SmokTotl + state + SurpPreg + SWghtLB + #> TcBlac2 + WndNbrhood.6 #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For ESETA1 at exposure time point 24 the following covariate(s) will be added to the short balancing formula: #> ESETA1.6, InRatioCor.6 #> #> The update formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 24 is: #> ESETA1.24 ~ B18Raw.15 + BioDadInHH2 + caregiv_health + CORTB.15 + #> DrnkFreq + ESETA1.15 + ESETA1.6 + gov_assist + HOMEETA1.15 + #> HomeOwnd + IBRAttn.15 + InRatioCor.15 + InRatioCor.6 + KFASTScr + #> MDI.15 + peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + #> RHasSO.15 + RHealth + RMomAgeU + SAAmylase.15 + SmokTotl + #> state + SurpPreg + SWghtLB + TcBlac2 #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For ESETA1 at exposure time point 35 the following covariate(s) will be added to the short balancing formula: #> ESETA1.15, ESETA1.6, IBRAttn.6, InRatioCor.6 #> #> The update formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 35 is: #> ESETA1.35 ~ B18Raw.24 + BioDadInHH2 + caregiv_health + CORTB.24 + #> DrnkFreq + EARS_TJo.24 + ESETA1.15 + ESETA1.24 + ESETA1.6 + #> gov_assist + HOMEETA1.24 + HomeOwnd + IBRAttn.24 + IBRAttn.6 + #> InRatioCor.24 + InRatioCor.6 + KFASTScr + LESMnNeg.24 + LESMnPos.24 + #> peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.24 + #> RHealth + RMomAgeU + SAAmylase.24 + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 + WndNbrhood.24 #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For ESETA1 at exposure time point 58 the following covariate(s) will be added to the short balancing formula: #> ESETA1.15, InRatioCor.15 #> #> The update formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 58 is: #> ESETA1.58 ~ BioDadInHH2 + caregiv_health + DrnkFreq + EARS_TJo.35 + #> ESETA1.15 + ESETA1.35 + fscore.35 + gov_assist + HOMEETA1.35 + #> HomeOwnd + InRatioCor.15 + InRatioCor.35 + KFASTScr + LESMnNeg.35 + #> LESMnPos.35 + peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + #> RHasSO.35 + RHealth + RMomAgeU + SmokTotl + state + StrDif_Tot.35 + #> SurpPreg + SWghtLB + TcBlac2 + WndNbrhood.35 #> "},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-3c--create-final-balancing-weights","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 3: Create Updated Formulas & Re-Specify Weights Using Optimal Weighting Method","what":"Step 3c. Create final balancing weights","title":"Workflow: Continuous Exposure","text":"Next, create final set balancing weights using optimal weighting method identified Step 2c final, updated simplified formulas previous step using createWeights() function (method = “super’), SuperLearner method optimal weighting method identified Step 2c. 
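After the createWeights() call below has been run, the person-level weights themselves can be examined directly; a minimal sketch, assuming (as described above for data supplied as a single data frame) the weightitMSM fit is stored under the element "0":

w <- final_weights[["0"]]$weights
summary(w)                     # five-number summary of the stabilized weights
quantile(w, c(0.95, 0.99))     # where the right tail sits, to inform trimming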
This function calls the weightitMSM() function of the WeightIt package (Greifer, 2023), which uses the time-specific formulas to create weights at each time point before automatically multiplying them together to create one weight per person. Weights are stabilized, as is recommended (Cole & Hernán, 2008; Thoemmes & Ong, 2016), and their distributions are saved in the home directory for inspection. The required inputs for using the createWeights() function to create the final round of IPTW balancing weights using the updated short balancing formulas are: complete data (a data frame, mids object, or list of imputed datasets as dataframes in wide format), exposure (variable and time points), outcome (e.g., “variable.time”), the best-performing weights method, and the updated formulas (see Step 3a). The optional inputs for the createWeights() function are listed in Step 2b. The createWeights() function saves an .rds file of the weights in the ‘weights/’ folder, a histogram of the weights distribution in the ‘weights/histograms/’ folder, and a .csv file of the data with the weights appended in the ‘weights/values/’ folder. The function returns a list of weights objects in the form of weightitMSM output, with the list of weights being either a single nested list (labeled “0” when the data are in data frame format) or nested lists for each imputed dataset (when the data are imputed). Here, we use the updated formulas and the SuperLearner weighting method to create a new round of IPTW balancing weights. As shown below, the weights have a median value of 1 and a heavy right tail.","code":"formulas <- updated_formulas method <- \"super\" #all inputs final_weights <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> For the super weighting method, the median weight value is 1 (SD = 1.02; range = 0.03-11)."},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"d--trim-final-balancing-weights","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 3: Create Updated Formulas & Re-Specify Weights Using Optimal Weighting Method","what":"3d. Trim final balancing weights","title":"Workflow: Continuous Exposure","text":"The next step is to trim (winsorize) this final set of weights to eliminate the heavy right tail of its distribution using the trimWeights() function. This function draws on the WeightIt package (Greifer, 2023) and plots and summarizes the trimmed weights. The function outputs a list of trimmed weights, either as a single nested list (labeled “0” when the data are in data frame format) or as nested lists for each imputed dataset (when the data are imputed). The required inputs for the trimWeights() function are: exposure (variable and time points), outcome (e.g., “variable.time”), and the final weights just created. An optional input allows the user to specify a quantile value (0-1; default is 0.95) above which the weights are replaced with the weight value at that quantile, in order to reduce the heavy right tail. Here, we use the default 95th percentile for trimming the weights. The trimWeights() function saves an .rds file of the trimmed weights in the ‘weights/values/’ folder and a histogram of the trimmed weights in the ‘weights/histograms/’ folder. The function returns a list of weights objects, containing the trimmed weights, in the form of weightitMSM output. As shown below, the weights still have a median value of 1 but a much shorter right tail. We also create trimmed weights using two additional quantile values at +/- ~0.03 of the previously chosen quantile value, in order to conduct the recommended sensitivity analyses at subsequent steps. We first create weights at the 92nd quantile value, and then at the 98th quantile value. We find comparable descriptive statistics across the sets of weights, with only the upper range value varying by quantile cutoff, as illustrated in the sketch below.
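One quick way to eyeball those descriptive statistics side by side is a minimal sketch like the following (not a devMSMs function); it assumes the data are in a single data frame, so that each trimmed weights list created by the trimWeights() calls below is labeled "0" and holds a weightitMSM object with a `weights` component.

# hypothetical inspection sketch; object names come from the trimWeights() calls below
w_list <- list(p95 = trim_weights, p92 = trim_weights.s1, p98 = trim_weights.s2)
sapply(w_list, function(w) summary(w[["0"]]$weights))  # compare min/median/max across trim values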
We assess the consequences of these different ranges in subsequent steps.","code":"quantile <- 0.95 weights <- final_weights trim_weights <- trimWeights(exposure = exposure, outcome = outcome, weights = weights, #required quantile = quantile, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Trimming weights to 95%. #> #> For the ESETA1-StrDif_Tot.58 relation, following trimming at the 0.95 quantile, the median weight value is 1 (SD= 0.65; range= 0.03-3). quantile <- 0.92 trim_weights.s1 <- trimWeights(exposure = exposure, outcome = outcome, weights = weights, #required quantile = quantile, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Trimming weights to 92%. #> #> For the ESETA1-StrDif_Tot.58 relation, following trimming at the 0.92 quantile, the median weight value is 1 (SD= 0.58; range= 0.03-2). quantile <- 0.98 trim_weights.s2 <- trimWeights(exposure = exposure, outcome = outcome, weights = weights, #required quantile = quantile, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Trimming weights to 98%. #> #> For the ESETA1-StrDif_Tot.58 relation, following trimming at the 0.98 quantile, the median weight value is 1 (SD= 0.77; range= 0.03-4)."},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-4-conduct-final-balance-assessment","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment","what":"Step 4: Conduct Final Balance Assessment","title":"Workflow: Continuous Exposure","text":"Having created and trimmed the final set of IPTW balancing weights, the next step is to conduct a final evaluation of how well they reduce imbalance of all possible confounders. We assess the performance of the final weights from the previous step using the assessBalance() function (type = “weighted”) with the full formulas. The required inputs for using the assessBalance() function to assess how well the final, trimmed weights achieve balance for the full formulas are: complete data (a data frame, mids object, or list of imputed datasets as dataframes in wide format), exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), the full formulas (see Step 1a), setting type = “weighted”, and the final, trimmed weights (see Step 3b). The optional inputs for the assessBalance() function are detailed in Step 1b. The assessBalance() function saves the following .csv and .html files to the ‘balance/weighted/’ folder: tables of balance statistics for all confounders, tables of balance statistics for only the covariates that remain imbalanced, and an overall balance summary table (averaged across imputed datasets where applicable). Within the ‘balance/weighted/plots/’ folder, the function outputs .jpeg files of summary love plots depicting confounder balance at each exposure time point. The function returns a data frame (or list) of balance statistics, balance thresholds, and a binary balanced tag for each confounder relevant to each exposure time point. Here, we assess the final weights using the full formulas. From this assessment, we find that confounders from 3 domains (economic strain, income-to-needs ratio, and parental education, the ones we held to the more stringent balancing threshold due to their theoretical importance as confounders) remain imbalanced at several exposure time points, with a median absolute value correlation of 0.07 (range = -0.09-0.16). The largest remaining correlations between exposure and confounder are between economic strain and lagged levels of economic strain, suggesting high stability of this construct over time. At the outcome modeling step (Step 5), the user has the option to include any confounders that are time invariant or measured at the first time point (6 months) and that remain imbalanced following this final balance assessment as covariates. Here, we inspect the table manually to list the imbalanced confounders and assign them to covariates; a sketch of how this could be done programmatically follows below.
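As a rough programmatic check (a sketch only, under the assumption that the object returned below by assessBalance() is a single data frame with the `covariate`, `covar_time`, and `balanced` columns shown in its printed tables), the imbalanced baseline and 6-month confounders could be pulled out as follows:

# hypothetical sketch; final_balance_stats is created by the assessBalance() call below
imbal <- subset(final_balance_stats, balanced == 0 & covar_time %in% c(0, 6))
unique(imbal$covariate)  # candidates to carry forward as covariates in Step 5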
Subsequently, we also assess balance for the weights trimmed at the two additional quantile values, to assess whether the final balance assessment is sensitive to the trim value. Importantly, if save.out = TRUE, running these analyses will overwrite the output of the main balance assessment unless the main output is renamed or re-located to a new folder first. Additionally, after running the first sensitivity check, its output will be overwritten by the second sensitivity check unless it, too, is renamed or re-located to a new folder first. We first assess balance for the weights trimmed at the 92nd quantile value. Here, we find that the same constructs remain imbalanced to a similar degree. We next assess balance for the weights trimmed at the 98th quantile value. Here, we also find a similar level of imbalance in these constructs.","code":"type <- \"weighted\" formulas <- full_formulas weights <- trim_weights final_balance_stats <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using full formulas. #> As shown below, 19 out of 191 (10%) covariates across time points, corresponding to 3 out of 33 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.07 (range= -0.09-0.16): #> #> Table: Imbalanced covariates using super and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 17| 1| 18| #> | 15| 25| 3| 28| #> | 24| 32| 5| 37| #> | 35| 42| 7| 49| #> | 58| 56| 3| 59| #> #> #> USER ALERT: For exposure ESETA1 using the full formulas and super : #> The median absolute value relation between exposure and confounder is 0.02 (range = -0.09 -0.16). 
#> As shown below, the following 19 covariates across time points out of 191 total (9.95%) spanning 3 domains out of 33 (9.09%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.07 (range=-0.09-0.16) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |6 |ESETA1 | 6| 0|PmEd2 | -0.0675636| 0.05| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.1582999| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0791760| 0.05| 0| #> |31 |ESETA1 | 15| 0|PmEd2 | -0.0668465| 0.05| 0| #> |52 |ESETA1 | 24| 15|ESETA1.15 | 0.1544269| 0.10| 0| #> |53 |ESETA1 | 24| 6|ESETA1.6 | 0.1178082| 0.10| 0| #> |59 |ESETA1 | 24| 15|InRatioCor.15 | -0.0630368| 0.05| 0| #> |60 |ESETA1 | 24| 6|InRatioCor.6 | -0.0721363| 0.05| 0| #> |66 |ESETA1 | 24| 0|PmEd2 | -0.0539022| 0.05| 0| #> |92 |ESETA1 | 35| 15|ESETA1.15 | 0.1367120| 0.10| 0| #> |93 |ESETA1 | 35| 24|ESETA1.24 | 0.1130236| 0.10| 0| #> |94 |ESETA1 | 35| 6|ESETA1.6 | 0.1038148| 0.10| 0| #> |102 |ESETA1 | 35| 15|InRatioCor.15 | -0.0586569| 0.05| 0| #> |103 |ESETA1 | 35| 24|InRatioCor.24 | -0.0866334| 0.05| 0| #> |104 |ESETA1 | 35| 6|InRatioCor.6 | -0.0741025| 0.05| 0| #> |112 |ESETA1 | 35| 0|PmEd2 | -0.0539368| 0.05| 0| #> |156 |ESETA1 | 58| 24|InRatioCor.24 | -0.0538865| 0.05| 0| #> |157 |ESETA1 | 58| 35|InRatioCor.35 | -0.0504691| 0.05| 0| #> |158 |ESETA1 | 58| 6|InRatioCor.6 | -0.0551483| 0.05| 0| covariates <- c(\"PmEd2\", \"ESETA1.6\", \"InRatioCor.6\") weights <- trim_weights.s1 final_balance_stats.s1 <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using full formulas. #> As shown below, 21 out of 191 (11%) covariates across time points, corresponding to 4 out of 33 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.08 (range= -0.09-0.16): #> #> Table: Imbalanced covariates using super and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 16| 2| 18| #> | 15| 25| 3| 28| #> | 24| 32| 5| 37| #> | 35| 42| 7| 49| #> | 58| 55| 4| 59| #> #> #> USER ALERT: For exposure ESETA1 using the full formulas and super : #> The median absolute value relation between exposure and confounder is 0.03 (range = -0.09 -0.16). 
#> As shown below, the following 21 covariates across time points out of 191 total (10.99%) spanning 4 domains out of 33 (12.12%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.08 (range=-0.09-0.16) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |2 |ESETA1 | 6| 0|gov_assist | 0.1121603| 0.10| 0| #> |6 |ESETA1 | 6| 0|PmEd2 | -0.0773531| 0.05| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.1572402| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0859973| 0.05| 0| #> |31 |ESETA1 | 15| 0|PmEd2 | -0.0693023| 0.05| 0| #> |52 |ESETA1 | 24| 15|ESETA1.15 | 0.1518122| 0.10| 0| #> |53 |ESETA1 | 24| 6|ESETA1.6 | 0.1157459| 0.10| 0| #> |59 |ESETA1 | 24| 15|InRatioCor.15 | -0.0702066| 0.05| 0| #> |60 |ESETA1 | 24| 6|InRatioCor.6 | -0.0797116| 0.05| 0| #> |66 |ESETA1 | 24| 0|PmEd2 | -0.0577498| 0.05| 0| #> |92 |ESETA1 | 35| 15|ESETA1.15 | 0.1356838| 0.10| 0| #> |93 |ESETA1 | 35| 24|ESETA1.24 | 0.1124710| 0.10| 0| #> |94 |ESETA1 | 35| 6|ESETA1.6 | 0.1022136| 0.10| 0| #> |102 |ESETA1 | 35| 15|InRatioCor.15 | -0.0641473| 0.05| 0| #> |103 |ESETA1 | 35| 24|InRatioCor.24 | -0.0906605| 0.05| 0| #> |104 |ESETA1 | 35| 6|InRatioCor.6 | -0.0759835| 0.05| 0| #> |112 |ESETA1 | 35| 0|PmEd2 | -0.0546416| 0.05| 0| #> |155 |ESETA1 | 58| 15|InRatioCor.15 | -0.0515929| 0.05| 0| #> |156 |ESETA1 | 58| 24|InRatioCor.24 | -0.0618351| 0.05| 0| #> |157 |ESETA1 | 58| 35|InRatioCor.35 | -0.0556880| 0.05| 0| #> |158 |ESETA1 | 58| 6|InRatioCor.6 | -0.0625095| 0.05| 0| weights <- trim_weights.s2 final_balance_stats.s2 <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using full formulas. #> As shown below, 14 out of 191 (7%) covariates across time points, corresponding to 3 out of 33 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.08 (range= -0.08-0.16): #> #> Table: Imbalanced covariates using super and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 17| 1| 18| #> | 15| 25| 3| 28| #> | 24| 33| 4| 37| #> | 35| 43| 6| 49| #> | 58| 59| 0| 59| #> #> #> USER ALERT: For exposure ESETA1 using the full formulas and super : #> The median absolute value relation between exposure and confounder is 0.02 (range = -0.08 -0.16). 
#> As shown below, the following 14 covariates across time points out of 191 total (7.33%) spanning 3 domains out of 33 (9.09%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.08 (range=-0.08-0.16) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |6 |ESETA1 | 6| 0|PmEd2 | -0.0552228| 0.05| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.1606345| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0689478| 0.05| 0| #> |31 |ESETA1 | 15| 0|PmEd2 | -0.0608262| 0.05| 0| #> |52 |ESETA1 | 24| 15|ESETA1.15 | 0.1594566| 0.10| 0| #> |53 |ESETA1 | 24| 6|ESETA1.6 | 0.1224854| 0.10| 0| #> |59 |ESETA1 | 24| 15|InRatioCor.15 | -0.0500768| 0.05| 0| #> |60 |ESETA1 | 24| 6|InRatioCor.6 | -0.0574045| 0.05| 0| #> |92 |ESETA1 | 35| 15|ESETA1.15 | 0.1391236| 0.10| 0| #> |93 |ESETA1 | 35| 24|ESETA1.24 | 0.1149125| 0.10| 0| #> |94 |ESETA1 | 35| 6|ESETA1.6 | 0.1088099| 0.10| 0| #> |103 |ESETA1 | 35| 24|InRatioCor.24 | -0.0802366| 0.05| 0| #> |104 |ESETA1 | 35| 6|InRatioCor.6 | -0.0720984| 0.05| 0| #> |112 |ESETA1 | 35| 0|PmEd2 | -0.0522716| 0.05| 0|"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"phase-2-assess-substantive-associations-between-exposure-outcome","dir":"Articles","previous_headings":"","what":"Phase 2: Assess Substantive Associations between Exposure & Outcome","title":"Workflow: Continuous Exposure","text":"created IPTW balancing weights minimize associations confounders exposure time point, can move substantive modeling phase.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-5-fit-marginal-structural-model-summarize-visualize-results","dir":"Articles","previous_headings":"Phase 2: Assess Substantive Associations between Exposure & Outcome","what":"Step 5: Fit Marginal Structural Model & Summarize & Visualize Results","title":"Workflow: Continuous Exposure","text":"goal final step fit weighted model relating exposure meaningful epochs developmental time outcome, summarizing visualizing results. step, user models compares various counterfactuals, effects different developmental histories exposure outcome, test substantive hypotheses dose timing.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-5a--select-fit-a-marginal-outcome-model","dir":"Articles","previous_headings":"Phase 2: Assess Substantive Associations between Exposure & Outcome > Step 5: Fit Marginal Structural Model & Summarize & Visualize Results","what":"Step 5a. Select & fit a marginal outcome model","title":"Workflow: Continuous Exposure","text":"First, use fitModel() function fit weighted generalized linear model relating exposure outcome. function draws svyglm() function survey package (Lumley, 2023). exposure main effects models reflect exposure levels exposure time point unless exposure epochs specified. One benefits creating balancing weights can used variety different marginal outcome models encompassed function subset possible models. Note models can get complex advise interpreting individual terms. 
The required inputs for using the fitModel() function are: complete data (a data frame, mids object, or list of imputed datasets as dataframes in wide format), exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), the list of trimmed weights, and a model from the list (“m0”, “m1”, “m2”, “m3”): M0: Baseline model regressing the outcome on the main effects of exposure (e.g., infancy, toddlerhood, childhood). M1: Covariate model regressing the outcome on the main effects of exposure as well as user-specified covariates (e.g., confounders measured at baseline or the first time point that remained imbalanced after weighting in Step 4). M2: Interaction model regressing the outcome on the main effects of exposure as well as user-specified interactions between exposure main effects (e.g., infancy:toddlerhood). M3: Full model regressing the outcome on the main effects of exposure, user-specified covariates, as well as user-specified exposure main effect interactions. Here, we specify the full model given that baseline/time-invariant confounders remain imbalanced after Step 4 and given the possibility of non-linear effects of economic strain on behavior problems. If the user selects a covariate model (“m1” or “m3”), they are required to supply a list of covariates that corresponds to covariates in the wide data (see Step 4). If the user selects an interaction model (“m2” or “m3”), they are required to provide an interaction order as an integer in the int_order field that reflects the maximum interaction (e.g., 3) (this will automatically include all lower order interactions (e.g., 2-way)). The interaction order cannot exceed the number of exposure main effects. Here, we specify fitting 3-way (and constituent) interactions between the exposure main effects. The optional inputs for the fitModel() function are as follows. The user has the option to specify epochs that differ from the measurement time points using the optional epochs data frame field. In epochs: provide a list of user-created names in quotations (these constitute meaningful developmental time periods that serve as the time units of the exposure histories); in values: a list in which, for each epoch, the user provides a single integer or a list of integers for the time points at which exposure was measured that constitute that epoch. If no epochs are specified, all time points at which exposure was measured are used in the creation of exposure histories in the final step of the process. Each specified epoch must have a corresponding value (though the values can differ in their number of entries, as shown below). If the user specifies exposure epochs, exposure main effects are created for each epoch, with exposure levels averaged for epochs that consist of two or more time point values. Epochs must be specified in this step if they are to be used in the subsequent step comparing histories, and the specification of exposure epochs should be kept consistent throughout use of the devMSMs package. Here, we specify Infancy, Toddlerhood, and early Childhood exposure epochs. Please see the Preliminary Steps vignette and the accompanying manuscript for details. The user can also specify family (a function, not in quotations; e.g., gaussian) and link (in quotations, e.g., “identity”) functions for the generalized linear model (the defaults are gaussian and “identity”, respectively). The possible families are: binomial, gaussian, Gamma, inverse.gaussian, poisson, quasi, quasibinomial, and quasipoisson. For the binomial and Poisson families, set family to quasibinomial and quasipoisson, respectively, to avoid a warning about non-integer numbers of successes. The ‘quasi’ versions of the family objects give the same point estimates and standard errors but do not give the warning. The gaussian family accepts the links: “identity”, “log” and “inverse”; the binomial family the links “logit”, “probit”, “cauchit” (corresponding to logistic, normal and Cauchy CDFs respectively), “log” and “cloglog” (complementary log-log); the Gamma family the links “inverse”, “identity” and “log”; the poisson family the links “log”, “identity” and “sqrt”; and the inverse.gaussian family the links “1/mu^2”, “inverse”, “identity” and “log”. The quasi family accepts the links “logit”, “probit”, “cloglog”, “identity”, “inverse”, “log”, “1/mu^2” and “sqrt”, and the function power can be used to create a power link function. See the survey and stats R package documentation for more information; a hypothetical specification for a binary outcome is sketched below. 
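For instance, if the outcome were binary rather than continuous, a user might specify the following (an illustration only; it is not used in this example):

family <- quasibinomial  # quasi- version avoids the non-integer successes warning noted above
link <- "logit"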
Here, we retain the default family and link functions. The fitModel() function outputs an .rds file of the fitted model(s) and an .html table of model evidence (which can display models for up to 12 imputed datasets) in the ‘models/’ folder. Importantly, the function also outputs to the console the result of a likelihood ratio test comparing the user-specified model to a nested version of that model that omits the exposure variables, to test whether exposure predicts variation in the outcome. If this test is not significant, there is no evidence that exposure predicts the outcome, and we do not advise proceeding to the subsequent history comparison step. (Models are pooled prior to conducting the likelihood ratio test for imputed data.) The function returns a list of fitted model objects, in the form of svyglm output (labeled “0” when the data are in data frame format). We find a significant likelihood ratio test, indicating that we can proceed to the next step of evaluating the effects of different exposure histories. First, however, we conduct sensitivity analyses, fitting the model with the weights trimmed at the two different values. Of note, as described in Step 4, if save.out = TRUE, running these analyses will overwrite the output of the main model fitting unless the main output is renamed or re-located to a new folder first. We first fit the model with the weights trimmed at the 92nd quantile. We similarly find a significant likelihood ratio test. We then fit the model with the weights trimmed at the 98th quantile, with a comparable result.","code":"model <- \"m2\" int_order <- 3 epochs <- data.frame(epochs = c(\"Infancy\", \"Toddlerhood\", \"Childhood\"), values = I(list(c(6), c(15), c(24)))) family <- gaussian link <- \"identity\" weights <- trim_weights models <- fitModel(data = data, weights = weights, exposure = exposure, #required exposure_time_pts = exposure_time_pts, outcome = outcome, model = model, #required family = family, link = link, int_order = int_order, covariates = covariates, epochs = epochs, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. #> Working (Rao-Scott+F) LRT for ESETA1.Infancy ESETA1.Toddlerhood ESETA1.Childhood ESETA1.Infancy:ESETA1.Toddlerhood ESETA1.Infancy:ESETA1.Childhood ESETA1.Toddlerhood:ESETA1.Childhood ESETA1.Infancy:ESETA1.Toddlerhood:ESETA1.Childhood #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 17.99531 p= 0.015443 #> (scale factors: 1.4 1.4 1.1 0.97 0.88 0.71 0.58 ); denominator df= 1284 #> #> The marginal model, m2, is summarized below: weights <- trim_weights.s1 models.s1 <- fitModel(data = data, weights = weights, exposure = exposure, #required exposure_time_pts = exposure_time_pts, outcome = outcome, model = model, #required family = family, link = link, int_order = int_order, covariates = covariates, epochs = epochs, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. 
#> Working (Rao-Scott+F) LRT for ESETA1.Infancy ESETA1.Toddlerhood ESETA1.Childhood ESETA1.Infancy:ESETA1.Toddlerhood ESETA1.Infancy:ESETA1.Childhood ESETA1.Toddlerhood:ESETA1.Childhood ESETA1.Infancy:ESETA1.Toddlerhood:ESETA1.Childhood #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 20.64908 p= 0.0059484 #> (scale factors: 1.4 1.3 1.1 1 0.89 0.73 0.61 ); denominator df= 1284 #> #> The marginal model, m2, is summarized below: weights <- trim_weights.s2 models.s2 <- fitModel(data = data, weights = weights, exposure = exposure, #required exposure_time_pts = exposure_time_pts, outcome = outcome, model = model, #required family = family, link = link, int_order = int_order, covariates = covariates, epochs = epochs, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. #> Working (Rao-Scott+F) LRT for ESETA1.Infancy ESETA1.Toddlerhood ESETA1.Childhood ESETA1.Infancy:ESETA1.Toddlerhood ESETA1.Infancy:ESETA1.Childhood ESETA1.Toddlerhood:ESETA1.Childhood ESETA1.Infancy:ESETA1.Toddlerhood:ESETA1.Childhood #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 14.87367 p= 0.04432 #> (scale factors: 1.5 1.4 1.1 0.93 0.87 0.67 0.54 ); denominator df= 1284 #> #> The marginal model, m2, is summarized below:"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-5b--estimate-compare-and-visualize-model-predicted-outcome-as-a-function-of-exposure-history","dir":"Articles","previous_headings":"Phase 2: Assess Substantive Associations between Exposure & Outcome > Step 5: Fit Marginal Structural Model & Summarize & Visualize Results","what":"Step 5b. Estimate, compare, and visualize model-predicted outcome as a function of exposure history","title":"Workflow: Continuous Exposure","text":"final step, use fitted model results test substantive hypotheses dose timing. estimate compare average marginal estimates outcome user-specified exposure history (.e., permutation high (“h) low (“l”) levels exposure exposure epoch) using compareHistories() function. draws primarily avg_predictions() hypotheses() functions marginaleffects package (Arel-Bundock, 2023). First, compareHistories() function creates average predictions outcome exposure history. n combinations user-specified exposure histories, set value predictors full dataset values combination, leaving variables . gives us n datasets, size original dataset used fit model. n datasets, compute predicted values given model taking average predicted value n datasets. n averaged predicted values expected potential outcomes combination. (imputed data, function outputs pooled predicted values using Rubin’s Rules.) Next, using predicted values, function conducts comparisons different histories (pooling across imputed datasets imputed data using Rubin’s Rules). Lastly, function implements correction multiple comparisons (treating run function family) plotting results. Box plots display outcome x-axis exposure history y-axis whiskers display standard errors. required inputs using compareHistories() function : exposure (e.g., “variable”), outcome (e.g., “variable.t”), list model output Step 5a. optional inputs follows. 
To create histories of high and low values of a continuous exposure, in hi_lo_cut the user can specify a list of two quantile values (0-1; default is a median split +/- 0.001) demarcating high and low levels of exposure, respectively. (Imputed data are stacked to calculate the cutoff values.) We suggest drawing on existing hypotheses and examining the variability in the exposure variable to determine the high and low cutoffs. We recommend that users begin by specifying meaningful high and low percentile cutoffs and then examining how many individuals in the sample fall into the user-specified exposure histories created by those percentile cutoffs (see the Preliminary Steps vignette). Absent gold standard recommendations for sufficient cell numbers per history, users should ensure reasonable coverage of all histories to avoid extrapolation and maximize precision. Here, we specify the 60th and 30th percentile values to denote high and low levels of economic strain, respectively (a rough check of the resulting history distribution is sketched further below). Additionally, the user has the option to specify epochs that differ from the exposure time points using the optional epochs data frame field (see Step 5a above). If the user specified epochs in Step 5a with the fitModel() function, they must also be specified in this step. The user also has the option to estimate and compare only a custom subset of user-specified exposure histories (i.e., sequences of high and low levels of exposure at each epoch or time point) using the reference and comparison fields. To conduct these recommended customized comparisons, users must provide at least one unique valid history (e.g., “l-l-l”) as a reference by, in quotations, providing a string (or list of strings) of lowercase l’s and h’s (separated by -), each corresponding to an exposure epoch (or time point), that signify a sequence of exposure levels (“low” or “high”, respectively). If the user supplies a reference history, they are required to provide at least one unique valid history for comparison by, in quotations, providing to comparison a string (or list of strings) of l’s and h’s (separated by “-”), each corresponding to an exposure epoch, that signify the sequence of exposure levels (“low” or “high”, respectively) constituting the comparison exposure history/histories to be compared to the reference. If the user supplies one or more comparisons, at least one reference must also be specified. Each reference exposure history will be compared to each comparison history, and all comparisons are subject to multiple comparison correction. If no reference or comparison is specified, all histories are compared to each other. If there are more than 4 exposure main effects (either epochs or exposure time points), the user is required to select a subset of history comparisons (Step 5b), given that the base code (see the hypotheses() function of the marginaleffects package; Arel-Bundock, 2023) does not accommodate all pairwise history comparisons for more than 5 time points. Here, we specify one reference history and four comparison histories. The user can also specify a multiple comparison method in mc_method by, in quotations, providing the shorthand for the method (“holm”, “hochberg”, “hommel”, “bonferroni”, “BH” (default), “BY”, “fdr”, or “none”; see the stats::p.adjust documentation; R Core Team) for the multiple comparison correction applied to the final (pooled across imputed datasets where applicable) contrasts comparing the effects of different exposure histories on the outcome (the default is Benjamini-Hochberg). Each run of the function is considered one family. If the user iterates through the function specifying different comparisons each time, we strongly recommend interpreting the outcome from the most inclusive set of comparisons to avoid false discovery. Here, we retain the default Benjamini-Hochberg method for multiple comparison correction. Based on their substantive interests, the user also has the option to choose the level of dosage (“h” or “l”) that is tallied in the dose count labels of the tables and figures (dose_level; default is “h”). For example, if the exposure variable is coded in such a way that lower levels are conceptualized as the exposure (e.g., lower income), the user may wish to choose a dosage level of “l”. Here, given our interest in histories of high economic strain, we specify that we wish to tally high doses of exposure. 
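Before settling on cutoffs, a rough base-R tally of the resulting histories can be informative. The sketch below is only an approximation of what compareHistories() reports; it assumes the data are a single wide data frame and that the Infancy, Toddlerhood, and Childhood epochs correspond to ESETA1.6, ESETA1.15, and ESETA1.24, and its handling of ties and of imputed data may differ from the package's own.

# hypothetical helper: classify each epoch value as low (<= 30th pctl) or high (>= 60th pctl)
lvl <- function(x, lo = 0.3, hi = 0.6) {
  cuts <- quantile(x, probs = c(lo, hi), na.rm = TRUE)
  ifelse(x <= cuts[1], "l", ifelse(x >= cuts[2], "h", NA))
}
histories <- paste(lvl(data$ESETA1.6), lvl(data$ESETA1.15), lvl(data$ESETA1.24), sep = "-")
table(histories[!grepl("NA", histories)])  # counts per fully classified history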
Lastly, the user can provide alternate plotting labels for the exposure and outcome in the exp_lab and out_lab fields (the defaults are the variable names), as well as a list of colors (equal in number to the exposure main effects + 1) or a Brewer color palette (colors; default is “Dark2”). (See RColorBrewer::display.brewer.all() or https://r-graph-gallery.com/38-rcolorbrewers-palettes.html.) Here, we specify plotting labels and 4 colors. The compareHistories() function saves .html tables of the estimated mean outcome values for each history and of the history comparisons in the ‘histories/’ folder, and a boxplot of the predicted values by history in the ‘plots/’ folder. The function returns a data frame of the user-specified history comparisons containing contrast estimates, standard errors, statistics, p-values, low and high confidence intervals, and corrected p-values, labeled by history and dose. As shown below, we first confirm a reasonable distribution of the sample across the specified exposure histories. We then inspect the history comparisons and conclude that there is no evidence that children who experienced different histories of exposure to economic strain in infancy, toddlerhood, and early childhood differ in their behavior problems in early childhood. We then conduct sensitivity analyses, assessing and comparing histories drawing on the models that used the weights trimmed at the two different values. Of note, running these analyses will overwrite the output of the main history comparison unless the main output is renamed or re-located to a new folder first. We first compare histories using the model fit with the weights trimmed at the 92nd quantile value. As shown below, the results indicate a marginal, non-significant contrast between the “l-l-l” and “h-h-h” histories of economic strain exposure in relation to behavior problems in early childhood. We then compare histories using the model fit with the weights trimmed at the 98th quantile value. Similarly, we find no evidence of differences in behavioral problems as a function of history of exposure to economic strain.","code":"hi_lo_cut <- c(0.6, 0.3) reference <- \"l-l-l\" comparison <- c(\"h-h-h\", \"l-l-h\", \"h-l-l\", \"l-h-l\") mc_comp_method <- \"BH\" dose_level <- \"h\" exp_lab <- \"Economic Strain\" out_lab <- \"Behavior Problems\" colors <- c(\"blue4\", \"darkgreen\", \"darkgoldenrod\", \"red2\") model <- models results <- compareHistories(exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, model = model, #required epochs = epochs, hi_lo_cut = hi_lo_cut, reference = reference, comparison = comparison, #optional mc_comp_method = mc_comp_method, dose_level = dose_level, exp_lab = exp_lab, out_lab = out_lab, colors = colors, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 1292 individuals in the sample, below is the distribution of the 406 (31.42%) individuals that fall into 5 out of the 5 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure ESETA1, respectively, across Infancy, Toddlerhood, Childhood. 
#> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure ESETA1 histories based on exposure main effects Infancy, Toddlerhood, Childhood containing time points 6, 15, 24: #> #> |history | n| #> |:-------|---:| #> |h-h-h | 216| #> |h-l-l | 20| #> |l-h-l | 25| #> |l-l-h | 18| #> |l-l-l | 127| #> #> #> Below are the average predictions by user-specified history: #> | | ESETA1.Infancy| ESETA1.Toddlerhood| ESETA1.Childhood| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|--------------:|------------------:|----------------:|--------:|---------:|---------:|-------:|--------:|--------:|---------:|:-------|----------:| #> |1 | -0.2204| -0.243| -0.3527| 0.4747| 0.0121| 39.1104| 0| Inf| 0.4509| 0.4985|l-l-l | 0| #> |2 | -0.2204| 0.480| -0.3527| 0.4719| 0.0141| 33.4217| 0| 811.1404| 0.4443| 0.4996|l-h-l | 1| #> |3 | 0.5166| -0.243| -0.3527| 0.4817| 0.0131| 36.8133| 0| 983.1129| 0.4561| 0.5074|h-l-l | 1| #> |5 | -0.2204| -0.243| 0.3288| 0.4908| 0.0158| 31.0331| 0| 699.9813| 0.4598| 0.5218|l-l-h | 1| #> |8 | 0.5166| 0.480| 0.3288| 0.4965| 0.0108| 46.0536| 0| Inf| 0.4754| 0.5176|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| #> |:------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| #> |(-0.2204, -0.243, -0.3527) - (0.5166, 0.48, 0.3288) | 0.02| 0.01| 1.92| 0.06| 4.18| 0.00| 0.04|l-l-l vs h-h-h |0 vs 3 | 0.20| #> |(-0.2204, -0.243, -0.3527) - (0.5166, -0.243, -0.3527) | 0.01| 0.01| 0.70| 0.48| 1.05| -0.01| 0.03|l-l-l vs h-l-l |0 vs 1 | 0.64| #> |(-0.2204, -0.243, -0.3527) - (-0.2204, 0.48, -0.3527) | 0.00| 0.01| -0.26| 0.80| 0.33| -0.02| 0.02|l-l-l vs l-h-l |0 vs 1 | 0.80| #> |(-0.2204, -0.243, -0.3527) - (-0.2204, -0.243, 0.3288) | 0.02| 0.01| 1.66| 0.10| 3.36| 0.00| 0.04|l-l-l vs l-l-h |0 vs 1 | 0.20| model <- models.s1 results.s1 <- compareHistories(exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, model = model, #required epochs = epochs, hi_lo_cut = hi_lo_cut, reference = reference, comparison = comparison, #optional mc_comp_method = mc_comp_method, dose_level = dose_level, exp_lab = exp_lab, out_lab = out_lab, colors = colors, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 1292 individuals in the sample, below is the distribution of the 406 (31.42%) individuals that fall into 5 out of the 5 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure ESETA1, respectively, across Infancy, Toddlerhood, Childhood. 
#> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure ESETA1 histories based on exposure main effects Infancy, Toddlerhood, Childhood containing time points 6, 15, 24: #> #> |history | n| #> |:-------|---:| #> |h-h-h | 216| #> |h-l-l | 20| #> |l-h-l | 25| #> |l-l-h | 18| #> |l-l-l | 127| #> #> #> Below are the average predictions by user-specified history: #> | | ESETA1.Infancy| ESETA1.Toddlerhood| ESETA1.Childhood| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|--------------:|------------------:|----------------:|--------:|---------:|---------:|-------:|---------:|--------:|---------:|:-------|----------:| #> |1 | -0.2204| -0.243| -0.3527| 0.4718| 0.0118| 40.0529| 0| Inf| 0.4487| 0.4949|l-l-l | 0| #> |2 | -0.2204| 0.480| -0.3527| 0.4707| 0.0139| 33.9638| 0| 837.5166| 0.4435| 0.4979|l-h-l | 1| #> |3 | 0.5166| -0.243| -0.3527| 0.4796| 0.0128| 37.4845| 0| 1019.1131| 0.4545| 0.5046|h-l-l | 1| #> |5 | -0.2204| -0.243| 0.3288| 0.4882| 0.0152| 32.0175| 0| 744.7950| 0.4583| 0.5181|l-l-h | 1| #> |8 | 0.5166| 0.480| 0.3288| 0.4964| 0.0105| 47.2873| 0| Inf| 0.4759| 0.5170|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| #> |:------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| #> |(-0.2204, -0.243, -0.3527) - (0.5166, 0.48, 0.3288) | 0.02| 0.01| 2.23| 0.03| 5.28| 0.00| 0.05|l-l-l vs h-h-h |0 vs 3 | 0.10| #> |(-0.2204, -0.243, -0.3527) - (0.5166, -0.243, -0.3527) | 0.01| 0.01| 0.80| 0.42| 1.24| -0.01| 0.03|l-l-l vs h-l-l |0 vs 1 | 0.56| #> |(-0.2204, -0.243, -0.3527) - (-0.2204, 0.48, -0.3527) | 0.00| 0.01| -0.10| 0.92| 0.12| -0.02| 0.02|l-l-l vs l-h-l |0 vs 1 | 0.92| #> |(-0.2204, -0.243, -0.3527) - (-0.2204, -0.243, 0.3288) | 0.02| 0.01| 1.74| 0.08| 3.62| 0.00| 0.03|l-l-l vs l-l-h |0 vs 1 | 0.16| model <- models.s2 results.s2 <- compareHistories(exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, model = model, #required epochs = epochs, hi_lo_cut = hi_lo_cut, reference = reference, comparison = comparison, #optional mc_comp_method = mc_comp_method, dose_level = dose_level, exp_lab = exp_lab, out_lab = out_lab, colors = colors, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 1292 individuals in the sample, below is the distribution of the 406 (31.42%) individuals that fall into 5 out of the 5 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure ESETA1, respectively, across Infancy, Toddlerhood, Childhood. 
#> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure ESETA1 histories based on exposure main effects Infancy, Toddlerhood, Childhood containing time points 6, 15, 24: #> #> |history | n| #> |:-------|---:| #> |h-h-h | 216| #> |h-l-l | 20| #> |l-h-l | 25| #> |l-l-h | 18| #> |l-l-l | 127| #> #> #> Below are the average predictions by user-specified history: #> | | ESETA1.Infancy| ESETA1.Toddlerhood| ESETA1.Childhood| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|--------------:|------------------:|----------------:|--------:|---------:|---------:|-------:|--------:|--------:|---------:|:-------|----------:| #> |1 | -0.2204| -0.243| -0.3527| 0.4771| 0.0126| 37.9180| 0| Inf| 0.4524| 0.5017|l-l-l | 0| #> |2 | -0.2204| 0.480| -0.3527| 0.4743| 0.0146| 32.5709| 0| 770.6037| 0.4458| 0.5028|l-h-l | 1| #> |3 | 0.5166| -0.243| -0.3527| 0.4825| 0.0137| 35.1938| 0| 898.9284| 0.4557| 0.5094|h-l-l | 1| #> |5 | -0.2204| -0.243| 0.3288| 0.4921| 0.0165| 29.7876| 0| 645.2777| 0.4597| 0.5245|l-l-h | 1| #> |8 | 0.5166| 0.480| 0.3288| 0.4962| 0.0114| 43.4304| 0| Inf| 0.4738| 0.5186|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| #> |:------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| #> |(-0.2204, -0.243, -0.3527) - (0.5166, 0.48, 0.3288) | 0.02| 0.01| 1.62| 0.11| 3.24| 0.00| 0.04|l-l-l vs h-h-h |0 vs 3 | 0.28| #> |(-0.2204, -0.243, -0.3527) - (0.5166, -0.243, -0.3527) | 0.01| 0.01| 0.52| 0.60| 0.73| -0.02| 0.03|l-l-l vs h-l-l |0 vs 1 | 0.80| #> |(-0.2204, -0.243, -0.3527) - (-0.2204, 0.48, -0.3527) | 0.00| 0.01| -0.25| 0.80| 0.32| -0.02| 0.02|l-l-l vs l-h-l |0 vs 1 | 0.80| #> |(-0.2204, -0.243, -0.3527) - (-0.2204, -0.243, 0.3288) | 0.02| 0.01| 1.48| 0.14| 2.84| 0.00| 0.03|l-l-l vs l-l-h |0 vs 1 | 0.28|"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"references","dir":"Articles","previous_headings":"","what":"References","title":"Workflow: Continuous Exposure","text":"Arel-Bundock, V. 2023. marginaleffects: Predictions, Comparisons, Slopes, Marginal Means,Hypothesis Tests. https://CRAN.R-project.org/package=marginaleffects. Burchinal, M., Howes, C., Pianta, R., Bryant, D., Early, D., Clifford, R., & Barbarin, O. (2008). Predicting Child Outcomes End Kindergarten Quality Pre-Kindergarten Teacher–Child Interactions Instruction. Applied Developmental Science, 12(3), 140–153. https://doi.org/10.1080/10888690802199418 Cole, S. R., & Hernán, M. . (2008). Constructing Inverse Probability Weights Marginal Structural Models. American Journal Epidemiology, 168(6), 656–664. https://doi.org/10.1093/aje/kwn164. Greifer, Noah. 2023.WeightIt: Weighting Covariate Balance Observational Studies. https://CRAN.R-project.org/package=WeightIt. Lumley, Thomas. 2023. “survey: Analysis Complex Survey Samples.” Polley, Eric, Erin LeDell, Chris Kennedy, Mark van der Laan. 2023. SuperLearner: SuperLearner Prediction. https://CRAN.R-project.org/package=SuperLearner. 
R Core Team (2013). R: language environment statistical computing. R Foundation Statistical Computing, Vienna, Austria. ISBN 3-900051-07-0, URLhttp://www.R-project.org/. Stuart, E. . (2010). Matching methods causal inference: review look forward. Statistical Science: Review Journal Institute Mathematical Statistics, 25(1), 1–21. https://doi.org/10.1214/09-STS313. Thoemmes, F., & Ong, . D. (2016). Primer Inverse Probability Treatment Weighting Marginal Structural Models. https://doi.org/10.1177/2167696815621645. Vernon-Feagans, L., Cox, M., Willoughby, M., Burchinal, M., Garrett-Peters, P., Mills-Koonce, R., Garrett-Peiers, P., Conger, R. D., & Bauer, P. J. (2013). Family Life Project: Epidemiological Developmental Study Young Children Living Poor Rural Communities. Monographs Society Research Child Development, 78(5), –150.","code":""},{"path":"https://istallworthy.github.io/devMSMs/authors.html","id":null,"dir":"","previous_headings":"","what":"Authors","title":"Authors and Citation","text":"Isabella Stallworthy. Author, maintainer. Noah Greifer. Author, contributor. Meriah DeJoseph. Author. Emily Padrutt. Author. Daniel Berry. Author.","code":""},{"path":"https://istallworthy.github.io/devMSMs/authors.html","id":"citation","dir":"","previous_headings":"","what":"Citation","title":"Authors and Citation","text":"Stallworthy , Greifer N, DeJoseph M, Padrutt E, Berry D (2023). devMSMs: Tools Conducting Marginal Structural Models Developmental Data. R package version 0.0.0.9000, https://github.com/istallworthy/devMSMs, https://istallworthy.github.io/devMSMs/.","code":"@Manual{, title = {devMSMs: Tools for Conducting Marginal Structural Models with Developmental Data}, author = {Isabella Stallworthy and Noah Greifer and Meriah DeJoseph and Emily Padrutt and Daniel Berry}, year = {2023}, note = {R package version 0.0.0.9000, https://github.com/istallworthy/devMSMs}, url = {https://istallworthy.github.io/devMSMs/}, }"},{"path":"https://istallworthy.github.io/devMSMs/index.html","id":"devmsms","dir":"","previous_headings":"","what":"An R package for conducting marginal structural models (MSMs) with longitudinal data","title":"An R package for conducting marginal structural models (MSMs) with longitudinal data","text":"Scientists study humans fundamentally interested questions causation, yet conceptual, methodological, practical barriers historically prevented use methods causal inference developed fields. specifically, scientists, clinicians, educators, policymakers alike often interested causal processes involving questions (timing) extent (dose) different factors influence human functioning development, order inform scientific understanding improve people’s lives. Marginal structural models (MSMs; Robins et al., 2000), orginating epidemiology public health, represent one -utilized tool improving causal inference longitudinal observational data, given certain assumptions. brief, MSMs leverage inverse-probability--treatment-weights (IPTW) potential outcomes framework. MSMs first focus problem confounding, using IPTW attenuate associations measured confounders exposure (e.g., experience, characteristic, event –biology broader environment) time. weighted model can fitted relating time-varying exposure future outcome. 
Finally, model-predicted effects different exposure histories, vary dose timing, can evaluated compared counterfactuals reveal putative causal effects.devMSMs R package accompanying tutorial paper, Investigating Causal Questions Human Development using Marginal Structural Models: Tutorial Introduction devMSMs Package R (insert preprint link ), implementing MSMs longitudinal data answer causal questions dose timing effects given exposure future outcome. Core features package include: flexible functions built-guidance, drawing established expertise best practices implementing longitudinal IPTW weighting outcome modeling answer substantive causal questions dose timing accommodation data form either complete dataframe multiple imputation recommended workflow using devMSMs functions longitudinal data step--step user guidance deveMSMs worflow form vignettes R markdown template file users new MSM technique R programming accompanying suite helper functions assist users preparing inspecting data prior use devMSMs conceptual introduction example empirical application accompanying tutorial paper","code":""},{"path":"https://istallworthy.github.io/devMSMs/index.html","id":"overview","dir":"","previous_headings":"","what":"Overview","title":"An R package for conducting marginal structural models (MSMs) with longitudinal data","text":"package contains 6 core functions conducting longitudinal confounder adjustment outcome modeling longitudinal data time-varying exposures.","code":""},{"path":"https://istallworthy.github.io/devMSMs/index.html","id":"installation","dir":"","previous_headings":"","what":"Installation","title":"An R package for conducting marginal structural models (MSMs) with longitudinal data","text":"devMSMs can installed R Studio Github using devtools package:library(devtools)install_github(\"istallworthy/devMSMs\")library(devMSMs) helper functions can installed accompanying devMSMsHelpers repo:install_github(\"istallworthy/devMSMsHelpers\")library(devMSMsHelpers)","code":""},{"path":"https://istallworthy.github.io/devMSMs/index.html","id":"recommended-workflow","dir":"","previous_headings":"","what":"Recommended Workflow","title":"An R package for conducting marginal structural models (MSMs) with longitudinal data","text":"Please see Workflows vignettes details.","code":""},{"path":"https://istallworthy.github.io/devMSMs/index.html","id":"additional-resources","dir":"","previous_headings":"","what":"Additional Resources","title":"An R package for conducting marginal structural models (MSMs) with longitudinal data","text":"Austin, P. C. (2011). Introduction Propensity Score Methods Reducing Effects Confounding Observational Studies. Multivariate Behavioral Research, 46(3), 399–424. https://doi.org/10.1080/00273171.2011.568786 Blackwell, M. (2013). Framework Dynamic Causal Inference Political Science. American Journal Political Science, 57(2), 504–520. https://doi.org/10.1111/j.1540-5907.2012.00626.x Cole, S. R., & Hernán, M. . (2008). Constructing Inverse Probability Weights Marginal Structural Models. American Journal Epidemiology, 168(6), 656–664. https://doi.org/10.1093/aje/kwn164 Eronen, M. . (2020). Causal discovery problem psychological interventions. New Ideas Psychology, 59, 100785. https://doi.org/10.1016/j.newideapsych.2020.100785 Fong, C., Hazlett, C., & Imai, K. (2018). Covariate balancing propensity score continuous treatment: Application efficacy political advertisements. Annals Applied Statistics, 12(1), 156–177. https://doi.org/10.1214/17-AOAS1101 Haber, N. ., Wood, M. 
E., Wieten, S., & Breskin, . (2022). DAG Omitted Objects Displayed (DAGWOOD): framework revealing causal assumptions DAGs. Annals Epidemiology, 68, 64–71. https://doi.org/10.1016/j.annepidem.2022.01.001 Hirano, K., & Imbens, G. W. (2004). Propensity Score Continuous Treatments. Applied Bayesian Modeling Causal Inference Incomplete-Data Perspectives (pp. 73–84). John Wiley & Sons, Ltd. https://doi.org/10.1002/0470090456.ch7 Kainz, K., Greifer, N., Givens, ., Swietek, K., Lombardi, B. M., Zietz, S., & Kohn, J. L. (2017). Improving Causal Inference: Recommendations Covariate Selection Balance Propensity Score Methods. Journal Society Social Work Research, 8(2), 279–303. https://doi.org/10.1086/691464 Robins, J. M., Hernán, M. Á., & Brumback, B. (2000). Marginal Structural Models Causal Inference Epidemiology. Epidemiology, 11(5), 550–560. Thoemmes, F., & Ong, . D. (2016). Primer Inverse Probability Treatment Weighting Marginal Structural Models. https://doi.org/10.1177/2167696815621645","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/add_dose.html","id":null,"dir":"Reference","previous_headings":"","what":"Add dose tally to table — add_dose","title":"Add dose tally to table — add_dose","text":"Add dose tally table","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/add_dose.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Add dose tally to table — add_dose","text":"","code":"add_dose(p, dose_level)"},{"path":"https://istallworthy.github.io/devMSMs/reference/add_dose.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Add dose tally to table — add_dose","text":"p table output marginaleffects::avg_predictions() hypotheses() dose_level \"l\" \"h\" indicating whether low high doses tallied tables plots","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/add_dose.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Add dose tally to table — add_dose","text":"table dose level tally","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/add_histories.html","id":null,"dir":"Reference","previous_headings":"","what":"Add history labels to table — add_histories","title":"Add history labels to table — add_histories","text":"Add history labels table","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/add_histories.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Add history labels to table — add_histories","text":"","code":"add_histories(p, d)"},{"path":"https://istallworthy.github.io/devMSMs/reference/add_histories.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Add history labels to table — add_histories","text":"p table output marginaleffects::avg_predictions() hypotheses() d data frame high low values per exposure main effect","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/add_histories.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Add history labels to table — add_histories","text":"table histories labeled","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/assessBalance.html","id":null,"dir":"Reference","previous_headings":"","what":"Assesses confounder balancing — assessBalance","title":"Assesses confounder balancing — assessBalance","text":"Draws functions cobalt package quantify relations exposure confounders exposure time point according 
guidelines Jackson, 2016 assess balance time-varying exposures.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/assessBalance.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Assesses confounder balancing — assessBalance","text":"","code":"assessBalance( data, exposure, exposure_time_pts, outcome, type, formulas, weights = NULL, balance_thresh = NULL, imp_conf = NULL, home_dir = NULL, verbose = TRUE, save.out = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/assessBalance.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Assesses confounder balancing — assessBalance","text":"data data wide format : data frame, list imputed data frames, mids object exposure name exposure variable exposure_time_pts list integers weights created/assessed correspond time points exposure wass measured outcome name outcome variable \".timepoint\" suffix type type balance assessment; 'prebalance' 'weighted' formulas list balancing formulas time point output createFormulas() weights list IPTW weights output createWeights, required type 'weighted' balance_thresh (optional) one two numbers 0 1 indicating single balancing threshold thresholds less important confounders, respectively (default = 0.1) imp_conf (optional) list variable names reflecting important confounders, required two balance thresholds supplied home_dir (optional) path home directory (required save.= TRUE) verbose (optiona) TRUE FALSE indicator user output (default TRUE) save.(optional) TRUE FALSE indicator save output intermediary output locally (default TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/assessBalance.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Assesses confounder balancing — assessBalance","text":"data frame balance statistics","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/reference/assessBalance.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Assesses confounder balancing — assessBalance","text":"","code":"test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"short\", save.out = FALSE) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C #> #> #Prebalance b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"prebalance\", formulas = f, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point 
prior to weighting, using short formulas. #> #> As shown below, 4 out of 7 (57%) covariates across time points, corresponding to 3 out of 2 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.14 (range= -0.14-0.16): #> #> Table: Imbalanced covariates using no weights and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 0| 3| 3| #> | 3| 2| 1| 3| #> #> #> USER ALERT: For exposure A using the short formulas and no weights : #> The median absolute value relation between exposure and confounder is 0.11 (range = -0.14 -0.16). #> As shown below, the following 4 covariates across time points out of 7 total (57.14%) spanning 3 domains out of 3 (100%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.14 (range=-0.14-0.16) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:---------|----------:|----------:|--------:| #> |2 |A | 2| 1|A.1 | -0.1345161| 0.1| 0| #> |3 |A | 2| 1|B.1 | 0.1619951| 0.1| 0| #> |4 |A | 2| 0|C | -0.1383171| 0.1| 0| #> |7 |A | 3| 0|C | -0.1084922| 0.1| 0| #> b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"prebalance\", formulas = f, balance_thresh = 0.2, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point prior to weighting, using short formulas. #> #> No covariates remain imbalanced using no weights and short formulas. #> #> #> USER ALERT: For exposure A using the short formulas and no weights : #> The median absolute value relation between exposure and confounder is 0.11 (range = -0.14 -0.16). #> There are no imbalanced covariates. #> There are no imbalanced covariates. b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"prebalance\", formulas = f, balance_thresh = c(0.1, 0.2), imp_conf = \"B.1\", save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point prior to weighting, using short formulas. #> #> As shown below, 1 out of 7 (14%) covariates across time points, corresponding to 1 out of 2 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.16 (range= 0.16-0.16): #> #> Table: Imbalanced covariates using no weights and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 2| 1| 3| #> | 3| 3| 0| 3| #> #> #> USER ALERT: For exposure A using the short formulas and no weights : #> The median absolute value relation between exposure and confounder is 0.11 (range = -0.14 -0.16). 
#> As shown below, the following 1 covariates across time points out of 7 total (14.29%) spanning 1 domains out of 3 (33.33%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.16 (range=0.16-0.16) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:---------|---------:|----------:|--------:| #> |3 |A | 2| 1|B.1 | 0.1619951| 0.1| 0| #> f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\", \"A.1:B.1\"), ti_confounders = \"C\", type = \"short\", save.out = FALSE) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + A.1:B.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C #> #> b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"prebalance\", formulas = f, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point prior to weighting, using short formulas. #> #> As shown below, 5 out of 8 (62%) covariates across time points, corresponding to 3 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.13 (range= -0.14-0.16): #> #> Table: Imbalanced covariates using no weights and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 0| 4| 4| #> | 3| 2| 1| 3| #> #> #> USER ALERT: For exposure A using the short formulas and no weights : #> The median absolute value relation between exposure and confounder is 0.11 (range = -0.14 -0.16). #> As shown below, the following 5 covariates across time points out of 8 total (62.5%) spanning 3 domains out of 3 (100%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.13 (range=-0.14-0.16) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:---------|----------:|----------:|--------:| #> |2 |A | 2| 1|A.1 | -0.1345161| 0.1| 0| #> |3 |A | 2| 1|A.1:B.1 | -0.1066427| 0.1| 0| #> |4 |A | 2| 1|B.1 | 0.1619951| 0.1| 0| #> |5 |A | 2| 0|C | -0.1383171| 0.1| 0| #> |8 |A | 3| 0|C | -0.1084922| 0.1| 0| #> # Weighted w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 0.99 (SD = 0.53; range = 0.33-3). #> b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"weighted\", weights = w, formulas = f, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. 
#> #> As shown below, 1 out of 8 (12%) covariates across time points, corresponding to 1 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.13 (range= 0.13-0.13): #> #> Table: Imbalanced covariates using cbps and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 4| 0| 4| #> | 3| 2| 1| 3| #> #> #> USER ALERT: For exposure A using the short formulas and cbps : #> The median absolute value relation between exposure and confounder is 0.05 (range = -0.09 -0.13). #> As shown below, the following 1 covariates across time points out of 8 total (12.5%) spanning 1 domains out of 3 (33.33%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.13 (range=0.13-0.13) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:---------|---------:|----------:|--------:| #> |7 |A | 3| 2|B.2 | 0.1264298| 0.1| 0| #> b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"weighted\", weights = w, formulas = f, balance_thresh = 0.2, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> #> No covariates remain imbalanced using cbps and short formulas. #> #> #> USER ALERT: For exposure A using the short formulas and cbps : #> The median absolute value relation between exposure and confounder is 0.05 (range = -0.09 -0.13). #> There are no imbalanced covariates. #> There are no imbalanced covariates. b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"weighted\", weights = w, formulas = f, balance_thresh = c(0.1, 0.2), imp_conf = \"B.1\", save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> #> No covariates remain imbalanced using cbps and short formulas. #> #> #> USER ALERT: For exposure A using the short formulas and cbps : #> The median absolute value relation between exposure and confounder is 0.05 (range = -0.09 -0.13). #> There are no imbalanced covariates. #> There are no imbalanced covariates."},{"path":"https://istallworthy.github.io/devMSMs/reference/calcBalStats.html","id":null,"dir":"Reference","previous_headings":"","what":"Calculate balance stats based on Jackson paper — calcBalStats","title":"Calculate balance stats based on Jackson paper — calcBalStats","text":"Calculate weighted unweighted standardized balance statistics given exposure time point, using relevant confounders. 
Draws Jackson, 2016 approaches assessing balance time-varying exposures weighting statistics based sample distribution exposure histories.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/calcBalStats.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Calculate balance stats based on Jackson paper — calcBalStats","text":"","code":"calcBalStats( data, formulas, exposure, exposure_time_pts, outcome, balance_thresh, k = 0, weights = NULL, imp_conf = NULL, home_dir = NULL, verbose = TRUE, save.out = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/calcBalStats.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Calculate balance stats based on Jackson paper — calcBalStats","text":"data data wide format : data frame, path folder imputed .csv files, mids object formulas list balancing formulas time point output createFormulas() exposure name exposure variable exposure_time_pts list integers weights created/assessed correspond time points exposure wass measured outcome name outcome variable \".timepoint\" suffix balance_thresh (optional) one two numbers 0 1 indicating single balancingn threshold thresholds less important confounders, respectively k (optional) imputation number weights (optional) list IPTW weights output createWeights imp_conf (optional) list variable names reflecting important confounders (required two balance thresholds provided) home_dir (optional) path home directory (required save.= TRUE) verbose (optional) TRUE FALSE indicator user output (default TRUE) save.(optional) TRUE FALSE indicator save output intermediary output locally","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/calcBalStats.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Calculate balance stats based on Jackson paper — calcBalStats","text":"data frame balance statistics","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/calcBalStats.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Calculate balance stats based on Jackson paper — calcBalStats","text":"","code":"test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 0.99 (SD = 1.44; range = 0.23-9). 
#> c <- calcBalStats(data = test, formulas = f, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", balance_thresh = 0.1, save.out = FALSE) #> As shown below, 4 out of 9 (44%) covariates across time points, corresponding to 3 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.23 (range= -0.36-0.2): #> #> Table: Imbalanced covariates using no weights and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 2| 1| 3| #> | 3| 2| 3| 5| #> #> c <- calcBalStats(data = test, formulas = f, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", balance_thresh = c(0.05, 0.1), imp_conf = \"B2\", save.out = FALSE) #> As shown below, 4 out of 9 (44%) covariates across time points, corresponding to 3 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.23 (range= -0.36-0.2): #> #> Table: Imbalanced covariates using no weights and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 2| 1| 3| #> | 3| 2| 3| 5| #> #> c <- calcBalStats(data = test, formulas = f, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", balance_thresh = 0.1, weights = w[[1]], save.out = FALSE) #> As shown below, 2 out of 9 (22%) covariates across time points, corresponding to 2 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.12 (range= 0.11-0.13): #> #> Table: Imbalanced covariates using cbps and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 1| 2| 3| #> | 3| 5| 0| 5| #> #>"},{"path":"https://istallworthy.github.io/devMSMs/reference/compareHistories.html","id":null,"dir":"Reference","previous_headings":"","what":"Estimate, compare, and visualize exposure histories — compareHistories","title":"Estimate, compare, and visualize exposure histories — compareHistories","text":"Takes fitted model output created predicted values user-specified histories (pooling imputed data), conducting contrast comparisons (pooling imputed data), correcting multiple comparisons, plotting results.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/compareHistories.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Estimate, compare, and visualize exposure histories — compareHistories","text":"","code":"compareHistories( home_dir, exposure, exposure_time_pts, outcome, model, epochs = NULL, hi_lo_cut = NULL, reference = NULL, comparison = NULL, mc_comp_method = NA, dose_level = NA, exp_lab = NA, out_lab = NA, colors = NULL, verbose = TRUE, save.out = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/compareHistories.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Estimate, compare, and visualize exposure histories — compareHistories","text":"home_dir path home directory (required 'save.' 
= TRUE) exposure name exposure variable exposure_time_pts list integers weights created/assessed correspond time points exposure wass measured outcome name outcome variable \".timepoint\" suffix model list model outputs fitModel() epochs (optional) data frame exposure epoch labels values hi_lo_cut (optional) list two numbers indicating quantile values reflect high low values, respectively, continuous exposure (default median split) reference (optional) list sof one strings \"-\"-separated \"l\" \"h\" values indicative reference exposure history compare comparison, required comparison supplied comparison (optional) list one strings \"-\"-separated \"l\" \"h\" values indicative comparison history/histories compare reference, required reference supplied mc_comp_method (optional) character abbreviation multiple comparison correction method stats::p.adjust, default Benjamini-Hochburg (\"BH\") dose_level (optional) \"l\" \"h\" indicating whether low high doses tallied tables plots (default high \"h\") exp_lab (optional) character label exposure variable plots (default variable name) out_lab (optional) character label outcome variable plots (default variable name) colors (optional) character specifying Brewer palette list colors (n(epochs)+1) plotting (default \"Dark2\" palette) verbose (optional) TRUE FALSE indicator user output (default TRUE) save.(optional) TRUE FALSE indicator save output intermediary output locally (default TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/compareHistories.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Estimate, compare, and visualize exposure histories — compareHistories","text":"data frame history comparisons","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/reference/compareHistories.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Estimate, compare, and visualize exposure histories — compareHistories","text":"","code":"f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.04 (SD = 0.45; range = 0.25-3). #> m <- fitModel(data = test, weights = w, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = \"m0\", save.out = FALSE) #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. 
#> #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. #> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 1.664444 p= 0.64366 #> (scale factors: 1.2 0.99 0.79 ); denominator df= 46 #> #> The marginal model, m0, is summarized below: r <- compareHistories(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = m, save.out = FALSE) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 50 (100%) individuals that fall into 8 out of the 8 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 6| #> |h-h-l | 5| #> |h-l-h | 6| #> |h-l-l | 8| #> |l-h-h | 8| #> |l-h-l | 6| #> |l-l-h | 5| #> |l-l-l | 6| #> #> #> Below are the average predictions by user-specified history: #> | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |-------:|------:|-----:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| #> | -0.0851| 0.1197| 0.205| -0.0529| 0.1168| -0.4526| 0.6508| 0.6197| -0.2818| 0.1761|l-l-l | 0| #> | -0.0851| 0.1197| 0.207| -0.0528| 0.1168| -0.4518| 0.6514| 0.6184| -0.2817| 0.1762|l-l-h | 1| #> | -0.0851| 0.1217| 0.205| -0.0531| 0.1169| -0.4548| 0.6493| 0.6231| -0.2822| 0.1759|l-h-l | 1| #> | -0.0851| 0.1217| 0.207| -0.0531| 0.1169| -0.4540| 0.6498| 0.6218| -0.2821| 0.1760|l-h-h | 2| #> | -0.0831| 0.1197| 0.205| -0.0530| 0.1168| -0.4538| 0.6500| 0.6215| -0.2819| 0.1759|h-l-l | 1| #> | -0.0831| 0.1197| 0.207| -0.0529| 0.1168| -0.4530| 0.6506| 0.6202| -0.2818| 0.1760|h-l-h | 2| #> | -0.0831| 0.1217| 0.205| -0.0533| 0.1169| -0.4559| 0.6485| 0.6249| -0.2823| 0.1757|h-h-l | 2| #> | -0.0831| 0.1217| 0.207| -0.0532| 0.1168| -0.4551| 0.6490| 0.6237| -0.2822| 0.1758|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. 
#> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| #> |:---------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| #> |(-0.0851, 0.1197, 0.205) - (-0.0851, 0.1197, 0.207) | 0| 0| -0.37| 0.72| 0.48| 0| 0|l-l-l vs l-l-h |0 vs 1 | 0.80| #> |(-0.0851, 0.1197, 0.205) - (-0.0851, 0.1217, 0.205) | 0| 0| 1.03| 0.30| 1.72| 0| 0|l-l-l vs l-h-l |0 vs 1 | 0.80| #> |(-0.0851, 0.1197, 0.205) - (-0.0851, 0.1217, 0.207) | 0| 0| 0.55| 0.58| 0.77| 0| 0|l-l-l vs l-h-h |0 vs 2 | 0.80| #> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1197, 0.205) | 0| 0| 0.57| 0.57| 0.81| 0| 0|l-l-l vs h-l-l |0 vs 1 | 0.80| #> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1197, 0.207) | 0| 0| 0.08| 0.94| 0.09| 0| 0|l-l-l vs h-l-h |0 vs 2 | 0.94| #> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1217, 0.205) | 0| 0| 1.29| 0.20| 2.35| 0| 0|l-l-l vs h-h-l |0 vs 2 | 0.80| #> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| 0.70| 0.48| 1.05| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.80| #> |(-0.0851, 0.1197, 0.207) - (-0.0851, 0.1217, 0.205) | 0| 0| 0.90| 0.37| 1.45| 0| 0|l-l-h vs l-h-l |1 vs 1 | 0.80| #> |(-0.0851, 0.1197, 0.207) - (-0.0851, 0.1217, 0.207) | 0| 0| 1.03| 0.30| 1.72| 0| 0|l-l-h vs l-h-h |1 vs 2 | 0.80| #> |(-0.0851, 0.1197, 0.207) - (-0.0831, 0.1197, 0.205) | 0| 0| 0.90| 0.37| 1.44| 0| 0|l-l-h vs h-l-l |1 vs 1 | 0.80| #> |(-0.0851, 0.1197, 0.207) - (-0.0831, 0.1197, 0.207) | 0| 0| 0.57| 0.57| 0.81| 0| 0|l-l-h vs h-l-h |1 vs 2 | 0.80| #> |(-0.0851, 0.1197, 0.207) - (-0.0831, 0.1217, 0.205) | 0| 0| 1.34| 0.18| 2.48| 0| 0|l-l-h vs h-h-l |1 vs 2 | 0.80| #> |(-0.0851, 0.1197, 0.207) - (-0.0831, 0.1217, 0.207) | 0| 0| 1.29| 0.20| 2.35| 0| 0|l-l-h vs h-h-h |1 vs 3 | 0.80| #> |(-0.0851, 0.1217, 0.205) - (-0.0851, 0.1217, 0.207) | 0| 0| -0.37| 0.72| 0.48| 0| 0|l-h-l vs l-h-h |1 vs 2 | 0.80| #> |(-0.0851, 0.1217, 0.205) - (-0.0831, 0.1197, 0.205) | 0| 0| -0.39| 0.69| 0.53| 0| 0|l-h-l vs h-l-l |1 vs 1 | 0.80| #> |(-0.0851, 0.1217, 0.205) - (-0.0831, 0.1197, 0.207) | 0| 0| -0.45| 0.65| 0.61| 0| 0|l-h-l vs h-l-h |1 vs 2 | 0.80| #> |(-0.0851, 0.1217, 0.205) - (-0.0831, 0.1217, 0.205) | 0| 0| 0.57| 0.57| 0.81| 0| 0|l-h-l vs h-h-l |1 vs 2 | 0.80| #> |(-0.0851, 0.1217, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| 0.08| 0.94| 0.09| 0| 0|l-h-l vs h-h-h |1 vs 3 | 0.94| #> |(-0.0851, 0.1217, 0.207) - (-0.0831, 0.1197, 0.205) | 0| 0| -0.15| 0.88| 0.19| 0| 0|l-h-h vs h-l-l |2 vs 1 | 0.94| #> |(-0.0851, 0.1217, 0.207) - (-0.0831, 0.1197, 0.207) | 0| 0| -0.39| 0.69| 0.53| 0| 0|l-h-h vs h-l-h |2 vs 2 | 0.80| #> |(-0.0851, 0.1217, 0.207) - (-0.0831, 0.1217, 0.205) | 0| 0| 0.90| 0.37| 1.44| 0| 0|l-h-h vs h-h-l |2 vs 2 | 0.80| #> |(-0.0851, 0.1217, 0.207) - (-0.0831, 0.1217, 0.207) | 0| 0| 0.57| 0.57| 0.81| 0| 0|l-h-h vs h-h-h |2 vs 3 | 0.80| #> |(-0.0831, 0.1197, 0.205) - (-0.0831, 0.1197, 0.207) | 0| 0| -0.37| 0.72| 0.48| 0| 0|h-l-l vs h-l-h |1 vs 2 | 0.80| #> |(-0.0831, 0.1197, 0.205) - (-0.0831, 0.1217, 0.205) | 0| 0| 1.03| 0.30| 1.72| 0| 0|h-l-l vs h-h-l |1 vs 2 | 0.80| #> |(-0.0831, 0.1197, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| 0.55| 0.58| 0.77| 0| 0|h-l-l vs h-h-h |1 vs 3 | 0.80| #> |(-0.0831, 0.1197, 0.207) - (-0.0831, 0.1217, 0.205) | 0| 0| 0.90| 0.37| 1.45| 0| 0|h-l-h vs h-h-l |2 vs 2 | 0.80| #> |(-0.0831, 0.1197, 0.207) - (-0.0831, 0.1217, 0.207) | 0| 0| 1.03| 0.30| 1.72| 0| 0|h-l-h vs h-h-h |2 vs 3 | 0.80| #> |(-0.0831, 
0.1217, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| -0.37| 0.72| 0.48| 0| 0|h-h-l vs h-h-h |2 vs 3 | 0.80| #> #> r <- compareHistories(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = m, reference = \"l-l-l\", comparison = \"h-h-h\", save.out = FALSE) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 12 (24%) individuals that fall into 2 out of the 2 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 6| #> |l-l-l | 6| #> #> #> Below are the average predictions by user-specified history: #> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|-------:|------:|-----:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| #> |1 | -0.0851| 0.1197| 0.205| -0.0529| 0.1168| -0.4526| 0.6508| 0.6197| -0.2818| 0.1761|l-l-l | 0| #> |8 | -0.0831| 0.1217| 0.207| -0.0532| 0.1168| -0.4551| 0.6490| 0.6237| -0.2822| 0.1758|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| #> |:---------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| #> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| -0.7| 0.48| 1.05| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.48| #> #> r <- compareHistories(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = m, reference = \"l-l-l\", comparison = c(\"h-h-h\", \"h-l-l\"), save.out = FALSE) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 20 (40%) individuals that fall into 3 out of the 3 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. 
#> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 6| #> |h-l-l | 8| #> |l-l-l | 6| #> #> #> Below are the average predictions by user-specified history: #> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|-------:|------:|-----:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| #> |1 | -0.0851| 0.1197| 0.205| -0.0529| 0.1168| -0.4526| 0.6508| 0.6197| -0.2818| 0.1761|l-l-l | 0| #> |5 | -0.0831| 0.1197| 0.205| -0.0530| 0.1168| -0.4538| 0.6500| 0.6215| -0.2819| 0.1759|h-l-l | 1| #> |8 | -0.0831| 0.1217| 0.207| -0.0532| 0.1168| -0.4551| 0.6490| 0.6237| -0.2822| 0.1758|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| #> |:---------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| #> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| -0.70| 0.48| 1.05| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.57| #> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1197, 0.205) | 0| 0| -0.57| 0.57| 0.81| 0| 0|l-l-l vs h-l-l |0 vs 1 | 0.57| #> #> r <- compareHistories(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = m, reference = c(\"l-l-l\", \"l-h-h\"), comparison = c(\"h-h-h\"), save.out = FALSE) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 20 (40%) individuals that fall into 3 out of the 3 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 6| #> |l-h-h | 8| #> |l-l-l | 6| #> #> #> Below are the average predictions by user-specified history: #> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|-------:|------:|-----:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| #> |1 | -0.0851| 0.1197| 0.205| -0.0529| 0.1168| -0.4526| 0.6508| 0.6197| -0.2818| 0.1761|l-l-l | 0| #> |4 | -0.0851| 0.1217| 0.207| -0.0531| 0.1169| -0.4540| 0.6498| 0.6218| -0.2821| 0.1760|l-h-h | 2| #> |8 | -0.0831| 0.1217| 0.207| -0.0532| 0.1168| -0.4551| 0.6490| 0.6237| -0.2822| 0.1758|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. 
#> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|term.1 | estimate.1| std.error.1| statistic.1| p.value.1| s.value.1| conf.low.1| conf.high.1|history |dose | p.value_corr| #> |:---------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:---------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-----------:|---------:|---------:|----------:|-----------:|:--------------|:------|------------:| #> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| -0.7| 0.48| 1.05| 0| 0|(-0.0851499446362294,0.121681511944702,0.207011843149082) - (-0.0831499446362294,0.121681511944702,0.207011843149082) | 0| 0| -0.57| 0.57| 0.81| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.48| #> #> r <- compareHistories(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = m, reference = c(\"l-l-l\", \"l-h-h\"), comparison = c(\"h-h-h\", \"l-l-h\"), save.out = FALSE) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 25 (50%) individuals that fall into 4 out of the 4 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 6| #> |l-h-h | 8| #> |l-l-h | 5| #> |l-l-l | 6| #> #> #> Below are the average predictions by user-specified history: #> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|-------:|------:|-----:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| #> |1 | -0.0851| 0.1197| 0.205| -0.0529| 0.1168| -0.4526| 0.6508| 0.6197| -0.2818| 0.1761|l-l-l | 0| #> |2 | -0.0851| 0.1197| 0.207| -0.0528| 0.1168| -0.4518| 0.6514| 0.6184| -0.2817| 0.1762|l-l-h | 1| #> |4 | -0.0851| 0.1217| 0.207| -0.0531| 0.1169| -0.4540| 0.6498| 0.6218| -0.2821| 0.1760|l-h-h | 2| #> |8 | -0.0831| 0.1217| 0.207| -0.0532| 0.1168| -0.4551| 0.6490| 0.6237| -0.2822| 0.1758|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. 
#> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|term.1 | estimate.1| std.error.1| statistic.1| p.value.1| s.value.1| conf.low.1| conf.high.1|history |dose | p.value_corr| #> |:---------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:---------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-----------:|---------:|---------:|----------:|-----------:|:--------------|:------|------------:| #> |(-0.0851, 0.1197, 0.205) - (-0.0831, 0.1217, 0.207) | 0| 0| -0.70| 0.48| 1.05| 0| 0|(-0.0851499446362294,0.121681511944702,0.207011843149082) - (-0.0831499446362294,0.121681511944702,0.207011843149082) | 0| 0| -0.57| 0.57| 0.81| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.72| #> |(-0.0851, 0.1197, 0.205) - (-0.0851, 0.1197, 0.207) | 0| 0| 0.37| 0.72| 0.48| 0| 0|(-0.0851499446362294,0.121681511944702,0.207011843149082) - (-0.0851499446362294,0.119681511944702,0.207011843149082) | 0| 0| 1.03| 0.30| 1.72| 0| 0|l-l-l vs l-l-h |0 vs 1 | 0.72| #> #> r <- compareHistories(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = m, reference = c(\"l-l-l\", \"l-h-h\"), comparison = c(\"h-h-h\", \"l-l-h\"), hi_lo_cut = c(0.60, 0.30), mc_comp_method = \"BH\", dose_level = \"l\", exp_lab = \"Hello\", out_lab = \"Goodbye\", colors = \"Set1\", save.out = FALSE) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 10 (20%) individuals that fall into 4 out of the 4 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 4| #> |l-h-h | 3| #> |l-l-h | 1| #> |l-l-l | 2| #> #> #> Below are the average predictions by user-specified history: #> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|-------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| #> |1 | -0.6672| -0.3898| -0.3736| 0.0267| 0.1649| 0.1617| 0.8715| 0.1984| -0.2966| 0.3499|l-l-l | 3| #> |2 | -0.6672| -0.3898| 0.2944| 0.0583| 0.1349| 0.4320| 0.6658| 0.5869| -0.2061| 0.3226|l-l-h | 2| #> |4 | -0.6672| 0.1975| 0.2944| -0.0227| 0.1384| -0.1642| 0.8696| 0.2016| -0.2941| 0.2486|l-h-h | 1| #> |8 | 0.1803| 0.1975| 0.2944| -0.0761| 0.1218| -0.6249| 0.5320| 0.9104| -0.3147| 0.1626|h-h-h | 0| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. 
#> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|term.1 | estimate.1| std.error.1| statistic.1| p.value.1| s.value.1| conf.low.1| conf.high.1|history |dose | p.value_corr| #> |:--------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-----------:|---------:|---------:|----------:|-----------:|:--------------|:------|------------:| #> |(-0.6672, -0.3898, -0.3736) - (0.1803, 0.1975, 0.2944) | -0.10| 0.16| -0.65| 0.51| 0.96| -0.41| 0.2|(-0.66724255804337,0.197543560074282,0.294433897893948) - (0.180252575049252,0.197543560074282,0.294433897893948) | -0.05| 0.09| -0.57| 0.57| 0.81| -0.24| 0.13|l-l-l vs h-h-h |3 vs 0 | 0.72| #> |(-0.6672, -0.3898, -0.3736) - (-0.6672, -0.3898, 0.2944) | 0.03| 0.09| 0.37| 0.72| 0.48| -0.14| 0.2|(-0.66724255804337,0.197543560074282,0.294433897893948) - (-0.66724255804337,-0.389779823228518,0.294433897893948) | 0.08| 0.08| 1.03| 0.30| 1.72| -0.07| 0.24|l-l-l vs l-l-h |3 vs 2 | 0.72| #> #>"},{"path":"https://istallworthy.github.io/devMSMs/reference/createFormulas.html","id":null,"dir":"Reference","previous_headings":"","what":"Create balancing formulas — createFormulas","title":"Create balancing formulas — createFormulas","text":"Creates balancing formulas relating exposure relevant time-varying time invariant confounders exposure time point used create IPTW weights.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/createFormulas.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create balancing formulas — createFormulas","text":"","code":"createFormulas( exposure, exposure_time_pts, outcome, type, ti_confounders, tv_confounders, bal_stats = NULL, concur_conf = NULL, keep_conf = NULL, home_dir = NULL, custom = NULL, verbose = TRUE, save.out = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/createFormulas.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create balancing formulas — createFormulas","text":"exposure name exposure variable exposure_time_pts list integers weights created/assessed correspond time points exposure wass measured outcome name outcome variable \".timepoint\" suffix type type formula create 'full' (includes lagged time-varying confounders), 'short' (includes time-varying confounders t-1 lag ), 'update' (adds 'short' formulas imbalanced time-varying confounders lags great t-1) ti_confounders list time invariant confounders (least one required) tv_confounders list time-varying confounders \".timepoint\" suffix, include exposure outcome variables (least time-varying exposure variables required ) bal_stats list balance statistics assessBalance(), required 'update' type concur_conf (optional) list variable names reflecting time-varying confounders retain formulas contemporaneously (default none) keep_conf (optional) list variable names reflecting confounders always retain formulas (default depends type) home_dir path home directory (required 'save.' 
= TRUE) custom (optional) custom list formulas exposure time point (default create automatically according type) verbose (optional) TRUE FALSE indicator user output (default TRUE) save.(optional) TRUE FALSE indicator save output intermediary output locally (default TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/createFormulas.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Create balancing formulas — createFormulas","text":"list balancing formulas exposure time point","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/createFormulas.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Create balancing formulas — createFormulas","text":"","code":"#Full Formulas f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + C #> #> f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> #Short Formulas f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\"), ti_confounders = \"C\", type = \"short\", save.out = FALSE) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + C #> #> f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"short\", save.out = FALSE) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ 
A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C #> #> c <- list(\"short_form-1\" = as.formula(A.1 ~ C), \"short_form-2\" = as.formula(A.2 ~ A.1 + B.1 + C), \"short_form-3\" = as.formula(A.3 ~ A.2 + B.2 + C)) f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"short\", custom = c, save.out = FALSE) #> The user-supplied custom balancing formula for each exposure time point are below: #> A.1 ~ C #> #> A.2 ~ A.1 + B.1 + C #> #> A.3 ~ A.2 + B.2 + C #> #Update Formulas test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1 (SD = 0.37; range = 0.29-2). #> b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"weighted\", weights = w, formulas = f, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> #> As shown below, 1 out of 7 (14%) covariates across time points, corresponding to 1 out of 2 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.19 (range= -0.19--0.19): #> #> Table: Imbalanced covariates using cbps and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 2| 1| 3| #> | 3| 3| 0| 3| #> #> #> USER ALERT: For exposure A using the short formulas and cbps : #> The median absolute value relation between exposure and confounder is 0.03 (range = -0.19 -0.07). #> As shown below, the following 1 covariates across time points out of 7 total (14.29%) spanning 1 domains out of 3 (33.33%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.19 (range=-0.19--0.19) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:---------|----------:|----------:|--------:| #> |2 |A | 2| 1|A.1 | -0.1863923| 0.1| 0| #> f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\"), ti_confounders = \"C\", type = \"update\", bal_stats = b, save.out = FALSE) #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> The update formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For A at exposure time point 2 no time-varying confounders at additional lags were added. 
#> #> The update formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + C #> #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For A at exposure time point 3 no time-varying confounders at additional lags were added. #> #> The update formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + C #> #> f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"update\", bal_stats = b, save.out = FALSE) #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> The update formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For A at exposure time point 2 no time-varying confounders at additional lags were added. #> #> The update formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For A at exposure time point 3 no time-varying confounders at additional lags were added. #> #> The update formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C #> #>"},{"path":"https://istallworthy.github.io/devMSMs/reference/createWeights.html","id":null,"dir":"Reference","previous_headings":"","what":"Creates IPTW balancing weights — createWeights","title":"Creates IPTW balancing weights — createWeights","text":"Creates IPTW balancing weights user-specified exposure time point using balancing formulas relate exposure time point relevant confounders.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/createWeights.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Creates IPTW balancing weights — createWeights","text":"","code":"createWeights( data, exposure, outcome, formulas, method = \"cbps\", SL.library = \"SL.glm\", criterion = NA, home_dir = NULL, read_in_from_file = FALSE, verbose = TRUE, save.out = TRUE, ... )"},{"path":"https://istallworthy.github.io/devMSMs/reference/createWeights.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Creates IPTW balancing weights — createWeights","text":"data data wide format : data frame, list imputed data frames, mids object exposure name exposure variable outcome name outcome variable \".timepoint\" suffix formulas list balancing formulas time point output createFormulas() method (optional) character string weightitMSM() balancing method abbreviation (default Covariate Balancing Propensity Score \"cbps\") SL.library required superLearner weighting method (\"super\"); see SuperLearner::listWrappers() options criterion (optional) criterion used select best weights (default \"p.mean\" minimizing avg Pearson correlation continuous exposures \"smd.mean\" binary exposures) (requird \"gbm\" method) home_dir path home directory (required 'save.' 
= TRUE) read_in_from_file (optional) TRUE FALSE indicator read weights previously run saved locally (default FALSE) verbose (optional) TRUE FALSE indicator user output (default TRUE) save.(optional) TRUE FALSE indicator save output intermediary output locally (default TRUE) ... inputs weightitMSM()","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/createWeights.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Creates IPTW balancing weights — createWeights","text":"list IPTW balancing weights","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/reference/createWeights.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Creates IPTW balancing weights — createWeights","text":"","code":"test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"short\", save.out = FALSE) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C #> #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.12 (SD = 0.46; range = 0.36-2). #> f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\", \"A.1:B.1\"), ti_confounders = \"C\", type = \"short\", save.out = FALSE) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + A.1:B.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C #> #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.13 (SD = 0.46; range = 0.35-2). #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, method = \"cbps\", save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.13 (SD = 0.46; range = 0.35-2). 
#> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, method = \"cbps\", save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.13 (SD = 0.46; range = 0.35-2). #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, method = \"gbm\", save.out = FALSE) #> For the gbm weighting method, the median weight value is 0.73 (SD = 0.35; range = 0.26-2). #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, method = \"bart\", save.out = FALSE) #> For the bart weighting method, the median weight value is 0.71 (SD = 0.27; range = 0.34-2). #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, method = \"super\", save.out = FALSE) #> Loading required package: nnls #> Warning: All algorithms have zero weight #> Warning: All metalearner coefficients are zero, predictions will all be equal to 0 #> Warning: All algorithms have zero weight #> Warning: All metalearner coefficients are zero, predictions will all be equal to 0 #> Warning: All algorithms have zero weight #> Warning: All metalearner coefficients are zero, predictions will all be equal to 0 #> For the super weighting method, the median weight value is 1.01 (SD = 0.28; range = 0.62-3). #>"},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_comparisons.html","id":null,"dir":"Reference","previous_headings":"","what":"Creates custom comparisons — create_custom_comparisons","title":"Creates custom comparisons — create_custom_comparisons","text":"Creates custom comparisons","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_comparisons.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Creates custom comparisons — create_custom_comparisons","text":"","code":"create_custom_comparisons(preds, ref_vals, comp_vals, exposure)"},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_comparisons.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Creates custom comparisons — create_custom_comparisons","text":"preds custom output marginaleffects::average_predictions() ref_vals reference values comp_vals comparison values exposure name exposure variable","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_comparisons.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Creates custom comparisons — create_custom_comparisons","text":"custom comparisons","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_contrasts.html","id":null,"dir":"Reference","previous_headings":"","what":"Create custom contrasts — create_custom_contrasts","title":"Create custom contrasts — create_custom_contrasts","text":"Create custom contrasts","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_contrasts.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create custom contrasts — create_custom_contrasts","text":"","code":"create_custom_contrasts(d, reference, comp_histories, exposure, preds)"},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_contrasts.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create custom contrasts — create_custom_contrasts","text":"d data frame high low values per exposure main effect reference reference sequence \"h\" /\"l\" (e.g., \"h-h-h\") 
comp_histories comparison sequence(s) \"h\" /\"l\" (e.g., \"h-h-h\") exposure name exposure variable preds custom output marginaleffects::average_predictions()","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_contrasts.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Create custom contrasts — create_custom_contrasts","text":"contrasts","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/devMSMs-package.html","id":null,"dir":"Reference","previous_headings":"","what":"devMSMs: Tools for Conducting Marginal Structural Models with Developmental Data — devMSMs-package","title":"devMSMs: Tools for Conducting Marginal Structural Models with Developmental Data — devMSMs-package","text":"Functions preparing, assessing, implementing MSMS.","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/reference/devMSMs-package.html","id":"author","dir":"Reference","previous_headings":"","what":"Author","title":"devMSMs: Tools for Conducting Marginal Structural Models with Developmental Data — devMSMs-package","text":"Maintainer: Isabella Stallworthy istall@seas.upenn.edu Authors: Noah Greifer ngreifer@iq.harvard.edu [contributor] Meriah DeJoseph meriahd@stanford.edu Emily Padrutt padru004@umn.edu Daniel Berry dberry@umn.edu","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/eval_hist.html","id":null,"dir":"Reference","previous_headings":"","what":"Visualize distribution of sample across exposure histories — eval_hist","title":"Visualize distribution of sample across exposure histories — eval_hist","text":"Create customized, user-specified exposure histories tables displaying sample distribution across user inspection.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/eval_hist.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Visualize distribution of sample across exposure histories — eval_hist","text":"","code":"eval_hist( data, exposure, time_pts, epochs = NULL, hi_lo_cut = NULL, ref = NULL, comps = NULL )"},{"path":"https://istallworthy.github.io/devMSMs/reference/eval_hist.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Visualize distribution of sample across exposure histories — eval_hist","text":"data data wide format : data frame, list imputed data frames, mids object exposure name exposure variable time_pts list integers weights created/assessed correspond time points exposure measured epochs (optional) data frame exposure epoch labels values hi_lo_cut list two numbers indicating quantile values reflect high low values, respectively, continuous exposure ref (optional) list one strings \"-\"-separated \"l\" \"h\" values indicative reference exposure history compare comparison, required comparison supplied comps (optional) list one strings \"-\"-separated \"l\" \"h\" values indicative comparison history/histories compare reference, required reference supplied","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/eval_hist.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Visualize distribution of sample across exposure histories — eval_hist","text":"none","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/eval_hist.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Visualize distribution of sample across exposure histories — eval_hist","text":"","code":"test <- 
data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) h <- eval_hist(data = test, exposure = \"A\", time_pts = c(1, 2, 3)) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 48 (96%) individuals that fall into 8 out of the 8 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 6| #> |h-h-l | 8| #> |h-l-h | 6| #> |h-l-l | 3| #> |l-h-h | 5| #> |l-h-l | 6| #> |l-l-h | 7| #> |l-l-l | 7| #> h <- eval_hist(data = test, exposure = \"A\", time_pts = c(1, 2, 3), epochs = data.frame(epochs = c(\"Infancy\", \"Toddlerhood\"), values = I(list(c(1, 2), c(3))))) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 48 (96%) individuals that fall into 4 out of the 4 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across Infancy, Toddlerhood. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects Infancy, Toddlerhood containing time points c(1, 2), 3: #> #> |history | n| #> |:-------|--:| #> |h-h | 14| #> |h-l | 11| #> |l-h | 10| #> |l-l | 13| #> h <- eval_hist(data = test, exposure = \"A\", time_pts = c(1, 2, 3), hi_lo_cut = c(0.6, 0.3)) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 17 (34%) individuals that fall into 7 out of the 8 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> Warning: USER ALERT: There are no individuals in your sample that fall into l-h-h exposure history/histories. You may wish to consider different high/low cutoffs (for continuous exposures), alternative epochs, or choose a different measure to avoid extrapolation. 
#> #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 4| #> |h-h-l | 3| #> |h-l-h | 1| #> |h-l-l | 3| #> |l-h-l | 2| #> |l-l-h | 2| #> |l-l-l | 2| #> h <- eval_hist(data = test, exposure = \"A\", time_pts = c(1, 2, 3), hi_lo_cut = c(0.6, 0.3), ref = \"l-l-l\", comps = \"h-h-h\") #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 6 (12%) individuals that fall into 2 out of the 2 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 4| #> |l-l-l | 2| #>"},{"path":"https://istallworthy.github.io/devMSMs/reference/fitModel.html","id":null,"dir":"Reference","previous_headings":"","what":"Fit outcome model — fitModel","title":"Fit outcome model — fitModel","text":"Fits weighted marginal outcome model generalized linear model user's choosing, relating exposure main effects outcome using IPTW weights.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/fitModel.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fit outcome model — fitModel","text":"","code":"fitModel( data, weights, exposure, exposure_time_pts, outcome, model, family = NULL, link = NA, int_order = NA, covariates = NULL, epochs = NULL, home_dir = NULL, verbose = TRUE, save.out = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/fitModel.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fit outcome model — fitModel","text":"data data wide format : data frame, list imputed data frames, mids object weights list IPTW weights output createWeights() exposure name exposure variable exposure_time_pts list integers weights created/assessed correspond time points exposure measured outcome name outcome variable \".timepoint\" suffix model character indicating one following outcome models: \"m0\" (exposure main effects) \"m1\" (exposure main effects & covariates) \"m2\" (exposure main effects & interactions) \"m3\" (exposure main effects, interactions, & covariates) family (optional) family function specification svyglm model link (optional) character link function specification svyglm model int_order integer specification highest order exposure main effects interaction, required interaction models (\"m2\", \"m3\") covariates list characters reflecting variable names covariates, required covariate models (\"m1\", \"m3\") epochs (optional) data frame exposure epoch labels values home_dir path home directory (required 'save.' 
= TRUE) verbose (optional) TRUE FALSE indicator user output (default TRUE) save.(optional) TRUE FALSE indicator save output intermediary output locally (default TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/fitModel.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fit outcome model — fitModel","text":"list svyglm model output","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/reference/fitModel.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fit outcome model — fitModel","text":"","code":"f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 0.98 (SD = 0.31; range = 0.3-2). #> m <- fitModel(data = test, weights = w, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = \"m0\", save.out = FALSE) #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. #> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 1.739392 p= 0.62288 #> (scale factors: 1.2 1.1 0.67 ); denominator df= 46 #> #> The marginal model, m0, is summarized below: m <- fitModel(data = test, weights = w, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = \"m0\", family = gaussian, link = \"identity\", epochs = data.frame(epochs = c(\"Infancy\", \"Toddlerhood\"), values = I(list(c(1, 2), c(3)))), save.out = FALSE) #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. 
#> #> Working (Rao-Scott+F) LRT for A.Infancy A.Toddlerhood #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 0.8130111 p= 0.65766 #> (scale factors: 1.3 0.72 ); denominator df= 47 #> #> The marginal model, m0, is summarized below: m <- fitModel(data = test, weights = w, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = \"m1\", covariates = \"C\", save.out = FALSE) #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. #> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 1.835843 p= 0.6022 #> (scale factors: 1.3 1 0.69 ); denominator df= 45 #> #> The marginal model, m1, is summarized below: m <- fitModel(data = test, weights = w, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = \"m2\", int_order = 3, save.out = FALSE) #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. #> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 A.1:A.2 A.1:A.3 A.2:A.3 A.1:A.2:A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 2.836186 p= 0.82934 #> (scale factors: 2.7 1.6 1.2 0.85 0.34 0.22 0.2 ); denominator df= 42 #> #> The marginal model, m2, is summarized below: m <- fitModel(data = test, weights = w, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = \"m3\", int_order = 3, covariates = \"C\", save.out = FALSE) #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. 
#> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 A.1:A.2 A.1:A.3 A.2:A.3 A.1:A.2:A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 4.331592 p= 0.66448 #> (scale factors: 2.7 1.4 0.99 0.72 0.44 0.37 0.31 ); denominator df= 41 #> #> The marginal model, m3, is summarized below:"},{"path":"https://istallworthy.github.io/devMSMs/reference/getModel.html","id":null,"dir":"Reference","previous_headings":"","what":"Fits outcome model — getModel","title":"Fits outcome model — getModel","text":"Fits outcome model","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/getModel.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fits outcome model — getModel","text":"","code":"getModel( d, exposure, exposure_time_pts, outcome, exp_epochs, int_order, model, fam, covariates, verbose, epochs = NULL )"},{"path":"https://istallworthy.github.io/devMSMs/reference/getModel.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fits outcome model — getModel","text":"d wide data frame exposure name exposure variable exposure_time_pts list integers weights created/assessed correspond time points exposure measured outcome name outcome variable \".timepoint\" suffix exp_epochs list exposure epochs int_order integer specification highest order exposure main effects interaction interaction models model character indicating one following outcome models: \"m0\" (exposure main effects) \"m1\" (exposure main effects & covariates) \"m2\" (exposure main effects & interactions) \"m3\" (exposure main effects, interactions, & covariates) \"covs\" (covariate model) \"int\" (intercept model) fam function specification svyglm model covariates list characters reflecting variable names covariates covariate models verbose TRUE FALSE indicator user output (default TRUE) epochs data frame exposure epoch labels values","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/getModel.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fits outcome model — getModel","text":"list fitted model(s)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/getModel.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fits outcome model — getModel","text":"","code":"test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.08 (SD = 0.85; range = 0.37-6). 
#> epochs = data.frame(epochs = c(\"Infancy\", \"Toddlerhood\"), values = I(list(c(1, 2), c(3)))) e <- apply(expand.grid(\"A\", as.character(epochs[, 1])), 1, paste, sep = \"\", collapse = \".\") test$weights <- w[[1]]$weights g <- getModel(d = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", epochs = epochs, exp_epochs = e, fam = gaussian, model = \"m0\")"},{"path":"https://istallworthy.github.io/devMSMs/reference/get_comparison_values.html","id":null,"dir":"Reference","previous_headings":"","what":"Finds custom comparison values — get_comparison_values","title":"Finds custom comparison values — get_comparison_values","text":"Finds custom comparison values","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/get_comparison_values.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Finds custom comparison values — get_comparison_values","text":"","code":"get_comparison_values(d, comp_histories)"},{"path":"https://istallworthy.github.io/devMSMs/reference/get_comparison_values.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Finds custom comparison values — get_comparison_values","text":"d data frame high low values per exposure main effect comp_histories comparison sequence(s) \"h\" /\"l\" (e.g., \"h-h-h\")","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/get_comparison_values.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Finds custom comparison values — get_comparison_values","text":"comparison values","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/get_comparison_values.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Finds custom comparison values — get_comparison_values","text":"","code":"d <- data.frame(e = c(\"A.1\", \"A.2\", \"A.3\"), l = c(0, 0, 0), h = c(1, 1, 1)) r <- get_comparison_values(d = d, comp_histories = \"l-l-l\" ) r <- get_comparison_values(d = d, comp_histories = \"h-h-h\" ) r <- get_comparison_values(d = d, comp_histories = c(\"h-h-h\", \"h-h-l\"))"},{"path":"https://istallworthy.github.io/devMSMs/reference/get_reference_values.html","id":null,"dir":"Reference","previous_headings":"","what":"Finds custom reference values — get_reference_values","title":"Finds custom reference values — get_reference_values","text":"Finds custom reference values","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/get_reference_values.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Finds custom reference values — get_reference_values","text":"","code":"get_reference_values(d, reference)"},{"path":"https://istallworthy.github.io/devMSMs/reference/get_reference_values.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Finds custom reference values — get_reference_values","text":"d data frame high low values per exposure main effect reference reference sequence \"h\" /\"l\" (e.g., \"h-h-h\")","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/get_reference_values.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Finds custom reference values — get_reference_values","text":"reference values","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/get_reference_values.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Finds custom reference values 
— get_reference_values","text":"","code":"d <- data.frame(e = c(\"A.1\", \"A.2\", \"A.3\"), l = c(0, 0, 0), h = c(1, 1, 1)) r <- get_reference_values(d = d, reference = \"l-l-l\" ) r <- get_reference_values(d = d, reference = \"h-h-h\" )"},{"path":"https://istallworthy.github.io/devMSMs/reference/make_love_plot.html","id":null,"dir":"Reference","previous_headings":"","what":"Create love plots showing balancing statistics — make_love_plot","title":"Create love plots showing balancing statistics — make_love_plot","text":"Create love plots showing balancing statistics","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/make_love_plot.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create love plots showing balancing statistics — make_love_plot","text":"","code":"make_love_plot( balance_stats, exposure, exposure_time_pt, exposure_type, k = 0, form_name, data_type, balance_thresh, weights_method, imp_conf, save.out = FALSE, home_dir = NULL, folder = NULL, verbose = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/make_love_plot.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create love plots showing balancing statistics — make_love_plot","text":"balance_stats data frame balance statistics exposure name exposure variable exposure_time_pt exposure time point integer exposure_type character indicating binary continuous exposure type k imputation number form_name formula name data_type single imputed data type balance_thresh one two numbers 0 1 indicating single balancing threshold thresholds less important confounders, respectively weights_method method character string WeightItMSM() balancing method abbreviation imp_conf list variable names reflecting important confounders save.TRUE FALSE indicator save output intermediary output locally home_dir path home directory (required save.= TRUE) folder folder path saving verbose (optional) TRUE FALSE indicator user output (default TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/make_love_plot.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Create love plots showing balancing statistics — make_love_plot","text":"none","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/make_love_plot.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Create love plots showing balancing statistics — make_love_plot","text":"","code":"f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) w <- createWeights(data = test, 
exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.12 (SD = 1.48; range = 0.32-10). #> b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"weighted\", weights = w, formulas = f, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using full formulas. #> #> As shown below, 3 out of 9 (33%) covariates across time points, corresponding to 3 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.34 (range= -0.38-0.11): #> #> Table: Imbalanced covariates using cbps and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 1| 2| 3| #> | 3| 4| 1| 5| #> #> #> USER ALERT: For exposure A using the full formulas and cbps : #> The median absolute value relation between exposure and confounder is 0.09 (range = -0.38 -0.11). #> As shown below, the following 3 covariates across time points out of 9 total (33.33%) spanning 3 domains out of 3 (100%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.34 (range=-0.38-0.11) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:---------|----------:|----------:|--------:| #> |3 |A | 2| 1|B.1 | -0.3829012| 0.1| 0| #> |4 |A | 2| 0|C | -0.3395794| 0.1| 0| #> |5 |A | 3| 1|A.1 | 0.1100347| 0.1| 0| #> make_love_plot(balance_stats = b, exposure = \"A\", exposure_time_pt = 1, exposure_type = \"continuous\", form_name = \"form_name\", data_type = \"single\", balance_thresh = 0.1, imp_conf = NULL, weights_method = w[[1]]$method, save.out = FALSE, folder = \"prebalance/\") make_love_plot(balance_stats = b, exposure = \"A\", exposure_time_pt = 2, exposure_type = \"continuous\", form_name = \"form_name\", data_type = \"single\", balance_thresh = c(0.05, 0.1), imp_conf = \"A.2\", weights_method = w[[1]]$method, save.out = FALSE, folder = \"weighted/\")"},{"path":"https://istallworthy.github.io/devMSMs/reference/perform_multiple_comparison_correction.html","id":null,"dir":"Reference","previous_headings":"","what":"Conduct multiple comparison correction — perform_multiple_comparison_correction","title":"Conduct multiple comparison correction — perform_multiple_comparison_correction","text":"Conduct multiple comparison correction","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/perform_multiple_comparison_correction.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Conduct multiple comparison correction — perform_multiple_comparison_correction","text":"","code":"perform_multiple_comparison_correction( comps, reference, comp_histories, method, verbose = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/perform_multiple_comparison_correction.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Conduct multiple comparison correction — perform_multiple_comparison_correction","text":"comps table reference reference sequence \"h\" /\"l\" (e.g., \"h-h-h\") comp_histories comparison sequence(s) \"h\" /\"l\" (e.g., \"h-h-h\") method character abbreviation multiple comparison correction method verbose (optional) TRUE FALSE indicator user output (default 
TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/perform_multiple_comparison_correction.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Conduct multiple comparison correction — perform_multiple_comparison_correction","text":"comparison table corrected p-values","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_imp_list.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Wide data imputed and read in (continuous exposure) — sim_data_imp_list.rda","title":"Wide data imputed and read in (continuous exposure) — sim_data_imp_list.rda","text":"data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). MAR missingness added using missMethods package imputing mice package reading imputed dataset. data contain economic strain (ESEATA1) continuously distributed variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_imp_list.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Wide data imputed and read in (continuous exposure) — sim_data_imp_list.rda","text":"","code":"sim_data_imp_list"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_imp_list.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Wide data imputed and read in (continuous exposure) — sim_data_imp_list.rda","text":"list data frames","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_long_miss.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Long data with missingness (continuous exposure) — sim_data_long_miss.rda","title":"Long data with missingness (continuous exposure) — sim_data_long_miss.rda","text":"data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). MAR missingness added using missMethods package. data contain economic strain (ESEATA1) continuously distributed variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_long_miss.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Long data with missingness (continuous exposure) — sim_data_long_miss.rda","text":"","code":"sim_data_long_miss"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_long_miss.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Long data with missingness (continuous exposure) — sim_data_long_miss.rda","text":"data frame","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_long_miss_bin.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Long data with missingness (binary exposure) — sim_data_long_miss_bin.rda","title":"Long data with missingness (binary exposure) — sim_data_long_miss_bin.rda","text":"data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). 
MAR missingness added using missMethods package. data contain economic strain (ESEATA1) binary variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_long_miss_bin.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Long data with missingness (binary exposure) — sim_data_long_miss_bin.rda","text":"","code":"sim_data_long_miss_bin"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_long_miss_bin.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Long data with missingness (binary exposure) — sim_data_long_miss_bin.rda","text":"data frame","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_mice.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Wide data imputed with mice (continuous exposure) — sim_data_mice.rda","title":"Wide data imputed with mice (continuous exposure) — sim_data_mice.rda","text":"data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). MAR missingness added using missMethods package imputing mice package. data contain economic strain (ESEATA1) continuously distributed variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_mice.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Wide data imputed with mice (continuous exposure) — sim_data_mice.rda","text":"","code":"sim_data_mice"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_mice.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Wide data imputed with mice (continuous exposure) — sim_data_mice.rda","text":"mice object","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Wide complete data (continuous exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a continuously\ndistributed variable. — sim_data_wide.rda","title":"Wide complete data (continuous exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a continuously\ndistributed variable. — sim_data_wide.rda","text":"Wide complete data (continuous exposure) data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). 
data contain economic strain (ESEATA1) continuously distributed variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Wide complete data (continuous exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a continuously\ndistributed variable. — sim_data_wide.rda","text":"","code":"sim_data_wide"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Wide complete data (continuous exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a continuously\ndistributed variable. — sim_data_wide.rda","text":"wide data frame 1,292 observations 69 measured variables. \"ESETA1\" continuous exposure economic strain \"StrDif_Tot.58\" continuous outcome behavioral problems \"InRatioCor\" income--needs ratio \"PmEd2\" parent's education level \"state\" family's state residence \"TcBlac2\" family's race (1 = x, 0 = y) \"bioDadInHH2\" whether biological father lives family (insert coding) \"HomeOwnd\" indicator whether family owns home (insert coding) \"KFASTScr\" \"PmBlac2\" primary careigver race (insert coding) \"SmokTotl\" \"caregiv_health\" \"gov_assist\" \"ALI_LE\" \"B18Raw\" \"CORTB\" \"EARS_TJo\" \"fscore\" \"HOMEETA1\" \"IBRAttn\" \"LESMnNeg\" \"MDI\" \"RHAsSO\" \"SAAmylase\" \"WndNbrhood\"","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide.rda.html","id":"references","dir":"Reference","previous_headings":"","what":"References","title":"Wide complete data (continuous exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a continuously\ndistributed variable. — sim_data_wide.rda","text":"Vernon-Feagans, L., Cox, M., Willoughby, M., Burchinal, M., Garrett-Peters, P., Mills-Koonce, R., Garrett-Peiers, P., Conger, R. D., & Bauer, P. J. (2013). Family Life Project: Epidemiological Developmental Study Young Children Living Poor Rural Communities. Monographs Society Research Child Development, 78(5), –150. Burchinal, M., Howes, C., Pianta, R., Bryant, D., Early, D., Clifford, R., & Barbarin, O. (2008). Predicting Child Outcomes End Kindergarten Quality Pre-Kindergarten Teacher–Child Interactions Instruction. Applied Developmental Science, 12(3), 140–153. 
https://doi.org/10.1080/10888690802199418","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_bin.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Wide complete data (binary exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a binary variable. — sim_data_wide_bin.rda","title":"Wide complete data (binary exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a binary variable. — sim_data_wide_bin.rda","text":"Wide complete data (binary exposure) data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). data contain economic strain (ESEATA1) binary variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_bin.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Wide complete data (binary exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a binary variable. — sim_data_wide_bin.rda","text":"","code":"sim_data_wide_bin"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_bin.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Wide complete data (binary exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a binary variable. — sim_data_wide_bin.rda","text":"data frame","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_miss.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Wide data with missingness (continuous exposure) — sim_data_wide_miss.rda","title":"Wide data with missingness (continuous exposure) — sim_data_wide_miss.rda","text":"data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). MAR missingness added using missMethods package. 
data contain economic strain (ESEATA1) continuously distributed variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_miss.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Wide data with missingness (continuous exposure) — sim_data_wide_miss.rda","text":"","code":"sim_data_wide_miss"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_miss.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Wide data with missingness (continuous exposure) — sim_data_wide_miss.rda","text":"data frame","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_miss_bin.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Wide data with missingness (binary exposure) — sim_data_wide_miss_bin.rda","title":"Wide data with missingness (binary exposure) — sim_data_wide_miss_bin.rda","text":"data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). MAR missingness added using missMethods package. data contain economic strain (ESEATA1) binary variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_miss_bin.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Wide data with missingness (binary exposure) — sim_data_wide_miss_bin.rda","text":"","code":"sim_data_wide_miss_bin"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_miss_bin.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Wide data with missingness (binary exposure) — sim_data_wide_miss_bin.rda","text":"data frame","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/trimWeights.html","id":null,"dir":"Reference","previous_headings":"","what":"Trim IPTW balancing weights — trimWeights","title":"Trim IPTW balancing weights — trimWeights","text":"Trims IPTW balancing weights heavy right tails populating weight values given quantile weight value quantile.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/trimWeights.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Trim IPTW balancing weights — trimWeights","text":"","code":"trimWeights( exposure, outcome, weights, quantile = NA, home_dir = NULL, verbose = TRUE, save.out = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/trimWeights.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Trim IPTW balancing weights — trimWeights","text":"exposure name exposure variable outcome name outcome variable \".timepoint\" suffix weights list IPTW weights output createWeights() quantile (optional) numeric value 0 1 quantile value trim weights (default 0.95) home_dir path home directory (required save.= TRUE) verbose (optional) TRUE FALSE indicator user output (default TRUE) save.(optional) TRUE FALSE indicator save output intermediary output locally (default TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/trimWeights.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Trim IPTW balancing weights — trimWeights","text":"list model output trimmed 
weights","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/reference/trimWeights.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Trim IPTW balancing weights — trimWeights","text":"","code":"f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.08 (SD = 0.56; range = 0.45-4). #> t <- trimWeights(exposure = \"A\", outcome = \"D.3\", weights = w, save.out = FALSE) #> Trimming weights to 95%. #> #> For the A-D.3 relation, following trimming at the 0.95 quantile, the median weight value is 1.08 (SD= 0.41; range= 0.45-2). #> t <- trimWeights(exposure = \"A\", outcome = \"D.3\", weights = w, quantile = 0.75, save.out = FALSE) #> Trimming weights to 75%. #> #> For the A-D.3 relation, following trimming at the 0.75 quantile, the median weight value is 1.08 (SD= 0.23; range= 0.45-1). #>"}] +[{"path":"https://istallworthy.github.io/devMSMs/articles/Data_Requirements.html","id":"references","dir":"Articles","previous_headings":"","what":"References","title":"Data Requirements","text":"Burchinal, M., Howes, C., Pianta, R., Bryant, D., Early, D., Clifford, R., & Barbarin, O. (2008). Predicting Child Outcomes End Kindergarten Quality Pre-Kindergarten Teacher–Child Interactions Instruction. Applied Developmental Science, 12(3), 140–153. https://doi.org/10.1080/10888690802199418 Kainz, K., Greifer, N., Givens, ., Swietek, K., Lombardi, B. M., Zietz, S., & Kohn, J. L. (2017). Improving Causal Inference: Recommendations Covariate Selection Balance Propensity Score Methods. Journal Society Social Work Research, 8(2), 279–303. https://doi.org/10.1086/691464 Vernon-Feagans, L., Cox, M., Willoughby, M., Burchinal, M., Garrett-Peters, P., Mills-Koonce, R., Garrett-Peiers, P., Conger, R. D., & Bauer, P. J. (2013). Family Life Project: Epidemiological Developmental Study Young Children Living Poor Rural Communities. Monographs Society Research Child Development, 78(5), –150.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"core-inputs","dir":"Articles","previous_headings":"","what":"Core inputs","title":"Preliminary Steps","text":"Please see Specifying Core Inputs vignette detail following core inputs. 
, use ESETA1, measure economic strain experienced family, exposure StrDif_Tot, behavior problems measured SDQ, outcome.","code":"set.seed(1234) home_dir <- NA # home_dir <- '/Users/isabella/Library/CloudStorage/Box-Box/BSL General/MSMs/testing/isa' #note: no / after exposure <- \"ESETA1\" exposure_time_pts <- c(6, 15, 24, 35, 58) outcome <- \"StrDif_Tot.58\" tv_confounders <- c(\"SAAmylase.6\",\"SAAmylase.15\", \"SAAmylase.24\", \"MDI.6\", \"MDI.15\", \"RHasSO.6\", \"RHasSO.15\", \"RHasSO.24\",\"RHasSO.35\", \"RHasSO.58\", \"WndNbrhood.6\",\"WndNbrhood.24\", \"WndNbrhood.35\", \"WndNbrhood.58\", \"IBRAttn.6\", \"IBRAttn.15\", \"IBRAttn.24\", \"B18Raw.6\", \"B18Raw.15\", \"B18Raw.24\", \"B18Raw.58\", \"HOMEETA1.6\", \"HOMEETA1.15\", \"HOMEETA1.24\", \"HOMEETA1.35\", \"HOMEETA1.58\", \"InRatioCor.6\", \"InRatioCor.15\", \"InRatioCor.24\", \"InRatioCor.35\", \"InRatioCor.58\", \"ESETA1.6\", \"ESETA1.15\", \"ESETA1.24\", \"ESETA1.35\", \"ESETA1.58\", \"CORTB.6\", \"CORTB.15\", \"CORTB.24\", \"EARS_TJo.24\", \"EARS_TJo.35\", \"LESMnPos.24\", \"LESMnPos.35\", \"LESMnNeg.24\", \"LESMnNeg.35\", \"StrDif_Tot.35\", \"StrDif_Tot.58\", \"fscore.35\", \"fscore.58\" ) #required ti_confounders <- c(\"state\", \"BioDadInHH2\", \"PmAge2\", \"PmBlac2\", \"TcBlac2\", \"PmMrSt2\", \"PmEd2\", \"KFASTScr\", \"RMomAgeU\", \"RHealth\", \"HomeOwnd\", \"SWghtLB\", \"SurpPreg\", \"SmokTotl\", \"DrnkFreq\", \"peri_health\", \"caregiv_health\", \"gov_assist\" #, \"state:SmokTotl\", \"PmAge2:PmBlac2\", \"PmAge2:PmEd2\" #testing interaction terms )"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"load-data","dir":"Articles","previous_headings":"","what":"Load data","title":"Preliminary Steps","text":"Users several options reading data. can begin workflow following options: long data (without missing data), can converted wide data wide data (without missing data), can imputed using imputeData() helper function, needed imputed wide data sets saved locally, can read list , demonstrate use starting options. First load simulated longitudinal data long format (missingness) accompanies devMSMs. data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008).","code":"data(\"sim_data_long_miss\", package = \"devMSMs\") data_long <- sim_data_long_miss"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p1--format-data","dir":"Articles","previous_headings":"","what":"P1. Format Data","title":"Preliminary Steps","text":"data must wide format contain “ID” column subject identifier exposure, outcome, confounders separate columns (shown Figure 1). Column names can include underscore special characters time-varying variables suffix consists period followed time point (e.g., “variable.6”). variables classed integer, numeric, factor (character). Auxiliary nuisance covariates confounders (e.g, assessment version) can included dataset use specification final modeling step (Workflow vignettes Step 5). insert Figure 1 .","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p1a--format-single-data-frame-of-long-data","dir":"Articles","previous_headings":"P1. Format Data","what":"P1a. 
Format single data frame of long data","title":"Preliminary Steps","text":"Users beginning single data frame long format (without missingness) can utilize helper function formatLongData() summarize exposure outcome data convert required variable names. function takes dataset long format variables time (time_var), ID (id_var), missing data (missing) alternative variables re-labels according required package. also classes factor confounders (factor_confounders) factors data others numeric. get descriptive statistics summary exposure, ESETA1, outcome, StrDif_Tot.58, visual inspections.","code":"data_long_f <- formatLongData(data = data_long, exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, time_var = \"WAVE\", id_var = \"ID\", missing = NA, factor_confounders = c(\"state\", \"TcBlac2\",\"BioDadInHH2\",\"HomeOwnd\", \"PmBlac2\", \"PmMrSt2\", \"SurpPreg\", \"RHealth\", \"SmokTotl\", \"DrnkFreq\", \"RHasSO\"), home_dir = home_dir, save.out = save.out) #> Table: Summary of ESETA1 Exposure Information #> #> |WAVE | mean| sd| min| max| #> |:----|---------:|---------:|------:|-----:| #> |15 | 0.2983433| 0.9261390| -2.699| 3.474| #> |24 | 0.1585387| 0.9575181| -2.858| 3.284| #> |35 | 0.1388395| 0.9475905| -3.046| 3.014| #> |58 | 0.0996006| 0.9924516| -2.478| 3.173| #> |6 | 0.3337979| 0.9298080| -2.809| 4.035| #> #> Table: Summary of Outcome StrDif_Tot.58 Information #> #> |WAVE | mean| sd| min| max| #> |:----|---------:|---------:|------:|-----:| #> |35 | 0.6009797| 0.2830620| -0.230| 1.536| #> |58 | 0.5029778| 0.2931918| -0.281| 1.448|"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p1b--convert-single-long-data-frame-to-wide-format","dir":"Articles","previous_headings":"P1. Format Data","what":"P1b. Convert single long data frame to wide format","title":"Preliminary Steps","text":"Users correctly formatted variables long format option using following code transform data wide format, proceed using package (missing data) imputing (< 20% missing data MAR). transform newly formatted long data wide format. Alternatively, start wide data missingness already formatted.","code":"require(\"stats\") v <- sapply(strsplit(tv_confounders[!grepl(\"\\\\:\", tv_confounders)], \"\\\\.\"), \"[\", 1) v <- v[!duplicated(v)] data_wide <- stats::reshape(data = data_long_f, idvar = \"ID\", #list ID variable in your dataset v.names = v, timevar = \"WAVE\", # list time variable in your long dataset times = c(6, 15, 24, 35, 58), # list all time points in your dataset direction = \"wide\") data_wide <- data_wide[, colSums(is.na(data_wide)) < nrow(data_wide)] data(\"sim_data_wide_miss\", package = \"devMSMs\") data_wide <- sim_data_wide_miss"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p2--impute-data-to-account-for-missingness","dir":"Articles","previous_headings":"","what":"P2. Impute Data to Account for Missingness","title":"Preliminary Steps","text":"functions devMSMs package accept data form single data frame missing values m imputed datasets form either mids object (output mice package via imputeData()) list imputed datasets. developmental data humans amount missing data. Given creation IPTW balancing weights requires complete data, recommend imputing data. Imputation assumes missing data mechanism missing random (MAR) 20% missing data total (Leyrat et al., 2021). 
Given existing work demonstrating superiority, devMSMS implements ‘within’ approach imputed data, conducting steps imputed dataset pooling estimates using Rubin’s rules create final average predictions contrast comparisons Worfklows vignettes Step 5 (Leyrat et al, 2021; Granger et al., 2019).","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p2a--multiply-impute-single-wide-formatted-data-frame-using-mice","dir":"Articles","previous_headings":"P2. Impute Data to Account for Missingness","what":"P2a. Multiply impute single wide, formatted data frame using mice","title":"Preliminary Steps","text":"Users option using helper imputeData() function impute correctly formatted wide data. step can take run. user can specify many imputed datasets create (default m = 5). imputeData() draws mice() function mice package (van Buuren & Oudshoorn, 2011) conduct multiple imputation chained equations (mice). variables present dataset used impute missing data column. user can specify imputation method method field drawing following list: “pmm” (predictive mean matching), “midastouch” (weighted predictive mean matching), “sample” (random sample observed values), “rf” (random forest) “cart” (classification regression trees). Random forest imputation default given evidence efficiency superior performance (Shah et al., 2014). Please review mice documentation details. Additionally, users can specify integer value seed order offset random number generator mice() make reproducible imputations. parameter read_imps_from_file allow read already imputed data local storage (TRUE) re-run imputation code multiple times (FALSE; default). Users may use parameter supply mids object imputed data mice package (title ‘all_imp.rds’). sure inspect console warnings well resulting imputed datasets. variables missing data following imputation may need removed due high collinearity /low variability. required inputs function data frame wide format (formatted according pre-requirements listed ), m number imputed datasets create, path home directory (save.= TRUE), exposure (e.g., “variable”), outcome (e.g., “variable.t”). home directory path, exposure, outcome already defined user completed Specifying Core Inputs vignette. optional inputs follows. user can specify imputation method compatible mice() (see ). Additionally, user can specify maxit number interactions mice::mice() conduct (default 5). user can also specify para_proc, logical indicator indicating whether speed imputing using parallel processing (default = TRUE). draws 2 cores using functions parallel, doRNG, doParallel packages. user may also specify additional inputs accepted mice::mice() advise consulting [mice documentation] information. user can also indicate already created imputed datasets function wish read (read_imps_from_file = TRUE rather recreate (default). example, create 5 imputed datasets using default random forest method 0 iterations (just illustrative purposes), set seed reproducibility, assign output data use devMSMs. code takes time run. inspect output console warnings mice(). mice object can now assigned data use deveMSMs package (see Workflows vignettes).","code":"s <- 1234 m <- 5 method <- \"rf\" maxit <- 0 imputed_data <- imputeData(data = data_wide, exposure = exposure, outcome = outcome, m = m, method = method, maxit = maxit, para_proc = FALSE, seed = s, read_imps_from_file = FALSE, home_dir = home_dir, save.out = save.out) #> Creating 5 imputed datasets using the rf imputation method in mice::mice(). 
This may take some time to run. #> #> #> USER ALERT: Please view any logged events from the imputation below: #> Table: Logged Events from mice::mice data <- imputed_data"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p2b--read-in-as-a-list-wide-imputed-data-saved-locally","dir":"Articles","previous_headings":"P2. Impute Data to Account for Missingness","what":"P2b. Read in as a list wide imputed data saved locally","title":"Preliminary Steps","text":"Alternatively, user imputed datasets already created using program mice, can read , list, files saved locally .csv files (labeled “1”:m) single folder. list can assigned data use deveMSMs package (see Workflows vignettes). , load imputed data simulated FLP, example. Alternatively, case missing data, user can read formatted, wide dataset assigned data use deveMSMs package (see Workflows vignettes). , load single wide data frame simulated FLP example.","code":"# folder <- \"/Users/isabella/Library/CloudStorage/Box-Box/BSL General/MSMs/testing/testing data/continuous outcome/continuous exposure/imputations/\" # # files <- list.files(folder, full.names = TRUE, pattern = \"\\\\.csv\") #make sure pattern matches suffix of your data # # data <- lapply(files, function(file) { # imp_data <- read.csv(file) # imp_data # }) data(\"sim_data_imp_list\", package = \"devMSMs\") data <- sim_data_imp_list data(\"sim_data_wide\", package = \"devMSMs\") data_wide <- sim_data_wide data <- data_wide"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p3--optional-identify-exposure-epochs","dir":"Articles","previous_headings":"","what":"P3. Optional: Identify Exposure Epochs","title":"Preliminary Steps","text":"Users option specify exposure epochs, meaningful periods developmental time many encompass time points exposure measured. user option draw theory structure data specify developmental epochs exposure differ time points exposure collected. specify epochs, users utilize optional epochs argument providing data frame contains two variables: epochs: provide, quotations, list user-created names epoch; values: list, named epoch, provide single integer list integers (exposure time points) constitute epoch. named epoch must corresponding value (values epoch can differ number entries, shown ). user ensure epoch values included exposure_time_pts field. exposure epochs arguments fitModel() compareHistories() devMSMs functions (see Workflows vignettes) specification kept consistent throughout use package vignettes. constitute main effects variables modeling relation exposure outcome (Workflows vignettes Step 5a) form basis estimating comparing exposure histories (Workflows vignettes Step 5b). epochs specified, exposure time points used aforementioned steps. , specify Infancy, Toddlerhood, Childhood epochs correspond 6 15; 24 35; 58 months, respectively.","code":"epochs <- data.frame(epochs = c(\"Infancy\", \"Toddlerhood\", \"Childhood\"), values = I(list(c(6, 15), c(24, 35), c(58) )))"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p4--recommended-specify-inspect-exposure-histories","dir":"Articles","previous_headings":"","what":"P4. Recommended: Specify & Inspect Exposure Histories","title":"Preliminary Steps","text":"Exposure histories units users test substantive hypotheses, construction determined theoretical practical reasoning. 
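As a small illustration of what these histories look like, the sketch below (plain R, not a devMSMs function) enumerates the 2^3 = 8 possible sequences of low ("l") and high ("h") exposure implied by three epochs:

# Illustrative only: the 8 possible exposure histories implied by 3 epochs
apply(expand.grid(rep(list(c("l", "h")), 3), stringsAsFactors = FALSE),
      1, paste, collapse = "-")
# -> "l-l-l", "h-l-l", ..., "h-h-h"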
strongly recommend users verify inspect exposure histories priori relation data hypotheses.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p4a--create-high-and-low-cutoff-values-for-continuous-exposures","dir":"Articles","previous_headings":"","what":"P4a. Create high and low cutoff values for continuous exposures","title":"Preliminary Steps","text":"First, continuously distributed exposures (regardless whether exposure epochs specified), recommend users indicate high low cutoff values optional input compareHistories()) devMSMs function (see Workflows vignettes). , specify hi_lo_cut, list, quantile value (0-1) considered high levels exposure, followed quantile value (0-1) considered low levels exposure (default median split). values may revised following inspection sample distribution across resulting exposure histories subsequent steps. final values used creating exposure histories Step 5 Workflows vignettes. , specify 60th 30th percentiles demarcate high low levels economic strain exposure, respectively.","code":"hi_lo_cut <- c(0.6, 0.3)"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p4b--specify-hypotheses-relevant-exposure-histories","dir":"Articles","previous_headings":"","what":"P4b. Specify hypotheses-relevant exposure histories","title":"Preliminary Steps","text":"strongly recommend users selective histories, developmental sequences high low exposure (exposure time points epochs), vital testing hypotheses. recommend user estimates compares subset possible exposure histories using reference comparison fields (rather comparing possible exposure histories). user can specify custom subset exposure histories using reference comparison fields optional inputs compareHistories() devMSMs function (see Workflows vignettes). conduct customized comparisons, users must provide least one unique valid history (e.g., “l-l-l”) reference , quotations, provide string (list strings) lowercase l’s h’s (separated -), corresponding exposure epoch (time point), signify sequence exposure levels (“low” “high”, respectively). supply reference history, comparisons provide least one unique valid history comparison , quotations, providing string (list strings) l’s h’s (separated “-”), corresponding exposure epoch, signify sequence exposure levels (“low” “high”, respectively) constitutes comparison exposure history/histories compared reference Step 5b Workflows vignettes. supply one comparisons, least one reference must specified. reference exposure history compared comparison history comparisons supplied multiple comparison correction. reference comparison specified, histories compared Step 5b Workflows vignettes. final reference comparison values established step used estimating comparing exposure histories Step 5b Workflows vignettes. 4 exposure main effects (either epochs exposure time points), user required select subset history comparisons (Step 5b Workflows vignettes), given base code (see hypotheses() function marginaleffects package) accommodate pairwise history comparisons 5 time points). 
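A little arithmetic helps motivate this recommendation; the lines below are illustrative only:

# With 3 epochs there are 2^3 = 8 possible histories, giving
choose(2^3, 2)   # 28 possible pairwise comparisons
# which is why pre-specifying a focused subset of reference and comparison
# histories is advised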
, specify low economic strain epochs (“l-l-l”) reference event comparison high levels epochs (“h-h-h”) well histories contain 1 dose exposure high economic strain different epochs.","code":"reference <- c(\"l-l-l\") comparison <- c(\"h-h-h\", \"l-l-h\", \"h-l-l\", \"l-h-l\")"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"p4c--inspect-exposure-histories-and-data","dir":"Articles","previous_headings":"","what":"P4c. Inspect exposure histories and data","title":"Preliminary Steps","text":"users, highly recommend use helper inspectData() function (complete dataset long wide format imputed data case missingness) summarize exposure, outcome, confounders inspect sample distribution among exposure histories. Based user-specified exposure epochs high low quantile values (continuous exposures), function outputs table showing sample distribution across histories. strongly suggest visually inspecting table revising designation epochs /high low quantile values (continuous exposures) history contains reasonable number participants. gold standard required number per history cell, users guard extrapolation beyond scope data. example, data, using 75th 25th percentile cutoffs, histories represented less two cases thus re-evaluated cutoffs. Users may wish revise epoch designation high low cutoff values, applicable. function conducts summaries history distribution inspection imputed dataset imputed data supplied. insert Table 2 required inputs inspectData() : complete data (data frame wide long format, list imputed data frames wide format, mids object), exposure (e.g., “variable”), outcome (e.g., “variable.t”). Optional inputs home directory (save.= TRUE), epochs, high/low cutoff values continuous exposures, specification reference comparison histories. helper inspectData() function outputs following files home directory: correlation plot variables dataset, tables exposure outcome descriptive statistics, two summary tables confounders considered time point. , see summaries data types well reasonable cell counts specified histories, imputed dataset.","code":"inspectData(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, # required input ti_confounders = ti_confounders, tv_confounders = tv_confounders, # required input epochs = epochs, hi_lo_cut = hi_lo_cut, reference = reference, comparison = comparison, #optional input home_dir = home_dir, verbose = verbose, save.out = save.out) #optional input #> Using github PAT from envvar GITHUB_PAT #> Skipping install of 'devMSMs' from a github remote, the SHA1 (9a8a731a) has not changed since last install. #> Use `force = TRUE` to force installation #> USER ALERT: Below are the 67 variables spanning 33 unique domains that will be treated as confounding variables for the relation between ESETA1 and StrDif_Tot.58. #> Please inspect this list carefully. It should include all time-varying covariates, time invariant covariates, as well as lagged levels of exposure and outcome variables if they were collected at time points earlier than the outcome time point. 
#> [1] \"B18Raw.15\" \"B18Raw.24\" \"B18Raw.58\" \"B18Raw.6\" #> [5] \"BioDadInHH2\" \"caregiv_health\" \"CORTB.15\" \"CORTB.24\" #> [9] \"CORTB.6\" \"DrnkFreq\" \"EARS_TJo.24\" \"EARS_TJo.35\" #> [13] \"ESETA1.15\" \"ESETA1.24\" \"ESETA1.35\" \"ESETA1.58\" #> [17] \"ESETA1.6\" \"fscore.35\" \"fscore.58\" \"gov_assist\" #> [21] \"HOMEETA1.15\" \"HOMEETA1.24\" \"HOMEETA1.35\" \"HOMEETA1.58\" #> [25] \"HOMEETA1.6\" \"HomeOwnd\" \"IBRAttn.15\" \"IBRAttn.24\" #> [29] \"IBRAttn.6\" \"InRatioCor.15\" \"InRatioCor.24\" \"InRatioCor.35\" #> [33] \"InRatioCor.58\" \"InRatioCor.6\" \"KFASTScr\" \"LESMnNeg.24\" #> [37] \"LESMnNeg.35\" \"LESMnPos.24\" \"LESMnPos.35\" \"MDI.15\" #> [41] \"MDI.6\" \"peri_health\" \"PmAge2\" \"PmBlac2\" #> [45] \"PmEd2\" \"PmMrSt2\" \"RHasSO.15\" \"RHasSO.24\" #> [49] \"RHasSO.35\" \"RHasSO.58\" \"RHasSO.6\" \"RHealth\" #> [53] \"RMomAgeU\" \"SAAmylase.15\" \"SAAmylase.24\" \"SAAmylase.6\" #> [57] \"SmokTotl\" \"state\" \"StrDif_Tot.35\" \"StrDif_Tot.58\" #> [61] \"SurpPreg\" \"SWghtLB\" \"TcBlac2\" \"WndNbrhood.24\" #> [65] \"WndNbrhood.35\" \"WndNbrhood.58\" \"WndNbrhood.6\" #> #> The following variables are designated as numeric: #> [1] \"PmAge2, ALI_Le.35, CORTB.15, CORTB.24, CORTB.6, ESETA1.15, ESETA1.24, ESETA1.35, ESETA1.58, ESETA1.6, fscore.35, fscore.58, HOMEETA1.15, HOMEETA1.24, HOMEETA1.35, HOMEETA1.58, HOMEETA1.6, IBRAttn.15, IBRAttn.24, IBRAttn.6, InRatioCor.15, InRatioCor.24, InRatioCor.35, InRatioCor.58, InRatioCor.6, LESMnNeg.24, LESMnNeg.35, LESMnPos.24, LESMnPos.35, SAAmylase.15, SAAmylase.24, SAAmylase.6, StrDif_Tot.35, StrDif_Tot.58, WndNbrhood.24, WndNbrhood.35, WndNbrhood.58, WndNbrhood.6\" #> #> The following variables are designated as factors: #> [1] \"ID, state, TcBlac2, BioDadInHH2, HomeOwnd, PmBlac2, PmMrSt2, SurpPreg, DrnkFreq, RHealth, SmokTotl, RHasSO.15, RHasSO.24, RHasSO.35, RHasSO.58, RHasSO.6\" #> #> Table: Other variable types #> #> | |variable |type | #> |:--------------|:--------------|:-------| #> |KFASTScr |KFASTScr |integer | #> |PmEd2 |PmEd2 |integer | #> |RMomAgeU |RMomAgeU |integer | #> |SWghtLB |SWghtLB |integer | #> |peri_health |peri_health |integer | #> |caregiv_health |caregiv_health |integer | #> |gov_assist |gov_assist |integer | #> |B18Raw.15 |B18Raw.15 |integer | #> |B18Raw.24 |B18Raw.24 |integer | #> |B18Raw.58 |B18Raw.58 |integer | #> |B18Raw.6 |B18Raw.6 |integer | #> |EARS_TJo.24 |EARS_TJo.24 |integer | #> |EARS_TJo.35 |EARS_TJo.35 |integer | #> |MDI.15 |MDI.15 |integer | #> |MDI.6 |MDI.6 |integer | #> #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 1292 individuals in the sample, below is the distribution of the 437 (33.82%) individuals that fall into 5 out of the 5 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure ESETA1, respectively, across Infancy, Toddlerhood, Childhood. 
#> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure ESETA1 histories based on exposure main effects Infancy, Toddlerhood, Childhood containing time points c(6, 15), c(24, 35), 58: #> #> |history | n| #> |:-------|---:| #> |h-h-h | 229| #> |h-l-l | 16| #> |l-h-l | 10| #> |l-l-h | 31| #> |l-l-l | 151| #> #> Your outcome variable(s) have the following type(s): data.frame"},{"path":"https://istallworthy.github.io/devMSMs/articles/Preliminary_Steps.html","id":"references","dir":"Articles","previous_headings":"","what":"References","title":"Preliminary Steps","text":"Arel-Bundock, Vincent. 2023. marginaleffects: Predictions, Comparisons, Slopes, Marginal Means, Hypothesis Tests. https://CRAN.R-project.org/package=marginaleffects. Burchinal, M., Howes, C., Pianta, R., Bryant, D., Early, D., Clifford, R., & Barbarin, O. (2008). Predicting Child Outcomes End Kindergarten Quality Pre-Kindergarten Teacher–Child Interactions Instruction. Applied Developmental Science, 12(3), 140–153. https://doi.org/10.1080/10888690802199418 Granger, E., Sergeant, J. C., & Lunt, M. (2019). Avoiding pitfalls combining multiple imputation propensity scores. Statistics Medicine, 38(26), 5120–5132. https://doi.org/10.1002/sim.8355 Leyrat, C., Carpenter, J. R., Bailly, S., & Williamson, E. J. (2021). Common Methods Handling Missing Data Marginal Structural Models: Works . American Journal Epidemiology, 190(4), 663–672. https://doi.org/10.1093/aje/kwaa225 Shah, . D., Bartlett, J. W., Carpenter, J., Nicholas, O., & Hemingway, H. (2014). Comparison Random Forest Parametric Imputation Models Imputing Missing Data Using MICE: CALIBER Study. American Journal Epidemiology, 179(6), 764–774. https://doi.org/10.1093/aje/kwt312 Vernon-Feagans, L., Cox, M., Willoughby, M., Burchinal, M., Garrett-Peters, P., Mills-Koonce, R., Garrett-Peiers, P., Conger, R. D., & Bauer, P. J. (2013). Family Life Project: Epidemiological Developmental Study Young Children Living Poor Rural Communities. Monographs Society Research Child Development, 78(5), –150. van Buuren, Stef, Karin Groothuis-Oudshoorn. 2011. “mice: Multivariate Imputation Chained Equations r.” Journal Statistical Software 45 (3): 1–67. https://doi.org/10.18637/jss.v045.i03.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"home-directory","dir":"Articles","previous_headings":"","what":"Home Directory","title":"Specify Core Inputs","text":"Users required specify home directory, quotations, path designated folder output package, plan save intermediary final outputs package (default) setting save.= TRUE functions. sub directories created within home directory devMSMs functions automatically save.’ = TRUE.","code":"home_dir <- NA home_dir <- '/Users/isabella/Library/CloudStorage/Box-Box/BSL General/MSMs/testing/isa' #note: no / after"},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"exposure-variable","dir":"Articles","previous_headings":"","what":"Exposure Variable","title":"Specify Core Inputs","text":"Users required specify exposure variable input functions devMSMs. user must specify exposure, variable name exposure quotations, without time information appended (e.g., “variable”). 
Note dataset, exposure variables wide format labeled “.time” suffix (e.g., “variable.t”).","code":"exposure <- \"ESETA1\""},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"exposure-time-points","dir":"Articles","previous_headings":"","what":"Exposure Time Points","title":"Specify Core Inputs","text":"Next, users required provide information time points exposure assessed exposure_time_pts, required input createFormulas(), assessBalance(), fitModel(), compareHistories() devMSMs functions (see Workflows vignettes). user two options specifying exposure time points select option best serves theory regarding developmental timing practical constraints data modeling process. First, may specify time points exposure measured data. means balancing formulas created (Steps 1a, 2a, 3b Workflows vignettes) IPTW weights created (Steps 2b, 3c Workflows vignettes) assessed (Steps 2c, 3a, 4 Workflows vignettes) time points. case, epochs specified, time points included exposure main effects final substantive model history comparison (Step 5 Workflows vignettes). Second, may specify subset theoretically important time points exposure measured data. means balancing formulas created IPTW weights created assessed time points. , epochs specified, subsetted time points included exposure main effects final substantive models. Importantly, exposure variables time points exposure assessed included time-varying confounders balancing purposes . specification exposure epochs kept consistent throughout use devMSMs package. user intends specify exposure epochs (Preliminary Steps vignette Step P3), user include time points encompassed epochs exposure_time_pts. user intend specify exposure epochs (Preliminary Steps vignette Step P3), exposure_time_pts constitute exposure main effects final outcome model form basis histories used history comparison. case, user specifies 4 exposure time points, required conduct subset history comparisons (Step 5b Workflows vignettes), given base code (see hypotheses() function marginaleffects package) accommodate pairwise history comparisons 5 time points. elected create epochs infancy (6 15 months), toddlerhood (24 35 months), early childhood (58 months). Thus, input 6, 15, 24, 35, 58 exposure_time_pts.","code":"exposure_time_pts <- c(6, 15, 24, 35, 58)"},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"outcome-variable","dir":"Articles","previous_headings":"","what":"Outcome Variable","title":"Specify Core Inputs","text":"Users also required specify outcome variable designated final time point, required input functions devMSMs package. final time point equal (, ideally greater ) final exposure time point. Note instances outcome variable measured prior time points included time-varying confounders balancing purposes. Specifying outcome, variable name outcome time point collected appended following period (e.g., “variable.t”) corresponding variable name wide data, required package. Outcome variables dataset wide format labeled “.time” suffix.","code":"outcome <- \"StrDif_Tot.58\""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"time-invariant-confounders","dir":"Articles","previous_headings":"Confounders","what":"Time invariant confounders","title":"Specify Core Inputs","text":"Specifying least one time invariant confounder required use package required input createFormulas() function. 
Time invariant confounders include core demographic birth characteristics (e.g., sex, racial group membership, birth complications) might cause either exposure outcome, either directly proxy, suggested theory /evidenced strong associations existing literature. , user can also include interaction terms time invariant variables (e.g., “variable:variable”) inclusion balancing formula. Keep mind interactions include factor variables decomposed interactions factor level. ti_confounders, list, quotations, provide names confounders (e.g., “variable”, “variable:variable”) dataset time invariant.","code":"ti_confounders <- c(\"state\", \"BioDadInHH2\", \"PmAge2\", \"PmBlac2\", \"TcBlac2\", \"PmMrSt2\", \"PmEd2\", \"KFASTScr\", \"RMomAgeU\", \"RHealth\", \"HomeOwnd\", \"SWghtLB\", \"SurpPreg\", \"SmokTotl\", \"DrnkFreq\", \"peri_health\", \"caregiv_health\", \"gov_assist\" )"},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"time-varying-confounders","dir":"Articles","previous_headings":"Confounders","what":"Time-varying confounders","title":"Specify Core Inputs","text":"Specifying least time-varying exposures time-varying confounders required use package required input createFormulas() devMSMs function (see Workflows vignettes). tv_confounders list, quotations, provide names variables wide format (e.g., “variable.t”) dataset time-varying (including time-varying confounders, exposures, outcomes). include time-varying exposure variables outcome variables present dataset (e.g., “variable.t”). Note time-varying confounders also include confounders measured repeatedly time points (e.g., InRatioCor) collected one several specific time points, missing time points, time invariant. , user can also include interaction terms time-varying variables (e.g., “variable.t:variable.t”) time invariant time-varying variables (e.g., “variable.t:variable”) inclusion balancing formula. Keep mind interactions include factor variables decomposed interactions factor level.","code":"tv_confounders <- c(\"SAAmylase.6\",\"SAAmylase.15\", \"SAAmylase.24\", \"MDI.6\", \"MDI.15\", \"RHasSO.6\", \"RHasSO.15\", \"RHasSO.24\",\"RHasSO.35\", \"RHasSO.58\", \"WndNbrhood.6\",\"WndNbrhood.24\", \"WndNbrhood.35\", \"WndNbrhood.58\", \"IBRAttn.6\", \"IBRAttn.15\", \"IBRAttn.24\", \"B18Raw.6\", \"B18Raw.15\", \"B18Raw.24\", \"B18Raw.58\", \"HOMEETA1.6\", \"HOMEETA1.15\", \"HOMEETA1.24\", \"HOMEETA1.35\", \"HOMEETA1.58\", \"InRatioCor.6\", \"InRatioCor.15\", \"InRatioCor.24\", \"InRatioCor.35\", \"InRatioCor.58\", \"ESETA1.6\", \"ESETA1.15\", \"ESETA1.24\", \"ESETA1.35\", \"ESETA1.58\", #exposure variables required \"CORTB.6\", \"CORTB.15\", \"CORTB.24\", \"EARS_TJo.24\", \"EARS_TJo.35\", \"LESMnPos.24\", \"LESMnPos.35\", \"LESMnNeg.24\", \"LESMnNeg.35\", \"StrDif_Tot.35\", \"StrDif_Tot.58\", \"fscore.35\", \"fscore.58\" )"},{"path":"https://istallworthy.github.io/devMSMs/articles/Specify_Core_Inputs.html","id":"references","dir":"Articles","previous_headings":"","what":"References","title":"Specify Core Inputs","text":"Arel-Bundock, Vincent. 2023. marginaleffects: Predictions, Comparisons, Slopes, Marginal Means, Hypothesis Tests. https://CRAN.R-project.org/package=marginaleffects.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"load-data","dir":"Articles","previous_headings":"","what":"Load data","title":"Workflow: Continuous Exposure","text":"first load data frame complete data. 
data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). (See Preliminary Steps vignette beginning data types, including missing data).","code":"data(\"sim_data_wide\", package = \"devMSMs\") data <- sim_data_wide"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"core-inputs","dir":"Articles","previous_headings":"","what":"Core inputs","title":"Workflow: Continuous Exposure","text":"Please see Specifying Core Inputs vignette detail following core inputs.","code":"set.seed(1234) home_dir <- NA # home_dir <- '/Users/isabella/Library/CloudStorage/Box-Box/BSL General/MSMs/testing/isa' #note: no / after exposure <- \"ESETA1\" exposure_time_pts <- c(6, 15, 24, 35, 58) outcome <- \"StrDif_Tot.58\" tv_confounders <- c(\"SAAmylase.6\",\"SAAmylase.15\", \"SAAmylase.24\", \"MDI.6\", \"MDI.15\", \"RHasSO.6\", \"RHasSO.15\", \"RHasSO.24\",\"RHasSO.35\", \"RHasSO.58\", \"WndNbrhood.6\",\"WndNbrhood.24\", \"WndNbrhood.35\", \"WndNbrhood.58\", \"IBRAttn.6\", \"IBRAttn.15\", \"IBRAttn.24\", \"B18Raw.6\", \"B18Raw.15\", \"B18Raw.24\", \"B18Raw.58\", \"HOMEETA1.6\", \"HOMEETA1.15\", \"HOMEETA1.24\", \"HOMEETA1.35\", \"HOMEETA1.58\", \"InRatioCor.6\", \"InRatioCor.15\", \"InRatioCor.24\", \"InRatioCor.35\", \"InRatioCor.58\", \"ESETA1.6\", \"ESETA1.15\", \"ESETA1.24\", \"ESETA1.35\", \"ESETA1.58\", \"CORTB.6\", \"CORTB.15\", \"CORTB.24\", \"EARS_TJo.24\", \"EARS_TJo.35\", \"LESMnPos.24\", \"LESMnPos.35\", \"LESMnNeg.24\", \"LESMnNeg.35\", \"StrDif_Tot.35\", \"StrDif_Tot.58\", \"fscore.35\", \"fscore.58\" ) ti_confounders <- c(\"state\", \"BioDadInHH2\", \"PmAge2\", \"PmBlac2\", \"TcBlac2\", \"PmMrSt2\", \"PmEd2\", \"KFASTScr\", \"RMomAgeU\", \"RHealth\", \"HomeOwnd\", \"SWghtLB\", \"SurpPreg\", \"SmokTotl\", \"DrnkFreq\", \"peri_health\", \"caregiv_health\", \"gov_assist\" )"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"phase-1-confounder-adjustment","dir":"Articles","previous_headings":"","what":"Phase 1: Confounder Adjustment","title":"Workflow: Continuous Exposure","text":"goal first phase minimize associations confounders exposure using IPTW balancing weights. strongly advise user carefully inspect balancing formula ensure weights created evaluated appropriately step.","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-1a--create-full-balancing-formulas-conduct-pre-balance-checking","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 1. Create Full Balancing Formulas & Conduct Pre-Balance Checking","what":"Step 1a. Create Full Balancing Formulas & Conduct Pre-Balance Checking","title":"Workflow: Continuous Exposure","text":"first create comprehensive, full balancing formulas relating exposure confounders time point using createFormulas() function (type = “full”). step creates full formulas containing measured confounding variables exposure time point, including time-invariant confounders, lagged time-varying confounders, well past levels exposure outcome (make sure listed time-varying confounders). 
code automatically excludes time-varying confounders contemporaneous time point given decisively differentiated mediators balanced (Thoemmes & Ong, 2016), although can modified user strong reason believe concurrent variable mediator (see ). include interactions covariates balancing formulas, please list composed time invariant covariates (e.g., “variable:variable” “variable.t:variable.t”) time invariant confounders, composed time-varying covariates (e.g., “variable.t:variable” “variable.t:variable.t”) time-varying confounders list. Interactions containing time-varying covariates treated time-varying confounders measured highest measurement time point constituent time points. note, interactions factor variables multiple levels can produce large number additional variables balancing formulas. required input create full balancing formulas using createFormulas() function : exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), list time-varying confounders (e.g. “variable.time”), list time invariant confounders (e.g., “variable”), setting type = “full”. Optional inputs create full balancing formulas using createFormulas() function follows. concur_conf: list, provide names time-varying confounders (e.g., “variable.time”) wish included concurrently balancing formulas (overriding default include lagged confounders). choose specify concurrent confounders, reliably distinguish mediators. user may also specify list custom formulas specifying custom list formulas, one exposure time point (e.g., “exposure.time ~ variable.time + variable +…”) formula format, entry named formula type exposure time point (e.g., “full_form-6”). abridged example shown . createFormulas() function automatically check custom formulas ensure correctly formatted formula exposure time point exposure dependent variable. However, user responsible ensuring custom formulas contain appropriate confounders formula type generating. chose create custom formulas use createFormulas() make . createFormulas function saves .csv .rds files containing balancing formulas exposure time point specified type (“full”) ‘formulas/full/’ folder. function returns list formulas labeled type, exposure, outcome, exposure time point. shown , createFormulas() creates balancing formula exposure time point. full formula contains time invariant confounders well lagged time-varying confounders time point. 
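If one wants to pull a single formula out of the list returned by the createFormulas() call that follows for closer inspection, something like the sketch below may help (the element order and naming are assumptions based on the description above, which states the list is labeled by type, exposure, outcome, and time point):

# Sketch: inspect one element of the list returned by createFormulas()
# (element order/naming is an assumption based on the description above)
full_formulas[[length(full_formulas)]]   # e.g., the last (58-month) formula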
inspect formulas ensure accurate creating IPTW balancing weights.","code":"# concur_conf <- \"B18Raw.15\" concur_conf <- NULL # custom <- list(\"full_form-6\" = as.formula(\"ESETA1.6 ~ BioDadInHH2 + DrnkFreq + gov_assist\"), # \"full_form-15\" = as.formula(\"ESETA1.15 ~ BioDadInHH2 + DrnkFreq + gov_assist\") # ) custom <- NULL type <- \"full\" full_formulas <- createFormulas(exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, #required type = type, ti_confounders = ti_confounders, tv_confounders = tv_confounders, #required concur_conf = concur_conf, custom = custom, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 6 is: #> ESETA1.6 ~ BioDadInHH2 + caregiv_health + DrnkFreq + gov_assist + #> HomeOwnd + KFASTScr + peri_health + PmAge2 + PmBlac2 + PmEd2 + #> PmMrSt2 + RHealth + RMomAgeU + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 15 is: #> ESETA1.15 ~ B18Raw.6 + BioDadInHH2 + caregiv_health + CORTB.6 + #> DrnkFreq + ESETA1.6 + gov_assist + HOMEETA1.6 + HomeOwnd + #> IBRAttn.6 + InRatioCor.6 + KFASTScr + MDI.6 + peri_health + #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.6 + RHealth + #> RMomAgeU + SAAmylase.6 + SmokTotl + state + SurpPreg + SWghtLB + #> TcBlac2 + WndNbrhood.6 #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 24 is: #> ESETA1.24 ~ B18Raw.15 + B18Raw.6 + BioDadInHH2 + caregiv_health + #> CORTB.15 + CORTB.6 + DrnkFreq + ESETA1.15 + ESETA1.6 + gov_assist + #> HOMEETA1.15 + HOMEETA1.6 + HomeOwnd + IBRAttn.15 + IBRAttn.6 + #> InRatioCor.15 + InRatioCor.6 + KFASTScr + MDI.15 + MDI.6 + #> peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.15 + #> RHasSO.6 + RHealth + RMomAgeU + SAAmylase.15 + SAAmylase.6 + #> SmokTotl + state + SurpPreg + SWghtLB + TcBlac2 + WndNbrhood.6 #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 35 is: #> ESETA1.35 ~ B18Raw.15 + B18Raw.24 + B18Raw.6 + BioDadInHH2 + #> caregiv_health + CORTB.15 + CORTB.24 + CORTB.6 + DrnkFreq + #> EARS_TJo.24 + ESETA1.15 + ESETA1.24 + ESETA1.6 + gov_assist + #> HOMEETA1.15 + HOMEETA1.24 + HOMEETA1.6 + HomeOwnd + IBRAttn.15 + #> IBRAttn.24 + IBRAttn.6 + InRatioCor.15 + InRatioCor.24 + #> InRatioCor.6 + KFASTScr + LESMnNeg.24 + LESMnPos.24 + MDI.15 + #> MDI.6 + peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + #> RHasSO.15 + RHasSO.24 + RHasSO.6 + RHealth + RMomAgeU + SAAmylase.15 + #> SAAmylase.24 + SAAmylase.6 + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 + WndNbrhood.24 + WndNbrhood.6 #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 58 is: #> ESETA1.58 ~ B18Raw.15 + B18Raw.24 + B18Raw.6 + BioDadInHH2 + #> caregiv_health + CORTB.15 + CORTB.24 + CORTB.6 + DrnkFreq + #> EARS_TJo.24 + EARS_TJo.35 + ESETA1.15 + ESETA1.24 + ESETA1.35 + #> ESETA1.6 + fscore.35 + gov_assist + HOMEETA1.15 + HOMEETA1.24 + #> HOMEETA1.35 + HOMEETA1.6 + HomeOwnd + IBRAttn.15 + IBRAttn.24 + #> IBRAttn.6 + InRatioCor.15 + InRatioCor.24 + InRatioCor.35 + #> InRatioCor.6 + KFASTScr + LESMnNeg.24 + LESMnNeg.35 + 
LESMnPos.24 + #> LESMnPos.35 + MDI.15 + MDI.6 + peri_health + PmAge2 + PmBlac2 + #> PmEd2 + PmMrSt2 + RHasSO.15 + RHasSO.24 + RHasSO.35 + RHasSO.6 + #> RHealth + RMomAgeU + SAAmylase.15 + SAAmylase.24 + SAAmylase.6 + #> SmokTotl + state + StrDif_Tot.35 + SurpPreg + SWghtLB + TcBlac2 + #> WndNbrhood.24 + WndNbrhood.35 + WndNbrhood.6 #> "},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-1b--conduct-exploratory-pre-balance-assessment","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 1. Create Full Balancing Formulas & Conduct Pre-Balance Checking","what":"Step 1b. Conduct Exploratory Pre-Balance Assessment","title":"Workflow: Continuous Exposure","text":"next step examines initial imbalance, strongly exposure relates confounder time point, measured confounders prior weighting using assessBalance() function (type = “prebalance”). function draws calcBalStats() function (see Assessing Balance Time-Varying Exposure section accompanying manuscript). assessBalance() function outputs balance statistics (correlations continuous exposures standardized mean differences binary exposures) relating exposure time point confounders table well plots. function also provides summary balance statistics averaging across time points (imputed datasets supplied). required inputs using assessBalance() function conduct pre-balance testing : complete data (data frame, mids object, list imputed datasets dataframes wide format), exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), full formulas (see Step 1a), setting type = “prebalance”. optional inputs follows. user may specify balance_thresh, threshold(s) determining confounder balance, one two ways. First, can provide single number value (0-1) absolute value standardized balance statistic (either correlation continuous exposures standardized group mean difference binary exposures) exposure confounders confounders considered balanced considered imbalanced (default 0.1; Stuart, 2010). Second, users may make priori assertion confounders important others based theory existing research. case, can provide two numbers represent balance thresholds important less important confounders, respectively. user supplies two balance thresholds provided, must also supply list important confounders (time-varying: “variable.t”, time invariant: “variable”) imp_conf field. balance threshold specification kept consistent throughout use workflow. recommended, provide two balancing thresholds identify income parent education important confounders relation economic strain behavior problems. assessBalance() function saves following .csv .html files ‘balance/prebalance/’ folder: tables balance statistics confounders, tables balance statistics covariates imbalanced (respect respective balance thresholds), overall balance summary table (averaged across imputed datasets). Within ‘balance/prebalance/plots/’ folder, function outputs .jpeg files summary love plots depicting confounder balance exposure time point. function returns data frame (list) balance statistics, balance thresholds, binary balanced tag confounder relevant exposure time point. output shows initial imbalance confounders exposure tables plots. 55 confounders imbalanced (labeled red font love plots) respect economic strain exposure respective balance threshold. love plots depict standardized associations confounder exposure exposure time point, vertical red dashed lines indicating balance thresholds. 
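For intuition about what these balance statistics capture for a continuous exposure, the unweighted statistic is essentially a correlation between the exposure and each confounder. A toy version (not the package's exact computation; assumes the single wide data frame loaded above) might look like:

# Toy illustration of a pre-balance statistic for a continuous exposure:
# a simple correlation between a later exposure and one lagged confounder
cor(data$ESETA1.15, data$InRatioCor.6, use = "pairwise.complete.obs")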
.","code":"balance_thresh <- c(0.05, 0.1) imp_conf <- c(\"InRatioCor.6\", \"InRatioCor.15\", \"InRatioCor.24\", \"InRatioCor.35\", \"InRatioCor.58\", \"PmEd2\") type <- \"prebalance\" formulas <- full_formulas prebalance_stats <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, type = type, formulas = formulas, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point prior to weighting, using full formulas. #> As shown below, 55 out of 191 (29%) covariates across time points, corresponding to 18 out of 33 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.13 (range= -0.24-0.32): #> #> Table: Imbalanced covariates using no weights and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 7| 11| 18| #> | 15| 16| 12| 28| #> | 24| 28| 9| 37| #> | 35| 38| 11| 49| #> | 58| 47| 12| 59| #> #> #> USER ALERT: For exposure ESETA1 using the full formulas and no weights : #> The median absolute value relation between exposure and confounder is 0.06 (range = -0.24 -0.32). #> As shown below, the following 55 covariates across time points out of 191 total (28.8%) spanning 18 domains out of 33 (54.55%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.13 (range=-0.24-0.32) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:--------------|----------:|----------:|--------:| #> |1 |ESETA1 | 6| 0|caregiv_health | 0.1750682| 0.10| 0| #> |2 |ESETA1 | 6| 0|gov_assist | 0.3162628| 0.10| 0| #> |3 |ESETA1 | 6| 0|KFASTScr | -0.1741547| 0.10| 0| #> |6 |ESETA1 | 6| 0|PmEd2 | -0.2440498| 0.05| 0| #> |7 |ESETA1 | 6| 0|RMomAgeU | -0.1308954| 0.10| 0| #> |9 |ESETA1 | 6| 0|BioDadInHH2 | -0.1245621| 0.10| 0| #> |12 |ESETA1 | 6| 0|PmBlac2 | 0.1802582| 0.10| 0| #> |13 |ESETA1 | 6| 0|PmMrSt2 | -0.1017754| 0.10| 0| #> |16 |ESETA1 | 6| 0|state | 0.1448002| 0.10| 0| #> |17 |ESETA1 | 6| 0|SurpPreg | 0.1045888| 0.10| 0| #> |18 |ESETA1 | 6| 0|TcBlac2 | 0.2058312| 0.10| 0| #> |19 |ESETA1 | 15| 6|B18Raw.6 | 0.1439834| 0.10| 0| #> |20 |ESETA1 | 15| 0|caregiv_health | 0.1104491| 0.10| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.1860003| 0.10| 0| #> |23 |ESETA1 | 15| 0|gov_assist | 0.1581273| 0.10| 0| #> |24 |ESETA1 | 15| 6|HOMEETA1.6 | -0.1583774| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.2275248| 0.05| 0| #> |31 |ESETA1 | 15| 0|PmEd2 | -0.1311598| 0.05| 0| #> |35 |ESETA1 | 15| 6|WndNbrhood.6 | -0.1109797| 0.10| 0| #> |36 |ESETA1 | 15| 0|BioDadInHH2 | -0.1020164| 0.10| 0| #> |39 |ESETA1 | 15| 0|PmBlac2 | 0.1344400| 0.10| 0| #> |44 |ESETA1 | 15| 0|state | 0.1006198| 0.10| 0| #> |46 |ESETA1 | 15| 0|TcBlac2 | 0.1327607| 0.10| 0| #> |47 |ESETA1 | 24| 15|B18Raw.15 | 0.1421575| 0.10| 0| #> |48 |ESETA1 | 24| 6|B18Raw.6 | 0.1557763| 0.10| 0| #> |52 |ESETA1 | 24| 15|ESETA1.15 | 0.1655498| 0.10| 0| #> |53 |ESETA1 | 24| 6|ESETA1.6 | 0.1403452| 0.10| 0| #> |54 |ESETA1 | 24| 0|gov_assist | 0.1547692| 0.10| 0| #> |55 |ESETA1 | 24| 15|HOMEETA1.15 | -0.1085277| 0.10| 0| #> |59 |ESETA1 | 24| 15|InRatioCor.15 | -0.1195160| 0.05| 0| #> |60 |ESETA1 | 24| 6|InRatioCor.6 | -0.1351805| 0.05| 0| #> |66 |ESETA1 | 24| 0|PmEd2 | -0.0903395| 
0.05| 0| #> |84 |ESETA1 | 35| 15|B18Raw.15 | 0.1170191| 0.10| 0| #> |92 |ESETA1 | 35| 15|ESETA1.15 | 0.1513316| 0.10| 0| #> |93 |ESETA1 | 35| 24|ESETA1.24 | 0.1342892| 0.10| 0| #> |94 |ESETA1 | 35| 6|ESETA1.6 | 0.1168198| 0.10| 0| #> |95 |ESETA1 | 35| 0|gov_assist | 0.1260387| 0.10| 0| #> |97 |ESETA1 | 35| 24|HOMEETA1.24 | -0.1089467| 0.10| 0| #> |102 |ESETA1 | 35| 15|InRatioCor.15 | -0.1218204| 0.05| 0| #> |103 |ESETA1 | 35| 24|InRatioCor.24 | -0.1488017| 0.05| 0| #> |104 |ESETA1 | 35| 6|InRatioCor.6 | -0.1428961| 0.05| 0| #> |112 |ESETA1 | 35| 0|PmEd2 | -0.1065020| 0.05| 0| #> |127 |ESETA1 | 35| 6|RHasSO.6 | 0.1215992| 0.10| 0| #> |133 |ESETA1 | 58| 15|B18Raw.15 | 0.1032047| 0.10| 0| #> |144 |ESETA1 | 58| 35|ESETA1.35 | 0.1203630| 0.10| 0| #> |146 |ESETA1 | 58| 35|fscore.35 | -0.1074297| 0.10| 0| #> |147 |ESETA1 | 58| 0|gov_assist | 0.1254127| 0.10| 0| #> |150 |ESETA1 | 58| 35|HOMEETA1.35 | -0.1017226| 0.10| 0| #> |151 |ESETA1 | 58| 6|HOMEETA1.6 | -0.1287940| 0.10| 0| #> |155 |ESETA1 | 58| 15|InRatioCor.15 | -0.1439390| 0.05| 0| #> |156 |ESETA1 | 58| 24|InRatioCor.24 | -0.1611115| 0.05| 0| #> |157 |ESETA1 | 58| 35|InRatioCor.35 | -0.1323394| 0.05| 0| #> |158 |ESETA1 | 58| 6|InRatioCor.6 | -0.1575428| 0.05| 0| #> |168 |ESETA1 | 58| 0|PmEd2 | -0.1407238| 0.05| 0| #> |191 |ESETA1 | 58| 0|TcBlac2 | 0.1000105| 0.10| 0|"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-2--create-simplified-balancing-formulas-determine-optimal-weighting-method","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment","what":"Step 2. Create Simplified Balancing Formulas & Determine Optimal Weighting Method","title":"Workflow: Continuous Exposure","text":"goal second step create shortened, parsimonious balancing formulas determining optimal IPTW weighting method successfully reduces imbalance.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"a--create-simplified-balancing-formulas","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 2. Create Simplified Balancing Formulas & Determine Optimal Weighting Method","what":"2a. Create Simplified Balancing Formulas","title":"Workflow: Continuous Exposure","text":"First, create shorter, parsimonious balancing formulas relating exposure confounders time point using createFormulas() function (type = ”short”). exposure time point, formulas contain time invariant confounders well time-varying confounders t-1 lag. logic balancing confounders recent prior time point (t-1 ) may achieve balance levels distal time points, given stability many confounders time. Importantly, empirically assess relax assumption needed subsequent steps (Steps 3a-b). See Step 1a instructions include confounder interactions. required input create shortened balancing formulas using createFormulas() function : exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), list time-varying confounders (e.g., “variable.time”), list time invariant confounders, setting type = “short”. addition optional input outlined Step 1a, user also option specify keep_conf, list time-varying confounders (e.g., “variable.t”) always retain lagged confounders shortened formulas. user may use argument retain specific time-varying confounders otherwise excluded step occur lags greater t-1 formula. , choose specify confounders always retain step. 
createFormulas() function saves .csv .rds files containing balancing formulas exposure time point (e.g., see ) specified type (case, “short”) ‘formulas/short/’ folder. function returns list balancing formulas labeled type, exposure, outcome, exposure time point. , inspect shortened balancing formula exposure time point. formulas considerably shorter full formulas. instance, 58-month exposure time point, formula contains time invariant confounders time-varying confounders 35-month time point.","code":"keep_conf <- \"InRatioCor.6\" keep_conf <- NULL type <- \"short\" short_formulas <- createFormulas(exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, type = type, ti_confounders = ti_confounders, tv_confounders = tv_confounders, concur_conf = concur_conf, keep_conf = keep_conf, custom = custom, home_dir = home_dir, verbose = verbose, save.out = save.out) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 6 is: #> ESETA1.6 ~ BioDadInHH2 + caregiv_health + DrnkFreq + gov_assist + #> HomeOwnd + KFASTScr + peri_health + PmAge2 + PmBlac2 + PmEd2 + #> PmMrSt2 + RHealth + RMomAgeU + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 15 is: #> ESETA1.15 ~ B18Raw.6 + BioDadInHH2 + caregiv_health + CORTB.6 + #> DrnkFreq + ESETA1.6 + gov_assist + HOMEETA1.6 + HomeOwnd + #> IBRAttn.6 + InRatioCor.6 + KFASTScr + MDI.6 + peri_health + #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.6 + RHealth + #> RMomAgeU + SAAmylase.6 + SmokTotl + state + SurpPreg + SWghtLB + #> TcBlac2 + WndNbrhood.6 #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 24 is: #> ESETA1.24 ~ B18Raw.15 + BioDadInHH2 + caregiv_health + CORTB.15 + #> DrnkFreq + ESETA1.15 + gov_assist + HOMEETA1.15 + HomeOwnd + #> IBRAttn.15 + InRatioCor.15 + KFASTScr + MDI.15 + peri_health + #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.15 + RHealth + #> RMomAgeU + SAAmylase.15 + SmokTotl + state + SurpPreg + SWghtLB + #> TcBlac2 #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 35 is: #> ESETA1.35 ~ B18Raw.24 + BioDadInHH2 + caregiv_health + CORTB.24 + #> DrnkFreq + EARS_TJo.24 + ESETA1.24 + gov_assist + HOMEETA1.24 + #> HomeOwnd + IBRAttn.24 + InRatioCor.24 + KFASTScr + LESMnNeg.24 + #> LESMnPos.24 + peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + #> RHasSO.24 + RHealth + RMomAgeU + SAAmylase.24 + SmokTotl + #> state + SurpPreg + SWghtLB + TcBlac2 + WndNbrhood.24 #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 58 is: #> ESETA1.58 ~ BioDadInHH2 + caregiv_health + DrnkFreq + EARS_TJo.35 + #> ESETA1.35 + fscore.35 + gov_assist + HOMEETA1.35 + HomeOwnd + #> InRatioCor.35 + KFASTScr + LESMnNeg.35 + LESMnPos.35 + peri_health + #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.35 + RHealth + #> RMomAgeU + SmokTotl + state + StrDif_Tot.35 + SurpPreg + #> SWghtLB + 
TcBlac2 + WndNbrhood.35 #> "},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"b--create-iptw-balancing-weights-using-multiple-weighting-methods","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 2. Create Simplified Balancing Formulas & Determine Optimal Weighting Method","what":"2b. Create IPTW Balancing Weights using Multiple Weighting Methods","title":"Workflow: Continuous Exposure","text":"created shorter, simplified balancing formulas, now create first round IPTW balancing weights (Thoemmes & Ong, 2016) using createWeights() function, shortened balancing formulas, available weighting methods. function calls weightitMSM() function WeightIt package (Greifer, 2023) uses time-specific formulas create weights time point automatically multiplying together create one weight per person. Weights stabilized, recommended (Cole & Hernan, 2008; Thoemmes & Ong, 2016), distributions saved inspection. required inputs using createWeights() function create initial around IPTW balancing weights : complete data (data frame, mids object, list imputed datasets dataframes wide format), exposure (e.g., “variable”), outcome (e.g., “variable.time”), short formulas (see Step 2a). optional inputs follows. method, provide one following methods calculating balancing weights using weightitMSM() methods validated longitudinal exposures: “cbps” (Covariate Balancing Propensity Score weighting; default), “gbm” (generalized boosted model), “glm” (generalized linear model), “super” (SuperLearner via SuperLearner package; Polley et al., 2013). information can found WeightIt documentation. createWeights() function can also take number additional arguments passed weightitMSM () function (e.g., ‘criterion’, distribution’, ‘SL.library’). user selects SuperLearner (“super”) method, default super learner library (‘SL.library’) xx alternative library can entered input createWeights function. binary exposures, “cbps” method allows specify estimand either ATE, ATT, ATC. “glm”, “super”, “bart” can specify ATE, ATT, ATC, ATO, ATM, ATOS. “gbm”, can specify ATE, ATT, ATC, ATO, ATM. default estimand binary exposures ATE. advise interested user review WeightIt documentation information additional optional arguments available weighting methods. user can also specify read_in_from_file = TRUEif user previously created weights specific data, formula, weight type using function wishes read local file instead recreating . createWeights() function automatically conducts basic checks saved weights match data type, weights method, number formulas provided. user responsible making sure weights created appropriately. createWeights() function saves .rds file weights ‘weights’ folder, histogram weights distribution ‘weights/histograms/’ folder, .csv file data weights appended ‘weights/values/’ folder. function returns list weights objects form WeightItMSM output single nested list (labeled “0” data data frame format) nested lists imputed dataset (data imputed). , create IPTW weights using default “CBPS” method. shown , distribution heavy right tail (typical real-world data), median value 1.21. right tail distribution represents individuals experienced statistically unexpected levels exposure given levels confounders. create IPTW balancing weights using available methods. 
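Once the weights below have been created, their distribution can also be inspected directly. This sketch assumes the single-data-frame case described above, in which createWeights() returns a one-element list holding weightitMSM output with the weights in its weights component:

# Sketch: eyeball the distribution of the CBPS weights created below
# (the list element position and $weights slot are assumptions based on the
# description of the createWeights() return value above)
w <- weights.cbps[[1]]$weights
summary(w)               # median, spread, and extreme weights
hist(w, breaks = 50)     # a heavy right tail is typical of real-world data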
shown , “glm” method produces similar distribution weights.","code":"formulas <- short_formulas method <- \"cbps\" weights.cbps <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> For the cbps weighting method, the median weight value is 1.21 (SD = 3.68; range = 0-60). method <- \"glm\" weights.glm <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> For the glm weighting method, the median weight value is 0.9 (SD = 1.73; range = 0.03-26). method <- \"gbm\" weights.gbm <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> For the gbm weighting method, the median weight value is 0.48 (SD = 1.02; range = 0-17). method <- \"bart\" weights.bart <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> For the bart weighting method, the median weight value is 0.71 (SD = 1.38; range = 0.03-24). method <- \"super\" weights.super <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Loading required package: nnls #> For the super weighting method, the median weight value is 0.99 (SD = 2.21; range = 0.03-40)."},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"c--assess-all-weighting-methods-to-determine-optimal-method","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 2. Create Simplified Balancing Formulas & Determine Optimal Weighting Method","what":"2c. Assess All Weighting Methods to Determine Optimal Method","title":"Workflow: Continuous Exposure","text":"Next, evaluate well weights created using different weighting methods reduced imbalance confounders provided short balancing formula, using assessBalance() function (type = “weighted”). function calls calcBalStats() function using short formulas specifies balance statistics calculated using IPTW weights supplied. assessBalance() function outputs balance statistics (correlations continuous exposures standardized mean differences binary exposures) relating exposure time point confounders table well plots. function also provides summary balance statistics averaging across time points (imputed datasets supplied). required inputs using assessBalance() function assess balance first round IPTW weights : complete data (data frame, mids object, list imputed datasets dataframes wide format), exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), providing short formulas (see Step 2a), setting type = “weighted”, providing weights just created. optional inputs described Step 1b. 
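After running the calls below, a quick way to list only the covariates that remain imbalanced is to filter the returned balance statistics on the binary balanced column. This is a sketch that assumes the data-frame return described above, with the column names shown in the printed tables that follow:

# Sketch: covariates still imbalanced after CBPS weighting
# (assumes balance_stats.cbps from the call below is a data frame with the
# `balanced`, `covariate`, `exp_time`, and `avg_bal` columns shown later)
subset(balance_stats.cbps, balanced == 0)[, c("covariate", "exp_time", "avg_bal")]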
assessBalance() function saves following .csv .html files ‘balance/weighted/’ folder: tables balance statistics confounders, tables balance statistics covariates imbalanced, overall balance summary table (averaged across imputed datasets). Within ‘balance/weighted/plots/’ folder, function outputs .jpeg files summary love plots depicting confounder balance exposure time point. function returns data frame (list) balance statistics, balance thresholds, binary balanced tag (1 = balanced, 0 = imbalanced) confounder relevant exposure time point. first assess balance “CBPS” weighting method. shown , “CBPS” weighting method short formulas, median absolute value correlation confounder exposure 0.03 8 confounders remain imbalanced. GLM weighting method, median absolute value correlation exposure confounders 0.02, 6 confounders remaining imbalanced. GBM weighting method, absolute median correlation exposure confounders 0.02, 9 confounders remaining imbalanced. BART weighting method, median absolute value correlation exposure confounder 0.02, 11 confounders remaining imbalanced. SuperLearner weighting method, median absolute value correlation exposure confounder 0.02, 5 confounders remaining imbalanced. optimal weighting method dataset method yields best confounder balance. iterations, identify best performing weighting method reduces imbalance exposure confounder (indicated lowest absolute value median correlation) fewest number confounders left imbalanced. example, identify SuperLearner optimal weighting method.","code":"type <- \"weighted\" formulas <- short_formulas weights <- weights.cbps balance_stats.cbps <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> As shown below, 5 out of 131 (4%) covariates across time points, corresponding to 5 out of 32 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.1 (range= -0.1-0.14): #> #> Table: Imbalanced covariates using cbps and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 17| 1| 18| #> | 15| 27| 1| 28| #> | 24| 27| 0| 27| #> | 35| 28| 2| 30| #> | 58| 27| 1| 28| #> #> #> USER ALERT: For exposure ESETA1 using the short formulas and cbps : #> The median absolute value relation between exposure and confounder is 0.03 (range = -0.1 -0.14). 
#> As shown below, the following 5 covariates across time points out of 131 total (3.82%) spanning 5 domains out of 33 (15.15%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.1 (range=-0.1-0.14) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |8 |ESETA1 | 6| 0|SWghtLB | -0.1039386| 0.10| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.1272972| 0.10| 0| #> |82 |ESETA1 | 35| 24|InRatioCor.24 | -0.0729568| 0.05| 0| #> |88 |ESETA1 | 35| 0|PmEd2 | -0.0686910| 0.05| 0| #> |120 |ESETA1 | 58| 35|WndNbrhood.35 | 0.1433257| 0.10| 0| weights <- weights.glm balance_stats.glm <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> As shown below, 6 out of 131 (5%) covariates across time points, corresponding to 3 out of 32 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.1 (range= -0.07-0.21): #> #> Table: Imbalanced covariates using glm and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 18| 0| 18| #> | 15| 25| 3| 28| #> | 24| 26| 1| 27| #> | 35| 28| 2| 30| #> | 58| 28| 0| 28| #> #> #> USER ALERT: For exposure ESETA1 using the short formulas and glm : #> The median absolute value relation between exposure and confounder is 0.02 (range = -0.07 -0.21). #> As shown below, the following 6 covariates across time points out of 131 total (4.58%) spanning 3 domains out of 33 (9.09%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.1 (range=-0.07-0.21) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |19 |ESETA1 | 15| 6|B18Raw.6 | 0.1072812| 0.10| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.2062601| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0620487| 0.05| 0| #> |50 |ESETA1 | 24| 15|ESETA1.15 | 0.1621505| 0.10| 0| #> |78 |ESETA1 | 35| 24|ESETA1.24 | 0.1025166| 0.10| 0| #> |82 |ESETA1 | 35| 24|InRatioCor.24 | -0.0689534| 0.05| 0| weights <- weights.gbm balance_stats.gbm <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. 
#> As shown below, 5 out of 131 (4%) covariates across time points, corresponding to 4 out of 32 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.1 (range= -0.1-0.19): #> #> Table: Imbalanced covariates using gbm and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 18| 0| 18| #> | 15| 27| 1| 28| #> | 24| 23| 4| 27| #> | 35| 30| 0| 30| #> | 58| 28| 0| 28| #> #> #> USER ALERT: For exposure ESETA1 using the short formulas and gbm : #> The median absolute value relation between exposure and confounder is 0.02 (range = -0.1 -0.19). #> As shown below, the following 5 covariates across time points out of 131 total (3.82%) spanning 4 domains out of 33 (12.12%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.1 (range=-0.1-0.19) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.1898210| 0.10| 0| #> |50 |ESETA1 | 24| 15|ESETA1.15 | 0.1510709| 0.10| 0| #> |52 |ESETA1 | 24| 15|HOMEETA1.15 | -0.1037627| 0.10| 0| #> |54 |ESETA1 | 24| 15|InRatioCor.15 | -0.0891084| 0.05| 0| #> |59 |ESETA1 | 24| 0|PmEd2 | -0.0530682| 0.05| 0| weights <- weights.bart balance_stats.bart <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> As shown below, 9 out of 131 (7%) covariates across time points, corresponding to 3 out of 32 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.11 (range= -0.11-0.22): #> #> Table: Imbalanced covariates using bart and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 18| 0| 18| #> | 15| 25| 3| 28| #> | 24| 25| 2| 27| #> | 35| 28| 2| 30| #> | 58| 26| 2| 28| #> #> #> USER ALERT: For exposure ESETA1 using the short formulas and bart : #> The median absolute value relation between exposure and confounder is 0.02 (range = -0.11 -0.22). 
#> As shown below, the following 9 covariates across time points out of 131 total (6.87%) spanning 3 domains out of 33 (9.09%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.11 (range=-0.11-0.22) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |19 |ESETA1 | 15| 6|B18Raw.6 | 0.1255524| 0.10| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.2208960| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0968499| 0.05| 0| #> |50 |ESETA1 | 24| 15|ESETA1.15 | 0.1825734| 0.10| 0| #> |54 |ESETA1 | 24| 15|InRatioCor.15 | -0.1122523| 0.05| 0| #> |78 |ESETA1 | 35| 24|ESETA1.24 | 0.1263206| 0.10| 0| #> |82 |ESETA1 | 35| 24|InRatioCor.24 | -0.0684189| 0.05| 0| #> |106 |ESETA1 | 58| 35|ESETA1.35 | 0.1078711| 0.10| 0| #> |110 |ESETA1 | 58| 35|InRatioCor.35 | -0.0513619| 0.05| 0| weights <- weights.super balance_stats.super <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> As shown below, 5 out of 131 (4%) covariates across time points, corresponding to 3 out of 32 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.11 (range= -0.07-0.23): #> #> Table: Imbalanced covariates using super and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 18| 0| 18| #> | 15| 25| 3| 28| #> | 24| 26| 1| 27| #> | 35| 29| 1| 30| #> | 58| 28| 0| 28| #> #> #> USER ALERT: For exposure ESETA1 using the short formulas and super : #> The median absolute value relation between exposure and confounder is 0.02 (range = -0.08 -0.23). 
#> As shown below, the following 5 covariates across time points out of 131 total (3.82%) spanning 3 domains out of 33 (9.09%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.11 (range=-0.07-0.23) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:------------|----------:|----------:|--------:| #> |19 |ESETA1 | 15| 6|B18Raw.6 | 0.1007296| 0.10| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.2280065| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0674695| 0.05| 0| #> |50 |ESETA1 | 24| 15|ESETA1.15 | 0.1599982| 0.10| 0| #> |78 |ESETA1 | 35| 24|ESETA1.24 | 0.1056005| 0.10| 0|"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-3-create-updated-formulas-re-specify-weights-using-optimal-weighting-method","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment","what":"Step 3: Create Updated Formulas & Re-Specify Weights Using Optimal Weighting Method","title":"Workflow: Continuous Exposure","text":"goal next step assess best-performing weights created shortened balancing formulas (containing time-varying confounders t-1) relative full balancing formulas, add shortened formulas time-varying confounders lags > t-1 successfully balanced, create final round weights.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-3a--assess-balance-with-full-balancing-formulas","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 3: Create Updated Formulas & Re-Specify Weights Using Optimal Weighting Method","what":"Step 3a. Assess balance with full balancing formulas","title":"Workflow: Continuous Exposure","text":"next assess whether weights created previous step best-performing weights method (.e., using SuperLearner method) using simplified balancing formulas also achieve balance full formulas. , revisit assumption balancing proximal time-varying confounders (t-1) confers balance confounders distal prior time points (t-1+). assessing time point well weights just created using short formulas successfully balance confounders (including time-varying confounders time points prior) original, full formulas. use assessBalance() function (type = “weighted”) full balancing formulas. required inputs using assessBalance() function assess best weights achieve balance full formulas : complete data (data frame, mids object, list imputed datasets dataframes wide format), exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), providing full formulas (see Step 1a), setting type = “weighted”, providing best weights (see Step 2c). optional inputs detailed Step 1b. assessBalance() function saves following .csv .html files ‘balance/weighted’ folder: tables balance statistics confounders, tables balance statistics covariates imbalanced, overall balance summary table (averaged across imputed datasets). Within ‘balance/type/plots’ folder, function outputs .jpeg files summary love plots depicting confounder balance exposure time point. function returns data frame (list) balance statistics, balance thresholds, binary balanced tag confounder relevant exposure time point. , assess weights created using SuperLearner method relative full balancing formulas. 
shown , using SuperLearner weighting method full formulas, find median absolute value correlation exposure confounder 0.03 total 13 confounders remaining imbalanced.","code":"type <- \"weighted\" formulas <- full_formulas weights <- weights.super balance_stats <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using full formulas. #> As shown below, 13 out of 191 (7%) covariates across time points, corresponding to 4 out of 33 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.11 (range= -0.1-0.23): #> #> Table: Imbalanced covariates using super and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 18| 0| 18| #> | 15| 25| 3| 28| #> | 24| 34| 3| 37| #> | 35| 44| 5| 49| #> | 58| 57| 2| 59| #> #> #> USER ALERT: For exposure ESETA1 using the full formulas and super : #> The median absolute value relation between exposure and confounder is 0.03 (range = -0.1 -0.23). #> As shown below, the following 13 covariates across time points out of 191 total (6.81%) spanning 4 domains out of 33 (12.12%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.11 (range=-0.1-0.23) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |19 |ESETA1 | 15| 6|B18Raw.6 | 0.1007296| 0.10| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.2280065| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0674695| 0.05| 0| #> |52 |ESETA1 | 24| 15|ESETA1.15 | 0.1599982| 0.10| 0| #> |53 |ESETA1 | 24| 6|ESETA1.6 | 0.1995292| 0.10| 0| #> |60 |ESETA1 | 24| 6|InRatioCor.6 | -0.0672391| 0.05| 0| #> |92 |ESETA1 | 35| 15|ESETA1.15 | 0.2130827| 0.10| 0| #> |93 |ESETA1 | 35| 24|ESETA1.24 | 0.1056005| 0.10| 0| #> |94 |ESETA1 | 35| 6|ESETA1.6 | 0.1770972| 0.10| 0| #> |101 |ESETA1 | 35| 6|IBRAttn.6 | -0.1028477| 0.10| 0| #> |104 |ESETA1 | 35| 6|InRatioCor.6 | -0.0775183| 0.05| 0| #> |142 |ESETA1 | 58| 15|ESETA1.15 | 0.1059600| 0.10| 0| #> |155 |ESETA1 | 58| 15|InRatioCor.15 | -0.0567426| 0.05| 0|"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-3b--update-simplified-formulas","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 3: Create Updated Formulas & Re-Specify Weights Using Optimal Weighting Method","what":"Step 3b. Update simplified formulas","title":"Workflow: Continuous Exposure","text":"Subsequently, update shortened formulas include time-varying confounders (t-1 +) successfully balanced full formulas, hown . , create final round balancing formulas using createFormulas() function (setting type = “update\" providing balance statistics bal_stats field). createFormulas() function draws user-provided balance statistics automatically identify add formulas exposure time point time-varying confounders lags greater 1 remain imbalanced weighting. function displays balancing formula console message user time-varying confounders added. 
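The rule that drives this updating can be illustrated with a small, purely hypothetical sketch (not devMSMs internals): starting from an imbalanced-covariates table like those shown above, the covariates added for a given exposure time point are the imbalanced time-varying confounders measured earlier than the t-1 time point.
# toy imbalanced-covariates table for exposure time point 35 (t-1 here is 24)
bal <- data.frame(exp_time   = c(35, 35, 35, 35, 35),
                  covar_time = c(24, 15, 6, 6, 0),
                  covariate  = c("ESETA1.24", "ESETA1.15", "ESETA1.6", "IBRAttn.6", "PmEd2"),
                  balanced   = c(0, 0, 0, 0, 0))
t_pt  <- 35
t_lag <- 24   # immediately preceding exposure time point, already in the short formula
# time-varying confounders (covar_time > 0) at lags greater than t-1 that remain imbalanced:
bal$covariate[bal$exp_time == t_pt & bal$balanced == 0 &
              bal$covar_time > 0 & bal$covar_time < t_lag]
#> [1] "ESETA1.15" "ESETA1.6"  "IBRAttn.6"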
required input update shortened balancing formulas using createFormulas() function : exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), list time-varying confounders (e.g., “variable.time”), list time invariant confounders (e.g., “variable”), setting type = “update”, providing bal_stats balance statistics just created Step 3a. optional input detailed Step 1a. createFormulas() function saves .csv .rds files containing balancing formulas exposure time point specified type ‘formulas/update/’ folder. function returns list balancing formulas labeled type, exposure, outcome, exposure time point. shown , several imbalanced confounders lags greater t-1 added short formulas exposure time points 35-58. instance, 35-month time point, economic strain 6 15 months well attention problems income 6 months added balancing formula.","code":"type <- \"update\" bal_stats <- balance_stats updated_formulas <- createFormulas(exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, #required type = type, ti_confounders = ti_confounders, tv_confounders = tv_confounders, bal_stats = bal_stats, #required concur_conf = concur_conf, keep_conf = keep_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> The update formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 6 is: #> ESETA1.6 ~ BioDadInHH2 + caregiv_health + DrnkFreq + gov_assist + #> HomeOwnd + KFASTScr + peri_health + PmAge2 + PmBlac2 + PmEd2 + #> PmMrSt2 + RHealth + RMomAgeU + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For ESETA1 at exposure time point 15 no time-varying confounders at additional lags were added. 
#> #> The update formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 15 is: #> ESETA1.15 ~ B18Raw.6 + BioDadInHH2 + caregiv_health + CORTB.6 + #> DrnkFreq + ESETA1.6 + gov_assist + HOMEETA1.6 + HomeOwnd + #> IBRAttn.6 + InRatioCor.6 + KFASTScr + MDI.6 + peri_health + #> PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.6 + RHealth + #> RMomAgeU + SAAmylase.6 + SmokTotl + state + SurpPreg + SWghtLB + #> TcBlac2 + WndNbrhood.6 #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For ESETA1 at exposure time point 24 the following covariate(s) will be added to the short balancing formula: #> ESETA1.6, InRatioCor.6 #> #> The update formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 24 is: #> ESETA1.24 ~ B18Raw.15 + BioDadInHH2 + caregiv_health + CORTB.15 + #> DrnkFreq + ESETA1.15 + ESETA1.6 + gov_assist + HOMEETA1.15 + #> HomeOwnd + IBRAttn.15 + InRatioCor.15 + InRatioCor.6 + KFASTScr + #> MDI.15 + peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + #> RHasSO.15 + RHealth + RMomAgeU + SAAmylase.15 + SmokTotl + #> state + SurpPreg + SWghtLB + TcBlac2 #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For ESETA1 at exposure time point 35 the following covariate(s) will be added to the short balancing formula: #> ESETA1.15, ESETA1.6, IBRAttn.6, InRatioCor.6 #> #> The update formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 35 is: #> ESETA1.35 ~ B18Raw.24 + BioDadInHH2 + caregiv_health + CORTB.24 + #> DrnkFreq + EARS_TJo.24 + ESETA1.15 + ESETA1.24 + ESETA1.6 + #> gov_assist + HOMEETA1.24 + HomeOwnd + IBRAttn.24 + IBRAttn.6 + #> InRatioCor.24 + InRatioCor.6 + KFASTScr + LESMnNeg.24 + LESMnPos.24 + #> peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + RHasSO.24 + #> RHealth + RMomAgeU + SAAmylase.24 + SmokTotl + state + SurpPreg + #> SWghtLB + TcBlac2 + WndNbrhood.24 #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For ESETA1 at exposure time point 58 the following covariate(s) will be added to the short balancing formula: #> ESETA1.15, InRatioCor.15 #> #> The update formula for ESETA1 - StrDif_Tot.58 at ESETA1 time point 58 is: #> ESETA1.58 ~ BioDadInHH2 + caregiv_health + DrnkFreq + EARS_TJo.35 + #> ESETA1.15 + ESETA1.35 + fscore.35 + gov_assist + HOMEETA1.35 + #> HomeOwnd + InRatioCor.15 + InRatioCor.35 + KFASTScr + LESMnNeg.35 + #> LESMnPos.35 + peri_health + PmAge2 + PmBlac2 + PmEd2 + PmMrSt2 + #> RHasSO.35 + RHealth + RMomAgeU + SmokTotl + state + StrDif_Tot.35 + #> SurpPreg + SWghtLB + TcBlac2 + WndNbrhood.35 #> "},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-3c--create-final-balancing-weights","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 3: Create Updated Formulas & Re-Specify Weights Using Optimal Weighting Method","what":"Step 3c. Create final balancing weights","title":"Workflow: Continuous Exposure","text":"Next, create final set balancing weights using optimal weighting method identified Step 2c final, updated simplified formulas previous step using createWeights() function (method = “super’), SuperLearner method optimal weighting method identified Step 2c. 
This function calls the weightitMSM() function from the WeightIt package (Greifer, 2023), which uses the time-specific formulas to create weights at each time point before automatically multiplying them together to create one weight per person. Weights are stabilized, as is recommended (Cole & Hernan, 2008; Thoemmes & Ong, 2016), and their distributions are saved in the home directory for inspection. The required inputs for using the createWeights() function to create the final round of IPTW balancing weights using the updated short balancing formulas are: complete data (data frame, mids object, or list of imputed datasets as dataframes in wide format), exposure (variable at all time points), outcome (e.g., “variable.time”), the best-performing weights method, and the updated formulas (see Step 3a). The optional inputs to the createWeights() function are listed in Step 2b. The createWeights() function saves an .rds file of the weights in the ‘weights/’ folder, a histogram of the weights distribution in the ‘weights/histograms/’ folder, and a .csv file of the data with weights appended in the ‘weights/values/’ folder. The function returns a list of weights objects in the form of weightitMSM output: either a single nested list (labeled “0” if the data are in data frame format) or nested lists for each imputed dataset (if the data are imputed). Below, we use the updated formulas and the SuperLearner weighting method to create a new round of IPTW balancing weights. As shown below, these weights have a median value of 1 and a heavy right tail.","code":"formulas <- updated_formulas method <- \"super\" #all inputs final_weights <- createWeights(data = data, exposure = exposure, outcome = outcome, formulas = formulas, #required method = method, read_in_from_file = FALSE, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> For the super weighting method, the median weight value is 1 (SD = 1.02; range = 0.03-11)."},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"d--trim-final-balancing-weights","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment > Step 3: Create Updated Formulas & Re-Specify Weights Using Optimal Weighting Method","what":"3d. Trim final balancing weights","title":"Workflow: Continuous Exposure","text":"The next step is to trim (i.e., winsorize) this final set of weights to eliminate the heavy right tail of its distribution, using the trimWeights() function. This function draws on the WeightIt package (Greifer, 2023) and plots and summarizes the trimmed weights. The function outputs a list of trimmed weights: either a single nested list (labeled “0” if the data are in data frame format) or nested lists for each imputed dataset (if the data are imputed). The required inputs to the trimWeights() function are: exposure (variable at all time points), outcome (e.g., “variable.time”), and the final weights just created. The optional input allows the user to specify a quantile value (0-1; default is 0.95) above which weights will be replaced with the weight value at that quantile, to reduce the heavy right tail. Below, we use the default 95th percentile for trimming the weights. The trimWeights() function saves an .rds file of the trimmed weights in the ‘weights/values/’ folder and a histogram of the trimmed weights in the ‘weights/histograms/’ folder. The function returns a list of weights objects, containing the trimmed weights, in the form of weightitMSM output. As shown below, the weights still have a median value of 1 but a much shorter right tail. We then create trimmed weights using two additional quantile values at approximately +/- 0.03 of the previously chosen quantile value, in order to conduct the recommended sensitivity analyses at subsequent steps. We first create weights at the 92nd quantile value, and then at the 98th quantile value. We find comparable descriptive statistics for all sets of weights, with the upper range value varying by quantile cutoff.
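Conceptually, trimming at a given quantile winsorizes the weights: values above the cutoff are replaced with the weight value at that cutoff. A minimal base-R sketch of this idea, using a made-up right-skewed weights vector rather than the actual devMSMs weights objects (trimWeights() handles the weightitMSM objects and any imputed datasets for you), is:
set.seed(1234)
w <- rexp(1292, rate = 1)            # hypothetical right-skewed weight distribution
cutoff <- quantile(w, probs = 0.95)  # weight value at the 95th percentile
w_trim <- pmin(w, cutoff)            # weights above the cutoff are set to the cutoff value
summary(w_trim)                      # median essentially unchanged; right tail shortened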
We assess the consequences of these different ranges at subsequent steps.","code":"quantile <- 0.95 weights <- final_weights trim_weights <- trimWeights(exposure = exposure, outcome = outcome, weights = weights, #required quantile = quantile, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Trimming weights to 95%. #> #> For the ESETA1-StrDif_Tot.58 relation, following trimming at the 0.95 quantile, the median weight value is 1 (SD= 0.65; range= 0.03-3). quantile <- 0.92 trim_weights.s1 <- trimWeights(exposure = exposure, outcome = outcome, weights = weights, #required quantile = quantile, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Trimming weights to 92%. #> #> For the ESETA1-StrDif_Tot.58 relation, following trimming at the 0.92 quantile, the median weight value is 1 (SD= 0.58; range= 0.03-2). quantile <- 0.98 trim_weights.s2 <- trimWeights(exposure = exposure, outcome = outcome, weights = weights, #required quantile = quantile, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Trimming weights to 98%. #> #> For the ESETA1-StrDif_Tot.58 relation, following trimming at the 0.98 quantile, the median weight value is 1 (SD= 0.77; range= 0.03-4)."},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-4-conduct-final-balance-assessment","dir":"Articles","previous_headings":"Phase 1: Confounder Adjustment","what":"Step 4: Conduct Final Balance Assessment","title":"Workflow: Continuous Exposure","text":"Having created and trimmed the final set of IPTW balancing weights, the next step is to conduct a final evaluation of how well they reduce imbalance across all possible confounders. We assess the performance of the final weights from the previous step using the assessBalance() function (type = “weighted”) with the full formulas. The required inputs for using the assessBalance() function to assess how well the final, trimmed weights achieve balance with the full formulas are: complete data (data frame, mids object, or list of imputed datasets as dataframes in wide format), exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), the full formulas (see Step 1a), setting type = “weighted”, and the final, trimmed weights (see Step 3b). The optional inputs to the assessBalance() function are detailed in Step 1b. The assessBalance() function saves the following .csv and .html files in the ‘balance/weighted/’ folder: tables of balance statistics for all confounders, tables of balance statistics for only the covariates that remain imbalanced, and an overall balance summary table (averaged across imputed datasets where applicable). Within the ‘balance/weighted/plots/’ folder, the function outputs .jpeg files of summary love plots depicting confounder balance at each exposure time point. The function returns a data frame (or list) of balance statistics, balance thresholds, and a binary balanced tag for each confounder at each relevant exposure time point. Below, we assess the final weights using the full formulas. From this assessment, we find that confounders from 3 domains (economic strain, income-to-needs ratio, and parental education, the ones held to a more stringent balancing threshold due to their theoretical importance as confounders) remain imbalanced at several exposure time points, with a median absolute value correlation of 0.07 (range = -0.09-0.16). The largest remaining correlations between exposure and confounder are between economic strain and lagged levels of economic strain, suggesting high stability of this construct over time. At the outcome modeling step (Step 5), the user has the option to include confounders that are time invariant or measured at the first time point (6 months) and that remain imbalanced following this final balance assessment as covariates. We inspect the table manually, list the imbalanced confounders, and assign them to covariates.
Subsequently, also assess balance weights trimmed two additional quantile values assess whether final balance assessment sensitive trim value. Importantly, save.= TRUE, running analyses overwrite output main history comparison main output rename re-located new folder first. Additionally, running first sensitivity check, output check overwritten second sensitivity check renamed re-located new folder first. first assess balance weights trimmed 93rd quantile value. , find constructs remain imbalanced similar degree. next assess balance weights trimmed 98th quantile value. , also find similar level imbalance constructs.","code":"type <- \"weighted\" formulas <- full_formulas weights <- trim_weights final_balance_stats <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using full formulas. #> As shown below, 19 out of 191 (10%) covariates across time points, corresponding to 3 out of 33 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.07 (range= -0.09-0.16): #> #> Table: Imbalanced covariates using super and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 17| 1| 18| #> | 15| 25| 3| 28| #> | 24| 32| 5| 37| #> | 35| 42| 7| 49| #> | 58| 56| 3| 59| #> #> #> USER ALERT: For exposure ESETA1 using the full formulas and super : #> The median absolute value relation between exposure and confounder is 0.02 (range = -0.09 -0.16). 
#> As shown below, the following 19 covariates across time points out of 191 total (9.95%) spanning 3 domains out of 33 (9.09%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.07 (range=-0.09-0.16) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |6 |ESETA1 | 6| 0|PmEd2 | -0.0675636| 0.05| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.1582999| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0791760| 0.05| 0| #> |31 |ESETA1 | 15| 0|PmEd2 | -0.0668465| 0.05| 0| #> |52 |ESETA1 | 24| 15|ESETA1.15 | 0.1544269| 0.10| 0| #> |53 |ESETA1 | 24| 6|ESETA1.6 | 0.1178082| 0.10| 0| #> |59 |ESETA1 | 24| 15|InRatioCor.15 | -0.0630368| 0.05| 0| #> |60 |ESETA1 | 24| 6|InRatioCor.6 | -0.0721363| 0.05| 0| #> |66 |ESETA1 | 24| 0|PmEd2 | -0.0539022| 0.05| 0| #> |92 |ESETA1 | 35| 15|ESETA1.15 | 0.1367120| 0.10| 0| #> |93 |ESETA1 | 35| 24|ESETA1.24 | 0.1130236| 0.10| 0| #> |94 |ESETA1 | 35| 6|ESETA1.6 | 0.1038148| 0.10| 0| #> |102 |ESETA1 | 35| 15|InRatioCor.15 | -0.0586569| 0.05| 0| #> |103 |ESETA1 | 35| 24|InRatioCor.24 | -0.0866334| 0.05| 0| #> |104 |ESETA1 | 35| 6|InRatioCor.6 | -0.0741025| 0.05| 0| #> |112 |ESETA1 | 35| 0|PmEd2 | -0.0539368| 0.05| 0| #> |156 |ESETA1 | 58| 24|InRatioCor.24 | -0.0538865| 0.05| 0| #> |157 |ESETA1 | 58| 35|InRatioCor.35 | -0.0504691| 0.05| 0| #> |158 |ESETA1 | 58| 6|InRatioCor.6 | -0.0551483| 0.05| 0| covariates <- c(\"PmEd2\", \"ESETA1.6\", \"InRatioCor.6\") weights <- trim_weights.s1 final_balance_stats.s1 <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using full formulas. #> As shown below, 21 out of 191 (11%) covariates across time points, corresponding to 4 out of 33 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.08 (range= -0.09-0.16): #> #> Table: Imbalanced covariates using super and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 16| 2| 18| #> | 15| 25| 3| 28| #> | 24| 32| 5| 37| #> | 35| 42| 7| 49| #> | 58| 55| 4| 59| #> #> #> USER ALERT: For exposure ESETA1 using the full formulas and super : #> The median absolute value relation between exposure and confounder is 0.03 (range = -0.09 -0.16). 
#> As shown below, the following 21 covariates across time points out of 191 total (10.99%) spanning 4 domains out of 33 (12.12%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.08 (range=-0.09-0.16) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |2 |ESETA1 | 6| 0|gov_assist | 0.1121603| 0.10| 0| #> |6 |ESETA1 | 6| 0|PmEd2 | -0.0773531| 0.05| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.1572402| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0859973| 0.05| 0| #> |31 |ESETA1 | 15| 0|PmEd2 | -0.0693023| 0.05| 0| #> |52 |ESETA1 | 24| 15|ESETA1.15 | 0.1518122| 0.10| 0| #> |53 |ESETA1 | 24| 6|ESETA1.6 | 0.1157459| 0.10| 0| #> |59 |ESETA1 | 24| 15|InRatioCor.15 | -0.0702066| 0.05| 0| #> |60 |ESETA1 | 24| 6|InRatioCor.6 | -0.0797116| 0.05| 0| #> |66 |ESETA1 | 24| 0|PmEd2 | -0.0577498| 0.05| 0| #> |92 |ESETA1 | 35| 15|ESETA1.15 | 0.1356838| 0.10| 0| #> |93 |ESETA1 | 35| 24|ESETA1.24 | 0.1124710| 0.10| 0| #> |94 |ESETA1 | 35| 6|ESETA1.6 | 0.1022136| 0.10| 0| #> |102 |ESETA1 | 35| 15|InRatioCor.15 | -0.0641473| 0.05| 0| #> |103 |ESETA1 | 35| 24|InRatioCor.24 | -0.0906605| 0.05| 0| #> |104 |ESETA1 | 35| 6|InRatioCor.6 | -0.0759835| 0.05| 0| #> |112 |ESETA1 | 35| 0|PmEd2 | -0.0546416| 0.05| 0| #> |155 |ESETA1 | 58| 15|InRatioCor.15 | -0.0515929| 0.05| 0| #> |156 |ESETA1 | 58| 24|InRatioCor.24 | -0.0618351| 0.05| 0| #> |157 |ESETA1 | 58| 35|InRatioCor.35 | -0.0556880| 0.05| 0| #> |158 |ESETA1 | 58| 6|InRatioCor.6 | -0.0625095| 0.05| 0| weights <- trim_weights.s2 final_balance_stats.s2 <- assessBalance(data = data, exposure = exposure, exposure_time_pts = exposure_time_pts, #required outcome = outcome, type = type, formulas = formulas, weights = weights, #required balance_thresh = balance_thresh, imp_conf = imp_conf, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using full formulas. #> As shown below, 14 out of 191 (7%) covariates across time points, corresponding to 3 out of 33 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.08 (range= -0.08-0.16): #> #> Table: Imbalanced covariates using super and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 6| 17| 1| 18| #> | 15| 25| 3| 28| #> | 24| 33| 4| 37| #> | 35| 43| 6| 49| #> | 58| 59| 0| 59| #> #> #> USER ALERT: For exposure ESETA1 using the full formulas and super : #> The median absolute value relation between exposure and confounder is 0.02 (range = -0.08 -0.16). 
#> As shown below, the following 14 covariates across time points out of 191 total (7.33%) spanning 3 domains out of 33 (9.09%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to ESETA1 of 0.08 (range=-0.08-0.16) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:---|:--------|--------:|----------:|:-------------|----------:|----------:|--------:| #> |6 |ESETA1 | 6| 0|PmEd2 | -0.0552228| 0.05| 0| #> |22 |ESETA1 | 15| 6|ESETA1.6 | 0.1606345| 0.10| 0| #> |26 |ESETA1 | 15| 6|InRatioCor.6 | -0.0689478| 0.05| 0| #> |31 |ESETA1 | 15| 0|PmEd2 | -0.0608262| 0.05| 0| #> |52 |ESETA1 | 24| 15|ESETA1.15 | 0.1594566| 0.10| 0| #> |53 |ESETA1 | 24| 6|ESETA1.6 | 0.1224854| 0.10| 0| #> |59 |ESETA1 | 24| 15|InRatioCor.15 | -0.0500768| 0.05| 0| #> |60 |ESETA1 | 24| 6|InRatioCor.6 | -0.0574045| 0.05| 0| #> |92 |ESETA1 | 35| 15|ESETA1.15 | 0.1391236| 0.10| 0| #> |93 |ESETA1 | 35| 24|ESETA1.24 | 0.1149125| 0.10| 0| #> |94 |ESETA1 | 35| 6|ESETA1.6 | 0.1088099| 0.10| 0| #> |103 |ESETA1 | 35| 24|InRatioCor.24 | -0.0802366| 0.05| 0| #> |104 |ESETA1 | 35| 6|InRatioCor.6 | -0.0720984| 0.05| 0| #> |112 |ESETA1 | 35| 0|PmEd2 | -0.0522716| 0.05| 0|"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"phase-2-assess-substantive-associations-between-exposure-outcome","dir":"Articles","previous_headings":"","what":"Phase 2: Assess Substantive Associations between Exposure & Outcome","title":"Workflow: Continuous Exposure","text":"created IPTW balancing weights minimize associations confounders exposure time point, can move substantive modeling phase.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-5-fit-marginal-structural-model-summarize-visualize-results","dir":"Articles","previous_headings":"Phase 2: Assess Substantive Associations between Exposure & Outcome","what":"Step 5: Fit Marginal Structural Model & Summarize & Visualize Results","title":"Workflow: Continuous Exposure","text":"goal final step fit weighted model relating exposure meaningful epochs developmental time outcome, summarizing visualizing results. step, user models compares various counterfactuals, effects different developmental histories exposure outcome, test substantive hypotheses dose timing.","code":""},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-5a--select-fit-a-marginal-outcome-model","dir":"Articles","previous_headings":"Phase 2: Assess Substantive Associations between Exposure & Outcome > Step 5: Fit Marginal Structural Model & Summarize & Visualize Results","what":"Step 5a. Select & fit a marginal outcome model","title":"Workflow: Continuous Exposure","text":"First, use fitModel() function fit weighted generalized linear model relating exposure outcome. function draws svyglm() function survey package (Lumley, 2023). exposure main effects models reflect exposure levels exposure time point unless exposure epochs specified. One benefits creating balancing weights can used variety different marginal outcome models encompassed function subset possible models. Note models can get complex advise interpreting individual terms. 
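For orientation, the weighted outcome regression that fitModel() performs via survey::svyglm() looks roughly like the sketch below. The data frame wide_data, its iptw column of person-level weights, and the epoch-averaged exposure columns are hypothetical placeholders; this is an illustration of the general svyglm() pattern under those assumptions, not the devMSMs implementation:
library(survey)

# hypothetical wide-format data with epoch-averaged exposures, the outcome, and IPTW weights
des <- svydesign(ids = ~1, weights = ~iptw, data = wide_data)

# weighted GLM analogous to model "m2": exposure main effects plus their interactions
fit <- svyglm(StrDif_Tot.58 ~ ESETA1.Infancy * ESETA1.Toddlerhood * ESETA1.Childhood,
              design = des, family = gaussian())
summary(fit)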
The required inputs for using the fitModel() function are: complete data (data frame, mids object, or list of imputed datasets as dataframes in wide format), exposure (e.g., “variable”), exposure time points, outcome (e.g., “variable.time”), a list of the trimmed weights, and a model from the list (“m0”, “m1”, “m2”, “m3”): M0: Baseline model regressing the outcome on the main effects of exposure (e.g., infancy, toddlerhood, childhood). M1: Covariate model regressing the outcome on the main effects of exposure as well as user-specified covariates (e.g., confounders measured at baseline or the first time point that remained imbalanced after weighting in Step 4). M2: Interaction model regressing the outcome on the main effects of exposure as well as user-specified interactions between the exposure main effects (e.g., infancy:toddlerhood). M3: Full model regressing the outcome on the main effects of exposure, user-specified covariates, as well as user-specified exposure main effect interactions. Below, we specify the full model given that baseline/time-invariant confounders remain imbalanced from Step 4 and the possibility of non-linear effects of economic strain on behavior problems. If the user selects a covariate model (“m1” or “m3”), they are required to supply a list of covariates that corresponds to covariates in the wide data (see Step 4). If the user selects an interaction model (“m2” or “m3”), they are required to provide an interaction order as an integer in the int_order field that reflects the maximum interaction (e.g., 3) (which will automatically include lower order interactions (e.g., 2-way)). The interaction order cannot exceed the number of exposure main effects. Below, we specify fitting 3-way (and constituent) interactions between the exposure main effects. The optional inputs to the fitModel() function are as follows. The user has the option to specify epochs that differ from the measurement time points using the optional epochs data frame field. epochs: provide a list of user-created names in quotations (these constitute meaningful developmental time periods and will constitute the time units of the exposure histories); values: in a list, for each epoch, provide a single integer or a list of integers for the time points at which the exposure was measured that constitute that epoch. If no epochs are specified, the time points at which the exposure was measured are used in the creation of exposure histories in the final step of the process. Each specified epoch must have a corresponding value (but the values can differ in the number of entries, as shown below). If the user specifies exposure epochs, exposure main effects are created for each epoch, with exposure levels averaged for epochs that consist of two or more time point values. Epochs must be specified at this step if they will be used in the subsequent step comparing histories, and the specification of exposure epochs should be kept consistent throughout use of the devMSMs package. Below, we specify Infancy, Toddlerhood, and early Childhood exposure epochs. Please see the Preliminary Steps vignette and the accompanying manuscript for details. The user can also specify family (as a function, not in quotations; e.g., gaussian) and link (in quotations, e.g., “identity”) functions for the generalized linear model (the defaults are gaussian and the “identity” link, respectively). The possible families are: binomial, gaussian, Gamma, inverse.gaussian, poisson, quasi, quasibinomial, and quasipoisson. For the binomial and Poisson families, set the family to quasibinomial and quasipoisson, respectively, to avoid a warning about non-integer numbers of successes. The ‘quasi’ versions of the family objects give the same point estimates and standard errors but do not give the warning. The gaussian family accepts the links: “identity”, “log” and “inverse”; the binomial family the links “logit”, “probit”, “cauchit” (corresponding to logistic, normal and Cauchy CDFs respectively), “log” and “cloglog” (complementary log-log); the Gamma family the links “inverse”, “identity” and “log”; the poisson family the links “log”, “identity”, and “sqrt”; and the inverse.gaussian family the links 1/mu^2, inverse, identity and log. The quasi family accepts the links “logit”, “probit”, “cloglog”, “identity”, “inverse”, “log”, “1/mu^2” and “sqrt”, and the function power can be used to create a power link function. See the survey and stats R package documentation for more information.
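For instance, a count-like outcome could be modeled with a quasi-Poisson family and log link, following the guidance above (a small illustration of the parameterization only; the workflow below uses the gaussian default with the “identity” link):
# family is supplied as a function (no quotations); link as a string
family <- quasipoisson
link <- "log"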
Here, we retain the default family and link functions. The fitModel() function outputs an .rds file of the fitted model(s) and an .html table of model evidence (which can display models from up to 12 imputed datasets) in the ‘models/’ folder. Importantly, the function also outputs to the console the result of a likelihood ratio test comparing the user-specified model to a nested version of that model omitting the exposure variables, to test whether exposure predicts variation in the outcome. If this test is not significant, providing no evidence that exposure predicts the outcome, we do not advise proceeding to the subsequent history comparison step. (Models are pooled prior to conducting the likelihood ratio test for imputed data.) The function returns a list of fitted model objects, as svyglm output (labeled “0” if the data are in data frame format). We find that the likelihood ratio test is significant, indicating that we can proceed to the next step of evaluating the effects of different exposure histories. We first conduct sensitivity analyses, fitting the model using the weights trimmed at the two different values. Of note, as described in Step 4, if save.out = TRUE, running these analyses will overwrite the output from the main model fitting unless that main output is renamed or re-located to a new folder first. We first fit the model using the weights trimmed at the 92nd quantile. We similarly find a significant likelihood ratio test. We then fit the model using the weights trimmed at the 98th quantile. We find a comparable result.","code":"model <- \"m2\" int_order <- 3 epochs <- data.frame(epochs = c(\"Infancy\", \"Toddlerhood\", \"Childhood\"), values = I(list(c(6), c(15), c(24)))) family <- gaussian link <- \"identity\" weights <- trim_weights models <- fitModel(data = data, weights = weights, exposure = exposure, #required exposure_time_pts = exposure_time_pts, outcome = outcome, model = model, #required family = family, link = link, int_order = int_order, covariates = covariates, epochs = epochs, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. #> Working (Rao-Scott+F) LRT for ESETA1.Infancy ESETA1.Toddlerhood ESETA1.Childhood ESETA1.Infancy:ESETA1.Toddlerhood ESETA1.Infancy:ESETA1.Childhood ESETA1.Toddlerhood:ESETA1.Childhood ESETA1.Infancy:ESETA1.Toddlerhood:ESETA1.Childhood #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 17.99531 p= 0.015443 #> (scale factors: 1.4 1.4 1.1 0.97 0.88 0.71 0.58 ); denominator df= 1284 #> #> The marginal model, m2, is summarized below: weights <- trim_weights.s1 models.s1 <- fitModel(data = data, weights = weights, exposure = exposure, #required exposure_time_pts = exposure_time_pts, outcome = outcome, model = model, #required family = family, link = link, int_order = int_order, covariates = covariates, epochs = epochs, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant.
#> Working (Rao-Scott+F) LRT for ESETA1.Infancy ESETA1.Toddlerhood ESETA1.Childhood ESETA1.Infancy:ESETA1.Toddlerhood ESETA1.Infancy:ESETA1.Childhood ESETA1.Toddlerhood:ESETA1.Childhood ESETA1.Infancy:ESETA1.Toddlerhood:ESETA1.Childhood #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 20.64908 p= 0.0059484 #> (scale factors: 1.4 1.3 1.1 1 0.89 0.73 0.61 ); denominator df= 1284 #> #> The marginal model, m2, is summarized below: weights <- trim_weights.s2 models.s2 <- fitModel(data = data, weights = weights, exposure = exposure, #required exposure_time_pts = exposure_time_pts, outcome = outcome, model = model, #required family = family, link = link, int_order = int_order, covariates = covariates, epochs = epochs, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. #> Working (Rao-Scott+F) LRT for ESETA1.Infancy ESETA1.Toddlerhood ESETA1.Childhood ESETA1.Infancy:ESETA1.Toddlerhood ESETA1.Infancy:ESETA1.Childhood ESETA1.Toddlerhood:ESETA1.Childhood ESETA1.Infancy:ESETA1.Toddlerhood:ESETA1.Childhood #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 14.87367 p= 0.04432 #> (scale factors: 1.5 1.4 1.1 0.93 0.87 0.67 0.54 ); denominator df= 1284 #> #> The marginal model, m2, is summarized below:"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"step-5b--estimate-compare-and-visualize-model-predicted-outcome-as-a-function-of-exposure-history","dir":"Articles","previous_headings":"Phase 2: Assess Substantive Associations between Exposure & Outcome > Step 5: Fit Marginal Structural Model & Summarize & Visualize Results","what":"Step 5b. Estimate, compare, and visualize model-predicted outcome as a function of exposure history","title":"Workflow: Continuous Exposure","text":"final step, use fitted model results test substantive hypotheses dose timing. estimate compare average marginal estimates outcome user-specified exposure history (.e., permutation high (“h) low (“l”) levels exposure exposure epoch) using compareHistories() function. draws primarily avg_predictions() hypotheses() functions marginaleffects package (Arel-Bundock, 2023). First, compareHistories() function creates average predictions outcome exposure history. n combinations user-specified exposure histories, set value predictors full dataset values combination, leaving variables . gives us n datasets, size original dataset used fit model. n datasets, compute predicted values given model taking average predicted value n datasets. n averaged predicted values expected potential outcomes combination. (imputed data, function outputs pooled predicted values using Rubin’s Rules.) Next, using predicted values, function conducts comparisons different histories (pooling across imputed datasets imputed data using Rubin’s Rules). Lastly, function implements correction multiple comparisons (treating run function family) plotting results. Box plots display outcome x-axis exposure history y-axis whiskers display standard errors. required inputs using compareHistories() function : exposure (e.g., “variable”), outcome (e.g., “variable.t”), list model output Step 5a. optional inputs follows. 
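Before turning to the optional inputs, the history-averaging logic described above can be sketched in a few lines of base R. Here fit stands for a fitted outcome model (e.g., a glm) and wide_data for the wide-format data, both hypothetical placeholders; devMSMs performs this averaging via marginaleffects::avg_predictions() and pools across imputed datasets:
# expected outcome under one exposure history: set every person's exposure epochs to the
# history's values, predict, and average the predictions
predict_history <- function(fit, dat, history_vals) {
  dat[names(history_vals)] <- as.list(history_vals)   # overwrite the exposure columns
  mean(predict(fit, newdata = dat, type = "response"))
}

lo <- c(ESETA1.Infancy = -0.22, ESETA1.Toddlerhood = -0.24, ESETA1.Childhood = -0.35)
hi <- c(ESETA1.Infancy =  0.52, ESETA1.Toddlerhood =  0.48, ESETA1.Childhood =  0.33)

predict_history(fit, wide_data, lo)   # expected outcome under "l-l-l"
predict_history(fit, wide_data, hi)   # expected outcome under "h-h-h"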
To create histories of high and low values of a continuous exposure, with hi_lo_cut the user can specify a list of two quantile values (0-1; the default is a median split +/- 0.001) demarcating high and low levels of exposure, respectively. (Imputed data are stacked to calculate the cutoff values.) We suggest drawing on existing hypotheses and examining the variability in the exposure variable to determine high and low cutoffs. We recommend that users begin by specifying meaningful high and low percentile cutoffs and then examining how many individuals in the sample fall into the user-specified exposure histories created from those percentile cutoffs (see the Preliminary Steps vignette); a rough way to check this coverage is sketched below, after the remaining optional inputs. In the absence of gold-standard recommendations for sufficient cell numbers per history, users should ensure reasonable coverage of the histories to avoid extrapolation and maximize precision. Below, we specify the 60th and 30th percentile values to denote high and low levels of economic strain, respectively. Additionally, the user has the option to specify epochs that differ from the exposure time points using the optional epochs data frame field (see Step 5a). If the user specified epochs in the Step 5a fitModel() function, they must also be specified at this step. The user also has the option to estimate and compare a custom subset of user-specified exposure histories (i.e., sequences of high and low levels of exposure at each epoch or time point) using the reference and comparison fields. To conduct the recommended customized comparisons, users must provide at least one unique valid history (e.g., “l-l-l”) as a reference by, in quotations, providing a string (or list of strings) of lowercase l’s and h’s (separated by -), each corresponding to an exposure epoch (or time point), that signify a sequence of exposure levels (“low” or “high”, respectively). If the user supplies a reference history, they are required to provide at least one unique valid history as a comparison by, in quotations, providing a comparison string (or list of strings) of l’s and h’s (separated by “-”), each corresponding to an exposure epoch, that signify the sequence(s) of exposure levels (“low” or “high”, respectively) that constitute the comparison exposure history/histories to be compared to the reference. If the user supplies one or more comparisons, at least one reference must also be specified. Each reference exposure history will be compared to each comparison history, and all comparisons are subject to multiple comparison correction. If no reference or comparison is specified, all histories are compared to each other. With more than 4 exposure main effects (either epochs or exposure time points), the user is required to select a subset of history comparisons (Step 5b), given that the base code (see the hypotheses() function of the marginaleffects package; Arel-Bundock, 2023) cannot accommodate all pairwise history comparisons at 5 or more time points. Below, we specify one reference and four comparison histories. The user can also specify a multiple comparison method in mc_method by, in quotations, providing the shorthand for the method (“holm”, “hochberg”, “hommel”, “bonferroni”, “BH” (default), “BY”, “fdr”, or “none”; see the stats::p.adjust documentation; R Core Team) for the multiple comparison correction applied to the final (pooled across imputed datasets where applicable) contrasts comparing the effects of different exposure histories on the outcome (the default is Benjamini-Hochberg). Each run of this code is considered one family. If the user iterates through this function specifying different comparisons each time, we strongly recommend interpreting the outcome from the most inclusive set of comparisons to avoid false discovery. Below, we retain the default Benjamini-Hochberg method for multiple comparison correction. Based on their substantive interests, the user also has the option to choose the level of dosage (“h” or “l”) that is tallied in the labels of the dose counts in the tables and figures (dose_level; default is “h”). For example, if an exposure variable is coded in such a way that lower levels are conceptualized as the exposure (e.g., lower income), the user may wish to choose a dosage level of “l”. Below, given our interest in histories of high economic strain, we specify that we wish to tally high doses of exposure.
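Returning to the coverage check recommended for hi_lo_cut above: one rough way to see how many children fall into the histories implied by a candidate pair of percentile cutoffs is sketched below. The wide_data object and the use of the raw 6-, 15-, and 24-month exposure columns are assumptions for illustration; devMSMs reports these history counts for you (see the output further below).
# classify each epoch's exposure as high ("h"), low ("l"), or neither, then count histories
classify <- function(x, lo = 0.3, hi = 0.6) {
  q <- quantile(x, probs = c(lo, hi), na.rm = TRUE)
  ifelse(x <= q[1], "l", ifelse(x >= q[2], "h", NA))
}
labs <- data.frame(Infancy     = classify(wide_data$ESETA1.6),
                   Toddlerhood = classify(wide_data$ESETA1.15),
                   Childhood   = classify(wide_data$ESETA1.24))
labs <- labs[complete.cases(labs), ]
table(paste(labs$Infancy, labs$Toddlerhood, labs$Childhood, sep = "-"))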
Lastly, user can provide alternate plotting labels exposure outcome exp_lab out_lab fields (defaults variable names), well list (equal number exposure main effects +1) colors Brewer color palette (colors; default “Dark2”). See RColorBrewer::display.brewer.() https://r-graph-gallery.com/38-rcolorbrewers-palettes.html). , specify plotting labels 4 colors. compareHistories() function saves .html tables estimated mean outcome values history history comparisons ‘histories/’ folder boxplot predicted values histories ‘plots/’ folder. function returns data frame user-specified history comparisons containing contrast estimates, standard errors, statistics, p-values, low high confidence intervals, corrected p-values, labeled history dose. shown , first confirm reasonable distribution sample specified exposure histories. inspect history comparison conclude evidence evidence children experienced different histories exposure economic strain infancy, toddlerhood, early childhood differ behavioral problems early childhood. conduct sensitivity analyses assessing comparing histories drawing models used weights trimmed two different values. note, running analyses overwrite output main history comparison main output rename re-located new folder first. first compare histories using model fit weights trimmed 92nd quantile value. shown , results indicate marginal non-significant contrast “l-l-l” “h-h-h” histories economic strain exposure relation behavior problems early childhood. compare histories usign model fit weights trimmed 98th quantile value. Similarly, find evidence differences behavioral problems function history exposure economic strain.","code":"hi_lo_cut <- c(0.6, 0.3) reference <- \"l-l-l\" comparison <- c(\"h-h-h\", \"l-l-h\", \"h-l-l\", \"l-h-l\") mc_comp_method <- \"BH\" dose_level <- \"h\" exp_lab <- \"Economic Strain\" out_lab <- \"Behavior Problems\" colors <- c(\"blue4\", \"darkgreen\", \"darkgoldenrod\", \"red2\") model <- models results <- compareHistories(exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, model = model, #required epochs = epochs, hi_lo_cut = hi_lo_cut, reference = reference, comparison = comparison, #optional mc_comp_method = mc_comp_method, dose_level = dose_level, exp_lab = exp_lab, out_lab = out_lab, colors = colors, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 1292 individuals in the sample, below is the distribution of the 406 (31.42%) individuals that fall into 5 out of the 5 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure ESETA1, respectively, across Infancy, Toddlerhood, Childhood. 
#> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure ESETA1 histories based on exposure main effects Infancy, Toddlerhood, Childhood containing time points 6, 15, 24: #> #> |history | n| #> |:-------|---:| #> |h-h-h | 216| #> |h-l-l | 20| #> |l-h-l | 25| #> |l-l-h | 18| #> |l-l-l | 127| #> #> #> Below are the average predictions by user-specified history: #> | | ESETA1.Infancy| ESETA1.Toddlerhood| ESETA1.Childhood| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|--------------:|------------------:|----------------:|--------:|---------:|---------:|-------:|--------:|--------:|---------:|:-------|----------:| #> |1 | -0.2204| -0.243| -0.3527| 0.4747| 0.0121| 39.1104| 0| Inf| 0.4509| 0.4985|l-l-l | 0| #> |2 | -0.2204| 0.480| -0.3527| 0.4719| 0.0141| 33.4217| 0| 811.1404| 0.4443| 0.4996|l-h-l | 1| #> |3 | 0.5166| -0.243| -0.3527| 0.4817| 0.0131| 36.8133| 0| 983.1129| 0.4561| 0.5074|h-l-l | 1| #> |5 | -0.2204| -0.243| 0.3288| 0.4908| 0.0158| 31.0331| 0| 699.9813| 0.4598| 0.5218|l-l-h | 1| #> |8 | 0.5166| 0.480| 0.3288| 0.4965| 0.0108| 46.0536| 0| Inf| 0.4754| 0.5176|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| #> |:------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| #> |(-0.2204, -0.243, -0.3527) - (0.5166, 0.48, 0.3288) | 0.02| 0.01| 1.92| 0.06| 4.18| 0.00| 0.04|l-l-l vs h-h-h |0 vs 3 | 0.20| #> |(-0.2204, -0.243, -0.3527) - (0.5166, -0.243, -0.3527) | 0.01| 0.01| 0.70| 0.48| 1.05| -0.01| 0.03|l-l-l vs h-l-l |0 vs 1 | 0.64| #> |(-0.2204, -0.243, -0.3527) - (-0.2204, 0.48, -0.3527) | 0.00| 0.01| -0.26| 0.80| 0.33| -0.02| 0.02|l-l-l vs l-h-l |0 vs 1 | 0.80| #> |(-0.2204, -0.243, -0.3527) - (-0.2204, -0.243, 0.3288) | 0.02| 0.01| 1.66| 0.10| 3.36| 0.00| 0.04|l-l-l vs l-l-h |0 vs 1 | 0.20| model <- models.s1 results.s1 <- compareHistories(exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, model = model, #required epochs = epochs, hi_lo_cut = hi_lo_cut, reference = reference, comparison = comparison, #optional mc_comp_method = mc_comp_method, dose_level = dose_level, exp_lab = exp_lab, out_lab = out_lab, colors = colors, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 1292 individuals in the sample, below is the distribution of the 406 (31.42%) individuals that fall into 5 out of the 5 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure ESETA1, respectively, across Infancy, Toddlerhood, Childhood. 
#> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure ESETA1 histories based on exposure main effects Infancy, Toddlerhood, Childhood containing time points 6, 15, 24: #> #> |history | n| #> |:-------|---:| #> |h-h-h | 216| #> |h-l-l | 20| #> |l-h-l | 25| #> |l-l-h | 18| #> |l-l-l | 127| #> #> #> Below are the average predictions by user-specified history: #> | | ESETA1.Infancy| ESETA1.Toddlerhood| ESETA1.Childhood| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|--------------:|------------------:|----------------:|--------:|---------:|---------:|-------:|---------:|--------:|---------:|:-------|----------:| #> |1 | -0.2204| -0.243| -0.3527| 0.4718| 0.0118| 40.0529| 0| Inf| 0.4487| 0.4949|l-l-l | 0| #> |2 | -0.2204| 0.480| -0.3527| 0.4707| 0.0139| 33.9638| 0| 837.5166| 0.4435| 0.4979|l-h-l | 1| #> |3 | 0.5166| -0.243| -0.3527| 0.4796| 0.0128| 37.4845| 0| 1019.1131| 0.4545| 0.5046|h-l-l | 1| #> |5 | -0.2204| -0.243| 0.3288| 0.4882| 0.0152| 32.0175| 0| 744.7950| 0.4583| 0.5181|l-l-h | 1| #> |8 | 0.5166| 0.480| 0.3288| 0.4964| 0.0105| 47.2873| 0| Inf| 0.4759| 0.5170|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| #> |:------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| #> |(-0.2204, -0.243, -0.3527) - (0.5166, 0.48, 0.3288) | 0.02| 0.01| 2.23| 0.03| 5.28| 0.00| 0.05|l-l-l vs h-h-h |0 vs 3 | 0.10| #> |(-0.2204, -0.243, -0.3527) - (0.5166, -0.243, -0.3527) | 0.01| 0.01| 0.80| 0.42| 1.24| -0.01| 0.03|l-l-l vs h-l-l |0 vs 1 | 0.56| #> |(-0.2204, -0.243, -0.3527) - (-0.2204, 0.48, -0.3527) | 0.00| 0.01| -0.10| 0.92| 0.12| -0.02| 0.02|l-l-l vs l-h-l |0 vs 1 | 0.92| #> |(-0.2204, -0.243, -0.3527) - (-0.2204, -0.243, 0.3288) | 0.02| 0.01| 1.74| 0.08| 3.62| 0.00| 0.03|l-l-l vs l-l-h |0 vs 1 | 0.16| model <- models.s2 results.s2 <- compareHistories(exposure = exposure, exposure_time_pts = exposure_time_pts, outcome = outcome, model = model, #required epochs = epochs, hi_lo_cut = hi_lo_cut, reference = reference, comparison = comparison, #optional mc_comp_method = mc_comp_method, dose_level = dose_level, exp_lab = exp_lab, out_lab = out_lab, colors = colors, #optional home_dir = home_dir, verbose = verbose, save.out = save.out) #optional #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 1292 individuals in the sample, below is the distribution of the 406 (31.42%) individuals that fall into 5 out of the 5 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure ESETA1, respectively, across Infancy, Toddlerhood, Childhood. 
#> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure ESETA1 histories based on exposure main effects Infancy, Toddlerhood, Childhood containing time points 6, 15, 24: #> #> |history | n| #> |:-------|---:| #> |h-h-h | 216| #> |h-l-l | 20| #> |l-h-l | 25| #> |l-l-h | 18| #> |l-l-l | 127| #> #> #> Below are the average predictions by user-specified history: #> | | ESETA1.Infancy| ESETA1.Toddlerhood| ESETA1.Childhood| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|--------------:|------------------:|----------------:|--------:|---------:|---------:|-------:|--------:|--------:|---------:|:-------|----------:| #> |1 | -0.2204| -0.243| -0.3527| 0.4771| 0.0126| 37.9180| 0| Inf| 0.4524| 0.5017|l-l-l | 0| #> |2 | -0.2204| 0.480| -0.3527| 0.4743| 0.0146| 32.5709| 0| 770.6037| 0.4458| 0.5028|l-h-l | 1| #> |3 | 0.5166| -0.243| -0.3527| 0.4825| 0.0137| 35.1938| 0| 898.9284| 0.4557| 0.5094|h-l-l | 1| #> |5 | -0.2204| -0.243| 0.3288| 0.4921| 0.0165| 29.7876| 0| 645.2777| 0.4597| 0.5245|l-l-h | 1| #> |8 | 0.5166| 0.480| 0.3288| 0.4962| 0.0114| 43.4304| 0| Inf| 0.4738| 0.5186|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| #> |:------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| #> |(-0.2204, -0.243, -0.3527) - (0.5166, 0.48, 0.3288) | 0.02| 0.01| 1.62| 0.11| 3.24| 0.00| 0.04|l-l-l vs h-h-h |0 vs 3 | 0.28| #> |(-0.2204, -0.243, -0.3527) - (0.5166, -0.243, -0.3527) | 0.01| 0.01| 0.52| 0.60| 0.73| -0.02| 0.03|l-l-l vs h-l-l |0 vs 1 | 0.80| #> |(-0.2204, -0.243, -0.3527) - (-0.2204, 0.48, -0.3527) | 0.00| 0.01| -0.25| 0.80| 0.32| -0.02| 0.02|l-l-l vs l-h-l |0 vs 1 | 0.80| #> |(-0.2204, -0.243, -0.3527) - (-0.2204, -0.243, 0.3288) | 0.02| 0.01| 1.48| 0.14| 2.84| 0.00| 0.03|l-l-l vs l-l-h |0 vs 1 | 0.28|"},{"path":"https://istallworthy.github.io/devMSMs/articles/Workflow_Continuous_Exposure.html","id":"references","dir":"Articles","previous_headings":"","what":"References","title":"Workflow: Continuous Exposure","text":"Arel-Bundock, V. 2023. marginaleffects: Predictions, Comparisons, Slopes, Marginal Means,Hypothesis Tests. https://CRAN.R-project.org/package=marginaleffects. Burchinal, M., Howes, C., Pianta, R., Bryant, D., Early, D., Clifford, R., & Barbarin, O. (2008). Predicting Child Outcomes End Kindergarten Quality Pre-Kindergarten Teacher–Child Interactions Instruction. Applied Developmental Science, 12(3), 140–153. https://doi.org/10.1080/10888690802199418 Cole, S. R., & Hernán, M. . (2008). Constructing Inverse Probability Weights Marginal Structural Models. American Journal Epidemiology, 168(6), 656–664. https://doi.org/10.1093/aje/kwn164. Greifer, Noah. 2023.WeightIt: Weighting Covariate Balance Observational Studies. https://CRAN.R-project.org/package=WeightIt. Lumley, Thomas. 2023. “survey: Analysis Complex Survey Samples.” Polley, Eric, Erin LeDell, Chris Kennedy, Mark van der Laan. 2023. SuperLearner: SuperLearner Prediction. https://CRAN.R-project.org/package=SuperLearner. 
R Core Team (2013). R: language environment statistical computing. R Foundation Statistical Computing, Vienna, Austria. ISBN 3-900051-07-0, URLhttp://www.R-project.org/. Stuart, E. . (2010). Matching methods causal inference: review look forward. Statistical Science: Review Journal Institute Mathematical Statistics, 25(1), 1–21. https://doi.org/10.1214/09-STS313. Thoemmes, F., & Ong, . D. (2016). Primer Inverse Probability Treatment Weighting Marginal Structural Models. https://doi.org/10.1177/2167696815621645. Vernon-Feagans, L., Cox, M., Willoughby, M., Burchinal, M., Garrett-Peters, P., Mills-Koonce, R., Garrett-Peiers, P., Conger, R. D., & Bauer, P. J. (2013). Family Life Project: Epidemiological Developmental Study Young Children Living Poor Rural Communities. Monographs Society Research Child Development, 78(5), –150.","code":""},{"path":"https://istallworthy.github.io/devMSMs/authors.html","id":null,"dir":"","previous_headings":"","what":"Authors","title":"Authors and Citation","text":"Isabella Stallworthy. Author, maintainer. Noah Greifer. Author, contributor. Meriah DeJoseph. Author. Emily Padrutt. Author. Daniel Berry. Author.","code":""},{"path":"https://istallworthy.github.io/devMSMs/authors.html","id":"citation","dir":"","previous_headings":"","what":"Citation","title":"Authors and Citation","text":"Stallworthy , Greifer N, DeJoseph M, Padrutt E, Berry D (2023). devMSMs: Tools Conducting Marginal Structural Models Developmental Data. R package version 0.0.0.9000, https://github.com/istallworthy/devMSMs, https://istallworthy.github.io/devMSMs/.","code":"@Manual{, title = {devMSMs: Tools for Conducting Marginal Structural Models with Developmental Data}, author = {Isabella Stallworthy and Noah Greifer and Meriah DeJoseph and Emily Padrutt and Daniel Berry}, year = {2023}, note = {R package version 0.0.0.9000, https://github.com/istallworthy/devMSMs}, url = {https://istallworthy.github.io/devMSMs/}, }"},{"path":"https://istallworthy.github.io/devMSMs/index.html","id":"devmsms","dir":"","previous_headings":"","what":"An R package for conducting marginal structural models (MSMs) with longitudinal data","title":"An R package for conducting marginal structural models (MSMs) with longitudinal data","text":"Scientists study humans fundamentally interested questions causation, yet conceptual, methodological, practical barriers historically prevented use methods causal inference developed fields. specifically, scientists, clinicians, educators, policymakers alike often interested causal processes involving questions (timing) extent (dose) different factors influence human functioning development, order inform scientific understanding improve people’s lives. Marginal structural models (MSMs; Robins et al., 2000), orginating epidemiology public health, represent one -utilized tool improving causal inference longitudinal observational data, given certain assumptions. brief, MSMs leverage inverse-probability--treatment-weights (IPTW) potential outcomes framework. MSMs first focus problem confounding, using IPTW attenuate associations measured confounders exposure (e.g., experience, characteristic, event –biology broader environment) time. weighted model can fitted relating time-varying exposure future outcome. 
Finally, model-predicted effects different exposure histories, vary dose timing, can evaluated compared counterfactuals reveal putative causal effects.devMSMs R package accompanying tutorial paper, Investigating Causal Questions Human Development using Marginal Structural Models: Tutorial Introduction devMSMs Package R (insert preprint link ), implementing MSMs longitudinal data answer causal questions dose timing effects given exposure future outcome. Core features package include: flexible functions built-guidance, drawing established expertise best practices implementing longitudinal IPTW weighting outcome modeling answer substantive causal questions dose timing accommodation data form either complete dataframe multiple imputation recommended workflow using devMSMs functions longitudinal data step--step user guidance deveMSMs worflow form vignettes R markdown template file users new MSM technique R programming accompanying suite helper functions assist users preparing inspecting data prior use devMSMs conceptual introduction example empirical application accompanying tutorial paper","code":""},{"path":"https://istallworthy.github.io/devMSMs/index.html","id":"overview","dir":"","previous_headings":"","what":"Overview","title":"An R package for conducting marginal structural models (MSMs) with longitudinal data","text":"package contains 6 core functions conducting longitudinal confounder adjustment outcome modeling longitudinal data time-varying exposures.","code":""},{"path":"https://istallworthy.github.io/devMSMs/index.html","id":"installation","dir":"","previous_headings":"","what":"Installation","title":"An R package for conducting marginal structural models (MSMs) with longitudinal data","text":"devMSMs can installed R Studio Github using devtools package:library(devtools)install_github(\"istallworthy/devMSMs\")library(devMSMs) helper functions can installed accompanying devMSMsHelpers repo:install_github(\"istallworthy/devMSMsHelpers\")library(devMSMsHelpers)","code":""},{"path":"https://istallworthy.github.io/devMSMs/index.html","id":"recommended-workflow","dir":"","previous_headings":"","what":"Recommended Workflow","title":"An R package for conducting marginal structural models (MSMs) with longitudinal data","text":"Please see Workflows vignettes details.","code":""},{"path":"https://istallworthy.github.io/devMSMs/index.html","id":"additional-resources","dir":"","previous_headings":"","what":"Additional Resources","title":"An R package for conducting marginal structural models (MSMs) with longitudinal data","text":"Austin, P. C. (2011). Introduction Propensity Score Methods Reducing Effects Confounding Observational Studies. Multivariate Behavioral Research, 46(3), 399–424. https://doi.org/10.1080/00273171.2011.568786 Blackwell, M. (2013). Framework Dynamic Causal Inference Political Science. American Journal Political Science, 57(2), 504–520. https://doi.org/10.1111/j.1540-5907.2012.00626.x Cole, S. R., & Hernán, M. . (2008). Constructing Inverse Probability Weights Marginal Structural Models. American Journal Epidemiology, 168(6), 656–664. https://doi.org/10.1093/aje/kwn164 Eronen, M. . (2020). Causal discovery problem psychological interventions. New Ideas Psychology, 59, 100785. https://doi.org/10.1016/j.newideapsych.2020.100785 Fong, C., Hazlett, C., & Imai, K. (2018). Covariate balancing propensity score continuous treatment: Application efficacy political advertisements. Annals Applied Statistics, 12(1), 156–177. https://doi.org/10.1214/17-AOAS1101 Haber, N. ., Wood, M. 
E., Wieten, S., & Breskin, . (2022). DAG Omitted Objects Displayed (DAGWOOD): framework revealing causal assumptions DAGs. Annals Epidemiology, 68, 64–71. https://doi.org/10.1016/j.annepidem.2022.01.001 Hirano, K., & Imbens, G. W. (2004). Propensity Score Continuous Treatments. Applied Bayesian Modeling Causal Inference Incomplete-Data Perspectives (pp. 73–84). John Wiley & Sons, Ltd. https://doi.org/10.1002/0470090456.ch7 Kainz, K., Greifer, N., Givens, ., Swietek, K., Lombardi, B. M., Zietz, S., & Kohn, J. L. (2017). Improving Causal Inference: Recommendations Covariate Selection Balance Propensity Score Methods. Journal Society Social Work Research, 8(2), 279–303. https://doi.org/10.1086/691464 Robins, J. M., Hernán, M. Á., & Brumback, B. (2000). Marginal Structural Models Causal Inference Epidemiology. Epidemiology, 11(5), 550–560. Thoemmes, F., & Ong, . D. (2016). Primer Inverse Probability Treatment Weighting Marginal Structural Models. https://doi.org/10.1177/2167696815621645","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/add_dose.html","id":null,"dir":"Reference","previous_headings":"","what":"Add dose tally to table — add_dose","title":"Add dose tally to table — add_dose","text":"Add dose tally table","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/add_dose.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Add dose tally to table — add_dose","text":"","code":"add_dose(p, dose_level)"},{"path":"https://istallworthy.github.io/devMSMs/reference/add_dose.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Add dose tally to table — add_dose","text":"p table output marginaleffects::avg_predictions() hypotheses() dose_level \"l\" \"h\" indicating whether low high doses tallied tables plots","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/add_dose.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Add dose tally to table — add_dose","text":"table dose level tally","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/add_histories.html","id":null,"dir":"Reference","previous_headings":"","what":"Add history labels to table — add_histories","title":"Add history labels to table — add_histories","text":"Add history labels table","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/add_histories.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Add history labels to table — add_histories","text":"","code":"add_histories(p, d)"},{"path":"https://istallworthy.github.io/devMSMs/reference/add_histories.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Add history labels to table — add_histories","text":"p table output marginaleffects::avg_predictions() hypotheses() d data frame high low values per exposure main effect","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/add_histories.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Add history labels to table — add_histories","text":"table histories labeled","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/assessBalance.html","id":null,"dir":"Reference","previous_headings":"","what":"Assesses confounder balancing — assessBalance","title":"Assesses confounder balancing — assessBalance","text":"Draws functions cobalt package quantify relations exposure confounders exposure time point according 
guidelines Jackson, 2016 assess balance time-varying exposures.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/assessBalance.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Assesses confounder balancing — assessBalance","text":"","code":"assessBalance( data, exposure, exposure_time_pts, outcome, type, formulas, weights = NULL, balance_thresh = NULL, imp_conf = NULL, home_dir = NULL, verbose = TRUE, save.out = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/assessBalance.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Assesses confounder balancing — assessBalance","text":"data data wide format : data frame, list imputed data frames, mids object exposure name exposure variable exposure_time_pts list integers weights created/assessed correspond time points exposure was measured outcome name outcome variable \".timepoint\" suffix type type balance assessment; 'prebalance' 'weighted' formulas list balancing formulas time point output createFormulas() weights list IPTW weights output createWeights, required type 'weighted' balance_thresh (optional) one two numbers 0 1 indicating single balancing threshold thresholds less important confounders, respectively (default = 0.1) imp_conf (optional) list variable names reflecting important confounders, required two balance thresholds supplied home_dir (optional) path home directory (required if save.out = TRUE) verbose (optional) TRUE FALSE indicator user output (default TRUE) save.out (optional) TRUE FALSE indicator save output intermediary output locally (default TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/assessBalance.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Assesses confounder balancing — assessBalance","text":"data frame balance statistics","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/reference/assessBalance.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Assesses confounder balancing — assessBalance","text":"","code":"test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"short\", save.out = FALSE) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C #> #> #Prebalance b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"prebalance\", formulas = f, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point
prior to weighting, using short formulas. #> #> As shown below, 1 out of 7 (14%) covariates across time points, corresponding to 1 out of 2 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.1 (range= 0.1-0.1): #> #> Table: Imbalanced covariates using no weights and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 0| 1| 1| #> | 2| 3| 0| 3| #> | 3| 3| 0| 3| #> #> #> USER ALERT: For exposure A using the short formulas and no weights : #> The median absolute value relation between exposure and confounder is 0.06 (range = -0.02 -0.1). #> As shown below, the following 1 covariates across time points out of 7 total (14.29%) spanning 1 domains out of 3 (33.33%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.1 (range=0.1-0.1) : #> #> #> Table: Imbalanced Covariates #> #> |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--------|--------:|----------:|:---------|---------:|----------:|--------:| #> |A | 1| 0|C | 0.1002166| 0.1| 0| #> b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"prebalance\", formulas = f, balance_thresh = 0.2, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point prior to weighting, using short formulas. #> #> No covariates remain imbalanced using no weights and short formulas. #> #> #> USER ALERT: For exposure A using the short formulas and no weights : #> The median absolute value relation between exposure and confounder is 0.06 (range = -0.02 -0.1). #> There are no imbalanced covariates. #> There are no imbalanced covariates. b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"prebalance\", formulas = f, balance_thresh = c(0.1, 0.2), imp_conf = \"B.1\", save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point prior to weighting, using short formulas. #> #> No covariates remain imbalanced using no weights and short formulas. #> #> #> USER ALERT: For exposure A using the short formulas and no weights : #> The median absolute value relation between exposure and confounder is 0.06 (range = -0.02 -0.1). #> There are no imbalanced covariates. #> There are no imbalanced covariates. 
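# --- Illustrative sketch (not part of the original examples) ---
# The `data` argument is documented to also accept a mids object of multiply
# imputed data. Assuming the mice package is installed, a prebalance check on
# imputed data might look like the sketch below; `test` and `f` are the objects
# created above, and the missingness introduced here is purely for illustration.
library(mice)
test_miss <- test
test_miss$B.2[1:5] <- NA
imp <- mice(test_miss, m = 2, maxit = 2, printFlag = FALSE)
b_imp <- assessBalance(data = imp, exposure = "A",
                       exposure_time_pts = c(1, 2, 3), outcome = "D.3",
                       type = "prebalance", formulas = f, save.out = FALSE)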
f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\", \"A.1:B.1\"), ti_confounders = \"C\", type = \"short\", save.out = FALSE) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + A.1:B.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C #> #> b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"prebalance\", formulas = f, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point prior to weighting, using short formulas. #> #> As shown below, 2 out of 8 (25%) covariates across time points, corresponding to 2 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.14 (range= 0.1-0.19): #> #> Table: Imbalanced covariates using no weights and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 0| 1| 1| #> | 2| 3| 1| 4| #> | 3| 3| 0| 3| #> #> #> USER ALERT: For exposure A using the short formulas and no weights : #> The median absolute value relation between exposure and confounder is 0.06 (range = -0.02 -0.19). #> As shown below, the following 2 covariates across time points out of 8 total (25%) spanning 2 domains out of 3 (66.67%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.14 (range=0.1-0.19) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:---------|---------:|----------:|--------:| #> |1 |A | 1| 0|C | 0.1002166| 0.1| 0| #> |3 |A | 2| 1|A.1:B.1 | 0.1859313| 0.1| 0| #> # Weighted w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.07 (SD = 0.43; range = 0.35-3). #> b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"weighted\", weights = w, formulas = f, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> #> As shown below, 1 out of 8 (12%) covariates across time points, corresponding to 1 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.11 (range= 0.11-0.11): #> #> Table: Imbalanced covariates using cbps and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 4| 0| 4| #> | 3| 2| 1| 3| #> #> #> USER ALERT: For exposure A using the short formulas and cbps : #> The median absolute value relation between exposure and confounder is 0.03 (range = -0.02 -0.11). 
#> As shown below, the following 1 covariates across time points out of 8 total (12.5%) spanning 1 domains out of 3 (33.33%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.11 (range=0.11-0.11) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:---------|---------:|----------:|--------:| #> |6 |A | 3| 2|A.2 | 0.1072889| 0.1| 0| #> b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"weighted\", weights = w, formulas = f, balance_thresh = 0.2, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> #> No covariates remain imbalanced using cbps and short formulas. #> #> #> USER ALERT: For exposure A using the short formulas and cbps : #> The median absolute value relation between exposure and confounder is 0.03 (range = -0.02 -0.11). #> There are no imbalanced covariates. #> There are no imbalanced covariates. b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"weighted\", weights = w, formulas = f, balance_thresh = c(0.1, 0.2), imp_conf = \"B.1\", save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> #> No covariates remain imbalanced using cbps and short formulas. #> #> #> USER ALERT: For exposure A using the short formulas and cbps : #> The median absolute value relation between exposure and confounder is 0.03 (range = -0.02 -0.11). #> There are no imbalanced covariates. #> There are no imbalanced covariates."},{"path":"https://istallworthy.github.io/devMSMs/reference/calcBalStats.html","id":null,"dir":"Reference","previous_headings":"","what":"Calculate balance stats based on Jackson paper — calcBalStats","title":"Calculate balance stats based on Jackson paper — calcBalStats","text":"Calculate weighted unweighted standardized balance statistics given exposure time point, using relevant confounders. 
Draws Jackson, 2016 approaches assessing balance time-varying exposures weighting statistics based sample distribution exposure histories.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/calcBalStats.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Calculate balance stats based on Jackson paper — calcBalStats","text":"","code":"calcBalStats( data, formulas, exposure, exposure_time_pts, outcome, balance_thresh, k = 0, weights = NULL, imp_conf = NULL, home_dir = NULL, verbose = TRUE, save.out = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/calcBalStats.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Calculate balance stats based on Jackson paper — calcBalStats","text":"data data wide format : data frame, path folder imputed .csv files, mids object formulas list balancing formulas time point output createFormulas() exposure name exposure variable exposure_time_pts list integers weights created/assessed correspond time points exposure wass measured outcome name outcome variable \".timepoint\" suffix balance_thresh (optional) one two numbers 0 1 indicating single balancingn threshold thresholds less important confounders, respectively k (optional) imputation number weights (optional) list IPTW weights output createWeights imp_conf (optional) list variable names reflecting important confounders (required two balance thresholds provided) home_dir (optional) path home directory (required save.= TRUE) verbose (optional) TRUE FALSE indicator user output (default TRUE) save.(optional) TRUE FALSE indicator save output intermediary output locally","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/calcBalStats.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Calculate balance stats based on Jackson paper — calcBalStats","text":"data frame balance statistics","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/calcBalStats.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Calculate balance stats based on Jackson paper — calcBalStats","text":"","code":"test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.09 (SD = 0.7; range = 0.11-4). 
#> c <- calcBalStats(data = test, formulas = f, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", balance_thresh = 0.1, save.out = FALSE) #> As shown below, 3 out of 9 (33%) covariates across time points, corresponding to 3 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.16 (range= 0.16-0.27): #> #> Table: Imbalanced covariates using no weights and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 3| 0| 3| #> | 3| 2| 3| 5| #> #> c <- calcBalStats(data = test, formulas = f, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", balance_thresh = c(0.05, 0.1), imp_conf = \"B2\", save.out = FALSE) #> As shown below, 3 out of 9 (33%) covariates across time points, corresponding to 3 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.16 (range= 0.16-0.27): #> #> Table: Imbalanced covariates using no weights and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 3| 0| 3| #> | 3| 2| 3| 5| #> #> c <- calcBalStats(data = test, formulas = f, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", balance_thresh = 0.1, weights = w[[1]], save.out = FALSE) #> As shown below, 2 out of 9 (22%) covariates across time points, corresponding to 2 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.12 (range= -0.11-0.13): #> #> Table: Imbalanced covariates using cbps and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 2| 1| 3| #> | 3| 4| 1| 5| #> #>"},{"path":"https://istallworthy.github.io/devMSMs/reference/compareHistories.html","id":null,"dir":"Reference","previous_headings":"","what":"Estimate, compare, and visualize exposure histories — compareHistories","title":"Estimate, compare, and visualize exposure histories — compareHistories","text":"Takes fitted model output created predicted values user-specified histories (pooling imputed data), conducting contrast comparisons (pooling imputed data), correcting multiple comparisons, plotting results.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/compareHistories.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Estimate, compare, and visualize exposure histories — compareHistories","text":"","code":"compareHistories( home_dir, exposure, exposure_time_pts, outcome, model, epochs = NULL, hi_lo_cut = NULL, reference = NULL, comparison = NULL, mc_comp_method = NA, dose_level = NA, exp_lab = NA, out_lab = NA, colors = NULL, verbose = TRUE, save.out = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/compareHistories.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Estimate, compare, and visualize exposure histories — compareHistories","text":"home_dir path home directory (required 'save.' 
= TRUE) exposure name exposure variable exposure_time_pts list integers weights created/assessed correspond time points exposure was measured outcome name outcome variable \".timepoint\" suffix model list model outputs fitModel() epochs (optional) data frame exposure epoch labels values hi_lo_cut (optional) list two numbers indicating quantile values reflect high low values, respectively, continuous exposure (default median split) reference (optional) list one strings \"-\"-separated \"l\" \"h\" values indicative reference exposure history compare comparison, required comparison supplied comparison (optional) list one strings \"-\"-separated \"l\" \"h\" values indicative comparison history/histories compare reference, required reference supplied mc_comp_method (optional) character abbreviation multiple comparison correction method stats::p.adjust, default Benjamini-Hochberg (\"BH\") dose_level (optional) \"l\" \"h\" indicating whether low high doses tallied tables plots (default high \"h\") exp_lab (optional) character label exposure variable plots (default variable name) out_lab (optional) character label outcome variable plots (default variable name) colors (optional) character specifying Brewer palette list colors (n(epochs)+1) plotting (default \"Dark2\" palette) verbose (optional) TRUE FALSE indicator user output (default TRUE) save.out (optional) TRUE FALSE indicator save output intermediary output locally (default TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/compareHistories.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Estimate, compare, and visualize exposure histories — compareHistories","text":"data frame history comparisons","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/reference/compareHistories.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Estimate, compare, and visualize exposure histories — compareHistories","text":"","code":"f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.12 (SD = 0.84; range = 0.23-5). #> m <- fitModel(data = test, weights = w, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = \"m0\", save.out = FALSE) #> Please inspect the following likelihood ratio test to determine if the exposures collectively predict significant variation in the outcome compared to a model without exposure terms.
#> #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. #> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 0.9440798 p= 0.7907 #> (scale factors: 1.5 1.1 0.43 ); denominator df= 46 #> #> The marginal model, m0, is summarized below: r <- compareHistories(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = m, save.out = FALSE) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 50 (100%) individuals that fall into 8 out of the 8 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 6| #> |h-h-l | 5| #> |h-l-h | 7| #> |h-l-l | 7| #> |l-h-h | 6| #> |l-h-l | 8| #> |l-l-h | 6| #> |l-l-l | 5| #> #> #> Below are the average predictions by user-specified history: #> | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| #> | 0.1484| -0.2923| -0.0858| -0.2479| 0.1614| -1.5360| 0.1245| 3.0054| -0.5642| 0.0684|l-l-l | 0| #> | 0.1484| -0.2923| -0.0838| -0.2480| 0.1613| -1.5373| 0.1242| 3.0090| -0.5642| 0.0682|l-l-h | 1| #> | 0.1484| -0.2903| -0.0858| -0.2476| 0.1614| -1.5337| 0.1251| 2.9988| -0.5640| 0.0688|l-h-l | 1| #> | 0.1484| -0.2903| -0.0838| -0.2477| 0.1614| -1.5349| 0.1248| 3.0023| -0.5640| 0.0686|l-h-h | 2| #> | 0.1504| -0.2923| -0.0858| -0.2480| 0.1614| -1.5366| 0.1244| 3.0069| -0.5644| 0.0683|h-l-l | 1| #> | 0.1504| -0.2923| -0.0838| -0.2481| 0.1614| -1.5378| 0.1241| 3.0105| -0.5644| 0.0681|h-l-h | 2| #> | 0.1504| -0.2903| -0.0858| -0.2477| 0.1615| -1.5342| 0.1250| 3.0003| -0.5642| 0.0687|h-h-l | 2| #> | 0.1504| -0.2903| -0.0838| -0.2478| 0.1614| -1.5355| 0.1247| 3.0039| -0.5642| 0.0685|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. 
#> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| #> |:-------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| #> |(0.1484, -0.2923, -0.0858) - (0.1484, -0.2923, -0.0838) | 0| 0| 0.23| 0.81| 0.30| 0| 0|l-l-l vs l-l-h |0 vs 1 | 0.91| #> |(0.1484, -0.2923, -0.0858) - (0.1484, -0.2903, -0.0858) | 0| 0| -1.05| 0.29| 1.77| 0| 0|l-l-l vs l-h-l |0 vs 1 | 0.91| #> |(0.1484, -0.2923, -0.0858) - (0.1484, -0.2903, -0.0838) | 0| 0| -0.39| 0.70| 0.52| 0| 0|l-l-l vs l-h-h |0 vs 2 | 0.91| #> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2923, -0.0858) | 0| 0| 0.37| 0.71| 0.50| 0| 0|l-l-l vs h-l-l |0 vs 1 | 0.91| #> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2923, -0.0838) | 0| 0| 0.40| 0.69| 0.53| 0| 0|l-l-l vs h-l-h |0 vs 2 | 0.91| #> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0858) | 0| 0| -0.30| 0.76| 0.39| 0| 0|l-l-l vs h-h-l |0 vs 2 | 0.91| #> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| -0.09| 0.93| 0.11| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.94| #> |(0.1484, -0.2923, -0.0838) - (0.1484, -0.2903, -0.0858) | 0| 0| -0.82| 0.41| 1.27| 0| 0|l-l-h vs l-h-l |1 vs 1 | 0.91| #> |(0.1484, -0.2923, -0.0838) - (0.1484, -0.2903, -0.0838) | 0| 0| -1.05| 0.29| 1.77| 0| 0|l-l-h vs l-h-h |1 vs 2 | 0.91| #> |(0.1484, -0.2923, -0.0838) - (0.1504, -0.2923, -0.0858) | 0| 0| 0.08| 0.94| 0.09| 0| 0|l-l-h vs h-l-l |1 vs 1 | 0.94| #> |(0.1484, -0.2923, -0.0838) - (0.1504, -0.2923, -0.0838) | 0| 0| 0.37| 0.71| 0.50| 0| 0|l-l-h vs h-l-h |1 vs 2 | 0.91| #> |(0.1484, -0.2923, -0.0838) - (0.1504, -0.2903, -0.0858) | 0| 0| -0.41| 0.68| 0.55| 0| 0|l-l-h vs h-h-l |1 vs 2 | 0.91| #> |(0.1484, -0.2923, -0.0838) - (0.1504, -0.2903, -0.0838) | 0| 0| -0.30| 0.76| 0.39| 0| 0|l-l-h vs h-h-h |1 vs 3 | 0.91| #> |(0.1484, -0.2903, -0.0858) - (0.1484, -0.2903, -0.0838) | 0| 0| 0.23| 0.81| 0.30| 0| 0|l-h-l vs l-h-h |1 vs 2 | 0.91| #> |(0.1484, -0.2903, -0.0858) - (0.1504, -0.2923, -0.0858) | 0| 0| 1.16| 0.24| 2.03| 0| 0|l-h-l vs h-l-l |1 vs 1 | 0.91| #> |(0.1484, -0.2903, -0.0858) - (0.1504, -0.2923, -0.0838) | 0| 0| 0.92| 0.36| 1.48| 0| 0|l-h-l vs h-l-h |1 vs 2 | 0.91| #> |(0.1484, -0.2903, -0.0858) - (0.1504, -0.2903, -0.0858) | 0| 0| 0.37| 0.71| 0.50| 0| 0|l-h-l vs h-h-l |1 vs 2 | 0.91| #> |(0.1484, -0.2903, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.40| 0.69| 0.53| 0| 0|l-h-l vs h-h-h |1 vs 3 | 0.91| #> |(0.1484, -0.2903, -0.0838) - (0.1504, -0.2923, -0.0858) | 0| 0| 0.63| 0.53| 0.93| 0| 0|l-h-h vs h-l-l |2 vs 1 | 0.91| #> |(0.1484, -0.2903, -0.0838) - (0.1504, -0.2923, -0.0838) | 0| 0| 1.16| 0.24| 2.03| 0| 0|l-h-h vs h-l-h |2 vs 2 | 0.91| #> |(0.1484, -0.2903, -0.0838) - (0.1504, -0.2903, -0.0858) | 0| 0| 0.08| 0.94| 0.09| 0| 0|l-h-h vs h-h-l |2 vs 2 | 0.94| #> |(0.1484, -0.2903, -0.0838) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.37| 0.71| 0.50| 0| 0|l-h-h vs h-h-h |2 vs 3 | 0.91| #> |(0.1504, -0.2923, -0.0858) - (0.1504, -0.2923, -0.0838) | 0| 0| 0.23| 0.81| 0.30| 0| 0|h-l-l vs h-l-h |1 vs 2 | 0.91| #> |(0.1504, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0858) | 0| 0| -1.05| 0.29| 1.77| 0| 0|h-l-l vs h-h-l |1 vs 2 | 0.91| #> |(0.1504, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| -0.39| 0.70| 0.52| 0| 0|h-l-l vs h-h-h |1 vs 3 | 0.91| #> |(0.1504, -0.2923, -0.0838) - (0.1504, -0.2903, -0.0858) | 0| 0| -0.82| 0.41| 1.27| 0| 0|h-l-h vs h-h-l |2 vs 2 | 0.91| #> |(0.1504, -0.2923, 
-0.0838) - (0.1504, -0.2903, -0.0838) | 0| 0| -1.05| 0.29| 1.77| 0| 0|h-l-h vs h-h-h |2 vs 3 | 0.91| #> |(0.1504, -0.2903, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.23| 0.81| 0.30| 0| 0|h-h-l vs h-h-h |2 vs 3 | 0.91| #> #> r <- compareHistories(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = m, reference = \"l-l-l\", comparison = \"h-h-h\", save.out = FALSE) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 11 (22%) individuals that fall into 2 out of the 2 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 6| #> |l-l-l | 5| #> #> #> Below are the average predictions by user-specified history: #> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| #> |1 | 0.1484| -0.2923| -0.0858| -0.2479| 0.1614| -1.5360| 0.1245| 3.0054| -0.5642| 0.0684|l-l-l | 0| #> |8 | 0.1504| -0.2903| -0.0838| -0.2478| 0.1614| -1.5355| 0.1247| 3.0039| -0.5642| 0.0685|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| #> |:-------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| #> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.09| 0.93| 0.11| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.93| #> #> r <- compareHistories(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = m, reference = \"l-l-l\", comparison = c(\"h-h-h\", \"h-l-l\"), save.out = FALSE) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 18 (36%) individuals that fall into 3 out of the 3 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. 
#> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 6| #> |h-l-l | 7| #> |l-l-l | 5| #> #> #> Below are the average predictions by user-specified history: #> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| #> |1 | 0.1484| -0.2923| -0.0858| -0.2479| 0.1614| -1.5360| 0.1245| 3.0054| -0.5642| 0.0684|l-l-l | 0| #> |5 | 0.1504| -0.2923| -0.0858| -0.2480| 0.1614| -1.5366| 0.1244| 3.0069| -0.5644| 0.0683|h-l-l | 1| #> |8 | 0.1504| -0.2903| -0.0838| -0.2478| 0.1614| -1.5355| 0.1247| 3.0039| -0.5642| 0.0685|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. #> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history |dose | p.value_corr| #> |:-------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:--------------|:------|------------:| #> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.09| 0.93| 0.11| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.93| #> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2923, -0.0858) | 0| 0| -0.37| 0.71| 0.50| 0| 0|l-l-l vs h-l-l |0 vs 1 | 0.93| #> #> r <- compareHistories(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = m, reference = c(\"l-l-l\", \"l-h-h\"), comparison = c(\"h-h-h\"), save.out = FALSE) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 17 (34%) individuals that fall into 3 out of the 3 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 6| #> |l-h-h | 6| #> |l-l-l | 5| #> #> #> Below are the average predictions by user-specified history: #> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| #> |1 | 0.1484| -0.2923| -0.0858| -0.2479| 0.1614| -1.5360| 0.1245| 3.0054| -0.5642| 0.0684|l-l-l | 0| #> |4 | 0.1484| -0.2903| -0.0838| -0.2477| 0.1614| -1.5349| 0.1248| 3.0023| -0.5640| 0.0686|l-h-h | 2| #> |8 | 0.1504| -0.2903| -0.0838| -0.2478| 0.1614| -1.5355| 0.1247| 3.0039| -0.5642| 0.0685|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. 
#> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|term.1 | estimate.1| std.error.1| statistic.1| p.value.1| s.value.1| conf.low.1| conf.high.1|history |dose | p.value_corr| #> |:-------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-----------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-----------:|---------:|---------:|----------:|-----------:|:--------------|:------|------------:| #> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.09| 0.93| 0.11| 0| 0|(0.148357910991221,-0.290250944106764,-0.0837970022925556) - (0.150357910991221,-0.290250944106764,-0.0837970022925556) | 0| 0| -0.37| 0.71| 0.5| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.93| #> #> r <- compareHistories(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = m, reference = c(\"l-l-l\", \"l-h-h\"), comparison = c(\"h-h-h\", \"l-l-h\"), save.out = FALSE) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 23 (46%) individuals that fall into 4 out of the 4 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 6| #> |l-h-h | 6| #> |l-l-h | 6| #> |l-l-l | 5| #> #> #> Below are the average predictions by user-specified history: #> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| #> |1 | 0.1484| -0.2923| -0.0858| -0.2479| 0.1614| -1.5360| 0.1245| 3.0054| -0.5642| 0.0684|l-l-l | 0| #> |2 | 0.1484| -0.2923| -0.0838| -0.2480| 0.1613| -1.5373| 0.1242| 3.0090| -0.5642| 0.0682|l-l-h | 1| #> |4 | 0.1484| -0.2903| -0.0838| -0.2477| 0.1614| -1.5349| 0.1248| 3.0023| -0.5640| 0.0686|l-h-h | 2| #> |8 | 0.1504| -0.2903| -0.0838| -0.2478| 0.1614| -1.5355| 0.1247| 3.0039| -0.5642| 0.0685|h-h-h | 3| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. 
#> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|term.1 | estimate.1| std.error.1| statistic.1| p.value.1| s.value.1| conf.low.1| conf.high.1|history |dose | p.value_corr| #> |:-------------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-----------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-----------:|---------:|---------:|----------:|-----------:|:--------------|:------|------------:| #> |(0.1484, -0.2923, -0.0858) - (0.1504, -0.2903, -0.0838) | 0| 0| 0.09| 0.93| 0.11| 0| 0|(0.148357910991221,-0.290250944106764,-0.0837970022925556) - (0.150357910991221,-0.290250944106764,-0.0837970022925556) | 0| 0| -0.37| 0.71| 0.50| 0| 0|l-l-l vs h-h-h |0 vs 3 | 0.93| #> |(0.1484, -0.2923, -0.0858) - (0.1484, -0.2923, -0.0838) | 0| 0| -0.23| 0.81| 0.30| 0| 0|(0.148357910991221,-0.290250944106764,-0.0837970022925556) - (0.148357910991221,-0.292250944106764,-0.0837970022925556) | 0| 0| -1.05| 0.29| 1.77| 0| 0|l-l-l vs l-l-h |0 vs 1 | 0.93| #> #> r <- compareHistories(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = m, reference = c(\"l-l-l\", \"l-h-h\"), comparison = c(\"h-h-h\", \"l-l-h\"), hi_lo_cut = c(0.60, 0.30), mc_comp_method = \"BH\", dose_level = \"l\", exp_lab = \"Hello\", out_lab = \"Goodbye\", colors = \"Set1\", save.out = FALSE) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 7 (14%) individuals that fall into 4 out of the 4 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 1| #> |l-h-h | 3| #> |l-l-h | 2| #> |l-l-l | 1| #> #> #> Below are the average predictions by user-specified history: #> | | A.1| A.2| A.3| estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|history | dose_count| #> |:--|-------:|-------:|-------:|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------|----------:| #> |1 | -0.5238| -0.7820| -0.6330| -0.2486| 0.2640| -0.9417| 0.3463| 1.5298| -0.7661| 0.2688|l-l-l | 3| #> |2 | -0.5238| -0.7820| 0.1213| -0.2855| 0.2082| -1.3711| 0.1703| 2.5535| -0.6937| 0.1226|l-l-h | 2| #> |4 | -0.5238| 0.0294| 0.1213| -0.1630| 0.1920| -0.8489| 0.3959| 1.3367| -0.5393| 0.2133|l-h-h | 1| #> |8 | 0.4388| 0.0294| 0.1213| -0.2295| 0.1933| -1.1877| 0.2349| 2.0897| -0.6083| 0.1492|h-h-h | 0| #> #> #> Conducting multiple comparison correction for all pairings between comparison histories and each refernece history using the BH method. 
#> #> #> USER ALERT: please inspect the following comparisons: #> |term | estimate| std.error| statistic| p.value| s.value| conf.low| conf.high|term.1 | estimate.1| std.error.1| statistic.1| p.value.1| s.value.1| conf.low.1| conf.high.1|history |dose | p.value_corr| #> |:-----------------------------------------------------|--------:|---------:|---------:|-------:|-------:|--------:|---------:|:-------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-----------:|---------:|---------:|----------:|-----------:|:--------------|:------|------------:| #> |(-0.5238, -0.782, -0.633) - (0.4388, 0.0294, 0.1213) | 0.02| 0.31| 0.06| 0.95| 0.07| -0.59| 0.62|(-0.523818390762229,0.0293884543900781,0.12132001986401) - (0.438836555281291,0.0293884543900781,0.12132001986401) | -0.07| 0.18| -0.37| 0.71| 0.50| -0.42| 0.28|l-l-l vs h-h-h |3 vs 0 | 0.95| #> |(-0.5238, -0.782, -0.633) - (-0.5238, -0.782, 0.1213) | -0.04| 0.16| -0.23| 0.81| 0.30| -0.35| 0.27|(-0.523818390762229,0.0293884543900781,0.12132001986401) - (-0.523818390762229,-0.781977904815324,0.12132001986401) | -0.12| 0.12| -1.05| 0.29| 1.77| -0.35| 0.11|l-l-l vs l-l-h |3 vs 2 | 0.95| #> #>"},{"path":"https://istallworthy.github.io/devMSMs/reference/createFormulas.html","id":null,"dir":"Reference","previous_headings":"","what":"Create balancing formulas — createFormulas","title":"Create balancing formulas — createFormulas","text":"Creates balancing formulas relating exposure relevant time-varying time invariant confounders exposure time point used create IPTW weights.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/createFormulas.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create balancing formulas — createFormulas","text":"","code":"createFormulas( exposure, exposure_time_pts, outcome, type, ti_confounders, tv_confounders, bal_stats = NULL, concur_conf = NULL, keep_conf = NULL, home_dir = NULL, custom = NULL, verbose = TRUE, save.out = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/createFormulas.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create balancing formulas — createFormulas","text":"exposure name exposure variable exposure_time_pts list integers weights created/assessed correspond time points exposure wass measured outcome name outcome variable \".timepoint\" suffix type type formula create 'full' (includes lagged time-varying confounders), 'short' (includes time-varying confounders t-1 lag ), 'update' (adds 'short' formulas imbalanced time-varying confounders lags great t-1) ti_confounders list time invariant confounders (least one required) tv_confounders list time-varying confounders \".timepoint\" suffix, include exposure outcome variables (least time-varying exposure variables required ) bal_stats list balance statistics assessBalance(), required 'update' type concur_conf (optional) list variable names reflecting time-varying confounders retain formulas contemporaneously (default none) keep_conf (optional) list variable names reflecting confounders always retain formulas (default depends type) home_dir path home directory (required 'save.' 
= TRUE) custom (optional) custom list formulas exposure time point (default create automatically according type) verbose (optional) TRUE FALSE indicator user output (default TRUE) save.(optional) TRUE FALSE indicator save output intermediary output locally (default TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/createFormulas.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Create balancing formulas — createFormulas","text":"list balancing formulas exposure time point","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/createFormulas.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Create balancing formulas — createFormulas","text":"","code":"#Full Formulas f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + C #> #> f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> #Short Formulas f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\"), ti_confounders = \"C\", type = \"short\", save.out = FALSE) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + C #> #> f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"short\", save.out = FALSE) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ 
A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C #> #> c <- list(\"short_form-1\" = as.formula(A.1 ~ C), \"short_form-2\" = as.formula(A.2 ~ A.1 + B.1 + C), \"short_form-3\" = as.formula(A.3 ~ A.2 + B.2 + C)) f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"short\", custom = c, save.out = FALSE) #> The user-supplied custom balancing formula for each exposure time point are below: #> A.1 ~ C #> #> A.2 ~ A.1 + B.1 + C #> #> A.3 ~ A.2 + B.2 + C #> #Update Formulas test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 0.87 (SD = 0.38; range = 0.43-2). #> b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"weighted\", weights = w, formulas = f, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using short formulas. #> #> As shown below, 2 out of 7 (29%) covariates across time points, corresponding to 2 out of 2 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.12 (range= -0.13-0.1): #> #> Table: Imbalanced covariates using cbps and short formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 0| 1| 1| #> | 2| 3| 0| 3| #> | 3| 2| 1| 3| #> #> #> USER ALERT: For exposure A using the short formulas and cbps : #> The median absolute value relation between exposure and confounder is 0.04 (range = -0.13 -0.1). #> As shown below, the following 2 covariates across time points out of 7 total (28.57%) spanning 2 domains out of 3 (66.67%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.12 (range=-0.13-0.1) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:---------|----------:|----------:|--------:| #> |1 |A | 1| 0|C | 0.1000057| 0.1| 0| #> |6 |A | 3| 2|B.2 | -0.1325060| 0.1| 0| #> f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\"), ti_confounders = \"C\", type = \"update\", bal_stats = b, save.out = FALSE) #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> The update formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For A at exposure time point 2 no time-varying confounders at additional lags were added. 
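As a rough, conceptual illustration of the avg_bal values reported in the imbalanced-covariates table above for a continuous exposure (a correlation between exposure and confounder in the weighted sample, judged against bal_thresh = 0.1), one could compute a weighted correlation by hand with base R. This is a sketch only, not the package's internal code, and it assumes the individual weights are stored in w[[1]]$weights, as in the getModel example later in this file:

# Conceptual sketch only: a weighted correlation between exposure A.3 and the
# lagged confounder B.2, in the spirit of the avg_bal column shown above.
wt <- w[[1]]$weights
cov.wt(cbind(test$A.3, test$B.2), wt = wt, cor = TRUE)$cor[1, 2]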
#> #> The update formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + C #> #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For A at exposure time point 3 no time-varying confounders at additional lags were added. #> #> The update formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + C #> #> f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"update\", bal_stats = b, save.out = FALSE) #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> The update formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For A at exposure time point 2 no time-varying confounders at additional lags were added. #> #> The update formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the updated balancing formula below that includes time-varying confounders at t-1 and those greater at further lags that remained imbalanced: #> For A at exposure time point 3 no time-varying confounders at additional lags were added. #> #> The update formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C #> #>"},{"path":"https://istallworthy.github.io/devMSMs/reference/createWeights.html","id":null,"dir":"Reference","previous_headings":"","what":"Creates IPTW balancing weights — createWeights","title":"Creates IPTW balancing weights — createWeights","text":"Creates IPTW balancing weights user-specified exposure time point using balancing formulas relate exposure time point relevant confounders.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/createWeights.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Creates IPTW balancing weights — createWeights","text":"","code":"createWeights( data, exposure, outcome, formulas, method = \"cbps\", SL.library = \"SL.glm\", criterion = NA, home_dir = NULL, read_in_from_file = FALSE, verbose = TRUE, save.out = TRUE, ... )"},{"path":"https://istallworthy.github.io/devMSMs/reference/createWeights.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Creates IPTW balancing weights — createWeights","text":"data data wide format : data frame, list imputed data frames, mids object exposure name exposure variable outcome name outcome variable \".timepoint\" suffix formulas list balancing formulas time point output createFormulas() method (optional) character string weightitMSM() balancing method abbreviation (default Covariate Balancing Propensity Score \"cbps\") SL.library required superLearner weighting method (\"super\"); see SuperLearner::listWrappers() options criterion (optional) criterion used select best weights (default \"p.mean\" minimizing avg Pearson correlation continuous exposures \"smd.mean\" binary exposures) (required \"gbm\" method) home_dir path home directory (required 'save.'
= TRUE) read_in_from_file (optional) TRUE FALSE indicator read weights previously run saved locally (default FALSE) verbose (optional) TRUE FALSE indicator user output (default TRUE) save.(optional) TRUE FALSE indicator save output intermediary output locally (default TRUE) ... inputs weightitMSM()","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/createWeights.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Creates IPTW balancing weights — createWeights","text":"list IPTW balancing weights","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/reference/createWeights.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Creates IPTW balancing weights — createWeights","text":"","code":"test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"short\", save.out = FALSE) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C #> #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.02 (SD = 0.42; range = 0.3-3). #> f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\", \"A.1:B.1\"), ti_confounders = \"C\", type = \"short\", save.out = FALSE) #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + A.1:B.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the short balancing formula below that includes time-varying confounders at t-1 only: #> The short formula for A - D.3 at A time point 3 is: #> A.3 ~ A.2 + B.2 + C #> #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.05 (SD = 0.55; range = 0.25-3). #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, method = \"cbps\", save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.05 (SD = 0.55; range = 0.25-3). 
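Beyond the printed median, SD, and range, it can help to look at the full weight distribution before moving on, since heavy right tails may warrant trimming (see trimWeights() later in this file). A minimal sketch, assuming, as in the getModel example below, that the individual weights are stored in w[[1]]$weights:

# Minimal sketch: inspect the IPTW weight distribution for extreme values.
wt <- w[[1]]$weights
summary(wt)                         # five-number summary plus mean
quantile(wt, probs = c(0.95, 0.99)) # upper tail, relevant for trimming decisions
hist(wt, main = "IPTW weights", xlab = "weight")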
#> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, method = \"cbps\", save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.05 (SD = 0.55; range = 0.25-3). #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, method = \"gbm\", save.out = FALSE) #> For the gbm weighting method, the median weight value is 0.58 (SD = 0.3; range = 0.27-2). #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, method = \"bart\", save.out = FALSE) #> For the bart weighting method, the median weight value is 0.69 (SD = 0.2; range = 0.12-1). #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, method = \"super\", save.out = FALSE) #> Loading required package: nnls #> Warning: All algorithms have zero weight #> Warning: All metalearner coefficients are zero, predictions will all be equal to 0 #> For the super weighting method, the median weight value is 0.99 (SD = 0.41; range = 0.44-2). #>"},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_comparisons.html","id":null,"dir":"Reference","previous_headings":"","what":"Creates custom comparisons — create_custom_comparisons","title":"Creates custom comparisons — create_custom_comparisons","text":"Creates custom comparisons","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_comparisons.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Creates custom comparisons — create_custom_comparisons","text":"","code":"create_custom_comparisons(preds, ref_vals, comp_vals, exposure)"},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_comparisons.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Creates custom comparisons — create_custom_comparisons","text":"preds custom output marginaleffects::average_predictions() ref_vals reference values comp_vals comparison values exposure name exposure variable","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_comparisons.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Creates custom comparisons — create_custom_comparisons","text":"custom comparisons","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_contrasts.html","id":null,"dir":"Reference","previous_headings":"","what":"Create custom contrasts — create_custom_contrasts","title":"Create custom contrasts — create_custom_contrasts","text":"Create custom contrasts","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_contrasts.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create custom contrasts — create_custom_contrasts","text":"","code":"create_custom_contrasts(d, reference, comp_histories, exposure, preds)"},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_contrasts.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create custom contrasts — create_custom_contrasts","text":"d data frame high low values per exposure main effect reference reference sequence \"h\" /\"l\" (e.g., \"h-h-h\") comp_histories comparison sequence(s) \"h\" /\"l\" (e.g., \"h-h-h\") exposure name exposure variable preds custom output 
marginaleffects::average_predictions()","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/create_custom_contrasts.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Create custom contrasts — create_custom_contrasts","text":"contrasts","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/devMSMs-package.html","id":null,"dir":"Reference","previous_headings":"","what":"devMSMs: Tools for Conducting Marginal Structural Models with Developmental Data — devMSMs-package","title":"devMSMs: Tools for Conducting Marginal Structural Models with Developmental Data — devMSMs-package","text":"Functions preparing, assessing, implementing MSMS.","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/reference/devMSMs-package.html","id":"author","dir":"Reference","previous_headings":"","what":"Author","title":"devMSMs: Tools for Conducting Marginal Structural Models with Developmental Data — devMSMs-package","text":"Maintainer: Isabella Stallworthy istall@seas.upenn.edu Authors: Noah Greifer ngreifer@iq.harvard.edu [contributor] Meriah DeJoseph meriahd@stanford.edu Emily Padrutt padru004@umn.edu Daniel Berry dberry@umn.edu","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/eval_hist.html","id":null,"dir":"Reference","previous_headings":"","what":"Visualize distribution of sample across exposure histories — eval_hist","title":"Visualize distribution of sample across exposure histories — eval_hist","text":"Create customized, user-specified exposure histories tables displaying sample distribution across user inspection.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/eval_hist.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Visualize distribution of sample across exposure histories — eval_hist","text":"","code":"eval_hist( data, exposure, time_pts, epochs = NULL, hi_lo_cut = NULL, ref = NULL, comps = NULL )"},{"path":"https://istallworthy.github.io/devMSMs/reference/eval_hist.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Visualize distribution of sample across exposure histories — eval_hist","text":"data data wide format : data frame, list imputed data frames, mids object exposure name exposure variable time_pts list integers weights created/assessed correspond time points exposure measured epochs (optional) data frame exposure epoch labels values hi_lo_cut list two numbers indicating quantile values reflect high low values, respectively, continuous exposure ref (optional) list one strings \"-\"-separated \"l\" \"h\" values indicative reference exposure history compare comparison, required comparison supplied comps (optional) list one strings \"-\"-separated \"l\" \"h\" values indicative comparison history/histories compare reference, required reference supplied","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/eval_hist.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Visualize distribution of sample across exposure histories — eval_hist","text":"none","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/eval_hist.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Visualize distribution of sample across exposure histories — eval_hist","text":"","code":"test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 
= rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) h <- eval_hist(data = test, exposure = \"A\", time_pts = c(1, 2, 3)) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 50 (100%) individuals that fall into 8 out of the 8 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 4| #> |h-h-l | 8| #> |h-l-h | 7| #> |h-l-l | 6| #> |l-h-h | 7| #> |l-h-l | 6| #> |l-l-h | 7| #> |l-l-l | 5| #> h <- eval_hist(data = test, exposure = \"A\", time_pts = c(1, 2, 3), epochs = data.frame(epochs = c(\"Infancy\", \"Toddlerhood\"), values = I(list(c(1, 2), c(3))))) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 50 (100%) individuals that fall into 4 out of the 4 total user-defined exposure histories created from median split values for low and high levels of exposure A, respectively, across Infancy, Toddlerhood. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> #> Table: Summary of user-specified exposure A histories based on exposure main effects Infancy, Toddlerhood containing time points c(1, 2), 3: #> #> |history | n| #> |:-------|--:| #> |h-h | 10| #> |h-l | 15| #> |l-h | 15| #> |l-l | 10| #> h <- eval_hist(data = test, exposure = \"A\", time_pts = c(1, 2, 3), hi_lo_cut = c(0.6, 0.3)) #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 12 (24%) individuals that fall into 6 out of the 8 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> Warning: USER ALERT: There are no individuals in your sample that fall into l-h-l & l-l-l exposure history/histories. You may wish to consider different high/low cutoffs (for continuous exposures), alternative epochs, or choose a different measure to avoid extrapolation. 
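A note on the percentile language above: hi_lo_cut = c(0.6, 0.3) treats values at or above the 60th percentile of each exposure time point as "high" and values at or below the 30th percentile as "low", so individuals falling in between are not assigned to a history, which is consistent with only 12 of the 50 individuals (24%) appearing in the output here. The implied cutoffs can be inspected directly (illustrative only):

# Illustrative only: the 30th and 60th percentile cutoffs implied by
# hi_lo_cut = c(0.6, 0.3) for the first exposure time point.
quantile(test$A.1, probs = c(0.3, 0.6))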
#> #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 2| #> |h-h-l | 2| #> |h-l-h | 1| #> |h-l-l | 2| #> |l-h-h | 2| #> |l-l-h | 3| #> h <- eval_hist(data = test, exposure = \"A\", time_pts = c(1, 2, 3), hi_lo_cut = c(0.6, 0.3), ref = \"l-l-l\", comps = \"h-h-h\") #> Summary of Exposure Main Effects: #> #> USER ALERT: Out of the total of 50 individuals in the sample, below is the distribution of the 2 (4%) individuals that fall into 1 out of the 2 the total user-defined exposure histories created from 30th and 60th percentile values for low and high levels of exposure A, respectively, across 1, 2, 3. #> USER ALERT: Please inspect the distribution of the sample across the following exposure histories and ensure there is sufficient spread to avoid extrapolation and low precision: #> Warning: USER ALERT: There are no individuals in your sample that fall into l-l-l exposure history/histories. You may wish to consider different high/low cutoffs (for continuous exposures), alternative epochs, or choose a different measure to avoid extrapolation. #> #> #> Table: Summary of user-specified exposure A histories based on exposure main effects 1, 2, 3 containing time points 1, 2, 3: #> #> |history | n| #> |:-------|--:| #> |h-h-h | 2| #>"},{"path":"https://istallworthy.github.io/devMSMs/reference/fitModel.html","id":null,"dir":"Reference","previous_headings":"","what":"Fit outcome model — fitModel","title":"Fit outcome model — fitModel","text":"Fits weighted marginal outcome model generalized linear model user's choosing, relating exposure main effects outcome using IPTW weights.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/fitModel.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fit outcome model — fitModel","text":"","code":"fitModel( data, weights, exposure, exposure_time_pts, outcome, model, family = NULL, link = NA, int_order = NA, covariates = NULL, epochs = NULL, home_dir = NULL, verbose = TRUE, save.out = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/fitModel.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fit outcome model — fitModel","text":"data data wide format : data frame, list imputed data frames, mids object weights list IPTW weights output createWeights() exposure name exposure variable exposure_time_pts list integers weights created/assessed correspond time points exposure measured outcome name outcome variable \".timepoint\" suffix model character indicating one following outcome models: \"m0\" (exposure main effects) \"m1\" (exposure main effects & covariates) \"m2\" (exposure main effects & interactions) \"m3\" (exposure main effects, interactions, & covariates) family (optional) family function specification svyglm model link (optional) character link function specification svyglm model int_order integer specification highest order exposure main effects interaction, required interaction models (\"m2\", \"m3\") covariates list characters reflecting variable names covariates, required covariate models (\"m1\", \"m3\") epochs (optional) data frame exposure epoch labels values home_dir path home directory (required 'save.' 
= TRUE) verbose (optional) TRUE FALSE indicator user output (default TRUE) save.(optional) TRUE FALSE indicator save output intermediary output locally (default TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/fitModel.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fit outcome model — fitModel","text":"list svyglm model output","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/reference/fitModel.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fit outcome model — fitModel","text":"","code":"f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 0.94 (SD = 0.28; range = 0.55-2). #> m <- fitModel(data = test, weights = w, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = \"m0\", save.out = FALSE) #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. #> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 3.967973 p= 0.27411 #> (scale factors: 1.3 1.1 0.62 ); denominator df= 46 #> #> The marginal model, m0, is summarized below: m <- fitModel(data = test, weights = w, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = \"m0\", family = gaussian, link = \"identity\", epochs = data.frame(epochs = c(\"Infancy\", \"Toddlerhood\"), values = I(list(c(1, 2), c(3)))), save.out = FALSE) #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. 
#> #> Working (Rao-Scott+F) LRT for A.Infancy A.Toddlerhood #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 3.737787 p= 0.16375 #> (scale factors: 1.2 0.82 ); denominator df= 47 #> #> The marginal model, m0, is summarized below: m <- fitModel(data = test, weights = w, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = \"m1\", covariates = \"C\", save.out = FALSE) #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. #> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 3.881045 p= 0.28405 #> (scale factors: 1.3 1.1 0.64 ); denominator df= 45 #> #> The marginal model, m1, is summarized below: m <- fitModel(data = test, weights = w, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = \"m2\", int_order = 3, save.out = FALSE) #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. #> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 A.1:A.2 A.1:A.3 A.2:A.3 A.1:A.2:A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 20.56726 p= 0.02578 #> (scale factors: 2.1 1.9 1 0.89 0.5 0.38 0.19 ); denominator df= 42 #> #> The marginal model, m2, is summarized below: m <- fitModel(data = test, weights = w, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", model = \"m3\", int_order = 3, covariates = \"C\", save.out = FALSE) #> Please inspect the following likelihood ratio test to determine if the exposures collective predict significant variation in the outcome compared to a model without exposure terms. #> #> We strongly suggest not conducting history comparisons if the likelihood ratio test is non-significant. 
#> #> Working (Rao-Scott+F) LRT for A.1 A.2 A.3 A.1:A.2 A.1:A.3 A.2:A.3 A.1:A.2:A.3 #> in svyglm(formula = as.formula(f), design = s, family = fam) #> Working 2logLR = 19.69042 p= 0.031433 #> (scale factors: 2.2 1.9 0.98 0.83 0.51 0.4 0.21 ); denominator df= 41 #> #> The marginal model, m3, is summarized below:"},{"path":"https://istallworthy.github.io/devMSMs/reference/getModel.html","id":null,"dir":"Reference","previous_headings":"","what":"Fits outcome model — getModel","title":"Fits outcome model — getModel","text":"Fits outcome model","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/getModel.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fits outcome model — getModel","text":"","code":"getModel( d, exposure, exposure_time_pts, outcome, exp_epochs, int_order, model, fam, covariates, verbose, epochs = NULL )"},{"path":"https://istallworthy.github.io/devMSMs/reference/getModel.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fits outcome model — getModel","text":"d wide data frame exposure name exposure variable exposure_time_pts list integers weights created/assessed correspond time points exposure measured outcome name outcome variable \".timepoint\" suffix exp_epochs list exposure epochs int_order integer specification highest order exposure main effects interaction interaction models model character indicating one following outcome models: \"m0\" (exposure main effects) \"m1\" (exposure main effects & covariates) \"m2\" (exposure main effects & interactions) \"m3\" (exposure main effects, interactions, & covariates) \"covs\" (covariate model) \"int\" (intercept model) fam function specification svyglm model covariates list characters reflecting variable names covariates covariate models verbose TRUE FALSE indicator user output (default TRUE) epochs data frame exposure epoch labels values","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/getModel.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fits outcome model — getModel","text":"list fitted model(s)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/getModel.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fits outcome model — getModel","text":"","code":"test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.29 (SD = 0.62; range = 0.2-3). 
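When an epochs data frame is supplied, as in the getModel example that follows, exposure main effects are defined at the epoch level (e.g., A.Infancy, A.Toddlerhood) rather than at individual time points. As a hedged sketch only, and assuming epoch-level values are summarized as the mean of their constituent time points (an assumption; consult the package documentation for the exact rule), the epoch variables could be formed like this:

# Hedged sketch: one plausible construction of epoch-level exposure values,
# assuming the mean of the time points within each epoch (not necessarily the
# package's internal rule).
test$A.Infancy     <- rowMeans(test[, c("A.1", "A.2")])
test$A.Toddlerhood <- test$A.3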
#> epochs = data.frame(epochs = c(\"Infancy\", \"Toddlerhood\"), values = I(list(c(1, 2), c(3)))) e <- apply(expand.grid(\"A\", as.character(epochs[, 1])), 1, paste, sep = \"\", collapse = \".\") test$weights <- w[[1]]$weights g <- getModel(d = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", epochs = epochs, exp_epochs = e, fam = gaussian, model = \"m0\")"},{"path":"https://istallworthy.github.io/devMSMs/reference/get_comparison_values.html","id":null,"dir":"Reference","previous_headings":"","what":"Finds custom comparison values — get_comparison_values","title":"Finds custom comparison values — get_comparison_values","text":"Finds custom comparison values","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/get_comparison_values.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Finds custom comparison values — get_comparison_values","text":"","code":"get_comparison_values(d, comp_histories)"},{"path":"https://istallworthy.github.io/devMSMs/reference/get_comparison_values.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Finds custom comparison values — get_comparison_values","text":"d data frame high low values per exposure main effect comp_histories comparison sequence(s) \"h\" /\"l\" (e.g., \"h-h-h\")","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/get_comparison_values.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Finds custom comparison values — get_comparison_values","text":"comparison values","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/get_comparison_values.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Finds custom comparison values — get_comparison_values","text":"","code":"d <- data.frame(e = c(\"A.1\", \"A.2\", \"A.3\"), l = c(0, 0, 0), h = c(1, 1, 1)) r <- get_comparison_values(d = d, comp_histories = \"l-l-l\" ) r <- get_comparison_values(d = d, comp_histories = \"h-h-h\" ) r <- get_comparison_values(d = d, comp_histories = c(\"h-h-h\", \"h-h-l\"))"},{"path":"https://istallworthy.github.io/devMSMs/reference/get_reference_values.html","id":null,"dir":"Reference","previous_headings":"","what":"Finds custom reference values — get_reference_values","title":"Finds custom reference values — get_reference_values","text":"Finds custom reference values","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/get_reference_values.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Finds custom reference values — get_reference_values","text":"","code":"get_reference_values(d, reference)"},{"path":"https://istallworthy.github.io/devMSMs/reference/get_reference_values.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Finds custom reference values — get_reference_values","text":"d data frame high low values per exposure main effect reference reference sequence \"h\" /\"l\" (e.g., \"h-h-h\")","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/get_reference_values.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Finds custom reference values — get_reference_values","text":"reference values","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/get_reference_values.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Finds custom reference values 
— get_reference_values","text":"","code":"d <- data.frame(e = c(\"A.1\", \"A.2\", \"A.3\"), l = c(0, 0, 0), h = c(1, 1, 1)) r <- get_reference_values(d = d, reference = \"l-l-l\" ) r <- get_reference_values(d = d, reference = \"h-h-h\" )"},{"path":"https://istallworthy.github.io/devMSMs/reference/make_love_plot.html","id":null,"dir":"Reference","previous_headings":"","what":"Create love plots showing balancing statistics — make_love_plot","title":"Create love plots showing balancing statistics — make_love_plot","text":"Create love plots showing balancing statistics","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/make_love_plot.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create love plots showing balancing statistics — make_love_plot","text":"","code":"make_love_plot( balance_stats, exposure, exposure_time_pt, exposure_type, k = 0, form_name, data_type, balance_thresh, weights_method, imp_conf, save.out = FALSE, home_dir = NULL, folder = NULL, verbose = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/make_love_plot.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create love plots showing balancing statistics — make_love_plot","text":"balance_stats data frame balance statistics exposure name exposure variable exposure_time_pt exposure time point integer exposure_type character indicating binary continuous exposure type k imputation number form_name formula name data_type single imputed data type balance_thresh one two numbers 0 1 indicating single balancing threshold thresholds less important confounders, respectively weights_method method character string WeightItMSM() balancing method abbreviation imp_conf list variable names reflecting important confounders save.TRUE FALSE indicator save output intermediary output locally home_dir path home directory (required save.= TRUE) folder folder path saving verbose (optional) TRUE FALSE indicator user output (default TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/make_love_plot.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Create love plots showing balancing statistics — make_love_plot","text":"none","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/make_love_plot.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Create love plots showing balancing statistics — make_love_plot","text":"","code":"f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) test[, c(\"A.1\", \"A.2\", \"A.3\")] <- lapply(test[, c(\"A.1\", \"A.2\", \"A.3\")], as.numeric) w <- createWeights(data = test, 
exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 1.01 (SD = 0.68; range = 0.23-4). #> b <- assessBalance(data = test, exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", type = \"weighted\", weights = w, formulas = f, save.out = FALSE) #> USER ALERT: The following statistics display covariate imbalance at each exposure time point following IPTW weighting, using full formulas. #> #> As shown below, 3 out of 9 (33%) covariates across time points, corresponding to 2 out of 3 domains, remain imbalanced with a remaining median absolute value correlation/std mean difference of 0.11 (range= -0.12-0.11): #> #> Table: Imbalanced covariates using cbps and full formulas #> #> | exp_time| balanced_n| imbalanced_n| n| #> |--------:|----------:|------------:|--:| #> | 1| 1| 0| 1| #> | 2| 3| 0| 3| #> | 3| 2| 3| 5| #> #> #> USER ALERT: For exposure A using the full formulas and cbps : #> The median absolute value relation between exposure and confounder is 0.04 (range = -0.12 -0.11). #> As shown below, the following 3 covariates across time points out of 9 total (33.33%) spanning 2 domains out of 3 (66.67%) are imbalanced with a remaining median absolute value correlation/std mean difference in relation to A of 0.11 (range=-0.12-0.11) : #> #> #> Table: Imbalanced Covariates #> #> | |exposure | exp_time| covar_time|covariate | avg_bal| bal_thresh| balanced| #> |:--|:--------|--------:|----------:|:---------|----------:|----------:|--------:| #> |6 |A | 3| 2|A.2 | 0.1094876| 0.1| 0| #> |7 |A | 3| 1|B.1 | -0.1090365| 0.1| 0| #> |8 |A | 3| 2|B.2 | -0.1163024| 0.1| 0| #> make_love_plot(balance_stats = b, exposure = \"A\", exposure_time_pt = 1, exposure_type = \"continuous\", form_name = \"form_name\", data_type = \"single\", balance_thresh = 0.1, imp_conf = NULL, weights_method = w[[1]]$method, save.out = FALSE, folder = \"prebalance/\") make_love_plot(balance_stats = b, exposure = \"A\", exposure_time_pt = 2, exposure_type = \"continuous\", form_name = \"form_name\", data_type = \"single\", balance_thresh = c(0.05, 0.1), imp_conf = \"A.2\", weights_method = w[[1]]$method, save.out = FALSE, folder = \"weighted/\")"},{"path":"https://istallworthy.github.io/devMSMs/reference/perform_multiple_comparison_correction.html","id":null,"dir":"Reference","previous_headings":"","what":"Conduct multiple comparison correction — perform_multiple_comparison_correction","title":"Conduct multiple comparison correction — perform_multiple_comparison_correction","text":"Conduct multiple comparison correction","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/perform_multiple_comparison_correction.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Conduct multiple comparison correction — perform_multiple_comparison_correction","text":"","code":"perform_multiple_comparison_correction( comps, reference, comp_histories, method, verbose = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/perform_multiple_comparison_correction.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Conduct multiple comparison correction — perform_multiple_comparison_correction","text":"comps table reference reference sequence \"h\" /\"l\" (e.g., \"h-h-h\") comp_histories comparison sequence(s) \"h\" /\"l\" (e.g., \"h-h-h\") method character abbreviation multiple comparison correction method verbose (optional) TRUE FALSE indicator user output 
(default TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/perform_multiple_comparison_correction.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Conduct multiple comparison correction — perform_multiple_comparison_correction","text":"comparison table corrected p-values","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_imp_list.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Wide data imputed and read in (continuous exposure) — sim_data_imp_list.rda","title":"Wide data imputed and read in (continuous exposure) — sim_data_imp_list.rda","text":"data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). MAR missingness added using missMethods package imputing mice package reading imputed dataset. data contain economic strain (ESEATA1) continuously distributed variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_imp_list.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Wide data imputed and read in (continuous exposure) — sim_data_imp_list.rda","text":"","code":"sim_data_imp_list"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_imp_list.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Wide data imputed and read in (continuous exposure) — sim_data_imp_list.rda","text":"list data frames","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_long_miss.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Long data with missingness (continuous exposure) — sim_data_long_miss.rda","title":"Long data with missingness (continuous exposure) — sim_data_long_miss.rda","text":"data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). MAR missingness added using missMethods package. data contain economic strain (ESEATA1) continuously distributed variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_long_miss.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Long data with missingness (continuous exposure) — sim_data_long_miss.rda","text":"","code":"sim_data_long_miss"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_long_miss.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Long data with missingness (continuous exposure) — sim_data_long_miss.rda","text":"data frame","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_long_miss_bin.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Long data with missingness (binary exposure) — sim_data_long_miss_bin.rda","title":"Long data with missingness (binary exposure) — sim_data_long_miss_bin.rda","text":"data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). 
MAR missingness added using missMethods package. data contain economic strain (ESEATA1) binary variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_long_miss_bin.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Long data with missingness (binary exposure) — sim_data_long_miss_bin.rda","text":"","code":"sim_data_long_miss_bin"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_long_miss_bin.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Long data with missingness (binary exposure) — sim_data_long_miss_bin.rda","text":"data frame","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_mice.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Wide data imputed with mice (continuous exposure) — sim_data_mice.rda","title":"Wide data imputed with mice (continuous exposure) — sim_data_mice.rda","text":"data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). MAR missingness added using missMethods package imputing mice package. data contain economic strain (ESEATA1) continuously distributed variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_mice.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Wide data imputed with mice (continuous exposure) — sim_data_mice.rda","text":"","code":"sim_data_mice"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_mice.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Wide data imputed with mice (continuous exposure) — sim_data_mice.rda","text":"mice object","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Wide complete data (continuous exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a continuously\ndistributed variable. — sim_data_wide.rda","title":"Wide complete data (continuous exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a continuously\ndistributed variable. — sim_data_wide.rda","text":"Wide complete data (continuous exposure) data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). 
data contain economic strain (ESEATA1) continuously distributed variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Wide complete data (continuous exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a continuously\ndistributed variable. — sim_data_wide.rda","text":"","code":"sim_data_wide"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Wide complete data (continuous exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a continuously\ndistributed variable. — sim_data_wide.rda","text":"wide data frame 1,292 observations 69 measured variables. \"ESETA1\" continuous exposure economic strain \"StrDif_Tot.58\" continuous outcome behavioral problems \"InRatioCor\" income--needs ratio \"PmEd2\" parent's education level \"state\" family's state residence \"TcBlac2\" family's race (1 = x, 0 = y) \"bioDadInHH2\" whether biological father lives family (insert coding) \"HomeOwnd\" indicator whether family owns home (insert coding) \"KFASTScr\" \"PmBlac2\" primary caregiver race (insert coding) \"SmokTotl\" \"caregiv_health\" \"gov_assist\" \"ALI_LE\" \"B18Raw\" \"CORTB\" \"EARS_TJo\" \"fscore\" \"HOMEETA1\" \"IBRAttn\" \"LESMnNeg\" \"MDI\" \"RHAsSO\" \"SAAmylase\" \"WndNbrhood\"","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide.rda.html","id":"references","dir":"Reference","previous_headings":"","what":"References","title":"Wide complete data (continuous exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a continuously\ndistributed variable. — sim_data_wide.rda","text":"Vernon-Feagans, L., Cox, M., Willoughby, M., Burchinal, M., Garrett-Peters, P., Mills-Koonce, R., Garrett-Peiers, P., Conger, R. D., & Bauer, P. J. (2013). Family Life Project: Epidemiological Developmental Study Young Children Living Poor Rural Communities. Monographs Society Research Child Development, 78(5), –150. Burchinal, M., Howes, C., Pianta, R., Bryant, D., Early, D., Clifford, R., & Barbarin, O. (2008). Predicting Child Outcomes End Kindergarten Quality Pre-Kindergarten Teacher–Child Interactions Instruction. Applied Developmental Science, 12(3), 140–153.
https://doi.org/10.1080/10888690802199418","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_bin.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Wide complete data (binary exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a binary variable. — sim_data_wide_bin.rda","title":"Wide complete data (binary exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a binary variable. — sim_data_wide_bin.rda","text":"Wide complete data (binary exposure) data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). data contain economic strain (ESEATA1) binary variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_bin.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Wide complete data (binary exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a binary variable. — sim_data_wide_bin.rda","text":"","code":"sim_data_wide_bin"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_bin.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Wide complete data (binary exposure)\nThese data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families\nrepresentative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural\nchild poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). These data contain economic strain (ESEATA1) as a binary variable. — sim_data_wide_bin.rda","text":"data frame","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_miss.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Wide data with missingness (continuous exposure) — sim_data_wide_miss.rda","title":"Wide data with missingness (continuous exposure) — sim_data_wide_miss.rda","text":"data simulated based data Family Life Project (FLP), longitudinal study following 1,292 families representative two geographic areas (three counties North Carolina three counties Pennsylvania) high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). MAR missingness added using missMethods package. 
These data contain economic strain (ESETA1) as a continuously distributed variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_miss.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Wide data with missingness (continuous exposure) — sim_data_wide_miss.rda","text":"","code":"sim_data_wide_miss"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_miss.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Wide data with missingness (continuous exposure) — sim_data_wide_miss.rda","text":"A data frame","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_miss_bin.rda.html","id":null,"dir":"Reference","previous_headings":"","what":"Wide data with missingness (binary exposure) — sim_data_wide_miss_bin.rda","title":"Wide data with missingness (binary exposure) — sim_data_wide_miss_bin.rda","text":"These data are simulated based on data from the Family Life Project (FLP), a longitudinal study following 1,292 families representative of two geographic areas (three counties in North Carolina and three counties in Pennsylvania) with high rural child poverty (Vernon-Feagans et al., 2013; Burchinal et al., 2008). MAR missingness was added using the missMethods package. These data contain economic strain (ESETA1) as a binary variable.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_miss_bin.rda.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Wide data with missingness (binary exposure) — sim_data_wide_miss_bin.rda","text":"","code":"sim_data_wide_miss_bin"},{"path":"https://istallworthy.github.io/devMSMs/reference/sim_data_wide_miss_bin.rda.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Wide data with missingness (binary exposure) — sim_data_wide_miss_bin.rda","text":"A data frame","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/trimWeights.html","id":null,"dir":"Reference","previous_headings":"","what":"Trim IPTW balancing weights — trimWeights","title":"Trim IPTW balancing weights — trimWeights","text":"Trims IPTW balancing weights with heavy right tails by populating all weight values above a given quantile with the weight value at that quantile.","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/trimWeights.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Trim IPTW balancing weights — trimWeights","text":"","code":"trimWeights( exposure, outcome, weights, quantile = NA, home_dir = NULL, verbose = TRUE, save.out = TRUE )"},{"path":"https://istallworthy.github.io/devMSMs/reference/trimWeights.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Trim IPTW balancing weights — trimWeights","text":"exposure name of exposure variable outcome name of outcome variable with ".timepoint" suffix weights list of IPTW weights output from createWeights() quantile (optional) numeric value between 0 and 1 of the quantile value at which to trim weights (default is 0.95) home_dir path to home directory (required if save.out = TRUE) verbose (optional) TRUE or FALSE indicator for user output (default is TRUE) save.out (optional) TRUE or FALSE indicator to save output and intermediary output locally (default is TRUE)","code":""},{"path":"https://istallworthy.github.io/devMSMs/reference/trimWeights.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Trim IPTW balancing weights — trimWeights","text":"A list of model output with the trimmed 
weights","code":""},{"path":[]},{"path":"https://istallworthy.github.io/devMSMs/reference/trimWeights.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Trim IPTW balancing weights — trimWeights","text":"","code":"f <- createFormulas(exposure = \"A\", exposure_time_pts = c(1, 2, 3), outcome = \"D.3\", tv_confounders = c(\"A.1\", \"A.2\", \"A.3\", \"B.1\", \"B.2\", \"B.3\"), ti_confounders = \"C\", type = \"full\", save.out = FALSE) #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 1 is: #> A.1 ~ C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 2 is: #> A.2 ~ A.1 + B.1 + C #> #> #> USER ALERT: Please manually inspect the full balancing formula below: #> The full formula for A - D.3 at A time point 3 is: #> A.3 ~ A.1 + A.2 + B.1 + B.2 + C #> #> test <- data.frame(ID = 1:50, A.1 = rnorm(n = 50), A.2 = rnorm(n = 50), A.3 = rnorm(n = 50), B.1 = rnorm(n = 50), B.2 = rnorm(n = 50), B.3 = rnorm(n = 50), C = rnorm(n = 50), D.3 = rnorm(n = 50)) w <- createWeights(data = test, exposure = \"A\", outcome = \"D.3\", formulas = f, save.out = FALSE) #> For the cbps weighting method, the median weight value is 0.99 (SD = 0.43; range = 0.46-3). #> t <- trimWeights(exposure = \"A\", outcome = \"D.3\", weights = w, save.out = FALSE) #> Trimming weights to 95%. #> #> For the A-D.3 relation, following trimming at the 0.95 quantile, the median weight value is 0.99 (SD= 0.36; range= 0.46-2). #> t <- trimWeights(exposure = \"A\", outcome = \"D.3\", weights = w, quantile = 0.75, save.out = FALSE) #> Trimming weights to 75%. #> #> For the A-D.3 relation, following trimming at the 0.75 quantile, the median weight value is 0.99 (SD= 0.22; range= 0.46-1). #>"}]