
Functions used in the definition of smooth terms within mvgam model formulae. These functions do not evaluate a (spline) smooth themselves; they exist purely to help set up mvgam models using spline-based smooths.

Usage

ti(...)

te(...)

Arguments

...

Arguments passed to mgcv::ti or mgcv::te

Details

The functions defined here are simply thin wrappers around the corresponding functions in the mgcv package. Any arguments you supply are passed through unchanged, so the smooth is set up exactly as mgcv would set it up. When using them, please cite the appropriate references obtained via citation("mgcv").
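As a brief, hedged sketch of this pass-through behaviour (the data object dat and the particular k and bs choices are illustrative assumptions, not defaults; x0 and x1 mirror the simulated predictors used in the examples below), arguments given to te() or ti() inside an mvgam formula are handed directly to the corresponding mgcv constructor:

# Sketch only: 'dat' stands in for any data frame containing y, x0 and x1;
# the k and bs values are illustrative choices passed through to mgcv::te()
mod <- mvgam(
  y ~ te(x0, x1,
         k = c(5, 5),          # marginal basis dimensions, handled by mgcv::te
         bs = c("tp", "cr")),  # marginal basis types, handled by mgcv::te
  data = dat,
  family = gaussian(),
  chains = 2
)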

See also

Examples

# \donttest{
# Simulate some data
dat <- mgcv::gamSim(1, n = 200, scale = 2)
#> Gu & Wahba 4 term additive model

# Fit univariate smooths for all predictors
fit1 <- mvgam(y ~ s(x0) + s(x1) + s(x2) + s(x3),
              data = dat, chains = 2, family = gaussian())
#> Compiling Stan program using cmdstanr
#> 
#> Start sampling
#> Running MCMC with 2 parallel chains...
#> 
#> Chain 1 Iteration:   1 / 1000 [  0%]  (Warmup) 
#> Chain 2 Iteration:   1 / 1000 [  0%]  (Warmup) 
#> Chain 2 Iteration: 100 / 1000 [ 10%]  (Warmup) 
#> Chain 1 Iteration: 100 / 1000 [ 10%]  (Warmup) 
#> Chain 1 Iteration: 200 / 1000 [ 20%]  (Warmup) 
#> Chain 2 Iteration: 200 / 1000 [ 20%]  (Warmup) 
#> Chain 1 Iteration: 300 / 1000 [ 30%]  (Warmup) 
#> Chain 2 Iteration: 300 / 1000 [ 30%]  (Warmup) 
#> Chain 1 Iteration: 400 / 1000 [ 40%]  (Warmup) 
#> Chain 2 Iteration: 400 / 1000 [ 40%]  (Warmup) 
#> Chain 1 Iteration: 500 / 1000 [ 50%]  (Warmup) 
#> Chain 1 Iteration: 501 / 1000 [ 50%]  (Sampling) 
#> Chain 2 Iteration: 500 / 1000 [ 50%]  (Warmup) 
#> Chain 2 Iteration: 501 / 1000 [ 50%]  (Sampling) 
#> Chain 1 Iteration: 600 / 1000 [ 60%]  (Sampling) 
#> Chain 2 Iteration: 600 / 1000 [ 60%]  (Sampling) 
#> Chain 1 Iteration: 700 / 1000 [ 70%]  (Sampling) 
#> Chain 2 Iteration: 700 / 1000 [ 70%]  (Sampling) 
#> Chain 1 Iteration: 800 / 1000 [ 80%]  (Sampling) 
#> Chain 2 Iteration: 800 / 1000 [ 80%]  (Sampling) 
#> Chain 2 Iteration: 900 / 1000 [ 90%]  (Sampling) 
#> Chain 1 Iteration: 900 / 1000 [ 90%]  (Sampling) 
#> Chain 2 Iteration: 1000 / 1000 [100%]  (Sampling) 
#> Chain 2 finished in 5.5 seconds.
#> Chain 1 Iteration: 1000 / 1000 [100%]  (Sampling) 
#> Chain 1 finished in 5.8 seconds.
#> 
#> Both chains finished successfully.
#> Mean chain execution time: 5.7 seconds.
#> Total execution time: 5.9 seconds.
#> 
summary(fit1)
#> GAM formula:
#> y ~ s(x0) + s(x1) + s(x2) + s(x3)
#> <environment: 0x0000013426d46318>
#> 
#> Family:
#> gaussian
#> 
#> Link function:
#> identity
#> 
#> Trend model:
#> None
#> 
#> N series:
#> 1 
#> 
#> N timepoints:
#> 200 
#> 
#> Status:
#> Fitted using Stan 
#> 2 chains, each with iter = 1000; warmup = 500; thin = 1 
#> Total post-warmup draws = 1000
#> 
#> 
#> Observation error parameter estimates:
#>              2.5% 50% 97.5% Rhat n_eff
#> sigma_obs[1]  1.9 2.1   2.4    1   957
#> 
#> GAM coefficient (beta) estimates:
#>               2.5%     50%  97.5% Rhat n_eff
#> (Intercept)  7.500  7.8000  8.100 1.00  1593
#> s(x0).1     -0.330  0.1200  0.640 1.00   588
#> s(x0).2     -1.300  0.3400  2.500 1.00   282
#> s(x0).3     -0.380  0.0820  0.590 1.00   376
#> s(x0).4     -1.500 -0.3200  0.520 1.01   245
#> s(x0).5     -0.240  0.0760  0.430 1.00   387
#> s(x0).6     -1.300 -0.3400  0.430 1.00   260
#> s(x0).7     -0.150  0.0058  0.170 1.00   498
#> s(x0).8     -0.330  1.7000  5.000 1.01   206
#> s(x0).9     -0.430 -0.0044  0.430 1.00   581
#> s(x1).1     -0.300 -0.0095  0.190 1.00   647
#> s(x1).2     -0.410 -0.0100  0.320 1.00   592
#> s(x1).3     -0.077  0.0088  0.160 1.00   445
#> s(x1).4     -0.300 -0.0140  0.150 1.00   457
#> s(x1).5     -0.045  0.0042  0.093 1.00   507
#> s(x1).6     -0.120  0.0160  0.250 1.00   452
#> s(x1).7     -0.058  0.0076  0.120 1.00   466
#> s(x1).8     -0.980 -0.0840  0.340 1.00   328
#> s(x1).9      1.400  1.9000  2.200 1.00   818
#> s(x2).1      3.500  5.2000  6.900 1.00   321
#> s(x2).2      2.600 11.0000 19.000 1.00   223
#> s(x2).3     -4.600 -3.0000 -1.200 1.00   375
#> s(x2).4     -6.400  0.2100  6.000 1.00   242
#> s(x2).5     -1.500  0.8000  3.100 1.00   334
#> s(x2).6     -7.900 -2.0000  4.500 1.00   258
#> s(x2).7      0.650  2.3000  4.100 1.00   798
#> s(x2).8     -1.500 12.0000 24.000 1.00   228
#> s(x2).9     -0.520  0.0330  0.900 1.00   292
#> s(x3).1     -0.140  0.0150  0.230 1.01   468
#> s(x3).2     -0.330  0.0012  0.270 1.01   194
#> s(x3).3     -0.077 -0.0027  0.062 1.00   433
#> s(x3).4     -0.240 -0.0066  0.150 1.01   153
#> s(x3).5     -0.091 -0.0049  0.067 1.01   244
#> s(x3).6     -0.120  0.0077  0.190 1.00   147
#> s(x3).7     -0.048  0.0051  0.086 1.01   195
#> s(x3).8     -0.710 -0.0500  0.350 1.00   119
#> s(x3).9     -0.320 -0.0660  0.200 1.00   996
#> 
#> Approximate significance of GAM smooths:
#>        edf Ref.df Chi.sq p-value    
#> s(x0) 3.76      9   39.6    0.07 .  
#> s(x1) 2.33      9  653.7  <2e-16 ***
#> s(x2) 7.71      9 1213.2  <2e-16 ***
#> s(x3) 1.16      9    1.7    1.00    
#> ---
#> Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#> 
#> Stan MCMC diagnostics:
#> n_eff / iter looks reasonable for all parameters
#> Rhat looks reasonable for all parameters
#> 0 of 1000 iterations ended with a divergence (0%)
#> 0 of 1000 iterations saturated the maximum tree depth of 12 (0%)
#> E-FMI indicated no pathological behavior
#> 
#> Samples were drawn using NUTS(diag_e) at Thu Aug 29 11:16:04 AM 2024.
#> For each parameter, n_eff is a crude measure of effective sample size,
#> and Rhat is the potential scale reduction factor on split MCMC chains
#> (at convergence, Rhat = 1)
conditional_effects(fit1)

# Fit a more complicated smooth model
fit2 <- mvgam(y ~ te(x0, x1) + s(x2, by = x3),
              data = dat, chains = 2, family = gaussian())
#> Compiling Stan program using cmdstanr
#> 
#> Start sampling
#> Running MCMC with 2 parallel chains...
#> 
#> Chain 1 Iteration:   1 / 1000 [  0%]  (Warmup) 
#> Chain 2 Iteration:   1 / 1000 [  0%]  (Warmup) 
#> Chain 1 Iteration: 100 / 1000 [ 10%]  (Warmup) 
#> Chain 2 Iteration: 100 / 1000 [ 10%]  (Warmup) 
#> Chain 1 Iteration: 200 / 1000 [ 20%]  (Warmup) 
#> Chain 2 Iteration: 200 / 1000 [ 20%]  (Warmup) 
#> Chain 1 Iteration: 300 / 1000 [ 30%]  (Warmup) 
#> Chain 2 Iteration: 300 / 1000 [ 30%]  (Warmup) 
#> Chain 1 Iteration: 400 / 1000 [ 40%]  (Warmup) 
#> Chain 2 Iteration: 400 / 1000 [ 40%]  (Warmup) 
#> Chain 1 Iteration: 500 / 1000 [ 50%]  (Warmup) 
#> Chain 1 Iteration: 501 / 1000 [ 50%]  (Sampling) 
#> Chain 2 Iteration: 500 / 1000 [ 50%]  (Warmup) 
#> Chain 2 Iteration: 501 / 1000 [ 50%]  (Sampling) 
#> Chain 1 Iteration: 600 / 1000 [ 60%]  (Sampling) 
#> Chain 2 Iteration: 600 / 1000 [ 60%]  (Sampling) 
#> Chain 1 Iteration: 700 / 1000 [ 70%]  (Sampling) 
#> Chain 2 Iteration: 700 / 1000 [ 70%]  (Sampling) 
#> Chain 1 Iteration: 800 / 1000 [ 80%]  (Sampling) 
#> Chain 2 Iteration: 800 / 1000 [ 80%]  (Sampling) 
#> Chain 1 Iteration: 900 / 1000 [ 90%]  (Sampling) 
#> Chain 2 Iteration: 900 / 1000 [ 90%]  (Sampling) 
#> Chain 1 Iteration: 1000 / 1000 [100%]  (Sampling) 
#> Chain 2 Iteration: 1000 / 1000 [100%]  (Sampling) 
#> Chain 1 finished in 3.9 seconds.
#> Chain 2 finished in 3.9 seconds.
#> 
#> Both chains finished successfully.
#> Mean chain execution time: 3.9 seconds.
#> Total execution time: 4.0 seconds.
#> 
summary(fit2)
#> GAM formula:
#> y ~ te(x0, x1) + s(x2, by = x3)
#> <environment: 0x0000013426d46318>
#> 
#> Family:
#> gaussian
#> 
#> Link function:
#> identity
#> 
#> Trend model:
#> None
#> 
#> N series:
#> 1 
#> 
#> N timepoints:
#> 200 
#> 
#> Status:
#> Fitted using Stan 
#> 2 chains, each with iter = 1000; warmup = 500; thin = 1 
#> Total post-warmup draws = 1000
#> 
#> 
#> Observation error parameter estimates:
#>              2.5% 50% 97.5% Rhat n_eff
#> sigma_obs[1]  2.4 2.7     3    1  1067
#> 
#> GAM coefficient (beta) estimates:
#>               2.5%    50%   97.5% Rhat n_eff
#> (Intercept)   7.50  8.200  8.9000    1   745
#> te(x0,x1).1  -3.20 -1.900 -0.7400    1   314
#> te(x0,x1).2  -2.10 -0.620  0.5900    1   406
#> te(x0,x1).3  -0.38  1.300  2.8000    1   582
#> te(x0,x1).4   0.63  2.700  4.7000    1   727
#> te(x0,x1).5  -3.50 -2.300 -1.0000    1   701
#> te(x0,x1).6  -1.20 -0.320  0.7300    1   404
#> te(x0,x1).7   0.19  1.100  2.1000    1   403
#> te(x0,x1).8   2.40  3.500  4.8000    1   393
#> te(x0,x1).9   3.10  4.400  5.6000    1   677
#> te(x0,x1).10 -3.70 -2.400 -1.0000    1   464
#> te(x0,x1).11 -1.20 -0.120  0.9900    1   286
#> te(x0,x1).12  0.45  1.500  2.5000    1   286
#> te(x0,x1).13  2.50  3.600  4.8000    1   316
#> te(x0,x1).14  3.50  4.700  6.2000    1   489
#> te(x0,x1).15 -4.40 -3.100 -1.9000    1   694
#> te(x0,x1).16 -1.80 -0.750  0.3400    1   475
#> te(x0,x1).17 -0.14  0.780  1.7000    1   497
#> te(x0,x1).18  2.30  3.400  4.6000    1   444
#> te(x0,x1).19  3.00  4.300  5.8000    1   519
#> te(x0,x1).20 -7.30 -4.800 -2.3000    1   715
#> te(x0,x1).21 -4.20 -2.700 -1.2000    1   491
#> te(x0,x1).22 -2.70 -1.200  0.0057    1   415
#> te(x0,x1).23 -0.58  1.100  2.7000    1   425
#> te(x0,x1).24 -0.21  2.400  4.7000    1   481
#> s(x2):x3.1   -1.40  2.100  5.3000    1   515
#> s(x2):x3.2    3.20  5.500  7.7000    1  1238
#> s(x2):x3.3   -0.10  3.400  6.7000    1   690
#> s(x2):x3.4   -4.10 -1.100  1.4000    1   690
#> s(x2):x3.5   -9.50 -5.300 -1.9000    1   621
#> s(x2):x3.6   -0.32  2.300  5.4000    1   971
#> s(x2):x3.7   -0.58  2.500  6.0000    1   882
#> s(x2):x3.8   -1.10  1.400  4.4000    1   887
#> s(x2):x3.9   -0.71  0.011  0.7800    1   531
#> s(x2):x3.10  -0.57  0.032  0.7900    1   482
#> 
#> Approximate significance of GAM smooths:
#>             edf Ref.df Chi.sq p-value    
#> te(x0,x1) 11.79     24    913  <2e-16 ***
#> s(x2):x3   7.74     10    640  <2e-16 ***
#> ---
#> Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#> 
#> Stan MCMC diagnostics:
#> n_eff / iter looks reasonable for all parameters
#> Rhat looks reasonable for all parameters
#> 0 of 1000 iterations ended with a divergence (0%)
#> 0 of 1000 iterations saturated the maximum tree depth of 12 (0%)
#> E-FMI indicated no pathological behavior
#> 
#> Samples were drawn using NUTS(diag_e) at Thu Aug 29 11:16:53 AM 2024.
#> For each parameter, n_eff is a crude measure of effective sample size,
#> and Rhat is the potential scale reduction factor on split MCMC chains
#> (at convergence, Rhat = 1)
conditional_effects(fit2)
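
# A hedged sketch, not run above (so no sampler output is shown): the ti()
# wrapper documented on this page can decompose the te(x0, x1) surface into
# marginal smooths plus a pure interaction, again passing all arguments
# through to mgcv::ti
fit3 <- mvgam(y ~ s(x0) + s(x1) + ti(x0, x1),
              data = dat, chains = 2, family = gaussian())
summary(fit3)
conditional_effects(fit3)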


# }