add matt's ggplot version of plot_mvgam_series; add patchwork to imports and update docs
nicholasjclark committed Nov 22, 2024
1 parent 96ddd32 commit ac7d749
Showing 51 changed files with 337 additions and 449 deletions.
7 changes: 5 additions & 2 deletions DESCRIPTION
@@ -5,7 +5,9 @@ Date: 2024-09-05
Authors@R: c(person("Nicholas J", "Clark", , "nicholas.j.clark1214@gmail.com",
role = c("aut", "cre"), comment = c(ORCID = "0000-0001-7131-3301")),
person("Scott", "Pease", role = c("ctb"),
comment = c("broom enhancements")))
comment = c("broom enhancements")),
person("Matthijs", "Hollanders", role = c("ctb"),
comment = c("ggplot visualizations")))
Description: Fit Bayesian Dynamic Generalized Additive Models to multivariate observations. Users can build nonlinear State-Space models that can incorporate semiparametric effects in observation and process components, using a wide range of observation families. Estimation is performed using Markov Chain Monte Carlo with Hamiltonian Monte Carlo in the software 'Stan'. References: Clark & Wells (2022) <doi:10.1111/2041-210X.13974>.
URL: https://github.com/nicholasjclark/mvgam, https://nicholasjclark.github.io/mvgam/
BugReports: https://github.com/nicholasjclark/mvgam/issues
@@ -31,7 +33,8 @@ Imports:
magrittr,
rlang,
generics,
tibble (>= 3.0.0)
tibble (>= 3.0.0),
patchwork (>= 1.2.0)
Encoding: UTF-8
LazyData: true
Roxygen: list(markdown = TRUE)
1 change: 1 addition & 0 deletions NAMESPACE
@@ -306,6 +306,7 @@ importFrom(stats,formula)
importFrom(stats,frequency)
importFrom(stats,gaussian)
importFrom(stats,is.ts)
importFrom(stats,lag)
importFrom(stats,lm)
importFrom(stats,logLik)
importFrom(stats,mad)
1 change: 1 addition & 0 deletions NEWS.md
@@ -1,5 +1,6 @@
# mvgam 1.1.4 (development version; not yet on CRAN)
## New functionalities
* Improved various plotting functions by returning `ggplot` objects in place of base plots (thanks to @mhollanders #38)
* Added `augment()` function to add residuals and fitted values to an mvgam object's observed data (thanks to @swpease #83)
* Added support for approximate `gp()` effects with more than one covariate and with different kernel functions (#79)
* Added function `jsdgam()` to estimate Joint Species Distribution Models in which both the latent factors and the observation model components can include any of mvgam's complex linear predictor effects. Also added a function `residual_cor()` to compute residual correlation, covariance and precision matrices from `jsdgam` models. See `?mvgam::jsdgam` and `?mvgam::residual_cor` for details
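
As a rough sketch of the new `augment()` entry above (the simulated data, model formula and trend specification are illustrative assumptions, not taken from this commit):

library(mvgam)

# simulate two short series and fit a simple model (illustrative settings only)
simdat <- sim_mvgam(n_series = 2, T = 60)
mod <- mvgam(y ~ s(season, bs = 'cc'),
             trend_model = AR(),
             data = simdat$data_train)

# augment() appends fitted values and residuals to the model's observed data
augment(mod)
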
2 changes: 1 addition & 1 deletion R/globals.R
@@ -22,4 +22,4 @@ utils::globalVariables(c("y", "year", "smooth_vals", "smooth_num",
"total_evd", "smooth_label", "by_variable",
"gr", "tot_subgrs", "subgr", "lambda",
"level", "sim_hilbert_gp", "trend_model",
"jags_path"))
"jags_path", "x"))
75 changes: 74 additions & 1 deletion R/mvgam_setup.R
@@ -1142,7 +1142,7 @@ jagam <- function(formula,
n.sp <- n.sp + 1
#cat(" for (i in ",b0,":",b1,") { b[i] ~ dnorm(0, lambda[",n.sp,"]) }\n",file=file,append=TRUE,sep="")
#b0 <- b1 + 1
cat(" for (i in ",compress.iseq((b0:b1)[D]),") { b[i] ~ dnorm(0, lambda[",n.sp,"]) }\n",file=file,append=TRUE,sep="")
cat(" for (i in ",compress_iseq((b0:b1)[D]),") { b[i] ~ dnorm(0, lambda[",n.sp,"]) }\n",file=file,append=TRUE,sep="")
}
} else { ## inseparable - requires the penalty matrices to be supplied to JAGS...
b0 <- G$smooth[[i]]$first.para; b1 <- G$smooth[[i]]$last.para
@@ -1416,6 +1416,79 @@ gam_setup.list <- function(formula,
G
}


#' Takes a set of non-negative integers and returns minimal code for generating it
#'
#' This function is derived from \code{mgcv:::compress.iseq}
#'
#' @author Simon N Wood with modifications by Nicholas Clark
#' @noRd
compress_iseq <- function(x) {
x1 <- sort(x)
br <- diff(x1)!=1 ## TRUE at sequence breaks
txt <- paste(x1[c(TRUE, br)], x1[c(br, TRUE)], sep = ":") ## subsequences
txt1 <- paste(x1[c(TRUE, br)]) ## subseq starts
ii <- x1[c(TRUE, br)] == x1[c(br, TRUE)] ## index start and end equal
txt[ii] <- txt1[ii] ## replace length-1 sequences with a single integer
paste("c(", paste(txt, collapse = ","),")", sep = "")
}
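
For reference, a quick illustration of what compress_iseq() returns (derived directly from the code above; the input values are arbitrary):

# runs of consecutive integers are collapsed into ':' ranges
compress_iseq(c(1, 2, 3, 7, 8, 10))
#> [1] "c(1:3,7:8,10)"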

#' Returns a vector dind of columns of X to drop for identifiability
#'
#' This function is derived from \code{mgcv:::olid}
#'
#' @author Simon N Wood with modifications by Nicholas Clark
#' @noRd
olid <- function(X, nsdf, pstart, flpi, lpi) {
nlp <- length(lpi) ## number of linear predictors
n <- nrow(X)
nf <- length(nsdf) ## number of formulae blocks
Xp <- matrix(0 ,n * nlp, sum(nsdf))
start <- 1
ii <- 1:n
tind <- rep(0,0) ## complete index of all parametric columns in X
## create a block matrix, Xp, with the same identifiability properties as
## unpenalized part of model...
for (i in 1:nf) {
stop <- start - 1 + nsdf[i]
if (stop>=start) {
ind <- pstart[i] + 1:nsdf[i] - 1
for (k in flpi[[i]]) {
Xp[ii+(k-1)*n, start:stop] <- X[,ind]
}
tind <- c(tind,ind)
start <- start + nsdf[i]
}
}
## rank deficiency of Xp will reveal number of redundant parametric
## terms, and a pivoted QR will reveal which to drop to restore
## full rank...
qrx <- qr(Xp, LAPACK=TRUE,tol=0.0) ## unidentifiable columns get pivoted to final cols
r <- mgcv::Rrank(qr.R(qrx)) ## get rank from R factor of pivoted QR
if (r == ncol(Xp)) { ## full rank, all fine, drop nothing
dind <- rep(0,0)
} else { ## reduced rank, drop some columns
dind <- tind[sort(qrx$pivot[(r+1):ncol(X)],decreasing=TRUE)] ## columns to drop
## now we need to adjust nsdf, pstart and lpi
for (d in dind) { ## working down through drop indices
## following commented out code is useful should it ever prove necessary to
## adjust pstart and nsdf, but at present these are only used in prediction,
## and it is cleaner to leave them unchanged, and simply drop using dind during prediction.
#k <- if (d>=pstart[nf]) nlp else which(d >= pstart[1:(nf-1)] & d < pstart[2:nf])
#nsdf[k] <- nsdf[k] - 1 ## one less unpenalized column in this block
#if (k<nf) pstart[(k+1):nf] <- pstart[(k+1):nf] - 1 ## later block starts move down 1
for (i in 1:nlp) {
k <- which(d == lpi[[i]])
if (length(k)>0) lpi[[i]] <- lpi[[i]][-k] ## drop row
k <- which(lpi[[i]]>d)
if (length(k)>0) lpi[[i]][k] <- lpi[[i]][k] - 1 ## close up
}
} ## end of drop index loop
}
list(dind=dind,lpi=lpi) ##,pstart=pstart,nsdf=nsdf)
}
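
The heart of olid() is the pivoted QR step: columns that are linearly dependent on earlier ones are pivoted past the numerical rank of the R factor, and their pivot indices identify which parametric columns to drop. A standalone sketch of that idea (the toy matrix is an assumption for illustration; which of the dependent columns gets flagged depends on LAPACK's pivot choice):

library(mgcv)

# toy design matrix with one redundant column (column 3 = column 1 + column 2)
X <- cbind(1, rep(0:1, each = 5), rep(0:1, each = 5) + 1)
qrx <- qr(X, LAPACK = TRUE, tol = 0.0)  # dependent columns get pivoted to the end
r <- mgcv::Rrank(qr.R(qrx))             # numerical rank of the R factor (2 here)
qrx$pivot[(r + 1):ncol(X)]              # column(s) beyond the rank are redundant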


#' Write linear predictor section of a jagam file
#'
#' This function is derived from \code{mgcv:::write.jagslp}
2 changes: 1 addition & 1 deletion R/plot.mvgam.R
@@ -112,7 +112,7 @@ plot.mvgam = function(x, type = 'residuals',

# Other errors and warnings will propagate from individual functions below
if(type == 'series'){
plot_mvgam_series(object, series = series, data_test = data_test, ...)
print(plot_mvgam_series(object, series = series, newdata = data_test, ...))
}

if(type == 're'){
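
Because plot_mvgam_series() now returns a ggplot/patchwork object (the reason patchwork joins the Imports in DESCRIPTION), its output can be stored, modified and recombined before printing; the plot() method above simply prints that object. A minimal sketch, assuming a fitted model mod with at least two series:

library(patchwork)

# plots are returned as objects rather than drawn as base graphics
p1 <- plot_mvgam_series(mod, series = 1)
p2 <- plot_mvgam_series(mod, series = 2)

# stack the two series' summaries using patchwork's layout operators
p1 / p2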