diff --git a/DESCRIPTION b/DESCRIPTION index 6961131..ff9ec6c 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -12,16 +12,23 @@ Description: This package fits models to bycatch data, and can expand to fleet-w License: GPL-3 Encoding: UTF-8 Depends: - R (>= 4.0.0), Rcpp (>= 1.0.5), methods + R (>= 3.4.0) Imports: - rstan (>= 2.21.2), - rstantools (>= 2.1.1), ggplot2, loo (>= 2.4.1), dplyr (>= 1.0.2), - rlang (>= 0.4.1) -LinkingTo: StanHeaders (>= 2.21.0.7), rstan (>= 2.21.2), BH (>= 1.75.0.0), - Rcpp (>= 1.0.5), RcppEigen (>= 0.3.3.9.1) + rlang (>= 0.4.1), + methods, + Rcpp (>= 0.12.0), + RcppParallel (>= 5.0.1), + rstan (>= 2.18.1), + rstantools (>= 2.3.0) +LinkingTo: BH (>= 1.66.0), + Rcpp (>= 0.12.0), + RcppEigen (>= 0.3.3.3.0), + RcppParallel (>= 5.0.1), + rstan (>= 2.18.1), + StanHeaders (>= 2.18.0) Suggests: testthat, parallel, diff --git a/NAMESPACE b/NAMESPACE index 9504a1f..b1781c7 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -9,7 +9,6 @@ export(plot_fitted) import(Rcpp) import(ggplot2) import(methods) -import(rstantools) importFrom(rstan,extract) importFrom(rstan,sampling) importFrom(rstan,vb) diff --git a/R/bycatch-package.R b/R/bycatch-package.R index b519343..e5a4923 100644 --- a/R/bycatch-package.R +++ b/R/bycatch-package.R @@ -8,10 +8,9 @@ #' @useDynLib bycatch, .registration = TRUE #' @import methods #' @import Rcpp -#' @import rstantools #' @importFrom rstan sampling #' #' @references -#' Stan Development Team (2018). RStan: the R interface to Stan. R package version 2.18.2. http://mc-stan.org +#' Stan Development Team (2023). RStan: the R interface to Stan. R package version 2.21.8. https://mc-stan.org #' NULL diff --git a/R/stanmodels.R b/R/stanmodels.R index 11277a3..2d6e93f 100644 --- a/R/stanmodels.R +++ b/R/stanmodels.R @@ -1,43 +1,25 @@ -# Part of the bayesdfa package for estimating model parameters -# Copyright (C) 2015, 2016, 2017 Trustees of Columbia University -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 3 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# Generated by rstantools. Do not edit by hand. 
-# This file is only intended to be used during the installation process -# nocov start -MODELS_HOME <- "src" -if (!file.exists(MODELS_HOME)) MODELS_HOME <- sub("R$", "src", getwd()) +# names of stan models +stanmodels <- c("bycatch") -stan_files <- dir(file.path(MODELS_HOME, "stan_files"), - pattern = "stan$", full.names = TRUE -) -stanmodels <- lapply(stan_files, function(f) { - model_cppname <- sub("\\.stan$", "", basename(f)) - stanfit <- rstan::stanc(f, - allow_undefined = TRUE, - obfuscate_model_name = FALSE - ) - stanfit$model_cpp <- list( - model_cppname = stanfit$model_name, - model_cppcode = stanfit$cppcode - ) - return(do.call(methods::new, args = c(stanfit[-(1:3)], - Class = "stanmodel", - mk_cppmodule = function(x) get(paste0("model_", model_cppname)) - ))) +# load each stan module +Rcpp::loadModule("stan_fit4bycatch_mod", what = TRUE) + +# instantiate each stanmodel object +stanmodels <- sapply(stanmodels, function(model_name) { + # create C++ code for stan model + stan_file <- if(dir.exists("stan")) "stan" else file.path("inst", "stan") + stan_file <- file.path(stan_file, paste0(model_name, ".stan")) + stanfit <- rstan::stanc_builder(stan_file, + allow_undefined = TRUE, + obfuscate_model_name = FALSE) + stanfit$model_cpp <- list(model_cppname = stanfit$model_name, + model_cppcode = stanfit$cppcode) + # create stanmodel object + methods::new(Class = "stanmodel", + model_name = stanfit$model_name, + model_code = stanfit$model_code, + model_cpp = stanfit$model_cpp, + mk_cppmodule = function(x) get(paste0("rstantools_model_", model_name))) }) -names(stanmodels) <- sub("\\.stan$", "", basename(stan_files)) -rm(MODELS_HOME) -# nocov end diff --git a/R/zzz.R b/R/zzz.R deleted file mode 100644 index ab4f6b4..0000000 --- a/R/zzz.R +++ /dev/null @@ -1,4 +0,0 @@ -.onLoad <- function(libname, pkgname) { - modules <- paste0("stan_fit4", names(stanmodels), "_mod") - for (m in modules) loadModule(m, what = TRUE) -} diff --git a/bycatch.Rproj b/bycatch.Rproj index cba1b6b..69fafd4 100644 --- a/bycatch.Rproj +++ b/bycatch.Rproj @@ -14,6 +14,7 @@ LaTeX: pdfLaTeX AutoAppendNewline: Yes StripTrailingWhitespace: Yes +LineEndingConversion: Posix BuildType: Package PackageUseDevtools: Yes diff --git a/docs/404.html b/docs/404.html index 105968f..69b5c6a 100644 --- a/docs/404.html +++ b/docs/404.html @@ -11,8 +11,7 @@ - - + - - + -// v0.0.1 -// Written by JooYoung Seo (jooyoung@psu.edu) and Atsushi Yasumoto on June 1st, 2020. - -document.addEventListener('DOMContentLoaded', function() { - const codeList = document.getElementsByClassName("sourceCode"); - for (var i = 0; i < codeList.length; i++) { - var linkList = codeList[i].getElementsByTagName('a'); - for (var j = 0; j < linkList.length; j++) { - if (linkList[j].innerHTML === "") { - linkList[j].setAttribute('aria-hidden', 'true'); - } - } - } -}); diff --git a/docs/articles/a01_overview_files/header-attrs-2.9/header-attrs.js b/docs/articles/a01_overview_files/header-attrs-2.9/header-attrs.js deleted file mode 100644 index dd57d92..0000000 --- a/docs/articles/a01_overview_files/header-attrs-2.9/header-attrs.js +++ /dev/null @@ -1,12 +0,0 @@ -// Pandoc 2.9 adds attributes on both header and div. We remove the former (to -// be compatible with the behavior of Pandoc < 2.8). 
-document.addEventListener('DOMContentLoaded', function(e) { - var hs = document.querySelectorAll("div.section[class*='level'] > :first-child"); - var i, h, a; - for (i = 0; i < hs.length; i++) { - h = hs[i]; - if (!/^h[1-6]$/i.test(h.tagName)) continue; // it should be a header h1-h6 - a = h.attributes; - while (a.length > 0) h.removeAttribute(a[0].name); - } -}); diff --git a/docs/articles/a02_fitting_models.html b/docs/articles/a02_fitting_models.html index f30af88..f4096a1 100644 --- a/docs/articles/a02_fitting_models.html +++ b/docs/articles/a02_fitting_models.html @@ -11,8 +11,7 @@ - - + -// v0.0.1 -// Written by JooYoung Seo (jooyoung@psu.edu) and Atsushi Yasumoto on June 1st, 2020. - -document.addEventListener('DOMContentLoaded', function() { - const codeList = document.getElementsByClassName("sourceCode"); - for (var i = 0; i < codeList.length; i++) { - var linkList = codeList[i].getElementsByTagName('a'); - for (var j = 0; j < linkList.length; j++) { - if (linkList[j].innerHTML === "") { - linkList[j].setAttribute('aria-hidden', 'true'); - } - } - } -}); diff --git a/docs/articles/a02_fitting_models_files/figure-html/unnamed-chunk-16-1.png b/docs/articles/a02_fitting_models_files/figure-html/unnamed-chunk-16-1.png deleted file mode 100644 index c1e5d43..0000000 Binary files a/docs/articles/a02_fitting_models_files/figure-html/unnamed-chunk-16-1.png and /dev/null differ diff --git a/docs/articles/a02_fitting_models_files/header-attrs-2.9/header-attrs.js b/docs/articles/a02_fitting_models_files/header-attrs-2.9/header-attrs.js deleted file mode 100644 index dd57d92..0000000 --- a/docs/articles/a02_fitting_models_files/header-attrs-2.9/header-attrs.js +++ /dev/null @@ -1,12 +0,0 @@ -// Pandoc 2.9 adds attributes on both header and div. We remove the former (to -// be compatible with the behavior of Pandoc < 2.8). -document.addEventListener('DOMContentLoaded', function(e) { - var hs = document.querySelectorAll("div.section[class*='level'] > :first-child"); - var i, h, a; - for (i = 0; i < hs.length; i++) { - h = hs[i]; - if (!/^h[1-6]$/i.test(h.tagName)) continue; // it should be a header h1-h6 - a = h.attributes; - while (a.length > 0) h.removeAttribute(a[0].name); - } -}); diff --git a/docs/articles/a03_expanding_estimates.html b/docs/articles/a03_expanding_estimates.html index d6bdd71..2f029ce 100644 --- a/docs/articles/a03_expanding_estimates.html +++ b/docs/articles/a03_expanding_estimates.html @@ -11,8 +11,7 @@ - - + -// v0.0.1 -// Written by JooYoung Seo (jooyoung@psu.edu) and Atsushi Yasumoto on June 1st, 2020. - -document.addEventListener('DOMContentLoaded', function() { - const codeList = document.getElementsByClassName("sourceCode"); - for (var i = 0; i < codeList.length; i++) { - var linkList = codeList[i].getElementsByTagName('a'); - for (var j = 0; j < linkList.length; j++) { - if (linkList[j].innerHTML === "") { - linkList[j].setAttribute('aria-hidden', 'true'); - } - } - } -}); diff --git a/docs/articles/a03_expanding_estimates_files/header-attrs-2.9/header-attrs.js b/docs/articles/a03_expanding_estimates_files/header-attrs-2.9/header-attrs.js deleted file mode 100644 index dd57d92..0000000 --- a/docs/articles/a03_expanding_estimates_files/header-attrs-2.9/header-attrs.js +++ /dev/null @@ -1,12 +0,0 @@ -// Pandoc 2.9 adds attributes on both header and div. We remove the former (to -// be compatible with the behavior of Pandoc < 2.8). 
-document.addEventListener('DOMContentLoaded', function(e) { - var hs = document.querySelectorAll("div.section[class*='level'] > :first-child"); - var i, h, a; - for (i = 0; i < hs.length; i++) { - h = hs[i]; - if (!/^h[1-6]$/i.test(h.tagName)) continue; // it should be a header h1-h6 - a = h.attributes; - while (a.length > 0) h.removeAttribute(a[0].name); - } -}); diff --git a/docs/articles/a04_diagnosing_problems.html b/docs/articles/a04_diagnosing_problems.html index ab60faa..d605d68 100644 --- a/docs/articles/a04_diagnosing_problems.html +++ b/docs/articles/a04_diagnosing_problems.html @@ -11,8 +11,7 @@ - - + - - - - - -Background • bycatch - - - - - - - - - - -
-Load library

library(bycatch)

-Overview

-

Previous authors, including Gardner et al. (2008) and Martin et al. (2015), have assumed that rare-event bycatch follows a Poisson process, where the observed bycatch events \(y_{t}\) are modeled according to an estimated bycatch rate \(\lambda\),

-

\[p(y_{t}|\lambda) = e^{-\lambda}\frac{\lambda^{y_{t}}}{y_{t}!}\] where \(\lambda\) is the mean of the Poisson distribution. The mean rate parameter \(\lambda\) can be further decomposed into a per-event (e.g., per-set) parameter \(\theta\) and the number of observed sets \(n_{t}\), \(\lambda = \theta * n_{t}\). In a GLM setting, the log-link function is often used to model the effects of covariates, e.g. \(log(\lambda) = log(\theta) + b_{1}*x_{1} + b_{2}*x_{2}...\), and in this setting the known number of sets \(n_{t}\) is treated as an offset, \(log(\lambda) = log(\theta) + b_{1}*x_{1} + b_{2}*x_{2} + log(n_{t})\).
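To make the offset formulation concrete, here is a minimal sketch of the classical (non-Bayesian) analogue using base R's glm() on simulated data; the column names and simulated values are made up purely for this illustration.

# Simulated example: per-set rate theta = 0.02, one covariate, varying effort
set.seed(1)
sim <- data.frame(
  Sets = sample(300:700, 20, replace = TRUE),
  x1 = rnorm(20)
)
sim$Takes <- rpois(20, lambda = 0.02 * sim$Sets * exp(0.5 * sim$x1))

# log(lambda) = log(theta) + b1 * x1 + log(Sets), with log(Sets) as the offset
m <- glm(Takes ~ x1 + offset(log(Sets)), family = poisson(), data = sim)
exp(coef(m)["(Intercept)"])  # approximately recovers the per-set rate theta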

-

Estimation of \(\lambda\) or \(\theta\) can be done in a maximum likelihood or Bayesian setting, and here we use Bayesian methods to estimate the posterior distribution of the parameters given observed bycatch data,

-

\[p(\theta|\underline{\mathbf{y}}) \propto p(\theta)p(\underline{\mathbf{y}}|\theta)\]


-Other families

-

In addition to the Poisson model described here, we include the Negative Binomial model, which allows the variance to be greater than the mean. We use the ‘nbinom2’ parameterization of the Negative Binomial, \(Var(Y) = u + \frac{u^2}{\tau}\), where \(u\) is the mean and \(\tau\) controls the degree of overdispersion. Both models can also be extended to handle extra zeros: we adopt hurdle models, so that \(p(y_{t}=0) = \phi\) and \(p(y_{t}>0) = 1-\phi\).
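As a sketch of how these alternatives are selected, the distribution is chosen through the family argument of fit_bycatch(). The family string "nbinom2" below is assumed from the parameterization named above (see ?fit_bycatch for the strings the package actually accepts), and the small data frame reuses the example take/effort values from the other vignettes.

library(bycatch)

# Example take/effort data (same values as in the fitting vignette)
d <- data.frame(Year = 2002:2014,
  Takes = c(0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 0, 0, 0),
  Sets = c(391, 340, 330, 660, 470, 500, 330, 287, 756, 673, 532, 351, 486))

# "nbinom2" is an assumed family name -- check ?fit_bycatch for supported options
fit_nb <- fit_bycatch(Takes ~ 1, data = d, time = "Year", effort = "Sets",
  family = "nbinom2", time_varying = FALSE)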


-Fixed effect covariates

-

As described above, any type of fixed effect can be included as a predictor of bycatch via the formula interface. Coefficients are estimated on the log scale, \(log(\lambda) = log(\theta) + b_{1}*x_{1} + b_{2}*x_{2} + log(n_{t})\), where \(x_{1}\) and \(x_{2}\) are covariates.
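For example, a covariate enters on the right-hand side of the formula; here sst is a hypothetical covariate column added purely for illustration, and d is the take/effort data frame from the sketch above.

# Hypothetical covariate (not part of the real data), added for illustration
d$sst <- rnorm(nrow(d))

fit_cov <- fit_bycatch(Takes ~ sst, data = d, time = "Year", effort = "Sets",
  family = "poisson", time_varying = FALSE)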


-Time varying parameters

-

In some cases, such as when covariates are missing or not completely observed, it may be useful to include time-varying effects. We have implemented these as random effects in link (log) space, so the predicted bycatch rate becomes \(log(\lambda) = log(\theta) + b_{1}*x_{1} + b_{2}*x_{2} + log(n_{t}) + \omega_{t}\), where \(\omega_{t}\) is a latent time effect. The time effects are constrained to follow a random walk, \(\omega_{t} \sim Normal(\omega_{t-1},\sigma_{\omega})\). Extensions of this model could include autoregressive coefficients to help ensure stationarity.
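Continuing the sketch above, the random-walk time effects are turned on with the time_varying argument:

# Same model as before, but with a latent random-walk effect by year
fit_tv <- fit_bycatch(Takes ~ 1, data = d, time = "Year", effort = "Sets",
  family = "poisson", time_varying = TRUE)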


-Expanding estimates

-

A major goal of bycatch analyses with partially observed datasets is estimating the total unobserved bycatch. Because the total number of effort units (sets, trips, etc.) \(N_{t}\) is known, and the number of observed units \(n_{t}\) is known, the number of unobserved units is also known, \(N_{t}-n_{t}\).

-

Using the Bayesian framework, we can generate samples from the posterior predictive distribution of bycatch in unobserved sets, using the same distribution assumed for the observed sets (Poisson, Negative Binomial, etc.). This posterior predictive approach assumes that the distribution of unobserved bycatch is

-

\[P(Y_{t}-y_{t} | \theta, N_{t}, n_{t}) = \int_{\theta} p(Y_{t}-y_{t} | \theta, N_{t} - n_{t})p(\theta | \underline{\mathbf{y}})d\theta\]

-

The integrand is a product of two quantities. The first is the probability of individual bycatch values (e.g. 0, 1, 2, …) conditioned on the estimated parameters \(\theta\) and the number of unobserved units \(N_{t}-n_{t}\); the second is the posterior distribution of the parameters given all data, \(p(\theta | \underline{\mathbf{y}})\). By drawing a large number of samples from the posterior predictive distribution, we can compute credible intervals and other summary statistics.
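A minimal sketch of this sampling idea, assuming a Poisson model and a vector theta_draws of posterior draws of the per-set rate \(\theta\) (the placeholder draws below stand in for values extracted from a fitted model):

# Placeholder posterior draws of the per-set bycatch rate theta
theta_draws <- rgamma(1000, shape = 2, rate = 500)
n_unobs <- 350  # N_t - n_t, the number of unobserved sets in one year

# One posterior predictive draw of unobserved bycatch per posterior sample
y_unobs <- rpois(length(theta_draws), lambda = theta_draws * n_unobs)
quantile(y_unobs, c(0.025, 0.5, 0.975))  # credible interval summary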

- - - - - - diff --git a/docs/articles/background_files/accessible-code-block-0.0.1/empty-anchor.js b/docs/articles/background_files/accessible-code-block-0.0.1/empty-anchor.js deleted file mode 100644 index ca349fd..0000000 --- a/docs/articles/background_files/accessible-code-block-0.0.1/empty-anchor.js +++ /dev/null @@ -1,15 +0,0 @@ -// Hide empty tag within highlighted CodeBlock for screen reader accessibility (see https://github.com/jgm/pandoc/issues/6352#issuecomment-626106786) --> -// v0.0.1 -// Written by JooYoung Seo (jooyoung@psu.edu) and Atsushi Yasumoto on June 1st, 2020. - -document.addEventListener('DOMContentLoaded', function() { - const codeList = document.getElementsByClassName("sourceCode"); - for (var i = 0; i < codeList.length; i++) { - var linkList = codeList[i].getElementsByTagName('a'); - for (var j = 0; j < linkList.length; j++) { - if (linkList[j].innerHTML === "") { - linkList[j].setAttribute('aria-hidden', 'true'); - } - } - } -}); diff --git a/docs/articles/example_files/figure-html/unnamed-chunk-21-1.png b/docs/articles/example_files/figure-html/unnamed-chunk-21-1.png deleted file mode 100644 index 58391ab..0000000 Binary files a/docs/articles/example_files/figure-html/unnamed-chunk-21-1.png and /dev/null differ diff --git a/docs/articles/example_files/figure-html/unnamed-chunk-5-1.png b/docs/articles/example_files/figure-html/unnamed-chunk-5-1.png deleted file mode 100644 index 9156508..0000000 Binary files a/docs/articles/example_files/figure-html/unnamed-chunk-5-1.png and /dev/null differ diff --git a/docs/articles/example_files/figure-html/unnamed-chunk-6-1.png b/docs/articles/example_files/figure-html/unnamed-chunk-6-1.png deleted file mode 100644 index ad1b6de..0000000 Binary files a/docs/articles/example_files/figure-html/unnamed-chunk-6-1.png and /dev/null differ diff --git a/docs/articles/expanding.html b/docs/articles/expanding.html deleted file mode 100644 index b67545c..0000000 --- a/docs/articles/expanding.html +++ /dev/null @@ -1,184 +0,0 @@ - - - - - - - -Expanding estimates to unobserved bycatch • bycatch - - - - - - - - - - -

-Load library

-
-#library(devtools)
-#devtools::install_github("ericward-noaa/bycatch")
-library(bycatch)
-set.seed(123)
-
-

-Overview

-

Previous authors, including Gardner et al. (2008) and Martin et al. (2015), have assumed that rare-event bycatch follows a Poisson process, where the observed bycatch events \(y_{t}\) are modeled according to an estimated bycatch rate \(\lambda\),

-

\[p(y_{t}|\lambda) = e^{-\lambda}\frac{\lambda^{y_{t}}}{y_{t}!}\] where \(\lambda\) is the mean of the Poisson distribution. The mean rate parameter \(\lambda\) can be further decomposed into a per-event (e.g., per-set) parameter \(\theta\) and the number of observed sets \(n_{t}\), \(\lambda = \theta * n_{t}\). In a GLM setting, the log-link function is often used to model the effects of covariates, e.g. \(log(\lambda) = log(\theta) + B_{1}*x_{1} + B_{2}*x_{2}...\), and in this setting the known number of sets \(n_{t}\) is treated as an offset, \(log(\lambda) = log(\theta) + B_{1}*x_{1} + B_{2}*x_{2} + log(n_{t})\).

-

The reported effort (here, sets) represents the total number of events that are observed. Focusing on a single year of data, the expansion rate \(p_{obs}\) is the fraction of sets that are observed (supplied here as a percentage, so \(p_{obs} = n/100\)). The total number of sets is \(N = e_{obs} + e_{unobs}\), so given \(e_{obs}\) and \(p_{obs}\) we have \(N = e_{obs} / p_{obs}\) and \(e_{unobs} = e_{obs} * (1/p_{obs} - 1)\).
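For example, with 250 observed sets and 25% observer coverage:

e_obs <- 250          # observed sets
p_obs <- 25 / 100     # observer coverage as a fraction
N <- e_obs / p_obs                  # 1000 total sets
e_unobs <- e_obs * (1 / p_obs - 1)  # 750 unobserved sets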


-Load data

-
-# replace this with your own data frame
-d = data.frame("Year"= 2002:2014, 
-  "Takes" = c(0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 0, 0, 0),
-  "expansionRate" = c(24, 22, 14, 32, 28, 25, 30,  7, 26, 21, 22, 23, 27),
-  "Sets" = c(391, 340, 330, 660, 470, 500, 330, 287, 756, 673, 532, 351, 486))
-

-Simple model with constant bycatch, no covariates

-

We’ll start by fitting a model with constant bycatch rate,

-
-fit = fit_bycatch(Takes ~ 1, data=d, time="Year", effort="Sets", family="poisson",
-  time_varying = FALSE)

-Expanding bycatch estimates

-

In our example above, observer coverage was less than 100%, so the estimates need to be expanded to the fleet-wide level. Several control arguments are left at their defaults here, but may need to be changed when bycatch counts are very large.

-
-expanded = expand(fit, coverage = d$expansionRate)
-

We can then plot these estimates. As with the previous plotting function, we can specify whether or not to include the raw data points.

-
-plot_expanded(fitted_model=fit, expanded_estimates = expanded, xlab="Year", ylab = "Fleet-level bycatch", include_points = TRUE)
-
-

-Make table of expanded bycatch estimates

-

We can also summarize the expanded estimates in table form:

-
-df = data.frame("time" = d[,"Year"], 
-  "mean" = apply(expanded, 2, mean),
-  "median" = apply(expanded, 2, quantile, 0.5),
-  "lower95" = apply(expanded, 2, quantile, 0.025),
-  "upper95" = apply(expanded, 2, quantile, 0.975))
-
-write.table(df, "estimated_bycatch.csv", row.names=F, col.names=T, sep=",")
- - - - - - diff --git a/docs/articles/expanding_files/accessible-code-block-0.0.1/empty-anchor.js b/docs/articles/expanding_files/accessible-code-block-0.0.1/empty-anchor.js deleted file mode 100644 index ca349fd..0000000 --- a/docs/articles/expanding_files/accessible-code-block-0.0.1/empty-anchor.js +++ /dev/null @@ -1,15 +0,0 @@ -// Hide empty tag within highlighted CodeBlock for screen reader accessibility (see https://github.com/jgm/pandoc/issues/6352#issuecomment-626106786) --> -// v0.0.1 -// Written by JooYoung Seo (jooyoung@psu.edu) and Atsushi Yasumoto on June 1st, 2020. - -document.addEventListener('DOMContentLoaded', function() { - const codeList = document.getElementsByClassName("sourceCode"); - for (var i = 0; i < codeList.length; i++) { - var linkList = codeList[i].getElementsByTagName('a'); - for (var j = 0; j < linkList.length; j++) { - if (linkList[j].innerHTML === "") { - linkList[j].setAttribute('aria-hidden', 'true'); - } - } - } -}); diff --git a/docs/articles/index.html b/docs/articles/index.html index 68941a6..e9af0d3 100644 --- a/docs/articles/index.html +++ b/docs/articles/index.html @@ -1,5 +1,5 @@ -Articles • bycatchArticles • bycatch diff --git a/docs/authors.html b/docs/authors.html index d6dbb4b..2c1b665 100644 --- a/docs/authors.html +++ b/docs/authors.html @@ -1,5 +1,5 @@ -Authors and Citation • bycatchAuthors and Citation • bycatch diff --git a/docs/extra.css b/docs/extra.css deleted file mode 100644 index ed01162..0000000 --- a/docs/extra.css +++ /dev/null @@ -1 +0,0 @@ -@import url("https://nmfs-general-modeling-tools.github.io/nmfspalette/extra.css"); diff --git a/docs/index.html b/docs/index.html index cebedd9..6940184 100644 --- a/docs/index.html +++ b/docs/index.html @@ -11,8 +11,7 @@ - - + The 'bycatch' package. — bycatch-package • bycatchThe 'bycatch' package. — bycatch-package • bycatch @@ -71,7 +71,7 @@

The 'bycatch' package.

References

-

Stan Development Team (2018). RStan: the R interface to Stan. R package version 2.18.2. http://mc-stan.org

+

Stan Development Team (2023). RStan: the R interface to Stan. R package version 2.21.8. https://mc-stan.org

diff --git a/docs/reference/fit_bycatch-1.png b/docs/reference/fit_bycatch-1.png index 0e0f603..ae85f21 100644 Binary files a/docs/reference/fit_bycatch-1.png and b/docs/reference/fit_bycatch-1.png differ diff --git a/docs/reference/fit_bycatch.html b/docs/reference/fit_bycatch.html index a2e94ee..da825ac 100644 --- a/docs/reference/fit_bycatch.html +++ b/docs/reference/fit_bycatch.html @@ -1,5 +1,5 @@ -fit_bycatch is the primary function for fitting bycatch models to time series of takes and effort — fit_bycatch • bycatchfit_bycatch is the primary function for fitting bycatch models to time series of takes and effort — fit_bycatch • bycatch @@ -164,8 +164,8 @@

Examples

#> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 1). #> Chain 1: -#> Chain 1: Gradient evaluation took 3.8e-05 seconds -#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.38 seconds. +#> Chain 1: Gradient evaluation took 3.5e-05 seconds +#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.35 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: @@ -182,9 +182,9 @@

Examples

#> Chain 1: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 1: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.012121 seconds (Warm-up) -#> Chain 1: 0.01112 seconds (Sampling) -#> Chain 1: 0.023241 seconds (Total) +#> Chain 1: Elapsed Time: 0.016508 seconds (Warm-up) +#> Chain 1: 0.015624 seconds (Sampling) +#> Chain 1: 0.032132 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). @@ -207,15 +207,15 @@

Examples

#> Chain 2: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 2: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.012482 seconds (Warm-up) -#> Chain 2: 0.010169 seconds (Sampling) -#> Chain 2: 0.022651 seconds (Total) +#> Chain 2: Elapsed Time: 0.017509 seconds (Warm-up) +#> Chain 2: 0.016909 seconds (Sampling) +#> Chain 2: 0.034418 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 9e-06 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds. +#> Chain 3: Gradient evaluation took 1.2e-05 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.12 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -232,16 +232,16 @@

Examples

#> Chain 3: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 3: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.01171 seconds (Warm-up) -#> Chain 3: 0.011931 seconds (Sampling) -#> Chain 3: 0.023641 seconds (Total) +#> Chain 3: Elapsed Time: 0.015337 seconds (Warm-up) +#> Chain 3: 0.015091 seconds (Sampling) +#> Chain 3: 0.030428 seconds (Total) #> Chain 3: loo::
loo(fit$fitted_model)$estimates #> Warning: Some Pareto k diagnostic values are too high. See help('pareto-k-diagnostic') for details. #> Estimate SE -#> elpd_loo -10.642299 5.393601 -#> p_loo 1.972237 1.611526 -#> looic 21.284598 10.787202 +#> elpd_loo -11.215059 5.994750 +#> p_loo 2.555063 2.180128 +#> looic 22.430118 11.989499 fit <- fit_bycatch(Takes ~ 1, data = d, time = "Year", effort = "Sets", @@ -250,8 +250,8 @@

Examples

#> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 1). #> Chain 1: -#> Chain 1: Gradient evaluation took 1.1e-05 seconds -#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds. +#> Chain 1: Gradient evaluation took 1.3e-05 seconds +#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: @@ -268,15 +268,15 @@

Examples

#> Chain 1: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 1: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.011336 seconds (Warm-up) -#> Chain 1: 0.011355 seconds (Sampling) -#> Chain 1: 0.022691 seconds (Total) +#> Chain 1: Elapsed Time: 0.014235 seconds (Warm-up) +#> Chain 1: 0.014836 seconds (Sampling) +#> Chain 1: 0.029071 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 8e-06 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.08 seconds. +#> Chain 2: Gradient evaluation took 1e-05 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -293,15 +293,15 @@

Examples

#> Chain 2: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 2: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.012035 seconds (Warm-up) -#> Chain 2: 0.011975 seconds (Sampling) -#> Chain 2: 0.02401 seconds (Total) +#> Chain 2: Elapsed Time: 0.01486 seconds (Warm-up) +#> Chain 2: 0.015182 seconds (Sampling) +#> Chain 2: 0.030042 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 9e-06 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds. +#> Chain 3: Gradient evaluation took 1.1e-05 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -318,9 +318,9 @@

Examples

#> Chain 3: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 3: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.011612 seconds (Warm-up) -#> Chain 3: 0.01167 seconds (Sampling) -#> Chain 3: 0.023282 seconds (Total) +#> Chain 3: Elapsed Time: 0.015184 seconds (Warm-up) +#> Chain 3: 0.013594 seconds (Sampling) +#> Chain 3: 0.028778 seconds (Total) #> Chain 3: plot_fitted(fit, xlab = "Year", ylab = "Fleet-level bycatch", @@ -338,8 +338,8 @@

Examples

#> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 1). #> Chain 1: -#> Chain 1: Gradient evaluation took 2.5e-05 seconds -#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.25 seconds. +#> Chain 1: Gradient evaluation took 2.4e-05 seconds +#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.24 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: @@ -356,15 +356,15 @@

Examples

#> Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 1: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.090452 seconds (Warm-up) -#> Chain 1: 0.083831 seconds (Sampling) -#> Chain 1: 0.174283 seconds (Total) +#> Chain 1: Elapsed Time: 0.114801 seconds (Warm-up) +#> Chain 1: 0.145848 seconds (Sampling) +#> Chain 1: 0.260649 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 1.1e-05 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds. +#> Chain 2: Gradient evaluation took 1.4e-05 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.14 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -381,15 +381,15 @@

Examples

#> Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 2: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.093734 seconds (Warm-up) -#> Chain 2: 0.079033 seconds (Sampling) -#> Chain 2: 0.172767 seconds (Total) +#> Chain 2: Elapsed Time: 0.119818 seconds (Warm-up) +#> Chain 2: 0.076438 seconds (Sampling) +#> Chain 2: 0.196256 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 1.6e-05 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.16 seconds. +#> Chain 3: Gradient evaluation took 1.3e-05 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -406,15 +406,15 @@

Examples

#> Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 3: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.106918 seconds (Warm-up) -#> Chain 3: 0.102909 seconds (Sampling) -#> Chain 3: 0.209827 seconds (Total) +#> Chain 3: Elapsed Time: 0.134221 seconds (Warm-up) +#> Chain 3: 0.083619 seconds (Sampling) +#> Chain 3: 0.21784 seconds (Total) #> Chain 3: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 4). #> Chain 4: -#> Chain 4: Gradient evaluation took 1.8e-05 seconds -#> Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.18 seconds. +#> Chain 4: Gradient evaluation took 1.7e-05 seconds +#> Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.17 seconds. #> Chain 4: Adjust your expectations accordingly! #> Chain 4: #> Chain 4: @@ -431,10 +431,16 @@

Examples

#> Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 4: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 4: -#> Chain 4: Elapsed Time: 0.089587 seconds (Warm-up) -#> Chain 4: 0.075889 seconds (Sampling) -#> Chain 4: 0.165476 seconds (Total) +#> Chain 4: Elapsed Time: 0.142798 seconds (Warm-up) +#> Chain 4: 0.101362 seconds (Sampling) +#> Chain 4: 0.24416 seconds (Total) #> Chain 4: +#> Warning: Bulk Effective Samples Size (ESS) is too low, indicating posterior means and medians may be unreliable. +#> Running the chains for more iterations may help. See +#> https://mc-stan.org/misc/warnings.html#bulk-ess +#> Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable. +#> Running the chains for more iterations may help. See +#> https://mc-stan.org/misc/warnings.html#tail-ess # fit a time varying model fit <- fit_bycatch(Takes ~ 1, @@ -444,8 +450,8 @@

Examples

#> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 1). #> Chain 1: -#> Chain 1: Gradient evaluation took 1.7e-05 seconds -#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.17 seconds. +#> Chain 1: Gradient evaluation took 2.6e-05 seconds +#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.26 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: @@ -462,15 +468,15 @@

Examples

#> Chain 1: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 1: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.142954 seconds (Warm-up) -#> Chain 1: 0.099665 seconds (Sampling) -#> Chain 1: 0.242619 seconds (Total) +#> Chain 1: Elapsed Time: 0.198109 seconds (Warm-up) +#> Chain 1: 0.171208 seconds (Sampling) +#> Chain 1: 0.369317 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 1e-05 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds. +#> Chain 2: Gradient evaluation took 1.4e-05 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.14 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -487,15 +493,15 @@

Examples

#> Chain 2: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 2: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.125528 seconds (Warm-up) -#> Chain 2: 0.081922 seconds (Sampling) -#> Chain 2: 0.20745 seconds (Total) +#> Chain 2: Elapsed Time: 0.199543 seconds (Warm-up) +#> Chain 2: 0.210584 seconds (Sampling) +#> Chain 2: 0.410127 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 1.1e-05 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds. +#> Chain 3: Gradient evaluation took 2.3e-05 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.23 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -512,11 +518,11 @@

Examples

#> Chain 3: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 3: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.133941 seconds (Warm-up) -#> Chain 3: 0.124245 seconds (Sampling) -#> Chain 3: 0.258186 seconds (Total) +#> Chain 3: Elapsed Time: 0.205326 seconds (Warm-up) +#> Chain 3: 0.130213 seconds (Sampling) +#> Chain 3: 0.335539 seconds (Total) #> Chain 3: -#> Warning: There were 25 divergent transitions after warmup. See +#> Warning: There were 5 divergent transitions after warmup. See #> https://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup #> to find out why this is a problem and how to eliminate them. #> Warning: There were 1 chains where the estimated Bayesian Fraction of Missing Information was low. See @@ -558,15 +564,15 @@

Examples

#> Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 1: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.102285 seconds (Warm-up) -#> Chain 1: 0.090779 seconds (Sampling) -#> Chain 1: 0.193064 seconds (Total) +#> Chain 1: Elapsed Time: 0.114514 seconds (Warm-up) +#> Chain 1: 0.092788 seconds (Sampling) +#> Chain 1: 0.207302 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 1.6e-05 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.16 seconds. +#> Chain 2: Gradient evaluation took 1.8e-05 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.18 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -583,15 +589,15 @@

Examples

#> Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 2: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.102019 seconds (Warm-up) -#> Chain 2: 0.124445 seconds (Sampling) -#> Chain 2: 0.226464 seconds (Total) +#> Chain 2: Elapsed Time: 0.109146 seconds (Warm-up) +#> Chain 2: 0.117459 seconds (Sampling) +#> Chain 2: 0.226605 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 1.2e-05 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.12 seconds. +#> Chain 3: Gradient evaluation took 1.5e-05 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.15 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -608,15 +614,15 @@

Examples

#> Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 3: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.102889 seconds (Warm-up) -#> Chain 3: 0.087467 seconds (Sampling) -#> Chain 3: 0.190356 seconds (Total) +#> Chain 3: Elapsed Time: 0.114225 seconds (Warm-up) +#> Chain 3: 0.147366 seconds (Sampling) +#> Chain 3: 0.261591 seconds (Total) #> Chain 3: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 4). #> Chain 4: -#> Chain 4: Gradient evaluation took 1e-05 seconds -#> Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds. +#> Chain 4: Gradient evaluation took 1.6e-05 seconds +#> Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.16 seconds. #> Chain 4: Adjust your expectations accordingly! #> Chain 4: #> Chain 4: @@ -633,13 +639,10 @@

Examples

#> Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 4: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 4: -#> Chain 4: Elapsed Time: 0.112103 seconds (Warm-up) -#> Chain 4: 0.079653 seconds (Sampling) -#> Chain 4: 0.191756 seconds (Total) +#> Chain 4: Elapsed Time: 0.150685 seconds (Warm-up) +#> Chain 4: 0.131817 seconds (Sampling) +#> Chain 4: 0.282502 seconds (Total) #> Chain 4: -#> Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable. -#> Running the chains for more iterations may help. See -#> https://mc-stan.org/misc/warnings.html#tail-ess # fit a model with a lognormal distribution d$Takes <- rnorm(nrow(d), 5, 0.1) @@ -653,8 +656,8 @@

Examples

#> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 1). #> Chain 1: -#> Chain 1: Gradient evaluation took 1.8e-05 seconds -#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.18 seconds. +#> Chain 1: Gradient evaluation took 1.9e-05 seconds +#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.19 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: @@ -671,15 +674,15 @@

Examples

#> Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 1: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.058607 seconds (Warm-up) -#> Chain 1: 0.083769 seconds (Sampling) -#> Chain 1: 0.142376 seconds (Total) +#> Chain 1: Elapsed Time: 0.078807 seconds (Warm-up) +#> Chain 1: 0.092738 seconds (Sampling) +#> Chain 1: 0.171545 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 1.1e-05 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds. +#> Chain 2: Gradient evaluation took 1.5e-05 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.15 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -696,15 +699,15 @@

Examples

#> Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 2: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.062824 seconds (Warm-up) -#> Chain 2: 0.093914 seconds (Sampling) -#> Chain 2: 0.156738 seconds (Total) +#> Chain 2: Elapsed Time: 0.089118 seconds (Warm-up) +#> Chain 2: 0.089601 seconds (Sampling) +#> Chain 2: 0.178719 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 1.5e-05 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.15 seconds. +#> Chain 3: Gradient evaluation took 1.3e-05 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -721,15 +724,15 @@

Examples

#> Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 3: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.058587 seconds (Warm-up) -#> Chain 3: 0.04476 seconds (Sampling) -#> Chain 3: 0.103347 seconds (Total) +#> Chain 3: Elapsed Time: 0.070309 seconds (Warm-up) +#> Chain 3: 0.072018 seconds (Sampling) +#> Chain 3: 0.142327 seconds (Total) #> Chain 3: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 4). #> Chain 4: -#> Chain 4: Gradient evaluation took 1e-05 seconds -#> Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds. +#> Chain 4: Gradient evaluation took 1.3e-05 seconds +#> Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds. #> Chain 4: Adjust your expectations accordingly! #> Chain 4: #> Chain 4: @@ -746,9 +749,9 @@

Examples

#> Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 4: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 4: -#> Chain 4: Elapsed Time: 0.054834 seconds (Warm-up) -#> Chain 4: 0.052866 seconds (Sampling) -#> Chain 4: 0.1077 seconds (Total) +#> Chain 4: Elapsed Time: 0.076659 seconds (Warm-up) +#> Chain 4: 0.090967 seconds (Sampling) +#> Chain 4: 0.167626 seconds (Total) #> Chain 4: # add zeros and fit a delta-gamma distribution @@ -764,8 +767,8 @@

Examples

#> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 1). #> Chain 1: -#> Chain 1: Gradient evaluation took 3.7e-05 seconds -#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.37 seconds. +#> Chain 1: Gradient evaluation took 2.8e-05 seconds +#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.28 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: @@ -782,15 +785,15 @@

Examples

#> Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 1: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.163386 seconds (Warm-up) -#> Chain 1: 0.143099 seconds (Sampling) -#> Chain 1: 0.306485 seconds (Total) +#> Chain 1: Elapsed Time: 0.19163 seconds (Warm-up) +#> Chain 1: 0.17531 seconds (Sampling) +#> Chain 1: 0.36694 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 1.9e-05 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.19 seconds. +#> Chain 2: Gradient evaluation took 2.9e-05 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.29 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -807,9 +810,9 @@

Examples

#> Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 2: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.136766 seconds (Warm-up) -#> Chain 2: 0.103817 seconds (Sampling) -#> Chain 2: 0.240583 seconds (Total) +#> Chain 2: Elapsed Time: 0.176724 seconds (Warm-up) +#> Chain 2: 0.172624 seconds (Sampling) +#> Chain 2: 0.349348 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). @@ -832,15 +835,15 @@

Examples

#> Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 3: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.14658 seconds (Warm-up) -#> Chain 3: 0.128287 seconds (Sampling) -#> Chain 3: 0.274867 seconds (Total) +#> Chain 3: Elapsed Time: 0.161976 seconds (Warm-up) +#> Chain 3: 0.140394 seconds (Sampling) +#> Chain 3: 0.30237 seconds (Total) #> Chain 3: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 4). #> Chain 4: -#> Chain 4: Gradient evaluation took 1.8e-05 seconds -#> Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.18 seconds. +#> Chain 4: Gradient evaluation took 2.2e-05 seconds +#> Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.22 seconds. #> Chain 4: Adjust your expectations accordingly! #> Chain 4: #> Chain 4: @@ -857,9 +860,9 @@

Examples

#> Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 4: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 4: -#> Chain 4: Elapsed Time: 0.15925 seconds (Warm-up) -#> Chain 4: 0.167242 seconds (Sampling) -#> Chain 4: 0.326492 seconds (Total) +#> Chain 4: Elapsed Time: 0.185741 seconds (Warm-up) +#> Chain 4: 0.168448 seconds (Sampling) +#> Chain 4: 0.354189 seconds (Total) #> Chain 4: # } diff --git a/docs/reference/get_expanded.html b/docs/reference/get_expanded.html index 361ca72..13c7da9 100644 --- a/docs/reference/get_expanded.html +++ b/docs/reference/get_expanded.html @@ -1,5 +1,5 @@ -get_expanded is a helper function to return a matrix of posterior predictive values for unobserved bycatch — get_expanded • bycatchget_expanded is a helper function to return a matrix of posterior predictive values for unobserved bycatch — get_expanded • bycatch @@ -104,8 +104,8 @@

Examples

#> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 1). #> Chain 1: -#> Chain 1: Gradient evaluation took 1.2e-05 seconds -#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.12 seconds. +#> Chain 1: Gradient evaluation took 1.3e-05 seconds +#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: @@ -122,15 +122,15 @@

Examples

#> Chain 1: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 1: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.012397 seconds (Warm-up) -#> Chain 1: 0.012045 seconds (Sampling) -#> Chain 1: 0.024442 seconds (Total) +#> Chain 1: Elapsed Time: 0.016335 seconds (Warm-up) +#> Chain 1: 0.016032 seconds (Sampling) +#> Chain 1: 0.032367 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 8e-06 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.08 seconds. +#> Chain 2: Gradient evaluation took 1.1e-05 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -147,15 +147,15 @@

Examples

#> Chain 2: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 2: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.012283 seconds (Warm-up) -#> Chain 2: 0.012498 seconds (Sampling) -#> Chain 2: 0.024781 seconds (Total) +#> Chain 2: Elapsed Time: 0.017885 seconds (Warm-up) +#> Chain 2: 0.017153 seconds (Sampling) +#> Chain 2: 0.035038 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 9e-06 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds. +#> Chain 3: Gradient evaluation took 1e-05 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -172,9 +172,9 @@

Examples

#> Chain 3: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 3: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.012452 seconds (Warm-up) -#> Chain 3: 0.011996 seconds (Sampling) -#> Chain 3: 0.024448 seconds (Total) +#> Chain 3: Elapsed Time: 0.016425 seconds (Warm-up) +#> Chain 3: 0.013375 seconds (Sampling) +#> Chain 3: 0.0298 seconds (Total) #> Chain 3: expanded <- get_expanded(fit) # } diff --git a/docs/reference/get_fitted.html b/docs/reference/get_fitted.html index 44ccea0..2a0504c 100644 --- a/docs/reference/get_fitted.html +++ b/docs/reference/get_fitted.html @@ -1,5 +1,5 @@ -get_fitted returns df of observed bycatch estimates (lambda of Poisson), accounting for effort but not accounting for observer coverage — get_fitted • bycatchget_fitted returns df of observed bycatch estimates (lambda of Poisson), accounting for effort but not accounting for observer coverage — get_fitted • bycatch @@ -106,8 +106,8 @@

Examples

#> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 1). #> Chain 1: -#> Chain 1: Gradient evaluation took 1.1e-05 seconds -#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds. +#> Chain 1: Gradient evaluation took 1.3e-05 seconds +#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: @@ -124,15 +124,15 @@

Examples

#> Chain 1: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 1: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.013252 seconds (Warm-up) -#> Chain 1: 0.011722 seconds (Sampling) -#> Chain 1: 0.024974 seconds (Total) +#> Chain 1: Elapsed Time: 0.018033 seconds (Warm-up) +#> Chain 1: 0.015791 seconds (Sampling) +#> Chain 1: 0.033824 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 8e-06 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.08 seconds. +#> Chain 2: Gradient evaluation took 9e-06 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -149,15 +149,15 @@

Examples

#> Chain 2: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 2: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.012269 seconds (Warm-up) -#> Chain 2: 0.011166 seconds (Sampling) -#> Chain 2: 0.023435 seconds (Total) +#> Chain 2: Elapsed Time: 0.01611 seconds (Warm-up) +#> Chain 2: 0.015492 seconds (Sampling) +#> Chain 2: 0.031602 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 9e-06 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds. +#> Chain 3: Gradient evaluation took 1e-05 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -174,25 +174,25 @@

Examples

#> Chain 3: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 3: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.012159 seconds (Warm-up) -#> Chain 3: 0.010798 seconds (Sampling) -#> Chain 3: 0.022957 seconds (Total) +#> Chain 3: Elapsed Time: 0.01666 seconds (Warm-up) +#> Chain 3: 0.013597 seconds (Sampling) +#> Chain 3: 0.030257 seconds (Total) #> Chain 3: get_fitted(fit) #> time mean low high obs -#> 1 2002 0.2784447 0.08283466 0.5879883 0 -#> 2 2003 0.2421258 0.07203014 0.5112942 0 -#> 3 2004 0.2350045 0.06991161 0.4962561 0 -#> 4 2005 0.4700090 0.13982321 0.9925123 0 -#> 5 2006 0.3347034 0.09957107 0.7067890 0 -#> 6 2007 0.3560674 0.10592668 0.7519032 0 -#> 7 2008 0.2350045 0.06991161 0.4962561 0 -#> 8 2009 0.2043827 0.06080191 0.4315925 0 -#> 9 2010 0.5383739 0.16016113 1.1368777 1 -#> 10 2011 0.4792667 0.14257731 1.0120618 3 -#> 11 2012 0.3788557 0.11270598 0.8000250 0 -#> 12 2013 0.2499593 0.07436053 0.5278361 0 -#> 13 2014 0.3460975 0.10296073 0.7308499 0 +#> 1 2002 0.2911843 0.08612770 0.6004800 0 +#> 2 2003 0.2532037 0.07489365 0.5221566 0 +#> 3 2004 0.2457566 0.07269090 0.5067990 0 +#> 4 2005 0.4915131 0.14538180 1.0135980 0 +#> 5 2006 0.3500169 0.10352946 0.7218047 0 +#> 6 2007 0.3723584 0.11013772 0.7678773 0 +#> 7 2008 0.2457566 0.07269090 0.5067990 0 +#> 8 2009 0.2137337 0.06321905 0.4407616 0 +#> 9 2010 0.5630059 0.16652824 1.1610305 1 +#> 10 2011 0.5011944 0.14824538 1.0335628 3 +#> 11 2012 0.3961894 0.11718654 0.8170214 0 +#> 12 2013 0.2613956 0.07731668 0.5390499 0 +#> 13 2014 0.3619324 0.10705387 0.7463767 0 # } diff --git a/docs/reference/get_total.html b/docs/reference/get_total.html index dd59645..7e18508 100644 --- a/docs/reference/get_total.html +++ b/docs/reference/get_total.html @@ -1,5 +1,5 @@ -get_total is a helper function to return a matrix of total estimated bycatch — get_total • bycatchget_total is a helper function to return a matrix of total estimated bycatch — get_total • bycatch @@ -104,8 +104,8 @@

Examples

#> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 1). #> Chain 1: -#> Chain 1: Gradient evaluation took 1.3e-05 seconds -#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds. +#> Chain 1: Gradient evaluation took 1.5e-05 seconds +#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.15 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: @@ -122,15 +122,15 @@

Examples

#> Chain 1: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 1: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.013386 seconds (Warm-up) -#> Chain 1: 0.010576 seconds (Sampling) -#> Chain 1: 0.023962 seconds (Total) +#> Chain 1: Elapsed Time: 0.01851 seconds (Warm-up) +#> Chain 1: 0.016295 seconds (Sampling) +#> Chain 1: 0.034805 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 2.1e-05 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.21 seconds. +#> Chain 2: Gradient evaluation took 9e-06 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -147,15 +147,15 @@

Examples

#> Chain 2: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 2: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.01352 seconds (Warm-up) -#> Chain 2: 0.010831 seconds (Sampling) -#> Chain 2: 0.024351 seconds (Total) +#> Chain 2: Elapsed Time: 0.017269 seconds (Warm-up) +#> Chain 2: 0.016731 seconds (Sampling) +#> Chain 2: 0.034 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 1.2e-05 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.12 seconds. +#> Chain 3: Gradient evaluation took 1.1e-05 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -172,9 +172,9 @@

Examples

#> Chain 3: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 3: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.014103 seconds (Warm-up) -#> Chain 3: 0.012194 seconds (Sampling) -#> Chain 3: 0.026297 seconds (Total) +#> Chain 3: Elapsed Time: 0.016809 seconds (Warm-up) +#> Chain 3: 0.015186 seconds (Sampling) +#> Chain 3: 0.031995 seconds (Total) #> Chain 3: expanded <- get_total(fit) # } diff --git a/docs/reference/index.html b/docs/reference/index.html index b48f8ea..1950dcc 100644 --- a/docs/reference/index.html +++ b/docs/reference/index.html @@ -1,5 +1,5 @@ -Function reference • bycatchFunction reference • bycatch diff --git a/docs/reference/plot_expanded-1.png b/docs/reference/plot_expanded-1.png index 368301f..191544f 100644 Binary files a/docs/reference/plot_expanded-1.png and b/docs/reference/plot_expanded-1.png differ diff --git a/docs/reference/plot_expanded.html b/docs/reference/plot_expanded.html index 3c2ca44..a627e1b 100644 --- a/docs/reference/plot_expanded.html +++ b/docs/reference/plot_expanded.html @@ -1,5 +1,5 @@ -plot_expanded is makes plots of the expanded bycatch estimates, accounting for observer coverage and effort — plot_expanded • bycatchplot_expanded is makes plots of the expanded bycatch estimates, accounting for observer coverage and effort — plot_expanded • bycatch @@ -144,15 +144,15 @@

Examples

#> Chain 1: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 1: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.013448 seconds (Warm-up) -#> Chain 1: 0.012482 seconds (Sampling) -#> Chain 1: 0.02593 seconds (Total) +#> Chain 1: Elapsed Time: 0.016722 seconds (Warm-up) +#> Chain 1: 0.012483 seconds (Sampling) +#> Chain 1: 0.029205 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 1.2e-05 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.12 seconds. +#> Chain 2: Gradient evaluation took 1e-05 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -169,15 +169,15 @@

Examples

#> Chain 2: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 2: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.013754 seconds (Warm-up) -#> Chain 2: 0.013112 seconds (Sampling) -#> Chain 2: 0.026866 seconds (Total) +#> Chain 2: Elapsed Time: 0.016139 seconds (Warm-up) +#> Chain 2: 0.01595 seconds (Sampling) +#> Chain 2: 0.032089 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 1.3e-05 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds. +#> Chain 3: Gradient evaluation took 1e-05 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -194,9 +194,9 @@


#> Chain 3: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 3: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.01389 seconds (Warm-up) -#> Chain 3: 0.01126 seconds (Sampling) -#> Chain 3: 0.02515 seconds (Total) +#> Chain 3: Elapsed Time: 0.021423 seconds (Warm-up) +#> Chain 3: 0.017192 seconds (Sampling) +#> Chain 3: 0.038615 seconds (Total) #> Chain 3: plot_expanded( fitted_model = fit, diff --git a/docs/reference/plot_fitted-1.png b/docs/reference/plot_fitted-1.png index b107d3e..983aa23 100644 Binary files a/docs/reference/plot_fitted-1.png and b/docs/reference/plot_fitted-1.png differ diff --git a/docs/reference/plot_fitted.html b/docs/reference/plot_fitted.html index 2021fd4..c736935 100644 --- a/docs/reference/plot_fitted.html +++ b/docs/reference/plot_fitted.html @@ -1,5 +1,5 @@ -plot_fitted makes plots bycatch estimates (lambda of Poisson), accounting for effort but not accounting for observer coverage — plot_fitted • bycatchplot_fitted makes plots bycatch estimates (lambda of Poisson), accounting for effort but not accounting for observer coverage — plot_fitted • bycatch @@ -142,15 +142,15 @@


#> Chain 1: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 1: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.01134 seconds (Warm-up) -#> Chain 1: 0.011191 seconds (Sampling) -#> Chain 1: 0.022531 seconds (Total) +#> Chain 1: Elapsed Time: 0.016464 seconds (Warm-up) +#> Chain 1: 0.013108 seconds (Sampling) +#> Chain 1: 0.029572 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 8e-06 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.08 seconds. +#> Chain 2: Gradient evaluation took 9e-06 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -167,15 +167,15 @@


#> Chain 2: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 2: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.01173 seconds (Warm-up) -#> Chain 2: 0.011888 seconds (Sampling) -#> Chain 2: 0.023618 seconds (Total) +#> Chain 2: Elapsed Time: 0.014999 seconds (Warm-up) +#> Chain 2: 0.015074 seconds (Sampling) +#> Chain 2: 0.030073 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 1e-05 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds. +#> Chain 3: Gradient evaluation took 9e-06 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -192,9 +192,9 @@


#> Chain 3: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 3: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.013134 seconds (Warm-up) -#> Chain 3: 0.011859 seconds (Sampling) -#> Chain 3: 0.024993 seconds (Total) +#> Chain 3: Elapsed Time: 0.015753 seconds (Warm-up) +#> Chain 3: 0.016185 seconds (Sampling) +#> Chain 3: 0.031938 seconds (Total) #> Chain 3: plot_fitted(fit, xlab = "Year", ylab = "Fleet-level bycatch", @@ -212,8 +212,8 @@


#> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 1). #> Chain 1: -#> Chain 1: Gradient evaluation took 1.4e-05 seconds -#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.14 seconds. +#> Chain 1: Gradient evaluation took 1.7e-05 seconds +#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.17 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: @@ -230,15 +230,15 @@


#> Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 1: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.087667 seconds (Warm-up) -#> Chain 1: 0.100111 seconds (Sampling) -#> Chain 1: 0.187778 seconds (Total) +#> Chain 1: Elapsed Time: 0.147065 seconds (Warm-up) +#> Chain 1: 0.112022 seconds (Sampling) +#> Chain 1: 0.259087 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 1.1e-05 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds. +#> Chain 2: Gradient evaluation took 1.5e-05 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.15 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -255,9 +255,9 @@


#> Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 2: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.088077 seconds (Warm-up) -#> Chain 2: 0.082645 seconds (Sampling) -#> Chain 2: 0.170722 seconds (Total) +#> Chain 2: Elapsed Time: 0.131502 seconds (Warm-up) +#> Chain 2: 0.091208 seconds (Sampling) +#> Chain 2: 0.22271 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). @@ -280,15 +280,15 @@


#> Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 3: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.082372 seconds (Warm-up) -#> Chain 3: 0.086456 seconds (Sampling) -#> Chain 3: 0.168828 seconds (Total) +#> Chain 3: Elapsed Time: 0.136492 seconds (Warm-up) +#> Chain 3: 0.135635 seconds (Sampling) +#> Chain 3: 0.272127 seconds (Total) #> Chain 3: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 4). #> Chain 4: -#> Chain 4: Gradient evaluation took 1.2e-05 seconds -#> Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.12 seconds. +#> Chain 4: Gradient evaluation took 2.3e-05 seconds +#> Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.23 seconds. #> Chain 4: Adjust your expectations accordingly! #> Chain 4: #> Chain 4: @@ -305,9 +305,9 @@


#> Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 4: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 4: -#> Chain 4: Elapsed Time: 0.088284 seconds (Warm-up) -#> Chain 4: 0.07482 seconds (Sampling) -#> Chain 4: 0.163104 seconds (Total) +#> Chain 4: Elapsed Time: 0.120382 seconds (Warm-up) +#> Chain 4: 0.12716 seconds (Sampling) +#> Chain 4: 0.247542 seconds (Total) #> Chain 4: #> Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable. #> Running the chains for more iterations may help. See @@ -321,8 +321,8 @@


#> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 1). #> Chain 1: -#> Chain 1: Gradient evaluation took 1.7e-05 seconds -#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.17 seconds. +#> Chain 1: Gradient evaluation took 1.8e-05 seconds +#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.18 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: @@ -339,15 +339,15 @@


#> Chain 1: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 1: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.182595 seconds (Warm-up) -#> Chain 1: 0.412423 seconds (Sampling) -#> Chain 1: 0.595018 seconds (Total) +#> Chain 1: Elapsed Time: 0.234767 seconds (Warm-up) +#> Chain 1: 0.171553 seconds (Sampling) +#> Chain 1: 0.40632 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 1.2e-05 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.12 seconds. +#> Chain 2: Gradient evaluation took 1.3e-05 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -364,15 +364,15 @@


#> Chain 2: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 2: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.19685 seconds (Warm-up) -#> Chain 2: 0.132619 seconds (Sampling) -#> Chain 2: 0.329469 seconds (Total) +#> Chain 2: Elapsed Time: 0.195732 seconds (Warm-up) +#> Chain 2: 0.171898 seconds (Sampling) +#> Chain 2: 0.36763 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 1.1e-05 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds. +#> Chain 3: Gradient evaluation took 1.4e-05 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.14 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -389,19 +389,14 @@


#> Chain 3: Iteration: 900 / 1000 [ 90%] (Sampling) #> Chain 3: Iteration: 1000 / 1000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.113471 seconds (Warm-up) -#> Chain 3: 0.121102 seconds (Sampling) -#> Chain 3: 0.234573 seconds (Total) +#> Chain 3: Elapsed Time: 0.21817 seconds (Warm-up) +#> Chain 3: 0.170343 seconds (Sampling) +#> Chain 3: 0.388513 seconds (Total) #> Chain 3: -#> Warning: There were 2 divergent transitions after warmup. See +#> Warning: There were 1 divergent transitions after warmup. See #> https://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup #> to find out why this is a problem and how to eliminate them. -#> Warning: There were 1 chains where the estimated Bayesian Fraction of Missing Information was low. See -#> https://mc-stan.org/misc/warnings.html#bfmi-low #> Warning: Examine the pairs() plot to diagnose sampling problems -#> Warning: The largest R-hat is NA, indicating chains have not mixed. -#> Running the chains for more iterations may help. See -#> https://mc-stan.org/misc/warnings.html#r-hat #> Warning: Bulk Effective Samples Size (ESS) is too low, indicating posterior means and medians may be unreliable. #> Running the chains for more iterations may help. See #> https://mc-stan.org/misc/warnings.html#bulk-ess @@ -420,8 +415,8 @@
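The hunks above record rstan sampler warnings from the package examples (a divergent transition after warmup and low bulk effective sample size). As a minimal sketch of how such warnings are usually addressed, and not the package's own fitting interface: the compiled model can be re-run with more draws and a tighter adapt_delta directly through rstan. Here stan_data is a hypothetical list matching the model's data block, and stanmodels is the internal object built in R/stanmodels.R.

# Minimal sketch, assuming direct use of the package-internal stanmodel object;
# the bycatch fitting wrapper may expose equivalent arguments.
library(rstan)
fit <- sampling(
  bycatch:::stanmodels$bycatch,
  data    = stan_data,                  # hypothetical data list for this model
  chains  = 4,
  iter    = 4000,                       # more draws to raise bulk/tail ESS
  control = list(adapt_delta = 0.99,    # smaller step sizes, fewer divergences
                 max_treedepth = 12)
)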


#> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 1). #> Chain 1: -#> Chain 1: Gradient evaluation took 1.6e-05 seconds -#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.16 seconds. +#> Chain 1: Gradient evaluation took 1.8e-05 seconds +#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.18 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: @@ -438,15 +433,15 @@


#> Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 1: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 1: -#> Chain 1: Elapsed Time: 0.096085 seconds (Warm-up) -#> Chain 1: 0.096155 seconds (Sampling) -#> Chain 1: 0.19224 seconds (Total) +#> Chain 1: Elapsed Time: 0.123247 seconds (Warm-up) +#> Chain 1: 0.105668 seconds (Sampling) +#> Chain 1: 0.228915 seconds (Total) #> Chain 1: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 2). #> Chain 2: -#> Chain 2: Gradient evaluation took 1.2e-05 seconds -#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.12 seconds. +#> Chain 2: Gradient evaluation took 2e-05 seconds +#> Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.2 seconds. #> Chain 2: Adjust your expectations accordingly! #> Chain 2: #> Chain 2: @@ -463,15 +458,15 @@


#> Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 2: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 2: -#> Chain 2: Elapsed Time: 0.095219 seconds (Warm-up) -#> Chain 2: 0.09967 seconds (Sampling) -#> Chain 2: 0.194889 seconds (Total) +#> Chain 2: Elapsed Time: 0.138872 seconds (Warm-up) +#> Chain 2: 0.091018 seconds (Sampling) +#> Chain 2: 0.22989 seconds (Total) #> Chain 2: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 3). #> Chain 3: -#> Chain 3: Gradient evaluation took 1.1e-05 seconds -#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds. +#> Chain 3: Gradient evaluation took 1.4e-05 seconds +#> Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.14 seconds. #> Chain 3: Adjust your expectations accordingly! #> Chain 3: #> Chain 3: @@ -488,15 +483,15 @@


#> Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 3: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 3: -#> Chain 3: Elapsed Time: 0.104922 seconds (Warm-up) -#> Chain 3: 0.102727 seconds (Sampling) -#> Chain 3: 0.207649 seconds (Total) +#> Chain 3: Elapsed Time: 0.137062 seconds (Warm-up) +#> Chain 3: 0.15276 seconds (Sampling) +#> Chain 3: 0.289822 seconds (Total) #> Chain 3: #> #> SAMPLING FOR MODEL 'bycatch' NOW (CHAIN 4). #> Chain 4: -#> Chain 4: Gradient evaluation took 1.2e-05 seconds -#> Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.12 seconds. +#> Chain 4: Gradient evaluation took 1.4e-05 seconds +#> Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.14 seconds. #> Chain 4: Adjust your expectations accordingly! #> Chain 4: #> Chain 4: @@ -513,13 +508,10 @@


#> Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling) #> Chain 4: Iteration: 2000 / 2000 [100%] (Sampling) #> Chain 4: -#> Chain 4: Elapsed Time: 0.109951 seconds (Warm-up) -#> Chain 4: 0.105392 seconds (Sampling) -#> Chain 4: 0.215343 seconds (Total) +#> Chain 4: Elapsed Time: 0.128982 seconds (Warm-up) +#> Chain 4: 0.100159 seconds (Sampling) +#> Chain 4: 0.229141 seconds (Total) #> Chain 4: -#> Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable. -#> Running the chains for more iterations may help. See -#> https://mc-stan.org/misc/warnings.html#tail-ess # } diff --git a/docs/sitemap.xml b/docs/sitemap.xml index d9ebf48..3ab8d8c 100644 --- a/docs/sitemap.xml +++ b/docs/sitemap.xml @@ -15,12 +15,6 @@ /articles/a04_diagnosing_problems.html - - /articles/background.html - - - /articles/expanding.html - /articles/index.html diff --git a/inst/include/meta_header.hpp b/inst/include/stan_meta_header.hpp similarity index 100% rename from inst/include/meta_header.hpp rename to inst/include/stan_meta_header.hpp diff --git a/src/stan_files/bycatch.stan b/inst/stan/bycatch.stan similarity index 100% rename from src/stan_files/bycatch.stan rename to inst/stan/bycatch.stan diff --git a/src/stan_files/chunks/license.stan b/inst/stan/include/license.stan similarity index 100% rename from src/stan_files/chunks/license.stan rename to inst/stan/include/license.stan diff --git a/man/bycatch-package.Rd b/man/bycatch-package.Rd index 11f4497..40518f1 100644 --- a/man/bycatch-package.Rd +++ b/man/bycatch-package.Rd @@ -9,5 +9,5 @@ A DESCRIPTION OF THE PACKAGE } \references{ -Stan Development Team (2018). RStan: the R interface to Stan. R package version 2.18.2. http://mc-stan.org +Stan Development Team (2023). RStan: the R interface to Stan. R package version 2.21.8. https://mc-stan.org } diff --git a/pkgdown/extra.css b/pkgdown/extra.css deleted file mode 100644 index ed01162..0000000 --- a/pkgdown/extra.css +++ /dev/null @@ -1 +0,0 @@ -@import url("https://nmfs-general-modeling-tools.github.io/nmfspalette/extra.css"); diff --git a/src/Makevars b/src/Makevars index ccf5b3a..6670e5e 100644 --- a/src/Makevars +++ b/src/Makevars @@ -1,4 +1,7 @@ +# Generated by rstantools. Do not edit by hand. 
+ STANHEADERS_SRC = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "message()" -e "cat(system.file('include', 'src', package = 'StanHeaders', mustWork = TRUE))" -e "message()" | grep "StanHeaders") +<<<<<<< Updated upstream STANC_FLAGS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "cat(ifelse(utils::packageVersion('rstan') >= 2.26, '-DUSE_STANC3',''))") PKG_CPPFLAGS = -I"../inst/include" -I"$(STANHEADERS_SRC)" -DBOOST_DISABLE_ASSERTS -DEIGEN_NO_DEBUG -DBOOST_MATH_OVERFLOW_ERROR_POLICY=errno_on_error $(STANC_FLAGS) -D_HAS_AUTO_PTR_ETC=0 @@ -22,3 +25,12 @@ clean: %.cc: %.stan "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "source(file.path('..', 'tools', 'make_cc.R')); make_cc(commandArgs(TRUE))" $< +======= + +STANC_FLAGS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "cat(ifelse(utils::packageVersion('rstan') >= 2.26, '-DUSE_STANC3',''))") +PKG_CPPFLAGS = -I"../inst/include" -I"$(STANHEADERS_SRC)" -DBOOST_DISABLE_ASSERTS -DEIGEN_NO_DEBUG -DBOOST_MATH_OVERFLOW_ERROR_POLICY=errno_on_error $(STANC_FLAGS) -D_HAS_AUTO_PTR_ETC=0 +PKG_CXXFLAGS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "RcppParallel::CxxFlags()") $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "StanHeaders:::CxxFlags()") +PKG_LIBS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "RcppParallel::RcppParallelLibs()") $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "StanHeaders:::LdFlags()") + +CXX_STD = CXX17 +>>>>>>> Stashed changes diff --git a/src/Makevars.win b/src/Makevars.win index 41665c6..1470bb9 100644 --- a/src/Makevars.win +++ b/src/Makevars.win @@ -1,9 +1,12 @@ +# Generated by rstantools. Do not edit by hand. + STANHEADERS_SRC = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "message()" -e "cat(system.file('include', 'src', package = 'StanHeaders', mustWork = TRUE))" -e "message()" | grep "StanHeaders") STANC_FLAGS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "cat(ifelse(utils::packageVersion('rstan') >= 2.26, '-DUSE_STANC3',''))") PKG_CPPFLAGS = -I"../inst/include" -I"$(STANHEADERS_SRC)" -DBOOST_DISABLE_ASSERTS -DEIGEN_NO_DEBUG -DRCPP_PARALLEL_USE_TBB=1 $(STANC_FLAGS) -D_HAS_AUTO_PTR_ETC=0 PKG_CXXFLAGS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "RcppParallel::CxxFlags()") $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "StanHeaders:::CxxFlags()") PKG_LIBS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "RcppParallel::RcppParallelLibs()") $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "StanHeaders:::LdFlags()") +<<<<<<< Updated upstream PKG_CXXFLAGS += -flto=jobserver PKG_LIBS += -Wl,--allow-multiple-definition @@ -29,3 +32,7 @@ clean: .phony: clean +======= + +CXX_STD = CXX17 +>>>>>>> Stashed changes diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp new file mode 100644 index 0000000..42caca9 --- /dev/null +++ b/src/RcppExports.cpp @@ -0,0 +1,25 @@ +// Generated by using Rcpp::compileAttributes() -> do not edit by hand +// Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 + +#include +#include + +using namespace Rcpp; + +#ifdef RCPP_USE_GLOBAL_ROSTREAM +Rcpp::Rostream& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); +Rcpp::Rostream& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); +#endif + + +RcppExport SEXP _rcpp_module_boot_stan_fit4bycatch_mod(); + +static const R_CallMethodDef CallEntries[] = { + {"_rcpp_module_boot_stan_fit4bycatch_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4bycatch_mod, 0}, + {NULL, NULL, 0} +}; + +RcppExport void R_init_bycatch(DllInfo *dll) { + R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); + R_useDynamicSymbols(dll, FALSE); +} diff --git a/src/RcppExports.o 
b/src/RcppExports.o new file mode 100644 index 0000000..38d1b71 Binary files /dev/null and b/src/RcppExports.o differ diff --git a/src/bycatch.so b/src/bycatch.so new file mode 100755 index 0000000..0018544 Binary files /dev/null and b/src/bycatch.so differ diff --git a/src/init.cpp b/src/init.cpp deleted file mode 100644 index 20caf9a..0000000 --- a/src/init.cpp +++ /dev/null @@ -1,20 +0,0 @@ -// Generated by the rstantools package - - -#include -#include -#include -#include -#include - - -static const R_CallMethodDef CallEntries[] = { - {NULL, NULL, 0} -}; - - -void attribute_visible R_init_bayesdfa(DllInfo *dll) { - // next line is necessary to avoid a NOTE from R CMD check - R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); - R_useDynamicSymbols(dll, TRUE); // necessary for .onLoad() to work -} diff --git a/src/stanExports_bycatch.cc b/src/stanExports_bycatch.cc new file mode 100644 index 0000000..bd76712 --- /dev/null +++ b/src/stanExports_bycatch.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. + +#include +using namespace Rcpp ; +#include "stanExports_bycatch.h" + +RCPP_MODULE(stan_fit4bycatch_mod) { + + + class_ >("rstantools_model_bycatch") + + .constructor() + + + .method("call_sampler", &rstan::stan_fit ::call_sampler) + .method("param_names", &rstan::stan_fit ::param_names) + .method("param_names_oi", &rstan::stan_fit ::param_names_oi) + .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) + .method("param_dims", &rstan::stan_fit ::param_dims) + .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) + .method("update_param_oi", &rstan::stan_fit ::update_param_oi) + .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) + .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) + .method("log_prob", &rstan::stan_fit ::log_prob) + .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) + .method("constrain_pars", &rstan::stan_fit ::constrain_pars) + .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) + .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) + .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) + .method("standalone_gqs", &rstan::stan_fit ::standalone_gqs) + ; +} diff --git a/src/stanExports_bycatch.h b/src/stanExports_bycatch.h new file mode 100644 index 0000000..e5bddee --- /dev/null +++ b/src/stanExports_bycatch.h @@ -0,0 +1,1599 @@ +// Generated by rstantools. Do not edit by hand. + +/* + bycatch is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + bycatch is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with bycatch. If not, see . 
+*/ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.21.0 +#include +namespace model_bycatch_namespace { +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; +static int current_statement_begin__; +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_bycatch"); + reader.add_event(302, 300, "end", "model_bycatch"); + return reader; +} +#include +class model_bycatch + : public stan::model::model_base_crtp { +private: + int n_row; + vector_d effort; + vector_d new_effort; + std::vector yint; + vector_d yreal; + std::vector time; + int n_year; + int K; + matrix_d x; + int family; + int time_varying; + int est_phi; + int est_theta; + int est_sigma; + int est_cv; + int is_discrete; +public: + model_bycatch(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, 0, pstream__); + } + model_bycatch(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, random_seed__, pstream__); + } + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + current_statement_begin__ = -1; + static const char* function__ = "model_bycatch_namespace::model_bycatch"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + try { + // initialize data block variables from context__ + current_statement_begin__ = 2; + context__.validate_dims("data initialization", "n_row", "int", context__.to_vec()); + n_row = int(0); + vals_i__ = context__.vals_i("n_row"); + pos__ = 0; + n_row = vals_i__[pos__++]; + check_greater_or_equal(function__, "n_row", n_row, 0); + current_statement_begin__ = 3; + validate_non_negative_index("effort", "n_row", n_row); + context__.validate_dims("data initialization", "effort", "vector_d", context__.to_vec(n_row)); + effort = Eigen::Matrix(n_row); + vals_r__ = context__.vals_r("effort"); + pos__ = 0; + size_t effort_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < effort_j_1_max__; ++j_1__) { + effort(j_1__) = vals_r__[pos__++]; + } + current_statement_begin__ = 4; + validate_non_negative_index("new_effort", "n_row", n_row); + context__.validate_dims("data initialization", "new_effort", "vector_d", context__.to_vec(n_row)); + new_effort = Eigen::Matrix(n_row); + vals_r__ = context__.vals_r("new_effort"); + pos__ = 0; + size_t new_effort_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < new_effort_j_1_max__; ++j_1__) { + new_effort(j_1__) = vals_r__[pos__++]; + } + current_statement_begin__ = 5; + validate_non_negative_index("yint", "n_row", n_row); + context__.validate_dims("data initialization", "yint", "int", context__.to_vec(n_row)); + yint = std::vector(n_row, int(0)); + vals_i__ = context__.vals_i("yint"); + pos__ = 0; + size_t yint_k_0_max__ = n_row; + for (size_t k_0__ = 0; k_0__ < yint_k_0_max__; 
++k_0__) { + yint[k_0__] = vals_i__[pos__++]; + } + current_statement_begin__ = 6; + validate_non_negative_index("yreal", "n_row", n_row); + context__.validate_dims("data initialization", "yreal", "vector_d", context__.to_vec(n_row)); + yreal = Eigen::Matrix(n_row); + vals_r__ = context__.vals_r("yreal"); + pos__ = 0; + size_t yreal_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < yreal_j_1_max__; ++j_1__) { + yreal(j_1__) = vals_r__[pos__++]; + } + current_statement_begin__ = 7; + validate_non_negative_index("time", "n_row", n_row); + context__.validate_dims("data initialization", "time", "int", context__.to_vec(n_row)); + time = std::vector(n_row, int(0)); + vals_i__ = context__.vals_i("time"); + pos__ = 0; + size_t time_k_0_max__ = n_row; + for (size_t k_0__ = 0; k_0__ < time_k_0_max__; ++k_0__) { + time[k_0__] = vals_i__[pos__++]; + } + current_statement_begin__ = 8; + context__.validate_dims("data initialization", "n_year", "int", context__.to_vec()); + n_year = int(0); + vals_i__ = context__.vals_i("n_year"); + pos__ = 0; + n_year = vals_i__[pos__++]; + check_greater_or_equal(function__, "n_year", n_year, 0); + current_statement_begin__ = 9; + context__.validate_dims("data initialization", "K", "int", context__.to_vec()); + K = int(0); + vals_i__ = context__.vals_i("K"); + pos__ = 0; + K = vals_i__[pos__++]; + check_greater_or_equal(function__, "K", K, 0); + current_statement_begin__ = 10; + validate_non_negative_index("x", "n_row", n_row); + validate_non_negative_index("x", "K", K); + context__.validate_dims("data initialization", "x", "matrix_d", context__.to_vec(n_row,K)); + x = Eigen::Matrix(n_row, K); + vals_r__ = context__.vals_r("x"); + pos__ = 0; + size_t x_j_2_max__ = K; + size_t x_j_1_max__ = n_row; + for (size_t j_2__ = 0; j_2__ < x_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < x_j_1_max__; ++j_1__) { + x(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 11; + context__.validate_dims("data initialization", "family", "int", context__.to_vec()); + family = int(0); + vals_i__ = context__.vals_i("family"); + pos__ = 0; + family = vals_i__[pos__++]; + current_statement_begin__ = 12; + context__.validate_dims("data initialization", "time_varying", "int", context__.to_vec()); + time_varying = int(0); + vals_i__ = context__.vals_i("time_varying"); + pos__ = 0; + time_varying = vals_i__[pos__++]; + // initialize transformed data variables + current_statement_begin__ = 15; + est_phi = int(0); + stan::math::fill(est_phi, std::numeric_limits::min()); + current_statement_begin__ = 16; + est_theta = int(0); + stan::math::fill(est_theta, std::numeric_limits::min()); + current_statement_begin__ = 17; + est_sigma = int(0); + stan::math::fill(est_sigma, std::numeric_limits::min()); + current_statement_begin__ = 18; + est_cv = int(0); + stan::math::fill(est_cv, std::numeric_limits::min()); + current_statement_begin__ = 19; + is_discrete = int(0); + stan::math::fill(is_discrete, std::numeric_limits::min()); + // execute transformed data statements + current_statement_begin__ = 21; + stan::math::assign(est_phi, 0); + current_statement_begin__ = 22; + stan::math::assign(est_theta, 0); + current_statement_begin__ = 23; + stan::math::assign(est_sigma, 0); + current_statement_begin__ = 24; + stan::math::assign(est_cv, 0); + current_statement_begin__ = 25; + stan::math::assign(is_discrete, 0); + current_statement_begin__ = 26; + if (as_bool(logical_eq(family, 2))) { + current_statement_begin__ = 26; + stan::math::assign(est_phi, 1); + } + current_statement_begin__ 
= 27; + if (as_bool(logical_eq(family, 3))) { + current_statement_begin__ = 27; + stan::math::assign(est_theta, 1); + } + current_statement_begin__ = 28; + if (as_bool(logical_eq(family, 4))) { + current_statement_begin__ = 29; + stan::math::assign(est_phi, 1); + current_statement_begin__ = 30; + stan::math::assign(est_theta, 1); + } + current_statement_begin__ = 32; + if (as_bool(logical_lt(family, 5))) { + current_statement_begin__ = 32; + stan::math::assign(is_discrete, 1); + } + current_statement_begin__ = 34; + if (as_bool(logical_eq(family, 5))) { + current_statement_begin__ = 34; + stan::math::assign(est_sigma, 1); + } + current_statement_begin__ = 35; + if (as_bool(logical_eq(family, 7))) { + current_statement_begin__ = 36; + stan::math::assign(est_sigma, 1); + current_statement_begin__ = 37; + stan::math::assign(est_theta, 1); + } + current_statement_begin__ = 40; + if (as_bool(logical_eq(family, 6))) { + current_statement_begin__ = 40; + stan::math::assign(est_cv, 1); + } + current_statement_begin__ = 41; + if (as_bool(logical_eq(family, 8))) { + current_statement_begin__ = 42; + stan::math::assign(est_cv, 1); + current_statement_begin__ = 43; + stan::math::assign(est_theta, 1); + } + current_statement_begin__ = 46; + if (as_bool(logical_eq(family, 9))) { + current_statement_begin__ = 46; + stan::math::assign(est_sigma, 1); + } + current_statement_begin__ = 47; + if (as_bool(logical_eq(family, 10))) { + current_statement_begin__ = 48; + stan::math::assign(est_sigma, 1); + current_statement_begin__ = 49; + stan::math::assign(est_theta, 1); + } + // validate transformed data + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 53; + validate_non_negative_index("beta", "K", K); + num_params_r__ += K; + current_statement_begin__ = 54; + validate_non_negative_index("est_time_dev", "(time_varying * (n_year - 1))", (time_varying * (n_year - 1))); + num_params_r__ += (time_varying * (n_year - 1)); + current_statement_begin__ = 55; + validate_non_negative_index("sigma_rw", "time_varying", time_varying); + num_params_r__ += (1 * time_varying); + current_statement_begin__ = 56; + validate_non_negative_index("sigma_logn", "est_sigma", est_sigma); + num_params_r__ += (1 * est_sigma); + current_statement_begin__ = 57; + validate_non_negative_index("cv_gamma", "est_cv", est_cv); + num_params_r__ += (1 * est_cv); + current_statement_begin__ = 58; + validate_non_negative_index("nb2_phi", "est_phi", est_phi); + num_params_r__ += (1 * est_phi); + current_statement_begin__ = 59; + validate_non_negative_index("theta", "est_theta", est_theta); + num_params_r__ += (1 * est_theta); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + ~model_bycatch() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + current_statement_begin__ = 53; + if (!(context__.contains_r("beta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable beta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = 
context__.vals_r("beta"); + pos__ = 0U; + validate_non_negative_index("beta", "K", K); + context__.validate_dims("parameter initialization", "beta", "vector_d", context__.to_vec(K)); + Eigen::Matrix beta(K); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + beta(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_unconstrain(beta); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable beta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 54; + if (!(context__.contains_r("est_time_dev"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable est_time_dev missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("est_time_dev"); + pos__ = 0U; + validate_non_negative_index("est_time_dev", "(time_varying * (n_year - 1))", (time_varying * (n_year - 1))); + context__.validate_dims("parameter initialization", "est_time_dev", "vector_d", context__.to_vec((time_varying * (n_year - 1)))); + Eigen::Matrix est_time_dev((time_varying * (n_year - 1))); + size_t est_time_dev_j_1_max__ = (time_varying * (n_year - 1)); + for (size_t j_1__ = 0; j_1__ < est_time_dev_j_1_max__; ++j_1__) { + est_time_dev(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_unconstrain(est_time_dev); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable est_time_dev: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 55; + if (!(context__.contains_r("sigma_rw"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma_rw missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma_rw"); + pos__ = 0U; + validate_non_negative_index("sigma_rw", "time_varying", time_varying); + context__.validate_dims("parameter initialization", "sigma_rw", "double", context__.to_vec(time_varying)); + std::vector sigma_rw(time_varying, double(0)); + size_t sigma_rw_k_0_max__ = time_varying; + for (size_t k_0__ = 0; k_0__ < sigma_rw_k_0_max__; ++k_0__) { + sigma_rw[k_0__] = vals_r__[pos__++]; + } + size_t sigma_rw_i_0_max__ = time_varying; + for (size_t i_0__ = 0; i_0__ < sigma_rw_i_0_max__; ++i_0__) { + try { + writer__.scalar_lb_unconstrain(0, sigma_rw[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma_rw: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 56; + if (!(context__.contains_r("sigma_logn"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma_logn missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma_logn"); + pos__ = 0U; + validate_non_negative_index("sigma_logn", "est_sigma", est_sigma); + context__.validate_dims("parameter initialization", "sigma_logn", "double", context__.to_vec(est_sigma)); + std::vector sigma_logn(est_sigma, double(0)); + size_t sigma_logn_k_0_max__ = est_sigma; + for (size_t k_0__ = 0; k_0__ < sigma_logn_k_0_max__; ++k_0__) { + sigma_logn[k_0__] = vals_r__[pos__++]; + } + size_t sigma_logn_i_0_max__ = est_sigma; + for (size_t i_0__ = 0; i_0__ < sigma_logn_i_0_max__; ++i_0__) { + try { + writer__.scalar_lb_unconstrain(0, sigma_logn[i_0__]); + } catch (const std::exception& e) { + 
stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma_logn: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 57; + if (!(context__.contains_r("cv_gamma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable cv_gamma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("cv_gamma"); + pos__ = 0U; + validate_non_negative_index("cv_gamma", "est_cv", est_cv); + context__.validate_dims("parameter initialization", "cv_gamma", "double", context__.to_vec(est_cv)); + std::vector cv_gamma(est_cv, double(0)); + size_t cv_gamma_k_0_max__ = est_cv; + for (size_t k_0__ = 0; k_0__ < cv_gamma_k_0_max__; ++k_0__) { + cv_gamma[k_0__] = vals_r__[pos__++]; + } + size_t cv_gamma_i_0_max__ = est_cv; + for (size_t i_0__ = 0; i_0__ < cv_gamma_i_0_max__; ++i_0__) { + try { + writer__.scalar_lb_unconstrain(0, cv_gamma[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable cv_gamma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 58; + if (!(context__.contains_r("nb2_phi"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable nb2_phi missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("nb2_phi"); + pos__ = 0U; + validate_non_negative_index("nb2_phi", "est_phi", est_phi); + context__.validate_dims("parameter initialization", "nb2_phi", "double", context__.to_vec(est_phi)); + std::vector nb2_phi(est_phi, double(0)); + size_t nb2_phi_k_0_max__ = est_phi; + for (size_t k_0__ = 0; k_0__ < nb2_phi_k_0_max__; ++k_0__) { + nb2_phi[k_0__] = vals_r__[pos__++]; + } + size_t nb2_phi_i_0_max__ = est_phi; + for (size_t i_0__ = 0; i_0__ < nb2_phi_i_0_max__; ++i_0__) { + try { + writer__.scalar_lb_unconstrain(0, nb2_phi[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable nb2_phi: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 59; + if (!(context__.contains_r("theta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable theta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("theta"); + pos__ = 0U; + validate_non_negative_index("theta", "est_theta", est_theta); + context__.validate_dims("parameter initialization", "theta", "double", context__.to_vec(est_theta)); + std::vector theta(est_theta, double(0)); + size_t theta_k_0_max__ = est_theta; + for (size_t k_0__ = 0; k_0__ < theta_k_0_max__; ++k_0__) { + theta[k_0__] = vals_r__[pos__++]; + } + size_t theta_i_0_max__ = est_theta; + for (size_t i_0__ = 0; i_0__ < theta_i_0_max__; ++i_0__) { + try { + writer__.scalar_lub_unconstrain(0, 1, theta[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable theta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = 
params_r_vec[i]; + } + template + T__ log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + current_statement_begin__ = 53; + Eigen::Matrix beta; + (void) beta; // dummy to suppress unused var warning + if (jacobian__) + beta = in__.vector_constrain(K, lp__); + else + beta = in__.vector_constrain(K); + current_statement_begin__ = 54; + Eigen::Matrix est_time_dev; + (void) est_time_dev; // dummy to suppress unused var warning + if (jacobian__) + est_time_dev = in__.vector_constrain((time_varying * (n_year - 1)), lp__); + else + est_time_dev = in__.vector_constrain((time_varying * (n_year - 1))); + current_statement_begin__ = 55; + std::vector sigma_rw; + size_t sigma_rw_d_0_max__ = time_varying; + sigma_rw.reserve(sigma_rw_d_0_max__); + for (size_t d_0__ = 0; d_0__ < sigma_rw_d_0_max__; ++d_0__) { + if (jacobian__) + sigma_rw.push_back(in__.scalar_lb_constrain(0, lp__)); + else + sigma_rw.push_back(in__.scalar_lb_constrain(0)); + } + current_statement_begin__ = 56; + std::vector sigma_logn; + size_t sigma_logn_d_0_max__ = est_sigma; + sigma_logn.reserve(sigma_logn_d_0_max__); + for (size_t d_0__ = 0; d_0__ < sigma_logn_d_0_max__; ++d_0__) { + if (jacobian__) + sigma_logn.push_back(in__.scalar_lb_constrain(0, lp__)); + else + sigma_logn.push_back(in__.scalar_lb_constrain(0)); + } + current_statement_begin__ = 57; + std::vector cv_gamma; + size_t cv_gamma_d_0_max__ = est_cv; + cv_gamma.reserve(cv_gamma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < cv_gamma_d_0_max__; ++d_0__) { + if (jacobian__) + cv_gamma.push_back(in__.scalar_lb_constrain(0, lp__)); + else + cv_gamma.push_back(in__.scalar_lb_constrain(0)); + } + current_statement_begin__ = 58; + std::vector nb2_phi; + size_t nb2_phi_d_0_max__ = est_phi; + nb2_phi.reserve(nb2_phi_d_0_max__); + for (size_t d_0__ = 0; d_0__ < nb2_phi_d_0_max__; ++d_0__) { + if (jacobian__) + nb2_phi.push_back(in__.scalar_lb_constrain(0, lp__)); + else + nb2_phi.push_back(in__.scalar_lb_constrain(0)); + } + current_statement_begin__ = 59; + std::vector theta; + size_t theta_d_0_max__ = est_theta; + theta.reserve(theta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < theta_d_0_max__; ++d_0__) { + if (jacobian__) + theta.push_back(in__.scalar_lub_constrain(0, 1, lp__)); + else + theta.push_back(in__.scalar_lub_constrain(0, 1)); + } + // transformed parameters + current_statement_begin__ = 62; + validate_non_negative_index("log_lambda", "n_row", n_row); + Eigen::Matrix log_lambda(n_row); + stan::math::initialize(log_lambda, DUMMY_VAR__); + stan::math::fill(log_lambda, DUMMY_VAR__); + current_statement_begin__ = 63; + validate_non_negative_index("lambda", "n_row", n_row); + Eigen::Matrix lambda(n_row); + stan::math::initialize(lambda, DUMMY_VAR__); + stan::math::fill(lambda, DUMMY_VAR__); + current_statement_begin__ = 64; + validate_non_negative_index("pred", "n_row", n_row); + Eigen::Matrix pred(n_row); + stan::math::initialize(pred, DUMMY_VAR__); + stan::math::fill(pred, DUMMY_VAR__); + current_statement_begin__ = 65; + validate_non_negative_index("gammaA", "est_cv", est_cv); + std::vector gammaA(est_cv, local_scalar_t__(0)); + stan::math::initialize(gammaA, DUMMY_VAR__); + stan::math::fill(gammaA, DUMMY_VAR__); + 
current_statement_begin__ = 66; + validate_non_negative_index("time_dev", "(time_varying * n_year)", (time_varying * n_year)); + Eigen::Matrix time_dev((time_varying * n_year)); + stan::math::initialize(time_dev, DUMMY_VAR__); + stan::math::fill(time_dev, DUMMY_VAR__); + // transformed parameters block statements + current_statement_begin__ = 67; + stan::math::assign(pred, multiply(x, beta)); + current_statement_begin__ = 69; + if (as_bool(logical_eq(time_varying, 1))) { + current_statement_begin__ = 70; + stan::model::assign(time_dev, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + 0, + "assigning variable time_dev"); + current_statement_begin__ = 71; + for (int i = 2; i <= n_year; ++i) { + current_statement_begin__ = 72; + stan::model::assign(time_dev, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + get_base1(est_time_dev, (i - 1), "est_time_dev", 1), + "assigning variable time_dev"); + } + } + current_statement_begin__ = 76; + for (int i = 1; i <= n_row; ++i) { + current_statement_begin__ = 77; + if (as_bool(logical_eq(time_varying, 1))) { + current_statement_begin__ = 78; + stan::model::assign(pred, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (get_base1(pred, i, "pred", 1) + (time_varying * get_base1(time_dev, get_base1(time, i, "time", 1), "time_dev", 1))), + "assigning variable pred"); + } + current_statement_begin__ = 80; + stan::model::assign(log_lambda, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (get_base1(pred, i, "pred", 1) + stan::math::log(get_base1(effort, i, "effort", 1))), + "assigning variable log_lambda"); + current_statement_begin__ = 81; + stan::model::assign(lambda, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + stan::math::exp(get_base1(log_lambda, i, "log_lambda", 1)), + "assigning variable lambda"); + } + current_statement_begin__ = 84; + if (as_bool(logical_eq(est_cv, 1))) { + current_statement_begin__ = 84; + stan::model::assign(gammaA, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + inv(pow(get_base1(cv_gamma, 1, "cv_gamma", 1), 2.0)), + "assigning variable gammaA"); + } + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 62; + size_t log_lambda_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < log_lambda_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(log_lambda(j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: log_lambda" << "(" << j_1__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable log_lambda: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 63; + size_t lambda_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < lambda_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(lambda(j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: lambda" << "(" << j_1__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable lambda: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 64; + size_t pred_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < pred_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(pred(j_1__))) { + 
std::stringstream msg__; + msg__ << "Undefined transformed parameter: pred" << "(" << j_1__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable pred: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 65; + size_t gammaA_k_0_max__ = est_cv; + for (size_t k_0__ = 0; k_0__ < gammaA_k_0_max__; ++k_0__) { + if (stan::math::is_uninitialized(gammaA[k_0__])) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: gammaA" << "[" << k_0__ << "]"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable gammaA: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + size_t gammaA_i_0_max__ = est_cv; + for (size_t i_0__ = 0; i_0__ < gammaA_i_0_max__; ++i_0__) { + check_greater_or_equal(function__, "gammaA[i_0__]", gammaA[i_0__], 0); + } + current_statement_begin__ = 66; + size_t time_dev_j_1_max__ = (time_varying * n_year); + for (size_t j_1__ = 0; j_1__ < time_dev_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(time_dev(j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: time_dev" << "(" << j_1__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable time_dev: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + // model body + current_statement_begin__ = 87; + lp_accum__.add(student_t_log(beta, 3, 0, 2)); + current_statement_begin__ = 89; + if (as_bool(logical_eq(time_varying, 1))) { + current_statement_begin__ = 90; + lp_accum__.add(student_t_log(sigma_rw, 3, 0, 1)); + current_statement_begin__ = 91; + lp_accum__.add(student_t_log(get_base1(est_time_dev, 1, "est_time_dev", 1), 3, 0, 2)); + current_statement_begin__ = 92; + for (int i = 2; i <= (n_year - 1); ++i) { + current_statement_begin__ = 94; + lp_accum__.add(normal_log(get_base1(est_time_dev, i, "est_time_dev", 1), get_base1(est_time_dev, (i - 1), "est_time_dev", 1), get_base1(sigma_rw, 1, "sigma_rw", 1))); + } + } + current_statement_begin__ = 98; + if (as_bool(logical_eq(est_theta, 1))) { + current_statement_begin__ = 99; + lp_accum__.add(beta_log(theta, 1, 1)); + } + current_statement_begin__ = 102; + if (as_bool(logical_eq(family, 1))) { + current_statement_begin__ = 103; + lp_accum__.add(poisson_log_log(yint, log_lambda)); + } else if (as_bool(logical_eq(family, 2))) { + current_statement_begin__ = 106; + lp_accum__.add(student_t_log(nb2_phi, 3, 0, 2)); + current_statement_begin__ = 107; + lp_accum__.add(neg_binomial_2_log_log(yint, log_lambda, get_base1(nb2_phi, 1, "nb2_phi", 1))); + } else if (as_bool(logical_eq(family, 3))) { + current_statement_begin__ = 110; + for (int i = 1; i <= n_row; ++i) { + current_statement_begin__ = 113; + if (as_bool(logical_eq(get_base1(yint, i, "yint", 1), 0))) { + current_statement_begin__ = 114; + lp_accum__.add(bernoulli_log(1, theta)); + } else { + current_statement_begin__ = 116; + lp_accum__.add(bernoulli_log(0, theta)); + current_statement_begin__ = 117; + lp_accum__.add(poisson_log(get_base1(yint, i, "yint", 1), get_base1(lambda, i, "lambda", 1))); + if (get_base1(yint, i, "yint", 1) < 1) lp_accum__.add(-std::numeric_limits::infinity()); + else lp_accum__.add(-log_sum_exp(poisson_ccdf_log(1, get_base1(lambda, i, "lambda", 1)), poisson_log(1, get_base1(lambda, i, "lambda", 1)))); + } + } + } else if (as_bool(logical_eq(family, 4))) { + current_statement_begin__ = 122; + for (int i = 1; i <= n_row; ++i) { + current_statement_begin__ 
= 123; + if (as_bool(logical_eq(get_base1(yint, i, "yint", 1), 0))) { + current_statement_begin__ = 124; + lp_accum__.add(bernoulli_log(1, theta)); + } else { + current_statement_begin__ = 126; + lp_accum__.add(bernoulli_log(0, theta)); + current_statement_begin__ = 127; + lp_accum__.add(neg_binomial_2_log(get_base1(yint, i, "yint", 1), get_base1(lambda, i, "lambda", 1), get_base1(nb2_phi, 1, "nb2_phi", 1))); + if (get_base1(yint, i, "yint", 1) < 1) lp_accum__.add(-std::numeric_limits::infinity()); + else lp_accum__.add(-log_sum_exp(neg_binomial_2_ccdf_log(1, get_base1(lambda, i, "lambda", 1), get_base1(nb2_phi, 1, "nb2_phi", 1)), neg_binomial_2_log(1, get_base1(lambda, i, "lambda", 1), get_base1(nb2_phi, 1, "nb2_phi", 1)))); + } + } + } else if (as_bool(logical_eq(family, 5))) { + current_statement_begin__ = 132; + lp_accum__.add(student_t_log(sigma_logn, 3, 0, 2)); + current_statement_begin__ = 133; + lp_accum__.add(lognormal_log(yreal, log_lambda, get_base1(sigma_logn, 1, "sigma_logn", 1))); + } else if (as_bool(logical_eq(family, 6))) { + current_statement_begin__ = 136; + lp_accum__.add(student_t_log(get_base1(cv_gamma, 1, "cv_gamma", 1), 3, 0, 2)); + current_statement_begin__ = 137; + lp_accum__.add(gamma_log(yreal, get_base1(gammaA, 1, "gammaA", 1), elt_divide(get_base1(gammaA, 1, "gammaA", 1), lambda))); + } else if (as_bool(logical_eq(family, 7))) { + current_statement_begin__ = 140; + lp_accum__.add(student_t_log(sigma_logn, 3, 0, 2)); + current_statement_begin__ = 141; + for (int i = 1; i <= n_row; ++i) { + current_statement_begin__ = 142; + if (as_bool(logical_eq(get_base1(yint, i, "yint", 1), 0))) { + current_statement_begin__ = 143; + lp_accum__.add(bernoulli_log(1, theta)); + } else { + current_statement_begin__ = 145; + lp_accum__.add(bernoulli_log(0, theta)); + current_statement_begin__ = 146; + lp_accum__.add(lognormal_log(get_base1(yreal, i, "yreal", 1), get_base1(log_lambda, i, "log_lambda", 1), get_base1(sigma_logn, 1, "sigma_logn", 1))); + } + } + } else if (as_bool(logical_eq(family, 8))) { + current_statement_begin__ = 151; + lp_accum__.add(student_t_log(get_base1(cv_gamma, 1, "cv_gamma", 1), 3, 0, 2)); + current_statement_begin__ = 152; + for (int i = 1; i <= n_row; ++i) { + current_statement_begin__ = 153; + if (as_bool(logical_eq(get_base1(yint, i, "yint", 1), 0))) { + current_statement_begin__ = 154; + lp_accum__.add(bernoulli_log(1, theta)); + } else { + current_statement_begin__ = 156; + lp_accum__.add(bernoulli_log(0, theta)); + current_statement_begin__ = 157; + lp_accum__.add(gamma_log(get_base1(yreal, i, "yreal", 1), get_base1(gammaA, 1, "gammaA", 1), (get_base1(gammaA, 1, "gammaA", 1) / get_base1(lambda, i, "lambda", 1)))); + } + } + } else if (as_bool(logical_eq(family, 9))) { + current_statement_begin__ = 162; + lp_accum__.add(student_t_log(sigma_logn, 3, 0, 2)); + current_statement_begin__ = 163; + lp_accum__.add(normal_log(yreal, lambda, get_base1(sigma_logn, 1, "sigma_logn", 1))); + } else if (as_bool(logical_eq(family, 10))) { + current_statement_begin__ = 166; + lp_accum__.add(student_t_log(sigma_logn, 3, 0, 2)); + current_statement_begin__ = 167; + for (int i = 1; i <= n_row; ++i) { + current_statement_begin__ = 168; + if (as_bool(logical_eq(get_base1(yint, i, "yint", 1), 0))) { + current_statement_begin__ = 169; + lp_accum__.add(bernoulli_log(1, theta)); + } else { + current_statement_begin__ = 171; + lp_accum__.add(bernoulli_log(0, theta)); + current_statement_begin__ = 172; + lp_accum__.add(normal_log(get_base1(yreal, i, "yreal", 1), 
get_base1(lambda, i, "lambda", 1), get_base1(sigma_logn, 1, "sigma_logn", 1))); + } + } + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("beta"); + names__.push_back("est_time_dev"); + names__.push_back("sigma_rw"); + names__.push_back("sigma_logn"); + names__.push_back("cv_gamma"); + names__.push_back("nb2_phi"); + names__.push_back("theta"); + names__.push_back("log_lambda"); + names__.push_back("lambda"); + names__.push_back("pred"); + names__.push_back("gammaA"); + names__.push_back("time_dev"); + names__.push_back("log_lik"); + names__.push_back("y_new"); + names__.push_back("y_new_real"); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back((time_varying * (n_year - 1))); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(time_varying); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(est_sigma); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(est_cv); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(est_phi); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(est_theta); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(n_row); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(n_row); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(n_row); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(est_cv); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back((time_varying * n_year)); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(n_row); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back((n_row * is_discrete)); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back((n_row * (1 - is_discrete))); + dimss__.push_back(dims__); + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_bycatch_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + Eigen::Matrix beta = in__.vector_constrain(K); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + vars__.push_back(beta(j_1__)); + } + Eigen::Matrix est_time_dev = in__.vector_constrain((time_varying * (n_year - 1))); + size_t est_time_dev_j_1_max__ = (time_varying * (n_year - 1)); + for (size_t j_1__ = 0; j_1__ < est_time_dev_j_1_max__; ++j_1__) { + 
vars__.push_back(est_time_dev(j_1__)); + } + std::vector sigma_rw; + size_t sigma_rw_d_0_max__ = time_varying; + sigma_rw.reserve(sigma_rw_d_0_max__); + for (size_t d_0__ = 0; d_0__ < sigma_rw_d_0_max__; ++d_0__) { + sigma_rw.push_back(in__.scalar_lb_constrain(0)); + } + size_t sigma_rw_k_0_max__ = time_varying; + for (size_t k_0__ = 0; k_0__ < sigma_rw_k_0_max__; ++k_0__) { + vars__.push_back(sigma_rw[k_0__]); + } + std::vector sigma_logn; + size_t sigma_logn_d_0_max__ = est_sigma; + sigma_logn.reserve(sigma_logn_d_0_max__); + for (size_t d_0__ = 0; d_0__ < sigma_logn_d_0_max__; ++d_0__) { + sigma_logn.push_back(in__.scalar_lb_constrain(0)); + } + size_t sigma_logn_k_0_max__ = est_sigma; + for (size_t k_0__ = 0; k_0__ < sigma_logn_k_0_max__; ++k_0__) { + vars__.push_back(sigma_logn[k_0__]); + } + std::vector cv_gamma; + size_t cv_gamma_d_0_max__ = est_cv; + cv_gamma.reserve(cv_gamma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < cv_gamma_d_0_max__; ++d_0__) { + cv_gamma.push_back(in__.scalar_lb_constrain(0)); + } + size_t cv_gamma_k_0_max__ = est_cv; + for (size_t k_0__ = 0; k_0__ < cv_gamma_k_0_max__; ++k_0__) { + vars__.push_back(cv_gamma[k_0__]); + } + std::vector nb2_phi; + size_t nb2_phi_d_0_max__ = est_phi; + nb2_phi.reserve(nb2_phi_d_0_max__); + for (size_t d_0__ = 0; d_0__ < nb2_phi_d_0_max__; ++d_0__) { + nb2_phi.push_back(in__.scalar_lb_constrain(0)); + } + size_t nb2_phi_k_0_max__ = est_phi; + for (size_t k_0__ = 0; k_0__ < nb2_phi_k_0_max__; ++k_0__) { + vars__.push_back(nb2_phi[k_0__]); + } + std::vector theta; + size_t theta_d_0_max__ = est_theta; + theta.reserve(theta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < theta_d_0_max__; ++d_0__) { + theta.push_back(in__.scalar_lub_constrain(0, 1)); + } + size_t theta_k_0_max__ = est_theta; + for (size_t k_0__ = 0; k_0__ < theta_k_0_max__; ++k_0__) { + vars__.push_back(theta[k_0__]); + } + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + // declare and define transformed parameters + current_statement_begin__ = 62; + validate_non_negative_index("log_lambda", "n_row", n_row); + Eigen::Matrix log_lambda(n_row); + stan::math::initialize(log_lambda, DUMMY_VAR__); + stan::math::fill(log_lambda, DUMMY_VAR__); + current_statement_begin__ = 63; + validate_non_negative_index("lambda", "n_row", n_row); + Eigen::Matrix lambda(n_row); + stan::math::initialize(lambda, DUMMY_VAR__); + stan::math::fill(lambda, DUMMY_VAR__); + current_statement_begin__ = 64; + validate_non_negative_index("pred", "n_row", n_row); + Eigen::Matrix pred(n_row); + stan::math::initialize(pred, DUMMY_VAR__); + stan::math::fill(pred, DUMMY_VAR__); + current_statement_begin__ = 65; + validate_non_negative_index("gammaA", "est_cv", est_cv); + std::vector gammaA(est_cv, double(0)); + stan::math::initialize(gammaA, DUMMY_VAR__); + stan::math::fill(gammaA, DUMMY_VAR__); + current_statement_begin__ = 66; + validate_non_negative_index("time_dev", "(time_varying * n_year)", (time_varying * n_year)); + Eigen::Matrix time_dev((time_varying * n_year)); + stan::math::initialize(time_dev, DUMMY_VAR__); + stan::math::fill(time_dev, DUMMY_VAR__); + // do transformed parameters statements + current_statement_begin__ = 67; + stan::math::assign(pred, multiply(x, beta)); + current_statement_begin__ = 69; + if (as_bool(logical_eq(time_varying, 1))) { + 
current_statement_begin__ = 70; + stan::model::assign(time_dev, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + 0, + "assigning variable time_dev"); + current_statement_begin__ = 71; + for (int i = 2; i <= n_year; ++i) { + current_statement_begin__ = 72; + stan::model::assign(time_dev, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + get_base1(est_time_dev, (i - 1), "est_time_dev", 1), + "assigning variable time_dev"); + } + } + current_statement_begin__ = 76; + for (int i = 1; i <= n_row; ++i) { + current_statement_begin__ = 77; + if (as_bool(logical_eq(time_varying, 1))) { + current_statement_begin__ = 78; + stan::model::assign(pred, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (get_base1(pred, i, "pred", 1) + (time_varying * get_base1(time_dev, get_base1(time, i, "time", 1), "time_dev", 1))), + "assigning variable pred"); + } + current_statement_begin__ = 80; + stan::model::assign(log_lambda, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (get_base1(pred, i, "pred", 1) + stan::math::log(get_base1(effort, i, "effort", 1))), + "assigning variable log_lambda"); + current_statement_begin__ = 81; + stan::model::assign(lambda, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + stan::math::exp(get_base1(log_lambda, i, "log_lambda", 1)), + "assigning variable lambda"); + } + current_statement_begin__ = 84; + if (as_bool(logical_eq(est_cv, 1))) { + current_statement_begin__ = 84; + stan::model::assign(gammaA, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + inv(pow(get_base1(cv_gamma, 1, "cv_gamma", 1), 2.0)), + "assigning variable gammaA"); + } + if (!include_gqs__ && !include_tparams__) return; + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 65; + size_t gammaA_i_0_max__ = est_cv; + for (size_t i_0__ = 0; i_0__ < gammaA_i_0_max__; ++i_0__) { + check_greater_or_equal(function__, "gammaA[i_0__]", gammaA[i_0__], 0); + } + // write transformed parameters + if (include_tparams__) { + size_t log_lambda_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < log_lambda_j_1_max__; ++j_1__) { + vars__.push_back(log_lambda(j_1__)); + } + size_t lambda_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < lambda_j_1_max__; ++j_1__) { + vars__.push_back(lambda(j_1__)); + } + size_t pred_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < pred_j_1_max__; ++j_1__) { + vars__.push_back(pred(j_1__)); + } + size_t gammaA_k_0_max__ = est_cv; + for (size_t k_0__ = 0; k_0__ < gammaA_k_0_max__; ++k_0__) { + vars__.push_back(gammaA[k_0__]); + } + size_t time_dev_j_1_max__ = (time_varying * n_year); + for (size_t j_1__ = 0; j_1__ < time_dev_j_1_max__; ++j_1__) { + vars__.push_back(time_dev(j_1__)); + } + } + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 179; + validate_non_negative_index("log_lik", "n_row", n_row); + Eigen::Matrix log_lik(n_row); + stan::math::initialize(log_lik, DUMMY_VAR__); + stan::math::fill(log_lik, DUMMY_VAR__); + current_statement_begin__ = 180; + validate_non_negative_index("y_new", "(n_row * is_discrete)", (n_row * is_discrete)); + std::vector y_new((n_row * is_discrete), int(0)); + stan::math::fill(y_new, std::numeric_limits::min()); + current_statement_begin__ = 181; + 
validate_non_negative_index("y_new_real", "(n_row * (1 - is_discrete))", (n_row * (1 - is_discrete))); + Eigen::Matrix y_new_real((n_row * (1 - is_discrete))); + stan::math::initialize(y_new_real, DUMMY_VAR__); + stan::math::fill(y_new_real, DUMMY_VAR__); + // generated quantities statements + current_statement_begin__ = 183; + if (as_bool(logical_eq(family, 1))) { + current_statement_begin__ = 184; + for (int n = 1; n <= n_row; ++n) { + current_statement_begin__ = 185; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + poisson_log_log(get_base1(yint, n, "yint", 1), get_base1(log_lambda, n, "log_lambda", 1)), + "assigning variable log_lik"); + current_statement_begin__ = 188; + stan::model::assign(y_new, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + 0, + "assigning variable y_new"); + current_statement_begin__ = 189; + if (as_bool(logical_gt(get_base1(new_effort, n, "new_effort", 1), 0))) { + current_statement_begin__ = 189; + stan::model::assign(y_new, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + poisson_log_rng((get_base1(pred, n, "pred", 1) + stan::math::log(get_base1(new_effort, n, "new_effort", 1))), base_rng__), + "assigning variable y_new"); + } + } + } else if (as_bool(logical_eq(family, 2))) { + current_statement_begin__ = 193; + for (int n = 1; n <= n_row; ++n) { + current_statement_begin__ = 194; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + neg_binomial_2_log_log(get_base1(yint, n, "yint", 1), get_base1(log_lambda, n, "log_lambda", 1), get_base1(nb2_phi, 1, "nb2_phi", 1)), + "assigning variable log_lik"); + current_statement_begin__ = 197; + stan::model::assign(y_new, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + 0, + "assigning variable y_new"); + current_statement_begin__ = 198; + if (as_bool(logical_gt(get_base1(new_effort, n, "new_effort", 1), 0))) { + current_statement_begin__ = 198; + stan::model::assign(y_new, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + neg_binomial_2_log_rng((get_base1(pred, n, "pred", 1) + stan::math::log(get_base1(new_effort, n, "new_effort", 1))), get_base1(nb2_phi, 1, "nb2_phi", 1), base_rng__), + "assigning variable y_new"); + } + } + } else if (as_bool(logical_eq(family, 3))) { + current_statement_begin__ = 202; + for (int n = 1; n <= n_row; ++n) { + current_statement_begin__ = 203; + if (as_bool(logical_eq(get_base1(yint, n, "yint", 1), 0))) { + current_statement_begin__ = 205; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + stan::math::log(get_base1(theta, 1, "theta", 1)), + "assigning variable log_lik"); + } else { + current_statement_begin__ = 208; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + ((log1m(get_base1(theta, 1, "theta", 1)) + poisson_log_log(get_base1(yint, n, "yint", 1), get_base1(log_lambda, n, "log_lambda", 1))) - poisson_ccdf_log(0, get_base1(lambda, n, "lambda", 1))), + "assigning variable log_lik"); + } + current_statement_begin__ = 212; + stan::model::assign(y_new, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + 0, + "assigning variable y_new"); + current_statement_begin__ = 213; + if (as_bool(logical_gt(get_base1(new_effort, n, "new_effort", 1), 0))) { + 
current_statement_begin__ = 213; + stan::model::assign(y_new, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + ((1 - bernoulli_rng(get_base1(theta, 1, "theta", 1), base_rng__)) * poisson_log_rng((get_base1(pred, n, "pred", 1) + stan::math::log(get_base1(new_effort, n, "new_effort", 1))), base_rng__)), + "assigning variable y_new"); + } + } + } else if (as_bool(logical_eq(family, 4))) { + current_statement_begin__ = 217; + for (int n = 1; n <= n_row; ++n) { + current_statement_begin__ = 218; + if (as_bool(logical_eq(get_base1(yint, n, "yint", 1), 0))) { + current_statement_begin__ = 220; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + stan::math::log(get_base1(theta, 1, "theta", 1)), + "assigning variable log_lik"); + } else { + current_statement_begin__ = 223; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + ((log1m(get_base1(theta, 1, "theta", 1)) + neg_binomial_2_log_log(get_base1(yint, n, "yint", 1), get_base1(log_lambda, n, "log_lambda", 1), get_base1(nb2_phi, 1, "nb2_phi", 1))) - neg_binomial_2_ccdf_log(0, get_base1(lambda, n, "lambda", 1), get_base1(nb2_phi, 1, "nb2_phi", 1))), + "assigning variable log_lik"); + } + current_statement_begin__ = 227; + stan::model::assign(y_new, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + 0, + "assigning variable y_new"); + current_statement_begin__ = 228; + if (as_bool(logical_gt(get_base1(new_effort, n, "new_effort", 1), 0))) { + current_statement_begin__ = 228; + stan::model::assign(y_new, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + ((1 - bernoulli_rng(get_base1(theta, 1, "theta", 1), base_rng__)) * neg_binomial_2_log_rng((get_base1(pred, n, "pred", 1) + stan::math::log(get_base1(new_effort, n, "new_effort", 1))), get_base1(nb2_phi, 1, "nb2_phi", 1), base_rng__)), + "assigning variable y_new"); + } + } + } else if (as_bool(logical_eq(family, 5))) { + current_statement_begin__ = 232; + for (int n = 1; n <= n_row; ++n) { + current_statement_begin__ = 233; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + lognormal_log(get_base1(yreal, n, "yreal", 1), get_base1(log_lambda, n, "log_lambda", 1), get_base1(sigma_logn, 1, "sigma_logn", 1)), + "assigning variable log_lik"); + current_statement_begin__ = 236; + stan::model::assign(y_new_real, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + 0, + "assigning variable y_new_real"); + current_statement_begin__ = 237; + if (as_bool(logical_gt(get_base1(new_effort, n, "new_effort", 1), 0))) { + current_statement_begin__ = 237; + stan::model::assign(y_new_real, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + lognormal_rng((get_base1(pred, n, "pred", 1) + stan::math::log(get_base1(new_effort, n, "new_effort", 1))), get_base1(sigma_logn, 1, "sigma_logn", 1), base_rng__), + "assigning variable y_new_real"); + } + } + } else if (as_bool(logical_eq(family, 6))) { + current_statement_begin__ = 241; + for (int n = 1; n <= n_row; ++n) { + current_statement_begin__ = 242; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + gamma_log(get_base1(yreal, n, "yreal", 1), get_base1(gammaA, 1, "gammaA", 1), (get_base1(gammaA, 1, "gammaA", 1) / get_base1(lambda, n, "lambda", 
1))), + "assigning variable log_lik"); + current_statement_begin__ = 245; + stan::model::assign(y_new_real, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + 0, + "assigning variable y_new_real"); + current_statement_begin__ = 246; + if (as_bool(logical_gt(get_base1(new_effort, n, "new_effort", 1), 0))) { + current_statement_begin__ = 246; + stan::model::assign(y_new_real, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + gamma_rng(get_base1(gammaA, 1, "gammaA", 1), (get_base1(gammaA, 1, "gammaA", 1) / stan::math::exp((get_base1(pred, n, "pred", 1) + stan::math::log(get_base1(new_effort, n, "new_effort", 1))))), base_rng__), + "assigning variable y_new_real"); + } + } + } else if (as_bool(logical_eq(family, 7))) { + current_statement_begin__ = 250; + for (int n = 1; n <= n_row; ++n) { + current_statement_begin__ = 251; + if (as_bool(logical_eq(get_base1(yint, n, "yint", 1), 0))) { + current_statement_begin__ = 253; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + stan::math::log(get_base1(theta, 1, "theta", 1)), + "assigning variable log_lik"); + } else { + current_statement_begin__ = 255; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + lognormal_log(get_base1(yreal, n, "yreal", 1), get_base1(log_lambda, n, "log_lambda", 1), get_base1(sigma_logn, 1, "sigma_logn", 1)), + "assigning variable log_lik"); + } + current_statement_begin__ = 259; + stan::model::assign(y_new_real, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + 0, + "assigning variable y_new_real"); + current_statement_begin__ = 260; + if (as_bool(logical_gt(get_base1(new_effort, n, "new_effort", 1), 0))) { + current_statement_begin__ = 260; + stan::model::assign(y_new_real, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + ((1 - bernoulli_rng(get_base1(theta, 1, "theta", 1), base_rng__)) * lognormal_rng((get_base1(pred, n, "pred", 1) + stan::math::log(get_base1(new_effort, n, "new_effort", 1))), get_base1(sigma_logn, 1, "sigma_logn", 1), base_rng__)), + "assigning variable y_new_real"); + } + } + } else if (as_bool(logical_eq(family, 8))) { + current_statement_begin__ = 264; + for (int n = 1; n <= n_row; ++n) { + current_statement_begin__ = 265; + if (as_bool(logical_eq(get_base1(yint, n, "yint", 1), 0))) { + current_statement_begin__ = 267; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + stan::math::log(get_base1(theta, 1, "theta", 1)), + "assigning variable log_lik"); + } else { + current_statement_begin__ = 269; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + gamma_log(get_base1(yreal, n, "yreal", 1), get_base1(gammaA, 1, "gammaA", 1), (get_base1(gammaA, 1, "gammaA", 1) / get_base1(lambda, n, "lambda", 1))), + "assigning variable log_lik"); + } + current_statement_begin__ = 273; + stan::model::assign(y_new_real, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + 0, + "assigning variable y_new_real"); + current_statement_begin__ = 274; + if (as_bool(logical_gt(get_base1(new_effort, n, "new_effort", 1), 0))) { + current_statement_begin__ = 274; + stan::model::assign(y_new_real, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + ((1 - 
bernoulli_rng(get_base1(theta, 1, "theta", 1), base_rng__)) * gamma_rng(get_base1(gammaA, 1, "gammaA", 1), (get_base1(gammaA, 1, "gammaA", 1) / stan::math::exp((get_base1(pred, n, "pred", 1) + stan::math::log(get_base1(new_effort, n, "new_effort", 1))))), base_rng__)), + "assigning variable y_new_real"); + } + } + } else if (as_bool(logical_eq(family, 9))) { + current_statement_begin__ = 278; + for (int n = 1; n <= n_row; ++n) { + current_statement_begin__ = 279; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + normal_log(get_base1(yreal, n, "yreal", 1), get_base1(lambda, n, "lambda", 1), get_base1(sigma_logn, 1, "sigma_logn", 1)), + "assigning variable log_lik"); + current_statement_begin__ = 282; + stan::model::assign(y_new_real, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + 0, + "assigning variable y_new_real"); + current_statement_begin__ = 283; + if (as_bool(logical_gt(get_base1(new_effort, n, "new_effort", 1), 0))) { + current_statement_begin__ = 283; + stan::model::assign(y_new_real, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + normal_rng(stan::math::exp((get_base1(pred, n, "pred", 1) + stan::math::log(get_base1(new_effort, n, "new_effort", 1)))), get_base1(sigma_logn, 1, "sigma_logn", 1), base_rng__), + "assigning variable y_new_real"); + } + } + } else if (as_bool(logical_eq(family, 10))) { + current_statement_begin__ = 287; + for (int n = 1; n <= n_row; ++n) { + current_statement_begin__ = 288; + if (as_bool(logical_eq(get_base1(yint, n, "yint", 1), 0))) { + current_statement_begin__ = 290; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + stan::math::log(get_base1(theta, 1, "theta", 1)), + "assigning variable log_lik"); + } else { + current_statement_begin__ = 292; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + normal_log(get_base1(yreal, n, "yreal", 1), get_base1(lambda, n, "lambda", 1), get_base1(sigma_logn, 1, "sigma_logn", 1)), + "assigning variable log_lik"); + } + current_statement_begin__ = 296; + stan::model::assign(y_new_real, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + 0, + "assigning variable y_new_real"); + current_statement_begin__ = 297; + if (as_bool(logical_gt(get_base1(new_effort, n, "new_effort", 1), 0))) { + current_statement_begin__ = 297; + stan::model::assign(y_new_real, + stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list()), + ((1 - bernoulli_rng(get_base1(theta, 1, "theta", 1), base_rng__)) * normal_rng(stan::math::exp((get_base1(pred, n, "pred", 1) + stan::math::log(get_base1(new_effort, n, "new_effort", 1)))), get_base1(sigma_logn, 1, "sigma_logn", 1), base_rng__)), + "assigning variable y_new_real"); + } + } + } + // validate, write generated quantities + current_statement_begin__ = 179; + size_t log_lik_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < log_lik_j_1_max__; ++j_1__) { + vars__.push_back(log_lik(j_1__)); + } + current_statement_begin__ = 180; + size_t y_new_i_0_max__ = (n_row * is_discrete); + for (size_t i_0__ = 0; i_0__ < y_new_i_0_max__; ++i_0__) { + check_greater_or_equal(function__, "y_new[i_0__]", y_new[i_0__], 0); + } + size_t y_new_k_0_max__ = (n_row * is_discrete); + for (size_t k_0__ = 0; k_0__ < y_new_k_0_max__; ++k_0__) { + vars__.push_back(y_new[k_0__]); + } + 
current_statement_begin__ = 181; + size_t y_new_real_j_1_max__ = (n_row * (1 - is_discrete)); + for (size_t j_1__ = 0; j_1__ < y_new_real_j_1_max__; ++j_1__) { + vars__.push_back(y_new_real(j_1__)); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_bycatch"; + } + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t est_time_dev_j_1_max__ = (time_varying * (n_year - 1)); + for (size_t j_1__ = 0; j_1__ < est_time_dev_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "est_time_dev" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t sigma_rw_k_0_max__ = time_varying; + for (size_t k_0__ = 0; k_0__ < sigma_rw_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma_rw" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t sigma_logn_k_0_max__ = est_sigma; + for (size_t k_0__ = 0; k_0__ < sigma_logn_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma_logn" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t cv_gamma_k_0_max__ = est_cv; + for (size_t k_0__ = 0; k_0__ < cv_gamma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "cv_gamma" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t nb2_phi_k_0_max__ = est_phi; + for (size_t k_0__ = 0; k_0__ < nb2_phi_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "nb2_phi" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t theta_k_0_max__ = est_theta; + for (size_t k_0__ = 0; k_0__ < theta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "theta" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t log_lambda_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < log_lambda_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lambda" << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t lambda_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < lambda_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "lambda" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t pred_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < pred_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "pred" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t gammaA_k_0_max__ = est_cv; + for (size_t k_0__ = 0; k_0__ < gammaA_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "gammaA" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t time_dev_j_1_max__ = (time_varying * n_year); + for (size_t j_1__ = 0; j_1__ < time_dev_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "time_dev" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__) return; + size_t log_lik_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < log_lik_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lik" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_new_k_0_max__ = (n_row * is_discrete); + for (size_t k_0__ = 0; k_0__ < y_new_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_new" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_new_real_j_1_max__ = (n_row * (1 - is_discrete)); + for (size_t j_1__ = 0; j_1__ < y_new_real_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_new_real" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t est_time_dev_j_1_max__ = (time_varying * (n_year - 1)); + for (size_t j_1__ = 0; j_1__ < est_time_dev_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "est_time_dev" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t sigma_rw_k_0_max__ = time_varying; + for (size_t k_0__ = 0; k_0__ < sigma_rw_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma_rw" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t sigma_logn_k_0_max__ = est_sigma; + for (size_t k_0__ = 0; k_0__ < sigma_logn_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma_logn" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t cv_gamma_k_0_max__ = est_cv; + for (size_t k_0__ = 0; k_0__ < cv_gamma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "cv_gamma" << '.' 
<< k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t nb2_phi_k_0_max__ = est_phi; + for (size_t k_0__ = 0; k_0__ < nb2_phi_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "nb2_phi" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t theta_k_0_max__ = est_theta; + for (size_t k_0__ = 0; k_0__ < theta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "theta" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t log_lambda_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < log_lambda_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lambda" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t lambda_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < lambda_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "lambda" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t pred_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < pred_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "pred" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t gammaA_k_0_max__ = est_cv; + for (size_t k_0__ = 0; k_0__ < gammaA_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "gammaA" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t time_dev_j_1_max__ = (time_varying * n_year); + for (size_t j_1__ = 0; j_1__ < time_dev_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "time_dev" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__) return; + size_t log_lik_j_1_max__ = n_row; + for (size_t j_1__ = 0; j_1__ < log_lik_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lik" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_new_k_0_max__ = (n_row * is_discrete); + for (size_t k_0__ = 0; k_0__ < y_new_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_new" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_new_real_j_1_max__ = (n_row * (1 - is_discrete)); + for (size_t j_1__ = 0; j_1__ < y_new_real_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_new_real" << '.' 
<< j_1__ + 1;
+      param_names__.push_back(param_name_stream__.str());
+    }
+  }
+}; // model
+} // namespace
+typedef model_bycatch_namespace::model_bycatch stan_model;
+#ifndef USING_R
+stan::model::model_base& new_model(
+    stan::io::var_context& data_context,
+    unsigned int seed,
+    std::ostream* msg_stream) {
+  stan_model* m = new stan_model(data_context, seed, msg_stream);
+  return *m;
+}
+#endif
+#endif
diff --git a/src/stanExports_bycatch.o b/src/stanExports_bycatch.o
new file mode 100644
index 0000000..5155b39
Binary files /dev/null and b/src/stanExports_bycatch.o differ
diff --git a/tools/make_cc.R b/tools/make_cc.R
deleted file mode 100644
index 2c0e4c1..0000000
--- a/tools/make_cc.R
+++ /dev/null
@@ -1,48 +0,0 @@
-# Part of the rstanarm package for estimating model parameters
-# Copyright (C) 2015, 2016, 2017 Trustees of Columbia University
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 3
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-options(warn = 3L)
-options("useFancyQuotes" = FALSE)
-
-make_cc <- function(file) {
-  file <- sub("\\.cc$", ".stan", file)
-  cppcode <- rstan::stanc(file, allow_undefined = TRUE,
-                          obfuscate_model_name = FALSE)$cppcode
-  cppcode <- sub("(class[[:space:]]+[A-Za-z_][A-Za-z0-9_]*[[:space:]]*: public prob_grad \\{)",
-                 paste("#include \n", "\\1"), cppcode)
-
-  cat(readLines(dir("stan_files", pattern = "license.stan", recursive = TRUE, full.names = TRUE)),
-      "#ifndef MODELS_HPP", "#define MODELS_HPP", "#define STAN__SERVICES__COMMAND_HPP",
-      "#include ",
-      cppcode, "#endif", file = sub("\\.stan$", ".hpp", file),
-      sep = "\n", append = FALSE)
-
-  f <- sub("\\.stan$", "", basename(file))
-  Rcpp::exposeClass(class = paste0("model_", f),
-                    constructors = list(c("SEXP", "SEXP", "SEXP")), fields = character(),
-                    methods = c("call_sampler",
-                                "param_names", "param_names_oi", "param_fnames_oi",
-                                "param_dims", "param_dims_oi", "update_param_oi", "param_oi_tidx",
-                                "grad_log_prob", "log_prob",
-                                "unconstrain_pars", "constrain_pars", "num_pars_unconstrained",
-                                "unconstrained_param_names", "constrained_param_names"),
-                    file = file.path("stan_files", paste0(f, ".cc")),
-                    header = paste0('#include "', f, '.hpp"'),
-                    module = paste0("stan_fit4", f, "_mod"),
-                    CppClass = "rstan::stan_fit ",
-                    Rfile = FALSE)
-  return(invisible(NULL))
-}