diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000..5008ddf Binary files /dev/null and b/.DS_Store differ diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml index c1f8e71..c273716 100644 --- a/.github/workflows/check.yaml +++ b/.github/workflows/check.yaml @@ -9,10 +9,12 @@ on: branches: - main - master + - AD-rc0 pull_request: branches: - main - master + - AD-rc0 name: R-CMD-check @@ -27,11 +29,11 @@ jobs: matrix: config: - {os: macOS-latest, r: 'release'} - - {os: ubuntu-18.04, r: 'devel', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest", http-user-agent: "R/4.0.0 (ubuntu-18.04) R (4.0.0 x86_64-pc-linux-gnu x86_64 linux-gnu) on GitHub Actions" } - - {os: ubuntu-18.04, r: 'release', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} - - {os: ubuntu-18.04, r: 'oldrel', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} - - {os: ubuntu-18.04, r: '3.5', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} - - {os: ubuntu-18.04, r: '3.4', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} + - {os: ubuntu-latest, r: 'devel', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"}#, http-user-agent: "R/4.0.0 (ubuntu-18.04) R (4.0.0 x86_64-pc-linux-gnu x86_64 linux-gnu) on GitHub Actions" } + - {os: ubuntu-latest, r: 'release', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} + - {os: ubuntu-latest, r: 'oldrel', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} + # - {os: ubuntu-18.04, r: '3.5', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} + # - {os: ubuntu-18.04, r: '3.4', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} env: R_REMOTES_NO_ERRORS_FROM_WARNINGS: true @@ -41,12 +43,12 @@ jobs: steps: - uses: actions/checkout@v2 - - uses: r-lib/actions/setup-r@master + - uses: r-lib/actions/setup-r@v2 with: 
r-version: ${{ matrix.config.r }} http-user-agent: ${{ matrix.config.http-user-agent }} - - uses: r-lib/actions/setup-pandoc@master + - uses: r-lib/actions/setup-pandoc@v2 - name: Query dependencies run: | @@ -70,10 +72,16 @@ jobs: do eval sudo $cmd done < <(Rscript -e 'writeLines(remotes::system_requirements("ubuntu", "18.04"))') + sudo apt-get install libcurl4-openssl-dev # needed for R pkg curl -> covr + - name: Install dependencies run: | remotes::install_deps(dependencies = TRUE) remotes::install_cran("rcmdcheck") + # install.packages('devtools') + # install.packages('pracma') + # install.packages('numDeriv') + # devtools::install_github("nimble-dev/nimble", ref = "ADoak", subdir = "packages/nimble") # Remove this line once AD is released in NIMBLE shell: Rscript {0} - name: Session info diff --git a/.github/workflows/check_windows.yaml b/.github/workflows/check_windows.yaml index 6749077..6601e19 100644 --- a/.github/workflows/check_windows.yaml +++ b/.github/workflows/check_windows.yaml @@ -9,10 +9,12 @@ on: branches: - main - master + - AD-rc0 pull_request: branches: - main - master + - AD-rc0 name: R-CMD-check @@ -37,12 +39,12 @@ jobs: steps: - uses: actions/checkout@v2 - - uses: r-lib/actions/setup-r@master + - uses: r-lib/actions/setup-r@v2 with: r-version: ${{ matrix.config.r }} http-user-agent: ${{ matrix.config.http-user-agent }} - - uses: r-lib/actions/setup-pandoc@master + - uses: r-lib/actions/setup-pandoc@v2 - name: Query dependencies run: | @@ -70,6 +72,10 @@ jobs: run: | remotes::install_deps(dependencies = TRUE) remotes::install_cran("rcmdcheck") +# install.packages('devtools') +# install.packages('pracma') +# install.packages('numDeriv') +# devtools::install_github("nimble-dev/nimble", ref = "ADoak", subdir = "packages/nimble") # Remove this line once AD is released in NIMBLE shell: Rscript {0} - name: Session info diff --git a/DESCRIPTION b/DESCRIPTION index 536e76f..516b806 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,20 +1,21 @@ 
Package: nimbleEcology Type: Package Title: Distributions for Ecological Models in 'nimble' -Version: 0.4.1 +Version: 0.5.0 Maintainer: Benjamin R. Goldstein Authors@R: c(person("Benjamin R.", "Goldstein", role = c("aut", "cre"), - email = "ben.goldstein@berkeley.edu"), + email = "bgoldst2@ncsu.edu"), person("Daniel", "Turek", role = "aut"), person("Lauren", "Ponisio", role = "aut"), + person("Wei", "Zhang", role = "ctb"), person("Perry", "de Valpine", role = "aut")) -Date: 2021-11-1 +Date: 2024-06-24 Description: Common ecological distributions for 'nimble' models in the form of nimbleFunction objects. Includes Cormack-Jolly-Seber, occupancy, dynamic occupancy, hidden Markov, dynamic hidden Markov, and N-mixture models. (Jolly (1965) , Seber (1965) , Turek et al. (2016) ). License: GPL-3 -Copyright: Copyright (c) 2019, Perry de Valpine, Ben Goldstein, Daniel Turek, Lauren Ponisio -Depends: R (>= 3.4.0), nimble +Copyright: Copyright (c) 2024, Perry de Valpine, Ben Goldstein, Daniel Turek, Lauren Ponisio +Depends: R (>= 4.0.0), nimble Encoding: UTF-8 VignetteBuilder: knitr URL: https://github.com/nimble-dev/nimbleEcology @@ -27,8 +28,9 @@ Collate: dHMM.R dOcc.R dNmixture.R + dNmixtureAD.R zzz.R -RoxygenNote: 7.1.2 +RoxygenNote: 7.3.1 Suggests: rmarkdown, knitr, diff --git a/NAMESPACE b/NAMESPACE index 33a94ba..627a8de 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,4 +1,7 @@ +# Generated by roxygen2: do not edit by hand +export(dBetaBinom_s) +export(dBetaBinom_v) export(dCJS_ss) export(dCJS_sv) export(dCJS_vs) @@ -19,19 +22,38 @@ export(dDynOcc_vvs) export(dDynOcc_vvv) export(dHMM) export(dHMMo) -export(dNmixture_s) -export(dNmixture_v) -export(dNmixture_BNB_oneObs) -export(dNmixture_BNB_v) -export(dNmixture_BNB_s) -export(dNmixture_BBP_oneObs) -export(dNmixture_BBP_v) -export(dNmixture_BBP_s) +export(dNmixtureAD_BBNB_oneObs) +export(dNmixtureAD_BBNB_s) +export(dNmixtureAD_BBNB_v) +export(dNmixtureAD_BBP_oneObs) +export(dNmixtureAD_BBP_s) +export(dNmixtureAD_BBP_v) 
+export(dNmixtureAD_BNB_oneObs) +export(dNmixtureAD_BNB_s) +export(dNmixtureAD_BNB_v) +export(dNmixtureAD_s) +export(dNmixtureAD_v) export(dNmixture_BBNB_oneObs) -export(dNmixture_BBNB_v) export(dNmixture_BBNB_s) +export(dNmixture_BBNB_steps) +export(dNmixture_BBNB_v) +export(dNmixture_BBP_oneObs) +export(dNmixture_BBP_s) +export(dNmixture_BBP_steps) +export(dNmixture_BBP_v) +export(dNmixture_BNB_oneObs) +export(dNmixture_BNB_s) +export(dNmixture_BNB_steps) +export(dNmixture_BNB_v) +export(dNmixture_s) +export(dNmixture_steps) +export(dNmixture_v) export(dOcc_s) export(dOcc_v) +export(nimBetaFun) +export(nimNmixPois_logFac) +export(rBetaBinom_s) +export(rBetaBinom_v) export(rCJS_ss) export(rCJS_sv) export(rCJS_vs) @@ -52,27 +74,38 @@ export(rDynOcc_vvs) export(rDynOcc_vvv) export(rHMM) export(rHMMo) -export(rNmixture_s) -export(rNmixture_v) -export(rNmixture_BNB_oneObs) -export(rNmixture_BNB_v) -export(rNmixture_BNB_s) -export(rNmixture_BBP_oneObs) -export(rNmixture_BBP_v) -export(rNmixture_BBP_s) +export(rNmixtureAD_BBNB_oneObs) +export(rNmixtureAD_BBNB_s) +export(rNmixtureAD_BBNB_v) +export(rNmixtureAD_BBP_oneObs) +export(rNmixtureAD_BBP_s) +export(rNmixtureAD_BBP_v) +export(rNmixtureAD_BNB_oneObs) +export(rNmixtureAD_BNB_s) +export(rNmixtureAD_BNB_v) +export(rNmixtureAD_s) +export(rNmixtureAD_v) export(rNmixture_BBNB_oneObs) -export(rNmixture_BBNB_v) export(rNmixture_BBNB_s) -export(nimNmixPois_logFac) +export(rNmixture_BBNB_v) +export(rNmixture_BBP_oneObs) +export(rNmixture_BBP_s) +export(rNmixture_BBP_v) +export(rNmixture_BNB_oneObs) +export(rNmixture_BNB_s) +export(rNmixture_BNB_v) +export(rNmixture_s) +export(rNmixture_v) export(rOcc_s) export(rOcc_v) -export(nimBetaFun) -export(dBetaBinom) -export(dBetaBinom_One) -export(rBetaBinom) -export(rBetaBinom_One) import(nimble) importFrom(stats,dbinom) +importFrom(stats,dnbinom) +importFrom(stats,dpois) +importFrom(stats,qnbinom) +importFrom(stats,qpois) +importFrom(stats,rbeta) importFrom(stats,rbinom) 
+importFrom(stats,rnbinom) +importFrom(stats,rpois) importFrom(stats,runif) -importFrom("stats", "dpois", "qpois", "rpois", "dnbinom", "rnbinom", "rbeta", "qnbinom") diff --git a/R/dBetaBinom.R b/R/dBetaBinom.R index a65f371..1a24682 100644 --- a/R/dBetaBinom.R +++ b/R/dBetaBinom.R @@ -1,60 +1,59 @@ # dBetaBinom #' A beta binomial distribution and beta function for use in \code{nimble} models #' -#' \code{dBetaBinom} and \code{dBetaBinom_One} provide a beta binomial +#' \code{dBetaBinom_v} and \code{dBetaBinom_s} provide a beta binomial #' distribution that can be used directly from R or in \code{nimble} #' models. These are also used by beta binomial variations of dNmixture distributions. #' \code{nimBetaFun} is the beta function. #' #' @name dBetaBinom -#' @aliases nimBetaFun dBetaBinom dBetaBinom_One rBetaBinom rBetaBinom_One +#' @aliases nimBetaFun dBetaBinom_v dBetaBinom_s rBetaBinom_v rBetaBinom_s #' #' @author Ben Goldstein and Perry de Valpine #' #' @param x vector of integer counts. #' @param N number of trials, sometimes called "size". -#' @param shape1 shape1 parameter of the beta-binomial distribution. -#' @param shape2 shape2 parameter of the beta-binomial distribution. +#' @param shape1 shape1 parameter of the beta distribution. +#' @param shape2 shape2 parameter of the beta distribution. #' @param log TRUE or 1 to return log probability. FALSE or 0 to return #' probability. #' @param n number of random draws, each returning a vector of length #' \code{len}. Currently only \code{n = 1} is supported, but the argument #' exists for standardization of "\code{r}" functions. -#' @param a shape1 argument of the beta function nimBetaFun. -#' @param b shape2 argument of the beta function nimBetaFun. +#' @param len length of \code{x}. +#' @param a shape1 argument of the beta function. +#' @param b shape2 argument of the beta function. 
#' -#' @details These nimbleFunctions provide distributions that can be -#' used directly in R or in \code{nimble} hierarchical models (via -#' \code{\link[nimble]{nimbleCode}} and -#' \code{\link[nimble]{nimbleModel}}). They were originally written for -#' the beta binomial N-mixture extensions. +#' @details These nimbleFunctions provide distributions that can be used +#' directly in R or in \code{nimble} hierarchical models (via +#' \code{\link[nimble]{nimbleCode}} and \code{\link[nimble]{nimbleModel}}). +#' They are used by the beta-binomial variants of the N-mixture distributions +#' (\code{\link{dNmixture}}). #' -#' The beta binomial distribution is equivalent to a binomial distribution whose -#' probability is itself a beta distributed random variable. +#' The beta binomial is the marginal distribution of a binomial distribution whose +#' probability follows a beta distribution. #' #' The probability mass function of the beta binomial is #' \code{choose(N, x) * B(x + shape1, N - x + shape2) / #' B(shape1, shape2)}, where \code{B(shape1, shape2)} is the beta function. #' -#' The beta binomial distribution is provided in two forms. \code{dBetaBinom} and -#' \code{rBetaBinom} are used when \code{x} is a vector (i.e. \code{length(x) > 1}), -#' in which case the parameters \code{alpha} and \code{beta} must also be vectors. -#' When \code{x} is scalar, \code{dBetaBinom_One} and \code{rBetaBinom_One} are -#' used. +#' \code{nimBetaFun(shape1, shape2)} calculates \code{B(shape1, shape2)}. +#' +#' The beta binomial distribution is provided in two forms. \code{dBetaBinom_v} +#' is used when \code{shape1} and \code{shape2} are vectors. +#' \code{dBetaBinom_s} is used when \code{shape1} and \code{shape2} are scalars. +#' In both cases, \code{x} is a vector. #' #' @seealso For beta binomial N-mixture models, see \code{\link{dNmixture}}.
#' For documentation on the beta function, use \code{?stats::dbeta} #' #' @examples -#' # Calculate a beta binomial probability -#' dBetaBinom(x = c(4, 0, 0, 3), N = 10, +#' # Calculate a beta binomial probability with different shape1 and shape2 for each x[i] +#' dBetaBinom_v(x = c(4, 0, 0, 3), N = 10, #' shape1 = c(0.5, 0.5, 0.3, 0.5), shape2 = c(0.2, 0.4, 1, 1.2)) -#' # Same for case with one observation -#' dBetaBinom_One(x = 3, N = 10, shape1 = 0.5, shape2 = 0.5, log = TRUE) -#' @export +#' # or with constant shape1 and shape2 +#' dBetaBinom_s(x = c(4, 0, 0, 3), N = 10, shape1 = 0.5, shape2 = 0.5, log = TRUE) -NULL -# nimbleOptions(checkNimbleFunction = FALSE) ##### Beta binomial support functions ##### #' @rdname dBetaBinom @@ -63,61 +62,71 @@ nimBetaFun <- nimbleFunction( run = function(a = double(0), b = double(0), log = logical(0)) { - if (log) return(lgamma(a) + lgamma(b) - lgamma(a + b)) - else return(exp(lgamma(a) + lgamma(b) - lgamma(a + b))) + log_ans <- lgamma(a) + lgamma(b) - lgamma(a + b) + if (log) return(log_ans) + else return(exp(log_ans)) returnType(double(0)) - }) + }, buildDerivs=list(run=list())) #' @rdname dBetaBinom #' @export -dBetaBinom <- nimbleFunction( +dBetaBinom_v <- nimbleFunction( run = function(x = double(1), N = double(0), shape1 = double(1), shape2 = double(1), + len = double(), log = integer(0, default = 0)) { logprob <- 0 + lgNp1 <- lgamma(N+1) for (i in 1:length(x)) { logprob <- logprob + nimBetaFun(a = x[i] + shape1[i], b = N - x[i] + shape2[i], log = TRUE) - - nimBetaFun(a = shape1[i], b = shape2[ i], log = TRUE) + - lgamma(N+1) - (lgamma(x[i] + 1) + lgamma(N - x[i] + 1)) + nimBetaFun(a = shape1[i], b = shape2[i], log = TRUE) + + lgNp1 - (lgamma(x[i] + 1) + lgamma(N - x[i] + 1)) } if (log) return(logprob) return(exp(logprob)) returnType(double(0)) - } + }, + buildDerivs = list(run = list(ignore = 'i')) ) #' @rdname dBetaBinom #' @export -dBetaBinom_One <- nimbleFunction( - run = function(x = double(0), +dBetaBinom_s <- 
nimbleFunction( + run = function(x = double(1), N = double(0), shape1 = double(0), shape2 = double(0), + len = double(), log = integer(0, default = 0)) { logprob <- 0 - logprob <- logprob + - nimBetaFun(a = x + shape1, b = N - x + shape2, log = TRUE) - - nimBetaFun(a = shape1, b = shape2, log = TRUE) + - lgamma(N+1) - (lgamma(x+1) + lgamma(N - x + 1)) - + lgNp1 <- lgamma(N+1) + lbs1s2 <- nimBetaFun(a = shape1, b = shape2, log = TRUE) + for (i in 1:length(x)) { + logprob <- logprob + + nimBetaFun(a = x[i] + shape1, b = N - x[i] + shape2, log = TRUE) - + lbs1s2 + + lgNp1 - (lgamma(x[i] + 1) + lgamma(N - x[i] + 1)) + } if (log) return(logprob) return(exp(logprob)) returnType(double(0)) - } + }, + buildDerivs = list(run=list(ignore = 'i')) ) - #' @rdname dBetaBinom #' @export -rBetaBinom <- nimbleFunction( +#' @importFrom stats rbeta +rBetaBinom_v <- nimbleFunction( run = function(n = double(0), N = double(0), shape1 = double(1), - shape2 = double(1)) { + shape2 = double(1), + len = double()) { p <- numeric(length(shape1)) for (i in 1:length(shape1)) { p[i] <- rbeta(1, shape1[i], shape2[i]) @@ -129,16 +138,18 @@ rBetaBinom <- nimbleFunction( #' @rdname dBetaBinom #' @export -rBetaBinom_One <- nimbleFunction( +#' @importFrom stats rbeta +rBetaBinom_s <- nimbleFunction( run = function(n = double(0), N = double(0), shape1 = double(0), - shape2 = double(0)) { - p <- rbeta(1, shape1, shape2) - x <- rbinom(1, N, p) + shape2 = double(0), + len = double()) { + p <- numeric(length=len) + for (i in 1:len) { + p[i] <- rbeta(1, shape1, shape2) + } + x <- rbinom(len, N, p) return(x) - returnType(double()) + returnType(double(1)) }) - -# nimbleOptions(checkNimbleFunction = TRUE) - diff --git a/R/dCJS.R b/R/dCJS.R index 25e8083..f3eb973 100644 --- a/R/dCJS.R +++ b/R/dCJS.R @@ -92,6 +92,19 @@ #' #' and so on for each combination of time-dependent and time-independent parameters. 
#' +#' @section Notes for use with automatic differentiation: +#' +#' The \code{dCJS_**} distributions should all work for models and algorithms +#' that use nimble's automatic differentiation (AD) system. In that system, +#' some kinds of values are "baked in" (cannot be changed) to the AD calculations +#' from the first call, unless and until the AD calculations are reset. For +#' the \code{dCJS_**} distributions, the lengths of vector inputs and the data +#' (\code{x}) values themselves are baked in. These can be different for different +#' iterations through a for loop (or nimble model declarations with different indices, +#' for example), but the lengths and data values for each specific iteration +#' will be "baked in" after the first call. \bold{In other words, it is assumed that +#' \code{x} are data and are not going to change.} +#' #' @return #' #' For \code{dCJS_**}: the probability (or likelihood) or log probability of observation vector \code{x}. @@ -138,7 +151,7 @@ dCJS_ss <- nimbleFunction( run = function(x = double(1), ## standard name for the "data" probSurvive = double(), probCapture = double(), - len = double(0, default = 0), + len = integer(0, default = 0), log = integer(0, default = 0) ## required log argument ) { @@ -158,8 +171,9 @@ dCJS_ss <- nimbleFunction( ## probAlive is P(Alive(t) | x(1)...x(t-1)) ## probAliveGivenHistory is (Alive(t-1) | x(1)...x(t-1)) probAlive <- probAliveGivenHistory * probSurvive - if (!is.na(x[t])) { - if (x[t] == 1) { + xt <- ADbreak(x[t]) + if (!is.na(xt)) { + if (xt == 1) { ## ProbThisObs = P(x(t) | x(1)...x(t-1)) probThisObs <- probAlive * probCapture probAliveGivenHistory <- 1 @@ -174,7 +188,7 @@ dCJS_ss <- nimbleFunction( if (log) return(logProbData) return(exp(logProbData)) returnType(double(0)) - } + }, buildDerivs = list(run = list(ignore = c('i', "xt", "t"))) ) #' @rdname dCJS @@ -183,7 +197,7 @@ dCJS_sv <- nimbleFunction( run = function(x = double(1), ## standard name for the "data" probSurvive = 
double(), probCapture = double(1), - len = double(0, default = 0), + len = integer(0, default = 0), log = integer(0, default = 0) ## required log argument ) { if (len != 0) { @@ -204,8 +218,9 @@ dCJS_sv <- nimbleFunction( ## probAlive is P(Alive(t) | x(1)...x(t-1)) ## probAliveGivenHistory is (Alive(t-1) | x(1)...x(t-1)) probAlive <- probAliveGivenHistory * probSurvive - if (!is.na(x[t])) { - if (x[t] == 1) { + xt <- ADbreak(x[t]) + if (!is.na(xt)) { + if (xt == 1) { ## ProbThisObs = P(x(t) | x(1)...x(t-1)) probThisObs <- probAlive * probCapture[t] probAliveGivenHistory <- 1 @@ -220,7 +235,7 @@ dCJS_sv <- nimbleFunction( if (log) return(logProbData) return(exp(logProbData)) returnType(double()) - } + }, buildDerivs = list(run = list(ignore = c('i', "xt", "t"))) ) @@ -230,7 +245,7 @@ dCJS_vs <- nimbleFunction( run = function(x = double(1), ## standard name for the "data" probSurvive = double(1), probCapture = double(), - len = double(0, default = 0), + len = integer(0, default = 0), log = integer(0, default = 0) ## required log argument ) { if (len != 0) { @@ -252,8 +267,9 @@ dCJS_vs <- nimbleFunction( ## probAlive is P(Alive(t) | x(1)...x(t-1)) ## probAliveGivenHistory is (Alive(t-1) | x(1)...x(t-1)) probAlive <- probAliveGivenHistory * probSurvive[t - 1] - if (!is.na(x[t])) { - if (x[t] == 1) { + xt <- ADbreak(x[t]) + if (!is.na(xt)) { + if (xt == 1) { ## ProbThisObs = P(x(t) | x(1)...x(t-1)) probThisObs <- probAlive * probCapture probAliveGivenHistory <- 1 @@ -268,7 +284,7 @@ dCJS_vs <- nimbleFunction( if (log) return(logProbData) return(exp(logProbData)) returnType(double()) - } + }, buildDerivs = list(run = list(ignore = c('i', "xt", "t"))) ) @@ -282,7 +298,7 @@ dCJS_vv <- nimbleFunction( run = function(x = double(1), ## standard name for the "data" probSurvive = double(1), probCapture = double(1), - len = double(0, default = 0), + len = integer(0, default = 0), log = integer(0, default = 0) ## required log argument ) { if (len != 0) { @@ -304,8 +320,9 @@ 
dCJS_vv <- nimbleFunction( ## probAlive is P(Alive(t) | x(1)...x(t-1)) ## probAliveGivenHistory is (Alive(t-1) | x(1)...x(t-1)) probAlive <- probAliveGivenHistory * probSurvive[t - 1] - if (!is.na(x[t])) { - if (x[t] == 1) { + xt <- ADbreak(x[t]) + if (!is.na(xt)) { + if (xt == 1) { ## ProbThisObs = P(x(t) | x(1)...x(t-1)) probThisObs <- probAlive * probCapture[t] probAliveGivenHistory <- 1 @@ -322,7 +339,7 @@ dCJS_vv <- nimbleFunction( } return(exp(logProbData)) returnType(double()) - } + }, buildDerivs = list(run = list(ignore = c('i', "xt", "t"))) ) #' @rdname dCJS @@ -331,7 +348,7 @@ rCJS_ss <- nimbleFunction( run = function(n = integer(), probSurvive = double(), probCapture = double(), - len = double(0, default = 0)) { + len = integer(0, default = 0)) { if (n != 1) stop("rCJS only works for n = 1") if (len < 2) stop("len must be greater than 1.") @@ -359,10 +376,10 @@ rCJS_sv <- nimbleFunction( run = function(n = integer(), probSurvive = double(), probCapture = double(1), - len = double(0, default = 0)) { + len = integer(0, default = 0)) { if (n != 1) stop("rCJS only works for n = 1") if (len < 2) - stop("len must be non-negative.") + stop("len must be greater than 1.") if (length(probCapture) != len) stop("Length of probCapture is not the same as len.") ans <- numeric(length = len, init = FALSE) @@ -389,7 +406,7 @@ rCJS_vs <- nimbleFunction( run = function(n = integer(), probSurvive = double(1), probCapture = double(), - len = double(0, default = 0)) { + len = integer(0, default = 0)) { if (n != 1) stop("rCJS only works for n = 1") if (len < 2) stop("len must be greater than 1.") @@ -419,7 +436,7 @@ rCJS_vv <- nimbleFunction( run = function(n = integer(), probSurvive = double(1), probCapture = double(1), - len = double(0, default = 0)) { + len = integer(0, default = 0)) { if (n != 1) stop("rCJS only works for n = 1") if (len < 2) stop("len must be greater than 1.") diff --git a/R/dDHMM.R b/R/dDHMM.R index b877020..0e85ba6 100644 --- a/R/dDHMM.R +++ 
b/R/dDHMM.R @@ -109,6 +109,17 @@ #' \code{observedStates[1:T] ~ dDHMMo(initStates[1:S], observationProbs[1:S, #' 1:O, 1:T], transitionProbs[1:S, 1:S, 1:(T-1)], 1, T)} #' +#' The \code{dDHMM[o]} distributions should work for models and algorithms that +#' use nimble's automatic differentiation (AD) system. In that system, some +#' kinds of values are "baked in" (cannot be changed) to the AD calculations +#' from the first call, unless and until the AD calculations are reset. For the +#' \code{dDHMM[o]} distributions, the sizes of the inputs and the data (\code{x}) +#' values themselves are baked in. These can be different for different +#' iterations through a for loop (or nimble model declarations with different +#' indices, for example), but the sizes and data values for each specific +#' iteration will be "baked in" after the first call. \bold{In other words, it +#' is assumed that \code{x} are data and are not going to change.} +#' #' @return For \code{dDHMM} and \code{dDHMMo}: the probability (or likelihood) #' or log probability of observation vector \code{x}. For \code{rDHMM} and #' \code{rDHMMo}: a simulated detection history, \code{x}. 
@@ -170,20 +181,22 @@ dDHMM <- nimbleFunction( init = double(1), probObs = double(2), probTrans = double(3), - len = double(),## length of x (needed as a separate param for rDHMM) - checkRowSums = double(0, default = 1), + len = integer(),## length of x (needed as a separate param for rDHMM) + checkRowSums = integer(0, default = 1), log = integer(0, default = 0)) { if (length(init) != dim(probObs)[1]) stop("In dDHMM: Length of init does not match nrow of probObs in dDHMM.") if (length(init) != dim(probTrans)[1]) stop("In dDHMM: Length of init does not match dim(probTrans)[1] in dDHMM.") if (length(init) != dim(probTrans)[2]) stop("In dDHMM: Length of init does not match dim(probTrans)[2] in dDHMM.") if (length(x) != len) stop("In dDHMM: Length of x does not match len in dDHMM.") if (len - 1 != dim(probTrans)[3]) stop("In dDHMM: len - 1 does not match dim(probTrans)[3] in dDHMM.") + if (abs(sum(init) - 1) > 1e-6) stop("In dDHMM: Initial probabilities must sum to 1.") if (checkRowSums) { transCheckPasses <- TRUE for (i in 1:dim(probTrans)[1]) { for (k in 1:dim(probTrans)[3]) { - thisCheckSum <- sum(probTrans[i,,k]) + thisCheckSumTemp <- sum(probTrans[i,,k]) + thisCheckSum <- ADbreak(thisCheckSumTemp) if (abs(thisCheckSum - 1) > 1e-6) { ## Compilation doesn't support more than a simple string for stop() ## so we provide more detail using a print(). @@ -194,7 +207,8 @@ dDHMM <- nimbleFunction( } obsCheckPasses <- TRUE for (i in 1:dim(probObs)[1]) { - thisCheckSum <- sum(probObs[i,]) + thisCheckSumTemp <- sum(probObs[i,]) + thisCheckSum <- ADbreak(thisCheckSumTemp) if (abs(thisCheckSum - 1) > 1e-6) { print("In dDHMM: Problem with sum(probObs[i,]) with i = ", i, ". 
The sum should be 1 but is ", thisCheckSum) obsCheckPasses <- FALSE @@ -211,19 +225,21 @@ dDHMM <- nimbleFunction( pi <- init # State probabilities at time t=1 logL <- 0 nObsClasses <- dim(probObs)[2] - lengthX <- length(x) - for (t in 1:lengthX) { - if (x[t] > nObsClasses | x[t] < 1) stop("In dDHMM: Invalid value of x[t].") - Zpi <- probObs[, x[t]] * pi # Vector of P(state) * P(observation class x[t] | state) + declare(t, integer()) + for (t in 1:len) { + xt <- ADbreak(x[t]) + # probTrans_t <- ADbreak(probTrans[,,t]) + if (xt > nObsClasses | xt < 1) stop("In dDHMM: Invalid value of x[t].") + Zpi <- probObs[, xt] * pi # Vector of P(state) * P(observation class x[t] | state) sumZpi <- sum(Zpi) # Total P(observed as class x[t]) logL <- logL + log(sumZpi) # Accumulate log probabilities through time - if (t != lengthX) pi <- ((Zpi %*% probTrans[,,t])/sumZpi)[1, ] # State probabilities at t+1 + if (t != len) pi <- ((Zpi %*% probTrans[,,t])/sumZpi)[1, ] # State probabilities at t+1 } returnType(double()) if (log) return(logL) return(exp(logL)) - } + }, buildDerivs = list(run = list(ignore = c('i', 'k', 't', 'xt', 'thisCheckSum'))) ) #' @export @@ -233,8 +249,8 @@ dDHMMo <- nimbleFunction( init = double(1),## probObs = double(3), probTrans = double(3), - len = double(),## length of x (needed as a separate param for rDHMM) - checkRowSums = double(0, default = 1), + len = integer(),## length of x (needed as a separate param for rDHMM) + checkRowSums = integer(0, default = 1), log = integer(0, default = 0)) { if (length(init) != dim(probObs)[1]) stop("In dDHMMo: Length of init does not match ncol of probObs in dDHMMo.") if (length(init) != dim(probTrans)[1]) stop("In dDHMMo: Length of init does not match dim(probTrans)[1] in dDHMMo.") @@ -248,7 +264,8 @@ dDHMMo <- nimbleFunction( transCheckPasses <- TRUE for (i in 1:dim(probTrans)[1]) { for (k in 1:dim(probTrans)[3]) { - thisCheckSum <- sum(probTrans[i,,k]) + thisCheckSumTemp <- sum(probTrans[i,,k]) + thisCheckSum <- 
ADbreak(thisCheckSumTemp) if (abs(thisCheckSum - 1) > 1e-6) { ## Compilation doesn't support more than a simple string for stop() ## so we provide more detail using a print(). @@ -260,7 +277,8 @@ dDHMMo <- nimbleFunction( obsCheckPasses <- TRUE for (i in 1:dim(probObs)[1]) { for (k in 1:dim(probObs)[3]) { - thisCheckSum <- sum(probObs[i,,k]) + thisCheckSumTemp <- sum(probObs[i,,k]) + thisCheckSum <- ADbreak(thisCheckSumTemp) if (abs(thisCheckSum - 1) > 1e-6) { print("In dDHMMo: Problem with sum(probObs[i,,k]) with i = ", i, " k = ", k, ". The sum should be 1 but is ", thisCheckSum) obsCheckPasses <- FALSE @@ -280,8 +298,9 @@ dDHMMo <- nimbleFunction( nObsClasses <- dim(probObs)[2] lengthX <- length(x) for (t in 1:lengthX) { - if (x[t] > nObsClasses | x[t] < 1) stop("In dDHMMo: Invalid value of x[t].") - Zpi <- probObs[, x[t], t] * pi # Vector of P(state) * P(observation class x[t] | state) + xt <- ADbreak(x[t]) + if (xt > nObsClasses | xt < 1) stop("In dDHMMo: Invalid value of x[t].") + Zpi <- probObs[, xt, t] * pi # Vector of P(state) * P(observation class x[t] | state) sumZpi <- sum(Zpi) # Total P(observed as class x[t]) logL <- logL + log(sumZpi) # Accumulate log probabilities through time if (t != lengthX) pi <- ((Zpi %*% probTrans[,,t])/sumZpi)[1, ] # State probabilities at t+1 @@ -289,7 +308,7 @@ dDHMMo <- nimbleFunction( returnType(double()) if (log) return(logL) return(exp(logL)) - } + }, buildDerivs = list(run = list(ignore = c('i', 'k', 't', 'xt', 'thisCheckSum'))) ) #' @export @@ -299,8 +318,8 @@ rDHMM <- nimbleFunction( init = double(1), probObs = double(2), probTrans = double(3), - len = double(), - checkRowSums = double(0, default = 1)) { + len = integer(), + checkRowSums = integer(0, default = 1)) { nStates <- length(init) if (nStates != dim(probObs)[1]) stop("In rDHMM: Length of init does not match nrow of probObs in dDHMM.") if (nStates != dim(probTrans)[1]) stop("In rDHMM: Length of init does not match dim(probTrans)[1] in dDHMM.") @@ -378,8 
+397,8 @@ rDHMMo <- nimbleFunction( init = double(1), probObs = double(3), probTrans = double(3), - len = double(), - checkRowSums = double(0, default = 1)) { + len = integer(), + checkRowSums = integer(0, default = 1)) { nStates <- length(init) if (nStates != dim(probObs)[1]) stop("In rDHMMo: Length of init does not match nrow of probObs in dDHMM.") if (nStates != dim(probTrans)[1]) stop("In rDHMMo: Length of init does not match dim(probTrans)[1] in dDHMM.") diff --git a/R/dDynOcc.R b/R/dDynOcc.R index 9f186ea..5c2cd3a 100644 --- a/R/dDynOcc.R +++ b/R/dDynOcc.R @@ -122,6 +122,20 @@ #' probPersist = persistence_prob, #' probColonize = colonization_prob[1:(T-1)], p = p[1:T, 1:O])} #' +#' @section Notes for use with automatic differentiation: +#' +#' The \code{dDynOcc_***} distributions should all work for models and +#' algorithms that use nimble's automatic differentiation (AD) system. In that +#' system, some kinds of values are "baked in" (cannot be changed) to the AD +#' calculations from the first call, unless and until the AD calculations are +#' reset. For the \code{dDynOcc_***} distributions, the lengths or dimensions of +#' vector and/or matrix inputs and the \code{start} and \code{end} values +#' themselves are baked in. These can be different for different iterations +#' through a for loop (or nimble model declarations with different indices, for +#' example), but the values for each specific iteration will be "baked in" after the +#' first call. \bold{It is safest if one can assume that \code{x} are data and +#' are not going to change.} +#' #' @return #' For \code{dDynOcc_***}: the probability (or likelihood) or log probability #' of observation vector \code{x}.
@@ -188,7 +202,7 @@ dDynOcc_vvm <- nimbleFunction( p = double(2), start = double(1), end = double(1), - log = double(0, default = 0)) { + log = integer(0, default = 0)) { if (length(probPersist) < dim(x)[1] - 1) stop("Length of probPersist vector must be at least length(x) - 1.") if (length(probColonize) < dim(x)[1] - 1) stop("Length of probColonize vector must be at least length(x) - 1.") if (dim(p)[1] != dim(x)[1]) stop("Dimension mismatch between x and p matrices.") @@ -200,16 +214,17 @@ dDynOcc_vvm <- nimbleFunction( nyears <- dim(x)[1] if (nyears >= 1) { for (t in 1:nyears) { - if (end[t] - start[t] + 1 > 0) { - numObs <- sum(x[t, start[t]:end[t]]) - if (is.na(numObs)) numObs <- 0 + istart <- ADbreak(start[t]) + iend <- ADbreak(end[t]) + if (iend - istart + 1 > 0) { + numObs <- sum(x[t, istart:iend]) if (numObs < 0) { print("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") stop("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") } ProbOccAndCount <- ProbOccNextTime * - exp(sum(dbinom(x[t,start[t]:end[t]], - size = 1, prob = p[t,start[t]:end[t]], log = 1))) + exp(sum(dbinom(x[t, istart:iend], + size = 1, prob = p[t, istart:iend], log = 1))) ProbUnoccAndCount <- (1 - ProbOccNextTime) * (numObs == 0) ProbCount <- ProbOccAndCount + ProbUnoccAndCount ProbOccGivenCount <- ProbOccAndCount / ProbCount @@ -229,7 +244,7 @@ dDynOcc_vvm <- nimbleFunction( if (log) return(ll) else return(exp(ll)) returnType(double(0)) - } + }, buildDerivs = list(run = list(ignore = c('t', 'istart', 'iend'))) ) #' @rdname dDynOcc @@ -244,7 +259,7 @@ dDynOcc_vsm <- nimbleFunction( p = double(2), start = double(1), end = double(1), - log = double(0, default = 0)) { + log = integer(0, default = 0)) { if (length(probPersist) < dim(x)[1] - 1) stop("Length of probPersist vector must be at least length(x) - 1.") if (dim(p)[1] != dim(x)[1]) stop("Dimension mismatch between x and p matrices.") @@ -256,15 +271,17 @@ dDynOcc_vsm <- 
nimbleFunction( nyears <- dim(x)[1] if (nyears >= 1) { for (t in 1:nyears) { - if (end[t] - start[t] + 1 > 0) { - numObs <- sum(x[t,start[t]:end[t]]) + istart <- ADbreak(start[t]) + iend <- ADbreak(end[t]) + if (iend - istart + 1 > 0) { + numObs <- sum(x[t, istart:iend]) if (numObs < 0) { print("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") stop("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") } ProbOccAndCount <- ProbOccNextTime * - exp(sum(dbinom(x[t,start[t]:end[t]], - size = 1, prob = p[t,start[t]:end[t]], log = 1))) + exp(sum(dbinom(x[t, istart:iend], + size = 1, prob = p[t, istart:iend], log = 1))) ProbUnoccAndCount <- (1 - ProbOccNextTime) * (numObs == 0) ProbCount <- ProbOccAndCount + ProbUnoccAndCount ProbOccGivenCount <- ProbOccAndCount / ProbCount @@ -284,7 +301,7 @@ dDynOcc_vsm <- nimbleFunction( if (log) return(ll) else return(exp(ll)) returnType(double(0)) - } + }, buildDerivs = list(run = list(ignore = c('t', 'istart', 'iend'))) ) #' @rdname dDynOcc @@ -299,7 +316,7 @@ dDynOcc_svm <- nimbleFunction( p = double(2), start = double(1), end = double(1), - log = double(0, default = 0)) { + log = integer(0, default = 0)) { if (length(probColonize) < dim(x)[1] - 1) stop("Length of probColonize vector must be at least length(x) - 1.") if (dim(p)[1] != dim(x)[1]) stop("Dimension mismatch between x and p matrices.") if (dim(p)[2] != dim(x)[2]) stop("Dimension mismatch between x and p matrices.") @@ -310,15 +327,17 @@ dDynOcc_svm <- nimbleFunction( nyears <- dim(x)[1] if (nyears >= 1) { for (t in 1:nyears) { - if (end[t] - start[t] + 1 > 0) { - numObs <- sum(x[t,start[t]:end[t]]) + istart <- ADbreak(start[t]) + iend <- ADbreak(end[t]) + if (iend - istart + 1 > 0) { + numObs <- sum(x[t, istart:iend]) if (numObs < 0) { print("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") stop("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") } ProbOccAndCount 
<- ProbOccNextTime * - exp(sum(dbinom(x[t,start[t]:end[t]], - size = 1, prob = p[t,start[t]:end[t]], log = 1))) + exp(sum(dbinom(x[t, istart:iend], + size = 1, prob = p[t, istart:iend], log = 1))) ProbUnoccAndCount <- (1 - ProbOccNextTime) * (numObs == 0) ProbCount <- ProbOccAndCount + ProbUnoccAndCount ProbOccGivenCount <- ProbOccAndCount / ProbCount @@ -338,7 +357,7 @@ dDynOcc_svm <- nimbleFunction( if (log) return(ll) else return(exp(ll)) returnType(double(0)) - } + }, buildDerivs = list(run = list(ignore = c('t', 'istart', 'iend'))) ) #' @rdname dDynOcc @@ -353,7 +372,7 @@ dDynOcc_ssm <- nimbleFunction( p = double(2), start = double(1), end = double(1), - log = double(0, default = 0)) { + log = integer(0, default = 0)) { # if (length(probColonize) != 1) stop("In dDynOcc_vs probColonize must be scalar") # if (length(probPersist) != 1) stop("In dDynOcc_vs probPersist must be scalar") if (dim(p)[1] != dim(x)[1]) stop("Dimension mismatch between x and p matrices.") @@ -365,15 +384,17 @@ dDynOcc_ssm <- nimbleFunction( nyears <- dim(x)[1] if (nyears >= 1) { for (t in 1:nyears) { - if (end[t] - start[t] + 1 > 0) { - numObs <- sum(x[t,start[t]:end[t]]) + istart <- ADbreak(start[t]) + iend <- ADbreak(end[t]) + if (iend - istart + 1 > 0) { + numObs <- sum(x[t, istart:iend]) if (numObs < 0) { print("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") stop("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") } ProbOccAndCount <- ProbOccNextTime * - exp(sum(dbinom(x[t,start[t]:end[t]], - size = 1, prob = p[t,start[t]:end[t]], log = 1))) + exp(sum(dbinom(x[t, istart:iend], + size = 1, prob = p[t, istart:iend], log = 1))) ProbUnoccAndCount <- (1 - ProbOccNextTime) * (numObs == 0) ProbCount <- ProbOccAndCount + ProbUnoccAndCount ProbOccGivenCount <- ProbOccAndCount / ProbCount @@ -393,7 +414,7 @@ dDynOcc_ssm <- nimbleFunction( if (log) return(ll) else return(exp(ll)) returnType(double(0)) - } + }, buildDerivs = list(run = 
list(ignore = c('t', 'istart', 'iend'))) ) #' @rdname dDynOcc @@ -521,7 +542,7 @@ dDynOcc_vvv <- nimbleFunction( p = double(1), start = double(1), end = double(1), - log = double(0, default = 0)) { + log = integer(0, default = 0)) { if (length(probPersist) < dim(x)[1] - 1) stop("Length of probPersist vector must be at least length(x) - 1.") if (length(probColonize) < dim(x)[1] - 1) stop("Length of probColonize vector must be at least length(x) - 1.") if (dim(p)[1] != dim(x)[1]) stop("Dimension mismatch between x matrix and p vector.") @@ -532,14 +553,16 @@ dDynOcc_vvv <- nimbleFunction( nyears <- dim(x)[1] if (nyears >= 1) { for (t in 1:nyears) { - if (end[t] - start[t] + 1 > 0) { - numObs <- sum(x[t,start[t]:end[t]]) + istart <- ADbreak(start[t]) + iend <- ADbreak(end[t]) + if (iend - istart + 1 > 0) { + numObs <- sum(x[t, istart:iend]) if (numObs < 0) { print("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") stop("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") } ProbOccAndCount <- ProbOccNextTime * - exp(sum(dbinom(x[t,start[t]:end[t]], + exp(sum(dbinom(x[t, istart:iend], size = 1, prob = p[t], log = 1))) ProbUnoccAndCount <- (1 - ProbOccNextTime) * (numObs == 0) ProbCount <- ProbOccAndCount + ProbUnoccAndCount @@ -560,7 +583,7 @@ dDynOcc_vvv <- nimbleFunction( if (log) return(ll) else return(exp(ll)) returnType(double(0)) - } + }, buildDerivs = list(run = list(ignore = c('t', 'istart', 'iend'))) ) #' @rdname dDynOcc @@ -575,7 +598,7 @@ dDynOcc_vsv <- nimbleFunction( p = double(1), start = double(1), end = double(1), - log = double(0, default = 0)) { + log = integer(0, default = 0)) { if (length(probPersist) < dim(x)[1] - 1) stop("Length of probPersist vector must be at least length(x) - 1.") if (dim(p)[1] != dim(x)[1]) stop("Dimension mismatch between x matrix and p vector.") @@ -586,14 +609,16 @@ dDynOcc_vsv <- nimbleFunction( nyears <- dim(x)[1] if (nyears >= 1) { for (t in 1:nyears) { - if (end[t] - 
start[t] + 1 > 0) { - numObs <- sum(x[t,start[t]:end[t]]) + istart <- ADbreak(start[t]) + iend <- ADbreak(end[t]) + if (iend - istart + 1 > 0) { + numObs <- sum(x[t, istart:iend]) if (numObs < 0) { print("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") stop("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") } ProbOccAndCount <- ProbOccNextTime * - exp(sum(dbinom(x[t,start[t]:end[t]], + exp(sum(dbinom(x[t, istart:iend], size = 1, prob = p[t], log = 1))) ProbUnoccAndCount <- (1 - ProbOccNextTime) * (numObs == 0) ProbCount <- ProbOccAndCount + ProbUnoccAndCount @@ -614,7 +639,7 @@ dDynOcc_vsv <- nimbleFunction( if (log) return(ll) else return(exp(ll)) returnType(double(0)) - } + }, buildDerivs = list(run = list(ignore = c('t', 'istart', 'iend'))) ) #' @rdname dDynOcc @@ -629,7 +654,7 @@ dDynOcc_svv <- nimbleFunction( p = double(1), start = double(1), end = double(1), - log = double(0, default = 0)) { + log = integer(0, default = 0)) { if (length(probColonize) < dim(x)[1] - 1) stop("Length of probColonize vector must be at least length(x) - 1.") if (dim(p)[1] != dim(x)[1]) stop("Dimension mismatch between x matrix and p vector.") @@ -639,14 +664,16 @@ dDynOcc_svv <- nimbleFunction( nyears <- dim(x)[1] if (nyears >= 1) { for (t in 1:nyears) { - if (end[t] - start[t] + 1 > 0) { - numObs <- sum(x[t,start[t]:end[t]]) + istart <- ADbreak(start[t]) + iend <- ADbreak(end[t]) + if (iend - istart + 1 > 0) { + numObs <- sum(x[t, istart:iend]) if (numObs < 0) { print("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") stop("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") } ProbOccAndCount <- ProbOccNextTime * - exp(sum(dbinom(x[t,start[t]:end[t]], + exp(sum(dbinom(x[t, istart:iend], size = 1, prob = p[t], log = 1))) ProbUnoccAndCount <- (1 - ProbOccNextTime) * (numObs == 0) ProbCount <- ProbOccAndCount + ProbUnoccAndCount @@ -667,7 +694,7 @@ dDynOcc_svv <- 
nimbleFunction( if (log) return(ll) else return(exp(ll)) returnType(double(0)) - } + }, buildDerivs = list(run = list(ignore = c('t', 'istart', 'iend'))) ) #' @rdname dDynOcc @@ -682,7 +709,7 @@ dDynOcc_ssv <- nimbleFunction( p = double(1), start = double(1), end = double(1), - log = double(0, default = 0)) { + log = integer(0, default = 0)) { # if (length(probColonize) != 1) stop("In dDynOcc_vs probColonize must be scalar") # if (length(probPersist) != 1) stop("In dDynOcc_vs probPersist must be scalar") if (dim(p)[1] != dim(x)[1]) stop("Dimension mismatch between x matrix and p vector.") @@ -693,14 +720,16 @@ dDynOcc_ssv <- nimbleFunction( nyears <- dim(x)[1] if (nyears >= 1) { for (t in 1:nyears) { - if (end[t] - start[t] + 1 > 0) { - numObs <- sum(x[t,start[t]:end[t]]) + istart <- ADbreak(start[t]) + iend <- ADbreak(end[t]) + if (iend - istart + 1 > 0) { + numObs <- sum(x[t, istart:iend]) if (numObs < 0) { print("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") stop("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") } ProbOccAndCount <- ProbOccNextTime * - exp(sum(dbinom(x[t,start[t]:end[t]], + exp(sum(dbinom(x[t, istart:iend], size = 1, prob = p[t], log = 1))) ProbUnoccAndCount <- (1 - ProbOccNextTime) * (numObs == 0) ProbCount <- ProbOccAndCount + ProbUnoccAndCount @@ -721,7 +750,7 @@ dDynOcc_ssv <- nimbleFunction( if (log) return(ll) else return(exp(ll)) returnType(double(0)) - } + }, buildDerivs = list(run = list(ignore = c('t', 'istart', 'iend'))) ) @@ -764,10 +793,10 @@ rDynOcc_vsv <- nimbleFunction( start = double(1), end = double(1)) { occupied <- rbinom(1, 1, init) - val <- matrix(-1, nrow = dim(p)[1], ncol = max(end)) + val <- matrix(-1, nrow = length(p), ncol = max(end)) val[1, start[1]:end[1]] <- occupied * rbinom(end[1] - start[1] + 1, 1, p[1]) - for (t in 2:dim(p)[1]) { + for (t in 2:length(p)) { if (occupied == 1) { occupied <- rbinom(1, 1, probPersist[t - 1]) } else { @@ -792,10 +821,10 @@ 
rDynOcc_svv <- nimbleFunction( start = double(1), end = double(1)) { occupied <- rbinom(1, 1, init) - val <- matrix(-1, nrow = dim(p)[1], ncol = max(end)) + val <- matrix(-1, nrow = length(p), ncol = max(end)) val[1, start[1]:end[1]] <- occupied * rbinom(end[1] - start[1] + 1, 1, p[1]) - for (t in 2:dim(p)[1]) { + for (t in 2:length(p)) { if (occupied == 1) { occupied <- rbinom(1, 1, probPersist) } else { @@ -820,10 +849,10 @@ rDynOcc_ssv <- nimbleFunction( start = double(1), end = double(1)) { occupied <- rbinom(1, 1, init) - val <- matrix(-1, nrow = dim(p)[1], ncol = max(end)) + val <- matrix(-1, nrow = length(p), ncol = max(end)) val[1, start[1]:end[1]] <- occupied * rbinom(end[1] - start[1] + 1, 1, p[1]) - for (t in 2:dim(p)[1]) { + for (t in 2:length(p)) { if (occupied == 1) { occupied <- rbinom(1, 1, probPersist) } else { @@ -851,7 +880,7 @@ dDynOcc_vvs <- nimbleFunction( p = double(), start = double(1), end = double(1), - log = double(0, default = 0)) { + log = integer(0, default = 0)) { if (length(probPersist) < dim(x)[1] - 1) stop("Length of probPersist vector must be at least length(x) - 1.") if (length(probColonize) < dim(x)[1] - 1) stop("Length of probColonize vector must be at least length(x) - 1.") @@ -861,14 +890,17 @@ dDynOcc_vvs <- nimbleFunction( nyears <- dim(x)[1] if (nyears >= 1) { for (t in 1:nyears) { - if (end[t] - start[t] + 1 > 0) { - numObs <- sum(x[t,start[t]:end[t]]) + istart <- ADbreak(start[t]) + iend <- ADbreak(end[t]) + + if (iend - istart + 1 > 0) { + numObs <- sum(x[t, istart:iend]) if (numObs < 0) { print("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") stop("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") } ProbOccAndCount <- ProbOccNextTime * - exp(sum(dbinom(x[t,start[t]:end[t]], + exp(sum(dbinom(x[t,istart:iend], size = 1, prob = p, log = 1))) ProbUnoccAndCount <- (1 - ProbOccNextTime) * (numObs == 0) ProbCount <- ProbOccAndCount + ProbUnoccAndCount @@ -889,7 +921,7 
@@ dDynOcc_vvs <- nimbleFunction( if (log) return(ll) else return(exp(ll)) returnType(double(0)) - } + }, buildDerivs = list(run = list(ignore = c('t', 'istart', 'iend'))) ) #' @rdname dDynOcc @@ -904,7 +936,7 @@ dDynOcc_vss <- nimbleFunction( p = double(), start = double(1), end = double(1), - log = double(0, default = 0)) { + log = integer(0, default = 0)) { if (length(probPersist) < dim(x)[1] - 1) stop("Length of probPersist vector must be at least length(x) - 1.") @@ -914,14 +946,16 @@ dDynOcc_vss <- nimbleFunction( nyears <- dim(x)[1] if (nyears >= 1) { for (t in 1:nyears) { - if (end[t] - start[t] + 1 > 0) { - numObs <- sum(x[t,start[t]:end[t]]) + istart <- ADbreak(start[t]) + iend <- ADbreak(end[t]) + if (iend - istart + 1 > 0) { + numObs <- sum(x[t,istart:iend]) if (numObs < 0) { print("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") stop("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") } ProbOccAndCount <- ProbOccNextTime * - exp(sum(dbinom(x[t,start[t]:end[t]], + exp(sum(dbinom(x[t,istart:iend], size = 1, prob = p, log = 1))) ProbUnoccAndCount <- (1 - ProbOccNextTime) * (numObs == 0) ProbCount <- ProbOccAndCount + ProbUnoccAndCount @@ -942,7 +976,7 @@ dDynOcc_vss <- nimbleFunction( if (log) return(ll) else return(exp(ll)) returnType(double(0)) - } + }, buildDerivs = list(run = list(ignore = c('t', 'istart', 'iend'))) ) #' @rdname dDynOcc @@ -957,7 +991,7 @@ dDynOcc_svs <- nimbleFunction( p = double(), start = double(1), end = double(1), - log = double(0, default = 0)) { + log = integer(0, default = 0)) { if (length(probColonize) < dim(x)[1] - 1) stop("Length of probColonize vector must be at least length(x) - 1.") ## x is a year by rep matix @@ -966,14 +1000,16 @@ dDynOcc_svs <- nimbleFunction( nyears <- dim(x)[1] if (nyears >= 1) { for (t in 1:nyears) { - if (end[t] - start[t] + 1 > 0) { - numObs <- sum(x[t,start[t]:end[t]]) + istart <- ADbreak(start[t]) + iend <- ADbreak(end[t]) + if (iend - 
istart + 1 > 0) { + numObs <- sum(x[t, istart:iend]) if (numObs < 0) { print("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") stop("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") } ProbOccAndCount <- ProbOccNextTime * - exp(sum(dbinom(x[t,start[t]:end[t]], + exp(sum(dbinom(x[t,istart:iend], size = 1, prob = p, log = 1))) ProbUnoccAndCount <- (1 - ProbOccNextTime) * (numObs == 0) ProbCount <- ProbOccAndCount + ProbUnoccAndCount @@ -994,7 +1030,7 @@ dDynOcc_svs <- nimbleFunction( if (log) return(ll) else return(exp(ll)) returnType(double(0)) - } + }, buildDerivs = list(run = list(ignore = c('t', 'istart', 'iend'))) ) #' @rdname dDynOcc @@ -1007,9 +1043,9 @@ dDynOcc_sss <- nimbleFunction( probPersist = double(), probColonize = double(), p = double(), - start = double(1), + start = double(1), # These end up being a problem end = double(1), - log = double(0, default = 0)) { + log = integer(0, default = 0)) { ## x is a year by rep matix ProbOccNextTime <- init @@ -1017,14 +1053,16 @@ dDynOcc_sss <- nimbleFunction( nyears <- dim(x)[1] if (nyears >= 1) { for (t in 1:nyears) { - if (end[t] - start[t] + 1 > 0) { - numObs <- sum(x[t,start[t]:end[t]]) + istart <- ADbreak(start[t]) + iend <- ADbreak(end[t]) + if (iend - istart + 1 > 0) { + numObs <- sum(x[t,istart:iend]) if (numObs < 0) { print("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") stop("Error in dDynamicOccupancy: numObs < 0 but number of obs in start/end > 0\n") } ProbOccAndCount <- ProbOccNextTime * - exp(sum(dbinom(x[t,start[t]:end[t]], + exp(sum(dbinom(x[t,istart:iend], size = 1, prob = p, log = 1))) ProbUnoccAndCount <- (1 - ProbOccNextTime) * (numObs == 0) ProbCount <- ProbOccAndCount + ProbUnoccAndCount @@ -1045,10 +1083,9 @@ dDynOcc_sss <- nimbleFunction( if (log) return(ll) else return(exp(ll)) returnType(double(0)) - } + }, buildDerivs = list(run = list(ignore = c('t', 'istart', 'iend'))) ) - #' @rdname dDynOcc #' 
@export rDynOcc_vvs <- nimbleFunction( diff --git a/R/dHMM.R b/R/dHMM.R index 2c6afc8..99defa6 100644 --- a/R/dHMM.R +++ b/R/dHMM.R @@ -109,6 +109,19 @@ #' \code{observedStates[1:T] ~ dHMMo(initStates[1:S], observationProbs[1:S, #' 1:O, 1:T], transitionProbs[1:S, 1:S], 1, T)} #' +#' @section Notes for use with automatic differentiation: +#' +#' The \code{dHMM[o]} distributions should work for models and algorithms that +#' use nimble's automatic differentiation (AD) system. In that system, some +#' kinds of values are "baked in" (cannot be changed) to the AD calculations +#' from the first call, unless and until the AD calculations are reset. For the +#' \code{dHMM[o]} distributions, the sizes of the inputs and the data (\code{x}) +#' values themselves are baked in. These can be different for different +#' iterations through a for loop (or nimble model declarations with different +#' indices, for example), but the sizes and data values for each specific +#' iteration will be "baked in" after the first call. \bold{In other words, it +#' is assumed that \code{x} are data and are not going to change.} +#' #' @return For \code{dHMM} and \code{dHMMo}: the probability (or likelihood) or #' log probability of observation vector \code{x}. 
#' @@ -172,18 +185,19 @@ dHMM <- nimbleFunction( init = double(1), probObs = double(2), probTrans = double(2), - len = double(0, default = 0),## length of x (needed as a separate param for rDHMM) - checkRowSums = double(0, default = 1), + len = integer(0, default = 0),## length of x (needed as a separate param for rDHMM) + checkRowSums = integer(0, default = 1), log = integer(0, default = 0)) { if (length(x) != len) stop("In dHMM: Argument len must be length of x or 0.") if (dim(probObs)[1] != dim(probTrans)[1]) stop("In dHMM: Length of dimension 1 in probObs must equal length of dimension 1 in probTrans.") if (dim(probTrans)[1] != dim(probTrans)[2]) stop("In dHMM: probTrans must be a square matrix.") if (abs(sum(init) - 1) > 1e-6) stop("In dHMM: Initial probabilities must sum to 1.") - if (checkRowSums) { + if (checkRowSums) { ## For AD, the checking will only be done in the taping call, once. transCheckPasses <- TRUE for (i in 1:dim(probTrans)[1]) { - thisCheckSum <- sum(probTrans[i,]) + thisCheckSumTemp <- sum(probTrans[i,]) + thisCheckSum <- ADbreak(thisCheckSumTemp) if (abs(thisCheckSum - 1) > 1e-6) { ## Compilation doesn't support more than a simple string for stop() ## so we provide more detail using a print(). @@ -193,7 +207,8 @@ dHMM <- nimbleFunction( } obsCheckPasses <- TRUE for (i in 1:dim(probObs)[1]) { - thisCheckSum <- sum(probObs[i,]) + thisCheckSumTemp <- sum(probObs[i,]) + thisCheckSum <- ADbreak(thisCheckSumTemp) if (abs(thisCheckSum - 1) > 1e-6) { print("In dHMM: Problem with sum(probObs[i,]) with i = ", i, ". 
The sum should be 1 but is ", thisCheckSum) obsCheckPasses <- FALSE @@ -211,16 +226,17 @@ dHMM <- nimbleFunction( logL <- 0 nObsClasses <- dim(probObs)[2] for (t in 1:len) { - if (x[t] > nObsClasses | x[t] < 1) stop("In dHMM: Invalid value of x[t].") - Zpi <- probObs[, x[t]] * pi # Vector of P(state) * P(observation class x[t] | state) - sumZpi <- sum(Zpi) # Total P(observed as class x[t]) - logL <- logL + log(sumZpi) # Accumulate log probabilities through time - if (t != len) pi <- ((Zpi %*% probTrans) / sumZpi)[1, ] # State probabilities at t+1 + xt <- ADbreak(x[t]) + if (xt > nObsClasses | xt < 1) stop("In dHMM: Invalid value of x[t].") + Zpi <- probObs[, xt] * pi + sumZpi <- sum(Zpi) + logL <- logL + log(sumZpi) + if (t != len) pi <- ((Zpi %*% probTrans) / sumZpi)[1, ] } returnType(double()) if (log) return(logL) return(exp(logL)) - } + }, buildDerivs = list(run = list(ignore = c('i', 't', 'xt', 'thisCheckSum'))) ) #' @export @@ -230,8 +246,8 @@ dHMMo <- nimbleFunction( init = double(1),## probObs = double(3), probTrans = double(2), - len = double(0, default = 0),## length of x (needed as a separate param for rDHMM) - checkRowSums = double(0, default = 1), + len = integer(0, default = 0),## length of x (needed as a separate param for rDHMM) + checkRowSums = integer(0, default = 1), log = integer(0, default = 0)) { if (length(x) != len) stop("In dHMMo: Argument len must be length of x or 0.") if (dim(probObs)[1] != dim(probTrans)[1]) stop("In dHMMo: In dHMM: Length of dimension 1 in probObs must equal length of dimension 1 in probTrans.") @@ -242,10 +258,14 @@ dHMMo <- nimbleFunction( } if (abs(sum(init) - 1) > 1e-6) stop("In dHMMo: Initial probabilities must sum to 1.") + ## declare(i, integer()) + ## declare(k, integer()) + if (checkRowSums) { transCheckPasses <- TRUE for (i in 1:dim(probTrans)[1]) { - thisCheckSum <- sum(probTrans[i,]) + thisCheckSumTemp <- sum(probTrans[i,]) + thisCheckSum <- ADbreak(thisCheckSumTemp) if (abs(thisCheckSum - 1) > 1e-6) { ## 
Compilation doesn't support more than a simple string for stop() ## so we provide more detail using a print(). @@ -254,9 +274,17 @@ dHMMo <- nimbleFunction( } } obsCheckPasses <- TRUE + + # declare(probObs_dim1, integer()) + # declare(probObs_dim3, integer()) + + # probObs_dim1 <- dim(probObs)[1] + # probObs_dim3 <- dim(probObs)[3] + for (i in 1:dim(probObs)[1]) { for (k in 1:dim(probObs)[3]) { - thisCheckSum <- sum(probObs[i,,k]) + thisCheckSumTemp <- sum(probObs[i,,k]) + thisCheckSum <- ADbreak(thisCheckSumTemp) if (abs(thisCheckSum - 1) > 1e-6) { print("In dHMMo: Problem with sum(probObs[i,,k]) with i = ", i, " k = " , k, ". The sum should be 1 but is ", thisCheckSum) obsCheckPasses <- FALSE @@ -274,9 +302,12 @@ dHMMo <- nimbleFunction( pi <- init # State probabilities at time t=1 logL <- 0 nObsClasses <- dim(probObs)[2] + declare(t, integer()) for (t in 1:len) { - if (x[t] > nObsClasses | x[t] < 1) stop("In dHMMo: Invalid value of x[t].") - Zpi <- probObs[,x[t],t] * pi # Vector of P(state) * P(observation class x[t] | state) + xt <- ADbreak(x[t]) + if (xt > nObsClasses | xt < 1) stop("In dHMMo: Invalid value of x[t].") + thisProbObs <- probObs[,xt,] + Zpi <- thisProbObs[,t] * pi # Vector of P(state) * P(observation class x[t] | state) sumZpi <- sum(Zpi) # Total P(observed as class x[t]) logL <- logL + log(sumZpi) # Accumulate log probabilities through timeƍ if (t != len) pi <- ((Zpi %*% probTrans) / sumZpi)[1, ] # State probabilities at t+1 @@ -284,7 +315,7 @@ dHMMo <- nimbleFunction( returnType(double()) if (log) return(logL) return(exp(logL)) - } + }, buildDerivs = list(run = list(ignore = c('i', 'k', 't', 'xt', 'thisCheckSum'))) ) #' @export @@ -294,8 +325,8 @@ rHMM <- nimbleFunction( init = double(1), probObs = double(2), probTrans = double(2), - len = double(0, default = 0), - checkRowSums = double(0, default = 1)) { + len = integer(0, default = 0), + checkRowSums = integer(0, default = 1)) { returnType(double(1)) if (dim(probObs)[1] != dim(probTrans)[1]) 
stop("In rHMM: Number of cols in probObs must equal number of cols in probTrans.") if (dim(probTrans)[1] != dim(probTrans)[2]) stop("In rHMM: probTrans must be a square matrix.") @@ -364,8 +395,8 @@ rHMMo <- nimbleFunction( init = double(1), probObs = double(3), probTrans = double(2), - len = double(0, default = 0), - checkRowSums = double(0, default = 1)) { + len = integer(0, default = 0), + checkRowSums = integer(0, default = 1)) { returnType(double(1)) if (dim(probObs)[1] != dim(probTrans)[1]) stop("In rHMMo: Number of cols in probObs must equal number of cols in probTrans.") if (dim(probTrans)[1] != dim(probTrans)[2]) stop("In rHMMo: probTrans must be a square matrix.") diff --git a/R/dNmixture.R b/R/dNmixture.R index 3c593c6..4edaa23 100644 --- a/R/dNmixture.R +++ b/R/dNmixture.R @@ -1,13 +1,17 @@ -# dNmixture -#' N-mixture distribution for use in \code{nimble} models +#' dNmixture distribution for use in \code{nimble} models #' -#' \code{dNmixture_s} and \code{dNmixture_v} provide Poisson-Binomial mixture distributions of abundance ("N-mixture") for use in \code{nimble} models. Overdispersion alternatives are also provided. +#' \code{dNmixture_s} and \code{dNmixture_v} provide Poisson-Binomial mixture +#' distributions of abundance ("N-mixture") for use in \code{nimble} models. +#' Overdispersion alternatives using the negative binomial distribution (for +#' the abundance submodel) and the beta binomial distribution (for the detection +#' submodel) are also provided. 
#' #' @name dNmixture +#' #' @aliases dNmixture_s dNmixture_v rNmixture_s rNmixture_v dNmixture_BNB_v #' dNmixture_BNB_s dNmixture_BNB_oneObs dNmixture_BBP_v dNmixture_BBP_s #' dNmixture_BBP_oneObs dNmixture_BBNB_v dNmixture_BBNB_s -#' rNmixture_BBNB_oneObs rNmixture_BNB_v rNmixture_BNB_s rNmixture_BNB_oneObs +#' dNmixture_BBNB_oneObs rNmixture_BNB_v rNmixture_BNB_s rNmixture_BNB_oneObs #' rNmixture_BBP_v rNmixture_BBP_s rNmixture_BBP_oneObs rNmixture_BBNB_v #' rNmixture_BBNB_s rNmixture_BBNB_oneObs #' @@ -15,14 +19,14 @@ #' #' @param x vector of integer counts from a series of sampling occasions. #' @param lambda expected value of the Poisson distribution of true abundance -#' @param theta abundance overdispersion parameter required for negative binomial -#' (*NB) N-mixture models. theta is parameterized such that variance of -#' the negative binomial variable x is \code{lambda^2 * theta + lambda} -#' @param prob detection probability (scalar for \code{dNmixture_s}, vector for \code{dNmixture_v}). +#' @param theta abundance overdispersion parameter required for negative +#' binomial (*NB) N-mixture models. The negative binomial is parameterized +#' such that variance of x is \code{lambda^2 * theta + lambda} +#' @param prob detection probability (scalar for \code{dNmixture_s}, vector for +#' \code{dNmixture_v}). #' @param s detection overdispersion parameter required for beta binomial (BB*) -#' N-mixture models. s is parameterized such that variance of the beta -#' binomial variable x is \code{V(x) = N \* prob \* (1-prob) \* (N + -#' s) / (s + 1)} +#' N-mixture models. The beta binomial is parameterized such that variance of +#' x is \code{V(x) = N * prob * (1-prob) * (N + s) / (s + 1)} #' @param Nmin minimum abundance to sum over for the mixture probability. Set to #' -1 to select automatically (not available for beta binomial variations; see #' Details). 
@@ -30,10 +34,11 @@ #' -1 to select automatically (not available for beta binomial variations; see #' Details). #' @param len The length of the x vector -#' @param log TRUE or 1 to return log probability. FALSE or 0 to return probability. +#' @param log TRUE or 1 to return log probability. FALSE or 0 to return +#' probability. #' @param n number of random draws, each returning a vector of length -#' \code{len}. Currently only \code{n = 1} is supported, but the -#' argument exists for standardization of "\code{r}" functions. +#' \code{len}. Currently only \code{n = 1} is supported, but the argument +#' exists for standardization of "\code{r}" functions. #' #' @details These nimbleFunctions provide distributions that can be #' used directly in R or in \code{nimble} hierarchical models (via @@ -63,12 +68,6 @@ #' distribution with the negative binomial (NB) and the binomial (B) detection #' distribution with the beta binomial (BB). #' -#' \strong{NOTE: These variants should work but are considered to be in development. -#' Their function names, parameter names, and implementations are subject to -#' change. Use with caution while this message is present. Please contact the -#' authors on the nimble-users listserv if you have any questions. dNmixture_v -#' and dNmixture_s are \emph{not} considered to be in development.} -#' #' Binomial-negative binomial: BNB N-mixture models use a binomial distribution #' for detection and a negative binomial distribution for abundance with scalar #' overdispersion parameter \code{theta} (0-Inf). We parameterize such that the @@ -83,8 +82,8 @@ #' Beta-binomial-Poisson: BBP N-mixture uses a beta binomial distribution for #' detection probabilities and a Poisson distribution for abundance. The beta #' binomial distribution has scalar overdispersion parameter s (0-Inf). 
We -#' parameterize such that the variance of the beta binomial is \code{N \* prob -#' \* (1-prob) \* (N + s) / (s + 1)}, with greater s indicating less variance +#' parameterize such that the variance of the beta binomial is \code{N * prob +#' * (1-prob) * (N + s) / (s + 1)}, with greater s indicating less variance #' (greater-than-binomial relatedness between observations at the site) and s -> #' 0 indicating the binomial. The BBP is available in three suffixed forms: #' \code{dNmixture_BBP_v} is used if \code{prob} varies between observations, @@ -118,8 +117,9 @@ #' marginal distributions of each count, using the minimum over counts of the #' former and the maximum over counts of the latter. #' -#' The summation over N uses the efficient method given by Meehan et al. (2020). -#' See Appendix B for the algorithm. +#' The summation over N uses the efficient method given by Meehan et al. (2020, +#' see Appendix B) for the basic Poisson-Binomial case, extended for the +#' overdispersion cases in Goldstein and de Valpine (2022). #' #' These are \code{nimbleFunction}s written in the format of user-defined #' distributions for NIMBLE's extension of the BUGS model language. More @@ -161,6 +161,12 @@ #' prob[i], #' Nmin, Nmax, T)} #' +#' @section Notes for use with automatic differentiation: +#' +#' The N-mixture distributions are the only ones in \code{nimbleEcology} for which +#' one must use different versions when AD support is needed. See +#' \code{\link{dNmixtureAD}}. +#' #' @return #' For \code{dNmixture_s} and \code{dNmixture_v}: the probability (or likelihood) or log #' probability of observation vector \code{x}. @@ -177,10 +183,13 @@ #' Carlo sampling for hierarchical hidden Markov models. Environmental and #' Ecological Statistics 23:549ā€“564. DOI 10.1007/s10651-016-0353-z #' -#' Meehan, T. D., Michel, N. L., & Rue, H. (2020). Estimating Animal Abundance +#' Meehan, T. D., Michel, N. L., & Rue, H. 2020. 
Estimating Animal Abundance #' with N-Mixture Models Using the Rā€”INLA Package for R. Journal of Statistical #' Software, 95(2). https://doi.org/10.18637/jss.v095.i02 #' +#' Goldstein, B.R., and P. de Valpine. 2022. Comparing N-mixture Models and +#' GLMMs for Relative Abundance Estimation in a Citizen Science Dataset. +#' Scientific Reports 12: 12276. DOI:10.1038/s41598-022-16368-z #' #' @examples #' # Set up constants and initial values for defining the model @@ -212,57 +221,42 @@ # nimbleOptions(checkNimbleFunction = FALSE) + ##### Regular N-mixture ##### NULL #' @rdname dNmixture +#' @importFrom stats qpois #' @export dNmixture_v <- nimbleFunction( run = function(x = double(1), lambda = double(), prob = double(1), - Nmin = double(0, default = -1), - Nmax = double(0, default = -1), - len = double(), + Nmin = integer(0, default = -1), + Nmax = integer(0, default = -1), + len = integer(), log = integer(0, default = 0)) { if (length(x) != len) stop("in dNmixture_v, len must equal length(x).") if (len != length(prob)) stop("in dNmixture_v, len must equal length(prob).") - - # Lambda cannot be negative - if (lambda < 0) { - if (log) return(-Inf) - else return(0) - } - + if (lambda < 0) + if (log) return(-Inf) else return(0) ## For each x, the conditional distribution of (N - x | x) is pois(lambda * (1-p)) ## We determine the lowest N and highest N at extreme quantiles and sum over those. 
- if (Nmin == -1) { + if (Nmin == -1) Nmin <- min(x + qpois(0.00001, lambda * (1 - prob))) - } - if (Nmax == -1) { + if (Nmax == -1) Nmax <- max(x + qpois(0.99999, lambda * (1 - prob))) - } Nmin <- max( max(x), Nmin ) ## set Nmin to at least the largest x - - logProb <- -Inf - - if (Nmax > Nmin) { - numN <- Nmax - Nmin + 1 - 1 ## remember: +1 for the count, but -1 because the summation should run from N = maxN to N = minN + 1 - prods <- rep(0, numN) - for (i in (Nmin + 1):Nmax) { - prods[i - Nmin] <- prod(i/(i - x)) / i - } - - ff <- log(lambda) + sum(log(1-prob)) + log(prods) - log_fac <- nimNmixPois_logFac(numN, ff) - logProb <- dpois(Nmin, lambda, log = TRUE) + sum(dbinom(x, size = Nmin, prob = prob, log = TRUE)) + log_fac - } + logProb <- dNmixture_steps(x, lambda, Nmin, Nmax, sum(log(1-prob)), + sum(dbinom(x, size = Nmin, prob = prob, log = TRUE))) if (log) return(logProb) else return(exp(logProb)) returnType(double()) -}) +} +) NULL #' @rdname dNmixture +#' @importFrom stats qpois #' @export dNmixture_s <- nimbleFunction( run = function(x = double(1), @@ -275,41 +269,26 @@ dNmixture_s <- nimbleFunction( if (length(x) != len) stop("in dNmixture_s, len must equal length(x).") # Lambda cannot be negative - if (lambda < 0) { - if (log) return(-Inf) - else return(0) - } - + if (lambda < 0) + if (log) return(-Inf) else return(0) ## For each x, the conditional distribution of (N - x | x) is pois(lambda * (1-p)) ## We determine the lowest N and highest N at extreme quantiles and sum over those. 
- if (Nmin == -1) { + if (Nmin == -1) Nmin <- min(x + qpois(0.00001, lambda * (1 - prob))) - } - if (Nmax == -1) { + if (Nmax == -1) Nmax <- max(x + qpois(0.99999, lambda * (1 - prob))) - } Nmin <- max( max(x), Nmin ) ## set Nmin to at least the largest x - - logProb <- -Inf - - if (Nmax > Nmin) { - numN <- Nmax - Nmin + 1 - 1 ## remember: +1 for the count, but -1 because the summation should run from N = maxN to N = minN + 1 - prods <- rep(0, numN) - for (i in (Nmin + 1):Nmax) { - prods[i - Nmin] <- prod(i/(i - x)) / i - } - - ff <- log(lambda) + log(1-prob)*len + log(prods) - log_fac <- nimNmixPois_logFac(numN, ff) - logProb <- dpois(Nmin, lambda, log = TRUE) + sum(dbinom(x, size = Nmin, prob = prob, log = TRUE)) + log_fac - } + logProb <- dNmixture_steps(x, lambda, Nmin, Nmax, log(1-prob)*len, + sum(dbinom(x, size = Nmin, prob = prob, log = TRUE))) if (log) return(logProb) else return(exp(logProb)) returnType(double()) -}) + } +) NULL #' @rdname dNmixture +#' @importFrom stats rpois rbinom #' @export rNmixture_v <- nimbleFunction( run = function(n = double(), @@ -322,16 +301,15 @@ rNmixture_v <- nimbleFunction( if (length(prob) != len) stop("In rNmixture_v, len must equal length(prob).") trueN <- rpois(1, lambda) ans <- numeric(len) - for (i in 1:len) { + for (i in 1:len) ans[i] <- rbinom(n = 1, size = trueN, prob = prob[i]) - } - return(ans) returnType(double(1)) }) NULL #' @rdname dNmixture +#' @importFrom stats rpois rbinom #' @export rNmixture_s <- nimbleFunction( run = function(n = double(), @@ -343,18 +321,15 @@ rNmixture_s <- nimbleFunction( if (n != 1) stop("rNmixture_v only works for n = 1") trueN <- rpois(1, lambda) ans <- numeric(len) - for (i in 1:len) { + for (i in 1:len) ans[i] <- rbinom(n = 1, size = trueN, prob = prob) - } - return(ans) returnType(double(1)) }) -##### N-mixture extensions ##### -##### dNmixture_BNB_v ##### -NULL + #' @rdname dNmixture +#' @importFrom stats qnbinom #' @export dNmixture_BNB_v <- nimbleFunction( run = function(x = 
double(1), @@ -368,20 +343,10 @@ dNmixture_BNB_v <- nimbleFunction( if (length(x) != len) stop("in dNmixture_BNB_v, len must equal length(x).") if (len != length(prob)) stop("in dNmixture_BNB_v, len must equal length(prob).") - if (theta <= 0) { - if (log) return(-Inf) - else return(0) - } - - r <- 1 / theta - pNB <- 1 / (1 + theta * lambda) - - # Lambda cannot be negative - if (lambda < 0) { - if (log) return(-Inf) - else return(0) - } - + if (theta <= 0) + if (log) return(-Inf) else return(0) + if (lambda < 0) + if (log) return(-Inf) else return(0) ## For each x, the conditional distribution of (N - x | x) is ## a negative binomial with overdispersion parameter (theta / (1 + y * theta)) ## and mean omega / ((theta / (1 + y * theta)) * (1 - omega)) where @@ -392,38 +357,23 @@ dNmixture_BNB_v <- nimbleFunction( lambda_cond <- omega / (theta_cond * (1 - omega)) r_cond <- 1 / theta_cond pNB_cond <- 1 / (1 + theta_cond * lambda_cond) - if (Nmin == -1) { + if (Nmin == -1) Nmin <- min(x + qnbinom(0.00001, size = r_cond, prob = pNB_cond)) - } - if (Nmax == -1) { + if (Nmax == -1) Nmax <- max(x + qnbinom(0.99999, size = r_cond, prob = pNB_cond)) - } - Nmin <- max( max(x), Nmin ) ## set Nmin to at least the largest x - - logProb <- -Inf - - if (Nmax > Nmin) { - numN <- Nmax - Nmin + 1 - 1 ## remember: +1 for the count, but -1 because the summation should run from N = maxN to N = minN + 1 - prods <- rep(0, numN) - for (i in (Nmin + 1):Nmax) { - prods[i - Nmin] <- (i + r - 1) * prod(i/(i - x)) / i - } - - ff <- log(1 - pNB) + sum(log(1-prob)) + log(prods) - log_fac <- nimNmixPois_logFac(numN, ff) - logProb <- dnbinom(Nmin, size = r, prob = pNB, log = TRUE) + - sum(dbinom(x, size = Nmin, prob = prob, log = TRUE)) + - log_fac - } + logProb <- dNmixture_BNB_steps(x,lambda,theta,Nmin,Nmax,sum(log(1-prob)), + sum(dbinom(x, size = Nmin, prob = prob, log = TRUE))) if (log) return(logProb) else return(exp(logProb)) returnType(double()) - }) + } +) ##### dNmixture_BNB_s ##### NULL #' 
@rdname dNmixture +#' @importFrom stats qnbinom #' @export dNmixture_BNB_s <- nimbleFunction( run = function(x = double(1), @@ -435,58 +385,28 @@ dNmixture_BNB_s <- nimbleFunction( len = double(), log = integer(0, default = 0)) { if (length(x) != len) stop("in dNmixture_BNB_s, len must equal length(x).") - - if (theta <= 0) { - if (log) return(-Inf) - else return(0) - } - - r <- 1 / theta - pNB <- 1 / (1 + theta * lambda) - - # Lambda cannot be negative - if (lambda < 0) { - if (log) return(-Inf) - else return(0) - } - - ## For each x, the conditional distribution of (N - x | x) is - ## a negative binomial with overdispersion parameter (theta / (1 + y * theta)) - ## and mean omega / ((theta / (1 + y * theta)) * (1 - omega)) where - ## omega = (1 - p) * (theta * lambda / (1 + theta * lambda)) - ## We determine the lowest N and highest N at extreme quantiles and sum over those. + if (theta <= 0) + if (log) return(-Inf) else return(0) + if (lambda < 0) + if (log) return(-Inf) else return(0) + ## See above for comments theta_cond <- theta / (1 + x * theta) omega <- (1 - prob) * (theta * lambda / (1 + theta * lambda)) lambda_cond <- omega / (theta_cond * (1 - omega)) r_cond <- 1 / theta_cond pNB_cond <- 1 / (1 + theta_cond * lambda_cond) - if (Nmin == -1) { + if (Nmin == -1) Nmin <- min(x + qnbinom(0.00001, size = r_cond, prob = pNB_cond)) - } - if (Nmax == -1) { + if (Nmax == -1) Nmax <- max(x + qnbinom(0.99999, size = r_cond, prob = pNB_cond)) - } Nmin <- max( max(x), Nmin ) ## set Nmin to at least the largest x - - logProb <- -Inf - - if (Nmax > Nmin) { - numN <- Nmax - Nmin + 1 - 1 ## remember: +1 for the count, but -1 because the summation should run from N = maxN to N = minN + 1 - prods <- rep(0, numN) - for (i in (Nmin + 1):Nmax) { - prods[i - Nmin] <- (i + r - 1) * prod(i/(i - x)) / i - } - - ff <- log(1 - pNB) + len * log(1-prob) + log(prods) - log_fac <- nimNmixPois_logFac(numN, ff) - logProb <- dnbinom(Nmin, size = r, prob = pNB, log = TRUE) + - sum(dbinom(x, 
size = Nmin, prob = prob, log = TRUE)) + - log_fac - } + logProb <- dNmixture_BNB_steps(x,lambda,theta,Nmin,Nmax,len*log(1-prob), + sum(dbinom(x, size = Nmin, prob = prob, log = TRUE))) if (log) return(logProb) else return(exp(logProb)) returnType(double()) - }) + } +) ##### dNmixture_BNB_oneObs ##### NULL @@ -499,62 +419,13 @@ dNmixture_BNB_oneObs <- nimbleFunction( prob = double(), Nmin = double(0, default = -1), Nmax = double(0, default = -1), - len = double(), log = integer(0, default = 0)) { - if (theta <= 0) { - if (log) return(-Inf) - else return(0) - } - - r <- 1 / theta - pNB <- 1 / (1 + theta * lambda) - - # Lambda cannot be negative - if (lambda < 0) { - if (log) return(-Inf) - else return(0) - } - - ## For each x, the conditional distribution of (N - x | x) is - ## a negative binomial with overdispersion parameter (theta / (1 + y * theta)) - ## and mean omega / ((theta / (1 + y * theta)) * (1 - omega)) where - ## omega = (1 - p) * (theta * lambda / (1 + theta * lambda)) - ## We determine the lowest N and highest N at extreme quantiles and sum over those. 
- theta_cond <- theta / (1 + x * theta) - omega <- (1 - prob) * (theta * lambda / (1 + theta * lambda)) - lambda_cond <- omega / (theta_cond * (1 - omega)) - r_cond <- 1 / theta_cond - pNB_cond <- 1 / (1 + theta_cond * lambda_cond) - if (Nmin == -1) { - Nmin <- x + qnbinom(0.00001, size = r_cond, prob = pNB_cond) - } - if (Nmax == -1) { - Nmax <- x + qnbinom(0.99999, size = r_cond, prob = pNB_cond) - } - Nmin <- max(c(x, Nmin)) ## set Nmin to at least the largest x - - logProb <- -Inf - - if (Nmax > Nmin) { - numN <- Nmax - Nmin + 1 - 1 ## remember: +1 for the count, but -1 because the summation should run from N = maxN to N = minN + 1 - prods <- rep(0, numN) - for (i in (Nmin + 1):Nmax) { - prods[i - Nmin] <- (i + r - 1) / (i - x) - } - - ff <- log(1 - pNB) + log(1-prob) + log(prods) - log_fac <- nimNmixPois_logFac(numN, ff) - logProb <- dnbinom(Nmin, size = r, prob = pNB, log = TRUE) + - dbinom(x, size = Nmin, prob = prob, log = TRUE) + - log_fac - } - if (log) return(logProb) - else return(exp(logProb)) + xvec <- numeric(value = x, length=1) + return(dNmixture_BNB_s(xvec,lambda,theta,prob,Nmin,Nmax,1,log)) returnType(double()) - }) + } +) -##### dNmixture_BBP_v ##### -NULL #' @rdname dNmixture #' @export dNmixture_BBP_v <- nimbleFunction( @@ -568,50 +439,25 @@ dNmixture_BBP_v <- nimbleFunction( log = integer(0, default = 0)) { if (length(x) != len) stop("in dNmixture_BBP_v, len must equal length(x).") if (len != length(prob)) stop("in dNmixture_BBP_v, len must equal length(prob).") - - if (s <= 0) { - if (log) return(-Inf) - else return(0) - } - + if (s <= 0) + if (log) return(-Inf) else return(0) alpha <- prob * s beta <- s - prob * s - - # Lambda cannot be negative - if (lambda < 0) { - if (log) return(-Inf) - else return(0) - } - + if (lambda < 0) + if (log) return(-Inf) else return(0) ## For beta-binomial N-mixtures , the conditional distribution of (N - x | ## x) doesn't have a nice closed-form expression. 
if (Nmin == -1 | Nmax == -1) { stop("Dynamic choice of Nmin/Nmax is not supported for beta binomial N-mixtures.") } Nmin <- max( max(x), Nmin ) ## set Nmin to at least the largest x - - logProb <- -Inf - - if (Nmax > Nmin) { - numN <- Nmax - Nmin + 1 - 1 ## remember: +1 for the count, but -1 because the summation should run from N = maxN to N = minN + 1 - prods <- rep(0, numN) - - for (i in (Nmin + 1):Nmax) { - # prods[i - Nmin] <- prod(i * (i - 1 + beta - x) / ((i - x) * (alpha + beta + i - 1))) / i - prods[i - Nmin] <- prod(i * (i - 1 + beta - x) / ((i - x) * (alpha + beta + i - 1))) * (lambda / i) - } - - - ff <- log(prods) - log_fac <- nimNmixPois_logFac(numN, ff) - logProb <- dpois(Nmin, lambda, log = TRUE) + - dBetaBinom(x, Nmin, alpha, beta, log = TRUE) + - log_fac - } + logProb <- dNmixture_BBP_steps(x, beta-x, lambda, s, Nmin, Nmax, + dBetaBinom_v(x, Nmin, alpha, beta, len, log = TRUE)) if (log) return(logProb) else return(exp(logProb)) returnType(double()) - }) + } +) ##### dNmixture_BBP_s ##### NULL @@ -627,50 +473,25 @@ dNmixture_BBP_s <- nimbleFunction( len = double(), log = integer(0, default = 0)) { if (length(x) != len) stop("in dNmixture_BBP_s, len must equal length(x).") - - if (s <= 0) { - if (log) return(-Inf) - else return(0) - } - + if (s <= 0) + if (log) return(-Inf) else return(0) alpha <- prob * s beta <- s - prob * s - - # Lambda cannot be negative - if (lambda < 0) { - if (log) return(-Inf) - else return(0) - } - + if (lambda < 0) + if (log) return(-Inf) else return(0) ## For beta-binomial N-mixtures , the conditional distribution of (N - x | ## x) doesn't have a nice closed-form expression. 
if (Nmin == -1 | Nmax == -1) { stop("Dynamic choice of Nmin/Nmax is not supported for beta binomial N-mixtures.") } Nmin <- max( max(x), Nmin ) ## set Nmin to at least the largest x - - logProb <- -Inf - - if (Nmax > Nmin) { - numN <- Nmax - Nmin + 1 - 1 ## remember: +1 for the count, but -1 because the summation should run from N = maxN to N = minN + 1 - prods <- rep(0, numN) - - for (i in (Nmin + 1):Nmax) { - # prods[i - Nmin] <- prod(i * (i - 1 + beta - x) / ((i - x) * (alpha + beta + i - 1))) / i - prods[i - Nmin] <- prod(i * (i - 1 + beta - x) / ((i - x) * (alpha + beta + i - 1))) * (lambda / i) - } - - - ff <- log(prods) - log_fac <- nimNmixPois_logFac(numN, ff) - logProb <- dpois(Nmin, lambda, log = TRUE) + - dBetaBinom(x, Nmin, rep(alpha, len), rep(beta, len), log = TRUE) + - log_fac - } + logProb <- dNmixture_BBP_steps(x, beta-x, lambda, s, Nmin, Nmax, + dBetaBinom_s(x, Nmin, alpha, beta, len, log = TRUE)) if (log) return(logProb) else return(exp(logProb)) returnType(double()) - }) + } +) ##### dNmixture_BBP_oneObs ##### NULL @@ -683,55 +504,13 @@ dNmixture_BBP_oneObs <- nimbleFunction( s = double(), Nmin = double(0, default = -1), Nmax = double(0, default = -1), - len = double(), log = integer(0, default = 0)) { - if (s <= 0) { - if (log) return(-Inf) - else return(0) - } - - alpha <- prob * s - beta <- s - prob * s - - # Lambda cannot be negative - if (lambda < 0) { - if (log) return(-Inf) - else return(0) - } - - ## For beta-binomial N-mixtures , the conditional distribution of (N - x | - ## x) doesn't have a nice closed-form expression. 
- if (Nmin == -1 | Nmax == -1) { - stop("Dynamic choice of Nmin/Nmax is not supported for beta binomial N-mixtures.") - } - Nmin <- x - - logProb <- -Inf - - if (Nmax > Nmin) { - numN <- Nmax - Nmin + 1 - 1 ## remember: +1 for the count, but -1 because the summation should run from N = maxN to N = minN + 1 - prods <- rep(0, numN) - - for (i in (Nmin + 1):Nmax) { - # prods[i - Nmin] <- prod(i * (i - 1 + beta - x) / ((i - x) * (alpha + beta + i - 1))) / i - prods[i - Nmin] <- i * (i - 1 + beta - x) / ((i - x) * (alpha + beta + i - 1)) * (lambda / i) - } - - - ff <- log(prods) - log_fac <- nimNmixPois_logFac(numN, ff) - logProb <- dpois(Nmin, lambda, log = TRUE) + - dBetaBinom_One(x, Nmin, alpha, beta, log = TRUE) + - log_fac - } - if (log) return(logProb) - else return(exp(logProb)) + xvec <- numeric(value = x, length = 1) + return(dNmixture_BBP_s(xvec,lambda,prob,s,Nmin,Nmax,1,log)) returnType(double()) - }) - + } +) -##### dNmixture_BBNB_v ##### -NULL #' @rdname dNmixture #' @export dNmixture_BBNB_v <- nimbleFunction( @@ -746,58 +525,26 @@ dNmixture_BBNB_v <- nimbleFunction( log = integer(0, default = 0)) { if (length(x) != len) stop("in dNmixture_BBNB_v, len must equal length(x).") if (len != length(prob)) stop("in dNmixture_BBNB_v, len must equal length(prob).") - - if (s <= 0) { - if (log) return(-Inf) - else return(0) - } - if (theta <= 0) { - if (log) return(-Inf) - else return(0) - } - - r <- 1 / theta - pNB <- 1 / (1 + theta * lambda) - + if (s <= 0) + if (log) return(-Inf) else return(0) + if (theta <= 0) + if (log) return(-Inf) else return(0) alpha <- prob * s beta <- s - prob * s - - # Lambda cannot be negative - if (lambda < 0) { - if (log) return(-Inf) - else return(0) - } - - ## For beta-binomial N-mixtures , the conditional distribution of (N - x | - ## x) doesn't have a nice closed-form expression. 
+ if (lambda < 0) + if (log) return(-Inf) else return(0) + ## see comments above if (Nmin == -1 | Nmax == -1) { stop("Dynamic choice of Nmin/Nmax is not supported for beta binomial N-mixtures.") } Nmin <- max( max(x), Nmin ) ## set Nmin to at least the largest x - - logProb <- -Inf - - if (Nmax > Nmin) { - numN <- Nmax - Nmin + 1 - 1 ## remember: +1 for the count, but -1 because the summation should run from N = maxN to N = minN + 1 - prods <- rep(0, numN) - - for (i in (Nmin + 1):Nmax) { - # prods[i - Nmin] <- prod(i * (i - 1 + beta - x) / ((i - x) * (alpha + beta + i - 1))) / i - prods[i - Nmin] <- prod(i * (i - 1 + beta - x) / ((i - x) * (alpha + beta + i - 1))) * - ((1 - pNB) * (i + r - 1) / i) - } - - - ff <- log(prods) - log_fac <- nimNmixPois_logFac(numN, ff) - logProb <- dnbinom(Nmin, size = r, prob = pNB, log = TRUE) + - dBetaBinom(x, Nmin, alpha, beta, log = TRUE) + - log_fac - } + logProb <- dNmixture_BBNB_steps(x, beta-x,lambda,theta,s,Nmin,Nmax, + dBetaBinom_v(x, Nmin, alpha, beta, len, log = TRUE)) if (log) return(logProb) else return(exp(logProb)) returnType(double()) - }) + } +) ##### dNmixture_BBNB_s ##### NULL @@ -814,58 +561,28 @@ dNmixture_BBNB_s <- nimbleFunction( len = double(), log = integer(0, default = 0)) { if (length(x) != len) stop("in dNmixture_BBNB_s, len must equal length(x).") - - if (s <= 0) { - if (log) return(-Inf) - else return(0) - } - if (theta <= 0) { - if (log) return(-Inf) - else return(0) - } - + if (s <= 0) + if (log) return(-Inf) else return(0) + if (theta <= 0) + if (log) return(-Inf) else return(0) r <- 1 / theta pNB <- 1 / (1 + theta * lambda) - alpha <- prob * s beta <- s - prob * s - - # Lambda cannot be negative - if (lambda < 0) { - if (log) return(-Inf) - else return(0) - } - - ## For beta-binomial N-mixtures , the conditional distribution of (N - x | - ## x) doesn't have a nice closed-form expression. 
+ if (lambda < 0) + if (log) return(-Inf) else return(0) + ## See comments above if (Nmin == -1 | Nmax == -1) { stop("Dynamic choice of Nmin/Nmax is not supported for beta binomial N-mixtures.") } Nmin <- max( max(x), Nmin ) ## set Nmin to at least the largest x - - logProb <- -Inf - - if (Nmax > Nmin) { - numN <- Nmax - Nmin + 1 - 1 ## remember: +1 for the count, but -1 because the summation should run from N = maxN to N = minN + 1 - prods <- rep(0, numN) - - for (i in (Nmin + 1):Nmax) { - # prods[i - Nmin] <- prod(i * (i - 1 + beta - x) / ((i - x) * (alpha + beta + i - 1))) / i - prods[i - Nmin] <- prod(i * (i - 1 + beta - x) / ((i - x) * (alpha + beta + i - 1))) * - ((1 - pNB) * (i + r - 1) / i) - } - - ff <- log(prods) - - log_fac <- nimNmixPois_logFac(numN, ff) - logProb <- dnbinom(Nmin, size = r, prob = pNB, log = TRUE) + - dBetaBinom(x, Nmin, rep(alpha, len), rep(beta, len), log = TRUE) + - log_fac - } + logProb <- dNmixture_BBNB_steps(x, beta-x,lambda,theta,s,Nmin,Nmax, + dBetaBinom_s(x, Nmin, alpha, beta, len, log = TRUE)) if (log) return(logProb) else return(exp(logProb)) returnType(double()) - }) + } +) ##### dNmixture_BBNB_oneObs ##### NULL @@ -879,66 +596,18 @@ dNmixture_BBNB_oneObs <- nimbleFunction( s = double(), Nmin = double(0, default = -1), Nmax = double(0, default = -1), - len = double(), log = integer(0, default = 0)) { - - if (s <= 0) { - if (log) return(-Inf) - else return(0) - } - if (theta <= 0) { - if (log) return(-Inf) - else return(0) - } - - r <- 1 / theta - pNB <- 1 / (1 + theta * lambda) - - alpha <- prob * s - beta <- s - prob * s - - # Lambda cannot be negative - if (lambda < 0) { - if (log) return(-Inf) - else return(0) - } - - ## For beta-binomial N-mixtures , the conditional distribution of (N - x | - ## x) doesn't have a nice closed-form expression. 
- if (Nmin == -1 | Nmax == -1) { - stop("Dynamic choice of Nmin/Nmax is not supported for beta binomial N-mixtures.") - } - if (Nmin < x) Nmin <- x - - logProb <- -Inf - - if (Nmax > Nmin) { - numN <- Nmax - Nmin + 1 - 1 ## remember: +1 for the count, but -1 because the summation should run from N = maxN to N = minN + 1 - prods <- rep(0, numN) - - for (i in (Nmin + 1):Nmax) { - # prods[i - Nmin] <- prod(i * (i - 1 + beta - x) / ((i - x) * (alpha + beta + i - 1))) / i - prods[i - Nmin] <- i * (i - 1 + beta - x) / ((i - x) * (alpha + beta + i - 1)) * - ((1 - pNB) * (i + r - 1) / i) - } - ff <- log(prods) - - log_fac <- nimNmixPois_logFac(numN, ff) - - logProb <- dnbinom(Nmin, size = r, prob = pNB, log = TRUE) + - dBetaBinom_One(x, Nmin, alpha, beta, log = TRUE) + - log_fac - } - if (log) return(logProb) - else return(exp(logProb)) + xvec <- numeric(x, length = 1) + return(dNmixture_BBNB_s(xvec, lambda, theta, prob, s, Nmin, Nmax, 1, log)) returnType(double()) - }) + } +) ##### rNmixture extensions ##### NULL #' @rdname dNmixture +#' @importFrom stats rnbinom #' @export - rNmixture_BNB_v <- nimbleFunction( run = function(n = double(), lambda = double(), @@ -963,10 +632,11 @@ rNmixture_BNB_v <- nimbleFunction( return(ans) returnType(double(1)) }) + NULL #' @rdname dNmixture +#' @importFrom stats rnbinom #' @export - rNmixture_BNB_s <- nimbleFunction( run = function(n = double(), lambda = double(), @@ -994,6 +664,7 @@ rNmixture_BNB_s <- nimbleFunction( NULL #' @rdname dNmixture +#' @importFrom stats rnbinom #' @export rNmixture_BNB_oneObs <- nimbleFunction( run = function(n = double(), @@ -1001,8 +672,7 @@ rNmixture_BNB_oneObs <- nimbleFunction( theta = double(), prob = double(), Nmin = double(0, default = -1), - Nmax = double(0, default = -1), - len = double()) { + Nmax = double(0, default = -1)) { if (n != 1) stop("rNmixture* only works for n = 1") r <- 1 / theta @@ -1018,6 +688,7 @@ rNmixture_BNB_oneObs <- nimbleFunction( NULL #' @rdname dNmixture +#' @importFrom 
stats rpois #' @export rNmixture_BBP_v <- nimbleFunction( run = function(n = double(), @@ -1034,7 +705,7 @@ rNmixture_BBP_v <- nimbleFunction( beta <- s - prob * s trueN <- rpois(1, lambda = lambda) - ans <- rBetaBinom(n = 1, N = trueN, shape1 = alpha, shape2 = beta) + ans <- rBetaBinom_v(n = 1, N = trueN, shape1 = alpha, shape2 = beta, len = len) return(ans) returnType(double(1)) @@ -1042,6 +713,7 @@ rNmixture_BBP_v <- nimbleFunction( NULL #' @rdname dNmixture +#' @importFrom stats rpois #' @export rNmixture_BBP_s <- nimbleFunction( run = function(n = double(), @@ -1057,8 +729,8 @@ rNmixture_BBP_s <- nimbleFunction( beta <- s - prob * s trueN <- rpois(1, lambda = lambda) - ans <- rBetaBinom(n = 1, N = trueN, - shape1 = rep(alpha, len), shape2 = rep(beta, len)) + ans <- rBetaBinom_s(n = 1, N = trueN, + shape1 = alpha, shape2 = beta, len = len) return(ans) returnType(double(1)) @@ -1066,6 +738,7 @@ rNmixture_BBP_s <- nimbleFunction( NULL #' @rdname dNmixture +#' @importFrom stats rpois #' @export rNmixture_BBP_oneObs <- nimbleFunction( run = function(n = double(), @@ -1073,22 +746,22 @@ rNmixture_BBP_oneObs <- nimbleFunction( prob = double(), s = double(), Nmin = double(0, default = -1), - Nmax = double(0, default = -1), - len = double()) { + Nmax = double(0, default = -1)) { if (n != 1) stop("rNmixture* only works for n = 1") alpha <- prob * s beta <- s - prob * s trueN <- rpois(1, lambda = lambda) - ans <- rBetaBinom_One(n = 1, N = trueN, shape1 = alpha, shape2 = beta) + ans <- rBetaBinom_s(n = 1, N = trueN, shape1 = alpha, shape2 = beta, len = 1) - return(ans) + return(ans[1]) returnType(double()) }) NULL #' @rdname dNmixture +#' @importFrom stats rnbinom #' @export rNmixture_BBNB_v <- nimbleFunction( run = function(n = double(), @@ -1107,7 +780,7 @@ rNmixture_BBNB_v <- nimbleFunction( p <- 1 / (1 + theta * lambda) trueN <- rnbinom(1, size = r, prob = p) - ans <- rBetaBinom(n = 1, N = trueN, shape1 = alpha, shape2 = beta) + ans <- rBetaBinom_v(n = 1, N = trueN, 
shape1 = alpha, shape2 = beta, len = len) return(ans) returnType(double(1)) @@ -1115,6 +788,7 @@ rNmixture_BBNB_v <- nimbleFunction( NULL #' @rdname dNmixture +#' @importFrom stats rnbinom #' @export rNmixture_BBNB_s <- nimbleFunction( run = function(n = double(), @@ -1133,8 +807,8 @@ rNmixture_BBNB_s <- nimbleFunction( p <- 1 / (1 + theta * lambda) trueN <- rnbinom(1, size = r, prob = p) - ans <- rBetaBinom(n = 1, N = trueN, - shape1 = rep(alpha, len), shape2 = rep(beta, len)) + ans <- rBetaBinom_s(n = 1, N = trueN, + shape1 = alpha, shape2 = beta, len = len) return(ans) returnType(double(1)) @@ -1142,6 +816,7 @@ rNmixture_BBNB_s <- nimbleFunction( NULL #' @rdname dNmixture +#' @importFrom stats rnbinom #' @export rNmixture_BBNB_oneObs <- nimbleFunction( run = function(n = double(), @@ -1150,8 +825,7 @@ rNmixture_BBNB_oneObs <- nimbleFunction( prob = double(), s = double(), Nmin = double(0, default = -1), - Nmax = double(0, default = -1), - len = double()) { + Nmax = double(0, default = -1)) { if (n != 1) stop("rNmixture* only works for n = 1") alpha <- prob * s @@ -1160,9 +834,7 @@ rNmixture_BBNB_oneObs <- nimbleFunction( p <- 1 / (1 + theta * lambda) trueN <- rnbinom(1, size = r, prob = p) - ans <- rBetaBinom_One(n = 1, N = trueN, shape1 = alpha, shape2 = beta) - return(ans) + ans <- rBetaBinom_s(n = 1, N = trueN, shape1 = alpha, shape2 = beta, len = 1) + return(ans[1]) returnType(double()) }) - -# nimbleOptions(checkNimbleFunction = TRUE) diff --git a/R/dNmixtureAD.R b/R/dNmixtureAD.R new file mode 100644 index 0000000..76e13d8 --- /dev/null +++ b/R/dNmixtureAD.R @@ -0,0 +1,541 @@ +# dNmixtureAD +#' N-mixture distributions with AD support for use in \code{nimble} models +#' +#' \code{dNmixtureAD_s} and \code{dNmixtureAD_v} provide Poisson-Binomial +#' mixture distributions of abundance ("N-mixture") for use in \code{nimble} +#' models when automatic differentiation may be needed by an algorithm. +#' Overdispersion alternatives are also provided. 
+#'
+#' @name dNmixtureAD
+#' @aliases dNmixtureAD_s dNmixtureAD_v rNmixtureAD_s rNmixtureAD_v dNmixtureAD_BNB_v
+#' dNmixtureAD_BNB_s dNmixtureAD_BNB_oneObs dNmixtureAD_BBP_v dNmixtureAD_BBP_s
+#' dNmixtureAD_BBP_oneObs dNmixtureAD_BBNB_v dNmixtureAD_BBNB_s
+#' dNmixtureAD_BBNB_oneObs rNmixtureAD_BNB_v rNmixtureAD_BNB_s rNmixtureAD_BNB_oneObs
+#' rNmixtureAD_BBP_v rNmixtureAD_BBP_s rNmixtureAD_BBP_oneObs rNmixtureAD_BBNB_v
+#' rNmixtureAD_BBNB_s rNmixtureAD_BBNB_oneObs
+#'
+#' @author Ben Goldstein, Lauren Ponisio, and Perry de Valpine
+#'
+#' @param x vector of integer counts from a series of sampling occasions.
+#' @param lambda expected value of the Poisson distribution of true abundance
+#' @param theta abundance overdispersion parameter required for negative binomial
+#' (*NB) N-mixture models. theta is parameterized such that variance of
+#' the negative binomial variable x is \code{lambda^2 * theta + lambda}
+#' @param prob detection probability (scalar for \code{dNmixtureAD_s}, vector for \code{dNmixtureAD_v}).
+#' @param s detection overdispersion parameter required for beta binomial (BB*)
+#' N-mixture models. s is parameterized such that variance of the beta
+#' binomial variable x is \code{V(x) = N * prob * (1-prob) * (N +
+#' s) / (s + 1)}
+#' @param Nmin minimum abundance to sum over for the mixture probability. Must be provided.
+#' @param Nmax maximum abundance to sum over for the mixture probability. Must be provided.
+#' @param len The length of the x vector
+#' @param log TRUE or 1 to return log probability. FALSE or 0 to return probability.
+#' @param n number of random draws, each returning a vector of length
+#' \code{len}. Currently only \code{n = 1} is supported, but the
+#' argument exists for standardization of "\code{r}" functions.
+#' +#' @details These nimbleFunctions provide distributions that can be +#' used directly in R or in \code{nimble} hierarchical models (via +#' \code{\link[nimble]{nimbleCode}} and +#' \code{\link[nimble]{nimbleModel}}). +#' +#' See \code{\link{dNmixture}} for more information about the N-mixture +#' distributions. +#' +#' The versions here can be used in models that will be used by algorithms that +#' use nimble's system for automatic differentiation (AD). The primary +#' difference is that \code{Nmin} and \code{Nmax} must be provided. There are no +#' automatic defaults for these. +#' +#' In the AD system some kinds of values are "baked in" (cannot be changed) to +#' the AD calculations from the first call, unless and until the AD calculations +#' are reset. For all variants of the \code{dNmixtureAD} distributions, the +#' sizes of the inputs as well as \code{Nmin} and \code{Nmax} are baked in. +#' These can be different for different iterations through a for loop (or nimble +#' model declarations with different indices, for example), but the sizes and +#' \code{Nmin} and \code{Nmax} values for each specific iteration will be +#' "baked in" after the first call. +#' +#' @return The probability (or likelihood) or log probability of an observation +#' vector \code{x}. 
+ +##### Regular N-mixture ##### +NULL +#' @rdname dNmixtureAD +#' @export +dNmixtureAD_v <- nimbleFunction( + run = function(x = double(1), + lambda = double(), + prob = double(1), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double(), + log = integer(0, default = 0)) { + if (length(x) != len) stop("in dNmixtureAD_v, len must equal length(x).") + if (len != length(prob)) stop("in dNmixtureAD_v, len must equal length(prob).") + if (lambda < 0) + if (log) return(-Inf) else return(0) + if ((Nmin == -1) | (Nmax == -1)) + stop("Must provide Nmin and Nmax in AD version of dNmixture distributions") + Nmin <- ADbreak(max( max(x), Nmin )) + logProb <- dNmixture_steps(x, lambda, Nmin, Nmax, sum(log(1-prob)), + sum(dbinom(x, size = Nmin, prob = prob, log = TRUE)), + usingAD=TRUE) + if (log) return(logProb) + else return(exp(logProb)) + returnType(double()) + }, buildDerivs = list(run = list()) +) + +#' @rdname dNmixtureAD +#' @export +dNmixtureAD_s <- nimbleFunction( + run = function(x = double(1), + lambda = double(), + prob = double(0), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double(), + log = integer(0, default = 0)) { + if (length(x) != len) stop("in dNmixtureAD_v, len must equal length(x).") + if (lambda < 0) + if (log) return(-Inf) else return(0) + if ((Nmin == -1) | (Nmax == -1)) + stop("Must provide Nmin and Nmax in AD version of dNmixture distributions") + Nmin <- ADbreak(max( max(x), Nmin )) + logProb <- dNmixture_steps(x, lambda, Nmin, Nmax, len*log(1-prob), + sum(dbinom(x, size = Nmin, prob = prob, log = TRUE)), + usingAD=TRUE) + if (log) return(logProb) + else return(exp(logProb)) + returnType(double()) + }, buildDerivs = list(run = list()) +) + +NULL +#' @rdname dNmixtureAD +#' @export +rNmixtureAD_v <- nimbleFunction( + run = function(n = double(), + lambda = double(), + prob = double(1), + Nmin = double(0), + Nmax = double(0), + len = double()) { + if ((Nmin == -1) | (Nmax == -1)) + stop("Must 
provide Nmin and Nmax in AD version of dNmixture distributions") + return(rNmixture_v(n,lambda,prob,Nmin,Nmax,len)) + returnType(double(1)) + }) + +#' @rdname dNmixtureAD +#' @export +rNmixtureAD_s <- nimbleFunction( + run = function(n = double(), + lambda = double(), + prob = double(0), + Nmin = double(0), + Nmax = double(0), + len = double()) { + if ((Nmin == -1) | (Nmax == -1)) + stop("Must provide Nmin and Nmax in AD version of dNmixture distributions") + return(rNmixture_s(n,lambda,prob,Nmin,Nmax,len)) + returnType(double(1)) + }) + +##### BNB cases ##### +#' @rdname dNmixtureAD +#' @export +dNmixtureAD_BNB_v <- nimbleFunction( + run = function(x = double(1), + lambda = double(), + theta = double(), + prob = double(1), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double(), + log = integer(0, default = 0)) { + if (length(x) != len) stop("in dNmixtureAD_BNB_v, len must equal length(x).") + if (len != length(prob)) stop("in dNmixtureAD_BNB_v, len must equal length(prob).") + if (theta <= 0) + if (log) return(-Inf) else return(0) + if (lambda < 0) + if (log) return(-Inf) else return(0) + if ((Nmin == -1) | (Nmax == -1)) + stop("Must provide Nmin and Nmax in AD version of dNmixture distributions") + Nmin <- ADbreak(max( max(x), Nmin )) + logProb <- dNmixture_BNB_steps(x,lambda,theta,Nmin,Nmax,sum(log(1-prob)), + sum(dbinom(x, size = Nmin, prob = prob, log = TRUE))) + if (log) return(logProb) + else return(exp(logProb)) + returnType(double()) + }, buildDerivs = list(run = list()) +) + +#' @rdname dNmixtureAD +#' @export +dNmixtureAD_BNB_s <- nimbleFunction( + run = function(x = double(1), + lambda = double(), + theta = double(), + prob = double(0), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double(), + log = integer(0, default = 0)) { + if (length(x) != len) stop("in dNmixtureAD_BNB_v, len must equal length(x).") + if (theta <= 0) + if (log) return(-Inf) else return(0) + if (lambda < 0) + if (log) 
return(-Inf) else return(0) + if ((Nmin == -1) | (Nmax == -1)) + stop("Must provide Nmin and Nmax in AD version of dNmixture distributions") + Nmin <- ADbreak(max( max(x), Nmin )) + logProb <- dNmixture_BNB_steps(x,lambda,theta,Nmin,Nmax,len*log(1-prob), + sum(dbinom(x, size = Nmin, prob = prob, log = TRUE))) + if (log) return(logProb) + else return(exp(logProb)) + returnType(double()) + }, buildDerivs = list(run = list()) +) + +#' @rdname dNmixtureAD +#' @export +dNmixtureAD_BNB_oneObs <- nimbleFunction( + run = function(x = double(), + lambda = double(), + theta = double(), + prob = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + log = integer(0, default = 0)) { + xvec <- numeric(value = x, length=1) + return(dNmixtureAD_BNB_s(xvec,lambda,theta,prob,Nmin,Nmax,1,log)) + returnType(double()) + }, buildDerivs = list(run = list()) +) + +#' @rdname dNmixtureAD +#' @export +rNmixtureAD_BNB_oneObs <- nimbleFunction( + run = function(n = double(), + lambda = double(), + theta = double(), + prob = double(0), + Nmin = double(0), + Nmax = double(0)) { + if ((Nmin == -1) | (Nmax == -1)) + stop("Must provide Nmin and Nmax in AD version of dNmixture distributions") + return(rNmixture_BNB_oneObs(n,lambda,theta,prob,Nmin,Nmax)) + returnType(double(1)) + }) + +### BBP cases ### +#' @rdname dNmixtureAD +#' @export +dNmixtureAD_BBP_v <- nimbleFunction( + run = function(x = double(1), + lambda = double(), + prob = double(1), + s = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double(), + log = integer(0, default = 0)) { + if (length(x) != len) stop("in dNmixtureAD_BBP_v, len must equal length(x).") + if (len != length(prob)) stop("in dNmixtureAD_BBP_v, len must equal length(prob).") + if (s <= 0) + if (log) return(-Inf) else return(0) + alpha <- prob * s + beta <- s - prob * s + if (lambda < 0) + if (log) return(-Inf) else return(0) + ## For beta-binomial N-mixtures , the conditional distribution of (N - x | + 
## x) doesn't have a nice closed-form expression. + if (Nmin == -1 | Nmax == -1) { + stop("Dynamic choice of Nmin/Nmax is not supported for beta binomial N-mixtures.") + } + Nmin <- ADbreak(max( max(x), Nmin )) ## set Nmin to at least the largest x + logProb <- dNmixture_BBP_steps(x, beta-x, lambda, s, Nmin, Nmax, + dBetaBinom_v(x, Nmin, alpha, beta, len = len, log = TRUE)) + if (log) return(logProb) + else return(exp(logProb)) + returnType(double()) + }, buildDerivs = list(run = list()) +) + +#' @rdname dNmixtureAD +#' @export +dNmixtureAD_BBP_s <- nimbleFunction( + run = function(x = double(1), + lambda = double(), + prob = double(), + s = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double(), + log = integer(0, default = 0)) { + if (length(x) != len) stop("in dNmixtureAD_BBP_s, len must equal length(x).") + if (s <= 0) + if (log) return(-Inf) else return(0) + alpha <- prob * s + beta <- s - prob * s + if (lambda < 0) + if (log) return(-Inf) else return(0) + if (Nmin == -1 | Nmax == -1) { + stop("Dynamic choice of Nmin/Nmax is not supported for beta binomial N-mixtures.") + } + #Clen <- 0L + #Clen <- ADbreak(len) + Nmin <- ADbreak(max( max(x), Nmin )) ## set Nmin to at least the largest x + logProb <- dNmixture_BBP_steps(x, beta-x, lambda, s, Nmin, Nmax, + dBetaBinom_s(x, Nmin, alpha, beta, len = len, log = TRUE)) + if (log) return(logProb) + else return(exp(logProb)) + returnType(double()) + }, buildDerivs = list(run=list()) +) + +#' @rdname dNmixtureAD +#' @export +dNmixtureAD_BBP_oneObs <- nimbleFunction( + run = function(x = double(), + lambda = double(), + prob = double(), + s = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + log = integer(0, default = 0)) { + xvec <- numeric(value = x, length = 1) + return(dNmixtureAD_BBP_s(xvec,lambda,prob,s,Nmin,Nmax,1,log)) + returnType(double()) + }, buildDerivs=list(run=list()) +) + +## BBNB cases ## +#' @rdname dNmixtureAD +#' @export 
+dNmixtureAD_BBNB_v <- nimbleFunction( + run = function(x = double(1), + lambda = double(), + theta = double(), + prob = double(1), + s = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double(), + log = integer(0, default = 0)) { + if (length(x) != len) stop("in dNmixtureAD_BBNB_v, len must equal length(x).") + if (len != length(prob)) stop("in dNmixtureAD_BBNB_v, len must equal length(prob).") + if (s <= 0) + if (log) return(-Inf) else return(0) + if (theta <= 0) + if (log) return(-Inf) else return(0) + alpha <- prob * s + beta <- s - prob * s + if (lambda < 0) + if (log) return(-Inf) else return(0) + ## see comments above + if (Nmin == -1 | Nmax == -1) { + stop("Dynamic choice of Nmin/Nmax is not supported for beta binomial N-mixtures.") + } + Nmin <- ADbreak(max( max(x), Nmin )) ## set Nmin to at least the largest x + logProb <- dNmixture_BBNB_steps(x, beta-x,lambda,theta,s,Nmin,Nmax, + dBetaBinom_v(x, Nmin, alpha, beta, len = len, log = TRUE)) + if (log) return(logProb) + else return(exp(logProb)) + returnType(double()) + }, buildDerivs=list(run=list()) +) + +#' @rdname dNmixtureAD +#' @export +dNmixtureAD_BBNB_s <- nimbleFunction( + run = function(x = double(1), + lambda = double(), + theta = double(), + prob = double(), + s = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double(), + log = integer(0, default = 0)) { + if (length(x) != len) stop("in dNmixtureAD_BBNB_s, len must equal length(x).") + if (s <= 0) + if (log) return(-Inf) else return(0) + if (theta <= 0) + if (log) return(-Inf) else return(0) + r <- 1 / theta + pNB <- 1 / (1 + theta * lambda) + alpha <- prob * s + beta <- s - prob * s + if (lambda < 0) + if (log) return(-Inf) else return(0) + ## See comments above + if (Nmin == -1 | Nmax == -1) { + stop("Dynamic choice of Nmin/Nmax is not supported for beta binomial N-mixtures.") + } +# Clen <- 0L +# Clen <- ADbreak(len) + Nmin <- ADbreak(max( max(x), Nmin )) ## set Nmin 
to at least the largest x + logProb <- dNmixture_BBNB_steps(x, beta-x,lambda,theta,s,Nmin,Nmax, + dBetaBinom_s(x, Nmin, alpha, beta, len = len, log = TRUE)) + if (log) return(logProb) + else return(exp(logProb)) + returnType(double()) + }, buildDerivs=list(run=list()) +) + +#' @rdname dNmixtureAD +#' @export +dNmixtureAD_BBNB_oneObs <- nimbleFunction( + run = function(x = double(), + lambda = double(), + theta = double(), + prob = double(), + s = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + log = integer(0, default = 0)) { + xvec <- numeric(x, length = 1) + return(dNmixtureAD_BBNB_s(xvec, lambda, theta, prob, s, Nmin, Nmax, 1, log)) + returnType(double()) + }, buildDerivs=list(run=list()) +) + +##### rNmixtureAD extensions ##### +NULL +#' @rdname dNmixtureAD +#' @export +rNmixtureAD_BNB_v <- nimbleFunction( + run = function(n = double(), + lambda = double(), + theta = double(), + prob = double(1), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double()) { + return(rNmixture_BNB_v(n,lambda,theta,prob,Nmin,Nmax,len)) + returnType(double(1)) + }) + +NULL +#' @rdname dNmixtureAD +#' @export +rNmixtureAD_BNB_s <- nimbleFunction( + run = function(n = double(), + lambda = double(), + theta = double(), + prob = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double()) { + return(rNmixture_BNB_s(n,lambda,theta,prob,Nmin,Nmax,len)) + returnType(double(1)) + }) + + +NULL +#' @rdname dNmixtureAD +#' @export +rNmixtureAD_BNB_oneObs <- nimbleFunction( + run = function(n = double(), + lambda = double(), + theta = double(), + prob = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1)) { + return(rNmixture_BNB_oneObs(n,lambda,theta,prob,Nmin,Nmax)) + returnType(double()) + }) + +NULL +#' @rdname dNmixtureAD +#' @export +rNmixtureAD_BBP_v <- nimbleFunction( + run = function(n = double(), + lambda = double(), + prob = double(1), + s = double(), + Nmin = 
double(0, default = -1), + Nmax = double(0, default = -1), + len = double()) { + return(rNmixture_BBP_v(n,lambda,prob,s,Nmin,Nmax,len)) + returnType(double(1)) + }) + +NULL +#' @rdname dNmixtureAD +#' @export +rNmixtureAD_BBP_s <- nimbleFunction( + run = function(n = double(), + lambda = double(), + prob = double(), + s = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double()) { + return(rNmixture_BBP_s(n,lambda,prob,s,Nmin,Nmax,len)) + returnType(double(1)) + }) + +NULL +#' @rdname dNmixtureAD +#' @export +rNmixtureAD_BBP_oneObs <- nimbleFunction( + run = function(n = double(), + lambda = double(), + prob = double(), + s = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1)) { + return(rNmixture_BBP_oneObs(n,lambda,prob,s,Nmin,Nmax)) + returnType(double()) + }) + +NULL +#' @rdname dNmixtureAD +#' @export +rNmixtureAD_BBNB_v <- nimbleFunction( + run = function(n = double(), + lambda = double(), + theta = double(), + prob = double(1), + s = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double()) { + return(rNmixture_BBNB_v(n,lambda,theta,prob,s,Nmin,Nmax,len)) + returnType(double(1)) + }) + +NULL +#' @rdname dNmixtureAD +#' @export +rNmixtureAD_BBNB_s <- nimbleFunction( + run = function(n = double(), + lambda = double(), + theta = double(), + prob = double(), + s = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1), + len = double()) { + return(rNmixture_BBNB_s(n,lambda,theta,prob,s,Nmin,Nmax,len)) + returnType(double(1)) + }) + +NULL +#' @rdname dNmixtureAD +#' @export +rNmixtureAD_BBNB_oneObs <- nimbleFunction( + run = function(n = double(), + lambda = double(), + theta = double(), + prob = double(), + s = double(), + Nmin = double(0, default = -1), + Nmax = double(0, default = -1)) { + return(rNmixture_BBNB_oneObs(n,lambda,theta,prob,s,Nmin,Nmax)) + returnType(double()) + }) diff --git a/R/dOcc.R b/R/dOcc.R index 9b3a895..b7d6233 
100644 --- a/R/dOcc.R +++ b/R/dOcc.R @@ -84,6 +84,18 @@ #' \code{detections[i, 1:T] ~ dOcc_v(occupancyProbability, #' detectionProbability[1:T], len = T)} #' +#' @section Notes for use with automatic differentiation: +#' +#' The \code{dOcc_*} distributions should all work for models and algorithms +#' that use nimble's automatic differentiation (AD) system. In that system, some +#' kinds of values are "baked in" (cannot be changed) to the AD calculations +#' from the first call, unless and until the AD calculations are reset. For the +#' \code{dOcc_*} distributions, the lengths of vector inputs are baked in. These +#' can be different for different iterations through a for loop (or nimble model +#' declarations with different indices, for example), but the lengths for each +#' specific iteration will be "baked in" after the first call. \bold{It is +#' safest if one can assume that \code{x} are data and are not going to change.} +#' #' @return #' #' For \code{dOcc_*}: the probability (or likelihood) or log probability of observation vector \code{x}. @@ -130,7 +142,7 @@ dOcc_s <- nimbleFunction( prob_x <- exp(logProb_x_given_occupied) * probOcc + prob_x_given_unoccupied * (1 - probOcc) if (log) return(log(prob_x)) return(prob_x) - } + }, buildDerivs = TRUE ) #' @export @@ -149,7 +161,7 @@ dOcc_v <- nimbleFunction( prob_x <- exp(logProb_x_given_occupied) * probOcc + prob_x_given_unoccupied * (1 - probOcc) if (log) return(log(prob_x)) return(prob_x) - } + }, buildDerivs = TRUE ) #' @export diff --git a/R/utils.R b/R/utils.R index f85a64a..ef511ee 100644 --- a/R/utils.R +++ b/R/utils.R @@ -1,57 +1,294 @@ -#' Helper function for fast N-mixture calculation -#' @name nimNmixPois_logFac -#' @param numN first argument for helper function nimNmixPois_logFac, -#' representing the number of indices in the truncated sum for the N-mixture. 
-#' @param ff second argument for helper function nimNmixPois_logFac, a derived -#' vector of units calculated partway through the fast N-mixture algorithm. -#' @rdname nimNmixPois_logFac -#' @details This is a helper function for the fast N-mixture calculation. It -#' runs an iterative calculation present in all N-mixture varieties. It -#' doesn't have an interpretation outside of that context. -#' @export +#' Internal helper nimbleFunctions for dNmixture distributions +#' +#' None of these functions should be called directly. +#' +#' @name dNmixture_steps +#' +#' @aliases dNmixture_BNB_steps dNmixture_BBP_steps dNmixture_BBNB_steps +#' nimNmixPois_logFac +#' +#' @param x x from dNmixture distributions +#' @param lambda lambda from dNmixture distributions +#' @param theta theta from relevant dNmixture distributions +#' @param s s from relevant dNmixture distributions +#' @param Nmin start of summation over N +#' @param Nmax end of summation over N +#' @param sum_log_one_m_prob sum(log(1-prob)) from relevant dNmixture cases +#' @param sum_log_dbinom sum(log(dbinom(...))) from relevant dNmixture cases +#' @param sum_log_dbetabinom sum(log(dBetaBinom(...))) from relevant dNmixture +#' cases +#' @param beta_m_x beta-x from relevant dNmixture cases +#' @param usingAD TRUE if called from one of the dNmixtureAD distributions +#' @param numN number of indices in the truncated sum for the N-mixture. +#' @param ff a derived vector of units calculated partway through the fast +#' N-mixture algorithm. +#' @param max_index possibly the index of the max contribution to the summation. +#' For AD cases this is set by heuristic. For non-AD cases it is -1 and will +#' be determined automatically. +#' @details These are helper functions for the N-mixture calculations. They +#' don't have an interpretation outside of that context and are not intended +#' to be called directly. 
#' @seealso \code{\link{dNmixture}} -##### nimNmixPois_logFac ##### + +#' @rdname dNmixture_steps +#' @export nimNmixPois_logFac <- nimbleFunction( - run = function(numN = double(0), - ff = double(1)) { - i <- 1 + run = function(numN = integer(0), + ff = double(1), + max_index = integer(0, default=-1)) { + fixed_max_index <- max_index != -1 + + i <- 1L sum_ff_g1 <- 0 - hit_pos <- FALSE - while(i < numN & (ff[i] > 0 | !hit_pos)) { - sum_ff_g1 <- sum_ff_g1 + ff[i] - i <- i+1 - if (ff[i] > 0) { - hit_pos <- TRUE + if(!fixed_max_index) { + if(numN == 1) { + sum_ff_g1 <- ff[1] + max_index <- 1 + } else { + hit_pos <- FALSE + while(i < numN & (ff[i] > 0 | !hit_pos)) { + sum_ff_g1 <- sum_ff_g1 + ff[i] + i <- i+1 + if (ff[i] > 0) { + hit_pos <- TRUE + } + } + max_index <- i-1 + if (ff[i] > 0 & numN != max_index + 1) { + max_index <- i + sum_ff_g1 <- sum_ff_g1 + ff[i] + } + if(max_index == 0 | !hit_pos) { + max_index <- 1 # not sure this is relevant. it's defensive. + sum_ff_g1 <- ff[1] + } + } #end numN != 1 + } else { # end !fixed_max_index + # here we are in the case that max_index was provided, which we be in an AD case + for(i in 1:max_index) { + sum_ff_g1 <- sum_ff_g1 + ff[i] } } - max_index <- i-1 - if (ff[i] > 0 & numN != max_index + 1) { - max_index <- i - sum_ff_g1 <- sum_ff_g1 + ff[i] - } - if(max_index == 0 | !hit_pos) { - max_index <- 1 # not sure this is relevant. it's defensive. 
- sum_ff_g1 <- ff[1] - } - terms <- numeric(numN + 1) terms[max_index + 1] <- 1 sumff <- sum_ff_g1 ## should be the same as sum(ff[1:max_index]) for (i in 1:max_index) { - # terms[i] <- 1 / exp(sum(ff[i:max_index])) + # terms[i] <- 1 / exp(sum(ff[i:max_index])) terms[i] <- 1 / exp(sumff) sumff <- sumff - ff[i] } - - sumff <- 0 - for (i in (max_index + 1):numN) { - # terms[i + 1] <- exp(sum(ff[(max_index + 1):i])) - sumff <- sumff + ff[i] - terms[i + 1] <- exp(sumff) + if(numN > 1) { + sumff <- 0 + for (i in (max_index + 1):numN) { + # terms[i + 1] <- exp(sum(ff[(max_index + 1):i])) + sumff <- sumff + ff[i] + terms[i + 1] <- exp(sumff) + } } log_fac <- sum_ff_g1 + log(sum(terms)) # Final factor is the largest term * (all factors / largest term) } return(log_fac) returnType(double()) - }) + }, buildDerivs = list(run = list())) + +#' @rdname dNmixture_steps +#' @importFrom stats dpois +#' @export +dNmixture_steps <- nimbleFunction( + run = function(x = double(1), + lambda = double(), + Nmin = double(), + Nmax = double(), + sum_log_one_m_prob = double(), + sum_log_dbinom = double(), + usingAD = logical(0, default=FALSE)) { + logProb <- -Inf + max_index <- -1L + NminC <- NmaxC <- 0L + NminC <- ADbreak(Nmin) + NmaxC <- ADbreak(Nmax) + logProb <- dpois(Nmin, lambda, log = TRUE) + sum_log_dbinom + if(Nmax > Nmin) { + if(usingAD) { + # We need a choice for the max_index in nimNmixPois_logFac that is not a function of parameters, + # because it will be baked into the AD tape upon first call. + # A heuristic guess is either 2*Nmin or 0.2 between Nmin and Nmax + # whichever is smaller. + # But actually summation starts at Nmin+1, so these are tweaked accordingly. + # Note that mathematically the result should work for any choice + # of max_index, and a good choice only provides + # some stability against underflows. 
+ + # for a couple of steps, max_index is on N scale, then re-used relative to Nmin + max_index <- ADbreak(min(2*NminC, + floor(NminC + 0.2*(NmaxC-NminC)))) + # Make completely sure we are at least one below the max, mean 2 below the max at this step + max_index <- ADbreak(min(max_index, + NmaxC - 2)) + # shift max_index relative to Nmin and 1-indexed + max_index <- max_index - NminC + 1 + # And make completely sure we are not at 0. Not sure that could happen, but being defensive. + if(max_index < 1) max_index <- 1 + } + numN <- 0L + numN <- NmaxC - NminC + 1 - 1 ## remember: +1 for the count, but -1 because the summation should run from N = maxN to N = minN + 1 + prods <- rep(0, numN) + for (i in (NminC + 1):NmaxC) { + prods[i - NminC] <- prod(i/(i - x)) / i + } + ff <- log(lambda) + sum_log_one_m_prob + log(prods) + log_fac <- nimNmixPois_logFac(numN, ff, max_index) + logProb <- logProb + log_fac + } + return(logProb) + returnType(double()) + }, + buildDerivs = list(run = list(ignore = c("i"))) +) + +##### N-mixture extensions ##### +##### dNmixture_BNB_v ##### +NULL +#' @rdname dNmixture_steps +#' @importFrom stats dnbinom +#' @export +dNmixture_BNB_steps <- nimbleFunction( + run = function(x = double(1), + lambda = double(), + theta = double(), + Nmin = double(), + Nmax = double(), + sum_log_one_m_prob = double(), + sum_log_dbinom = double(), + usingAD = logical(0, default=FALSE)) { + r <- 1 / theta + pNB <- 1 / (1 + theta * lambda) + logProb <- -Inf + max_index <- -1L + NminC <- NmaxC <- 0L + NminC <- ADbreak(Nmin) + NmaxC <- ADbreak(Nmax) + logProb <- dnbinom(Nmin, size = r, prob = pNB, log = TRUE) + sum_log_dbinom + if(Nmax > Nmin) { + if(usingAD) { + # see comments in basic case above + max_index <- ADbreak(min(2*NminC, + floor(NminC + 0.2*(NmaxC-NminC)))) + max_index <- ADbreak(min(max_index, + NmaxC - 2)) + max_index <- max_index - NminC + 1 + if(max_index < 1) max_index <- 1 + } + numN <- 0L + numN <- NmaxC - NminC + 1 - 1 # remember... 
+ prods <- rep(0, numN) + for (i in (NminC + 1):NmaxC) + prods[i - NminC] <- (i + r - 1) * prod(i/(i - x)) / i + ff <- log(1 - pNB) + sum_log_one_m_prob + log(prods) + log_fac <- nimNmixPois_logFac(numN, ff, max_index) + logProb <- logProb + log_fac + } + return(logProb) + returnType(double()) + }, + buildDerivs = list(run = list(ignore = c("i"))) +) + + +##### dNmixture_BBP_v ##### +NULL +#' @rdname dNmixture_steps +#' @importFrom stats dpois +#' @export +dNmixture_BBP_steps <- nimbleFunction( + run = function(x = double(1), + beta_m_x = double(1), + lambda = double(), + s = double(), + Nmin = double(), + Nmax = double(), + sum_log_dbetabinom = double(), + usingAD = logical(0, default=FALSE)) { + logProb <- -Inf + max_index <- -1L + NminC <- NmaxC <- 0L + NminC <- ADbreak(Nmin) + NmaxC <- ADbreak(Nmax) + logProb <- dpois(Nmin, lambda, log = TRUE) + sum_log_dbetabinom + if(Nmax > Nmin) { + if(usingAD) { + # see comments in basic case above + max_index <- ADbreak(min(2*NminC, + floor(NminC + 0.2*(NmaxC-NminC)))) + max_index <- ADbreak(min(max_index, + NmaxC - 2)) + max_index <- max_index - NminC + 1 + if(max_index < 1) max_index <- 1 + } + numN <- 0L + numN <- NmaxC - NminC + 1 - 1 # remember... + prods <- rep(0, numN) + # N.B. 
alpha+beta == s + for (i in (NminC + 1):NmaxC) + prods[i - NminC] <- prod(i * (i - 1 + beta_m_x) / ((i - x) * (s + i - 1))) * (lambda / i) + ff <- log(prods) + log_fac <- nimNmixPois_logFac(numN, ff, max_index) + logProb <- logProb + log_fac + } + return(logProb) + returnType(double()) + }, + buildDerivs = list(run = list(ignore = c("i"))) +) + + +##### dNmixture_BBNB_v ##### +NULL +#' @rdname dNmixture_steps +#' @importFrom stats dnbinom +#' @export +dNmixture_BBNB_steps <- nimbleFunction( + run = function(x = double(1), + beta_m_x = double(1), + lambda = double(), + theta = double(), + s = double(), + Nmin = double(), + Nmax = double(), + sum_log_dbetabinom = double(), + usingAD = logical(0, default=FALSE)) { + r <- 1 / theta + pNB <- 1 / (1 + theta * lambda) + logProb <- -Inf + max_index <- -1L + NminC <- NmaxC <- 0L + NminC <- ADbreak(Nmin) + NmaxC <- ADbreak(Nmax) + logProb <- dnbinom(Nmin, size = r, prob = pNB, log = TRUE) + sum_log_dbetabinom + if(Nmax > Nmin) { + if(usingAD) { + # see comments in basic case above + max_index <- ADbreak(min(2*NminC, + floor(NminC + 0.2*(NmaxC-NminC)))) + max_index <- ADbreak(min(max_index, + NmaxC - 2)) + max_index <- max_index - NminC + 1 + if(max_index < 1) max_index <- 1 + } + numN <- 0L + numN <- NmaxC - NminC + 1 - 1 # remember... + prods <- rep(0, numN) + # N.B. alpha+beta == s + for (i in (NminC + 1):NmaxC) + prods[i - NminC] <- prod(i * (i - 1 + beta_m_x) / ((i - x) * (s + i - 1))) * + ((1 - pNB) * (i + r - 1) / i) + ff <- log(prods) + log_fac <- nimNmixPois_logFac(numN, ff, max_index) + logProb <- logProb + log_fac + } + return(logProb) + returnType(double()) + }, + buildDerivs = list(run = list(ignore = c("i"))) +) diff --git a/R/zzz.R b/R/zzz.R index 20c1d0d..eeb4436 100644 --- a/R/zzz.R +++ b/R/zzz.R @@ -1,7 +1,7 @@ # This file registers all distributions when the package is loaded. .onAttach <- function(libname, pkgname) { - packageStartupMessage("Loading nimbleEcology. 
Registering the following distributions:\n ", + packageStartupMessage("Loading nimbleEcology. Registering multiple variants of the following distributions:\n ", "dOcc", ", dDynOcc", ", dCJS", ", dHMM", ", dDHMM", ", dNmixture.\n") # Register the distributions explicitly for two reasons: @@ -13,7 +13,7 @@ BUGSdist = "dCJS_ss(probSurvive, probCapture, len)", Rdist = "dCJS_ss(probSurvive, probCapture, len = 0)", discrete = TRUE, - types = c('value = double(1)', 'probSurvive = double()', 'probCapture = double()', 'len = double()'), + types = c('value = double(1)', 'probSurvive = double()', 'probCapture = double()', 'len = integer()'), pqAvail = FALSE)), verbose = F ) @@ -22,7 +22,7 @@ BUGSdist = "dCJS_sv(probSurvive, probCapture, len)", Rdist = "dCJS_sv(probSurvive, probCapture, len = 0)", discrete = TRUE, - types = c('value = double(1)', 'probSurvive = double()', 'probCapture = double(1)', 'len = double()'), + types = c('value = double(1)', 'probSurvive = double()', 'probCapture = double(1)', 'len = integer()'), pqAvail = FALSE)), verbose = F ) @@ -31,7 +31,7 @@ BUGSdist = "dCJS_vs(probSurvive, probCapture, len)", Rdist = "dCJS_vs(probSurvive, probCapture, len = 0)", discrete = TRUE, - types = c('value = double(1)', 'probSurvive = double(1)', 'probCapture = double()', 'len = double()'), + types = c('value = double(1)', 'probSurvive = double(1)', 'probCapture = double()', 'len = integer()'), pqAvail = FALSE)), verbose = F ) @@ -40,7 +40,7 @@ BUGSdist = "dCJS_vv(probSurvive, probCapture, len)", Rdist = "dCJS_vv(probSurvive, probCapture, len = 0)", discrete = TRUE, - types = c('value = double(1)', 'probSurvive = double(1)', 'probCapture = double(1)', 'len = double()'), + types = c('value = double(1)', 'probSurvive = double(1)', 'probCapture = double(1)', 'len = integer()'), pqAvail = FALSE)), verbose = F ) @@ -238,8 +238,8 @@ 'init = double(1)', 'probObs = double(2)', 'probTrans = double(2)', - 'len = double(0)', - 'checkRowSums = double(0)'), + 'len = integer(0)', + 
'checkRowSums = integer(0)'), mixedSizes = TRUE, pqAvail = FALSE)), verbose = F ) @@ -253,8 +253,8 @@ 'init = double(1)', 'probObs = double(3)', 'probTrans = double(2)', - 'len = double(0)', - 'checkRowSums = double(0)'), + 'len = integer(0)', + 'checkRowSums = integer(0)'), mixedSizes = TRUE, pqAvail = FALSE)), verbose = F ) @@ -267,8 +267,8 @@ 'init = double(1)', 'probObs = double(2)', 'probTrans = double(3)', - 'len = double()', - 'checkRowSums = double(0)'), + 'len = integer()', + 'checkRowSums = integer(0)'), mixedSizes = TRUE, pqAvail = FALSE)), verbose = F ) @@ -282,8 +282,8 @@ 'init = double(1)', 'probObs = double(3)', 'probTrans = double(3)', - 'len = double()', - 'checkRowSums = double(0)'), + 'len = integer()', + 'checkRowSums = integer(0)'), mixedSizes = TRUE, pqAvail = FALSE)), verbose = F ) @@ -305,6 +305,23 @@ )), verbose = F ) + registerDistributions(list( + dNmixtureAD_v = list( + BUGSdist = "dNmixtureAD_v(lambda, prob, Nmin, Nmax, len)", + Rdist = "dNmixtureAD_v(lambda, prob, Nmin, Nmax, len)", + discrete = TRUE, + types = c('value = double(1)', + 'lambda = double()', + 'prob = double(1)', + 'Nmin = double(0, default = -1)', + 'Nmax = double(0, default = -1)', + 'len = double()' + ), + mixedSizes = FALSE, + pqAvail = FALSE + )), verbose = F + ) + registerDistributions(list( dNmixture_s = list( BUGSdist = "dNmixture_s(lambda, prob, Nmin, Nmax, len)", @@ -322,6 +339,24 @@ )), verbose = F ) + registerDistributions(list( + dNmixtureAD_s = list( + BUGSdist = "dNmixtureAD_s(lambda, prob, Nmin, Nmax, len)", + Rdist = "dNmixtureAD_s(lambda, prob, Nmin, Nmax, len)", + discrete = TRUE, + types = c('value = double(1)', + 'lambda = double()', + 'prob = double()', + 'Nmin = double(0, default = -1)', + 'Nmax = double(0, default = -1)', + 'len = double()' + ), + mixedSizes = FALSE, + pqAvail = FALSE + )), verbose = F + ) + + registerDistributions(list( dNmixture_BNB_v = list( BUGSdist = "dNmixture_BNB_v(lambda, theta, prob, Nmin, Nmax, len)", @@ -339,6 +374,23 
@@ pqAvail = FALSE )), verbose = F ) + registerDistributions(list( + dNmixtureAD_BNB_v = list( + BUGSdist = "dNmixtureAD_BNB_v(lambda, theta, prob, Nmin, Nmax, len)", + Rdist = "dNmixtureAD_BNB_v(lambda, theta, prob, Nmin, Nmax, len)", + discrete = TRUE, + types = c('value = double(1)', + 'lambda = double()', + 'theta = double()', + 'prob = double(1)', + 'Nmin = double(0, default = -1)', + 'Nmax = double(0, default = -1)', + 'len = double()' + ), + mixedSizes = FALSE, + pqAvail = FALSE + )), verbose = F + ) registerDistributions(list( dNmixture_BNB_s = list( BUGSdist = "dNmixture_BNB_s(lambda, theta, prob, Nmin, Nmax, len)", @@ -357,11 +409,11 @@ )), verbose = F ) registerDistributions(list( - dNmixture_BNB_oneObs = list( - BUGSdist = "dNmixture_BNB_oneObs(lambda, theta, prob, Nmin, Nmax, len)", - Rdist = "dNmixture_BNB_oneObs(lambda, theta, prob, Nmin, Nmax, len)", + dNmixtureAD_BNB_s = list( + BUGSdist = "dNmixtureAD_BNB_s(lambda, theta, prob, Nmin, Nmax, len)", + Rdist = "dNmixtureAD_BNB_s(lambda, theta, prob, Nmin, Nmax, len)", discrete = TRUE, - types = c('value = double()', + types = c('value = double(1)', 'lambda = double()', 'theta = double()', 'prob = double()', @@ -373,6 +425,38 @@ pqAvail = FALSE )), verbose = F ) + registerDistributions(list( + dNmixture_BNB_oneObs = list( + BUGSdist = "dNmixture_BNB_oneObs(lambda, theta, prob, Nmin, Nmax)", + Rdist = "dNmixture_BNB_oneObs(lambda, theta, prob, Nmin, Nmax)", + discrete = TRUE, + types = c('value = double()', + 'lambda = double()', + 'theta = double()', + 'prob = double()', + 'Nmin = double(0, default = -1)', + 'Nmax = double(0, default = -1)' + ), + mixedSizes = FALSE, + pqAvail = FALSE + )), verbose = F + ) + registerDistributions(list( + dNmixtureAD_BNB_oneObs = list( + BUGSdist = "dNmixtureAD_BNB_oneObs(lambda, theta, prob, Nmin, Nmax)", + Rdist = "dNmixtureAD_BNB_oneObs(lambda, theta, prob, Nmin, Nmax)", + discrete = TRUE, + types = c('value = double()', + 'lambda = double()', + 'theta = double()', + 
'prob = double()', + 'Nmin = double(0, default = -1)', + 'Nmax = double(0, default = -1)' + ), + mixedSizes = FALSE, + pqAvail = FALSE + )), verbose = F + ) registerDistributions(list( dNmixture_BBP_v = list( @@ -391,6 +475,23 @@ pqAvail = FALSE )), verbose = F ) + registerDistributions(list( + dNmixtureAD_BBP_v = list( + BUGSdist = "dNmixtureAD_BBP_v(lambda, prob, s, Nmin, Nmax, len)", + Rdist = "dNmixtureAD_BBP_v(lambda, prob, s, Nmin, Nmax, len)", + discrete = TRUE, + types = c('value = double(1)', + 'lambda = double()', + 'prob = double(1)', + 's = double()', + 'Nmin = double(0, default = -1)', + 'Nmax = double(0, default = -1)', + 'len = double()' + ), + mixedSizes = FALSE, + pqAvail = FALSE + )), verbose = F + ) registerDistributions(list( dNmixture_BBP_s = list( @@ -409,11 +510,28 @@ pqAvail = FALSE )), verbose = F ) + registerDistributions(list( + dNmixtureAD_BBP_s = list( + BUGSdist = "dNmixtureAD_BBP_s(lambda, prob, s, Nmin, Nmax, len)", + Rdist = "dNmixtureAD_BBP_s(lambda, prob, s, Nmin, Nmax, len)", + discrete = TRUE, + types = c('value = double(1)', + 'lambda = double()', + 'prob = double()', + 's = double()', + 'Nmin = double(0, default = -1)', + 'Nmax = double(0, default = -1)', + 'len = double()' + ), + mixedSizes = FALSE, + pqAvail = FALSE + )), verbose = F + ) registerDistributions(list( dNmixture_BBP_oneObs = list( - BUGSdist = "dNmixture_BBP_oneObs(lambda, prob, s, Nmin, Nmax, len)", - Rdist = "dNmixture_BBP_oneObs(lambda, prob, s, Nmin, Nmax, len)", + BUGSdist = "dNmixture_BBP_oneObs(lambda, prob, s, Nmin, Nmax)", + Rdist = "dNmixture_BBP_oneObs(lambda, prob, s, Nmin, Nmax)", discrete = TRUE, types = c('value = double()', 'lambda = double()', @@ -427,6 +545,22 @@ pqAvail = FALSE )), verbose = F ) + registerDistributions(list( + dNmixtureAD_BBP_oneObs = list( + BUGSdist = "dNmixtureAD_BBP_oneObs(lambda, prob, s, Nmin, Nmax)", + Rdist = "dNmixtureAD_BBP_oneObs(lambda, prob, s, Nmin, Nmax)", + discrete = TRUE, + types = c('value = double()', + 
'lambda = double()', + 'prob = double()', + 's = double()', + 'Nmin = double(0, default = -1)', + 'Nmax = double(0, default = -1)' + ), + mixedSizes = FALSE, + pqAvail = FALSE + )), verbose = F + ) @@ -450,6 +584,25 @@ )), verbose = F ) + registerDistributions(list( + dNmixtureAD_BBNB_v = list( + BUGSdist = "dNmixtureAD_BBNB_v(lambda, theta, prob, s, Nmin, Nmax, len)", + Rdist = "dNmixtureAD_BBNB_v(lambda, theta, prob, s, Nmin, Nmax, len)", + discrete = TRUE, + types = c('value = double(1)', + 'lambda = double()', + 'prob = double(1)', + 's = double()', + 'theta = double()', + 'Nmin = double(0, default = -1)', + 'Nmax = double(0, default = -1)', + 'len = double()' + ), + mixedSizes = FALSE, + pqAvail = FALSE + )), verbose = F + ) + registerDistributions(list( dNmixture_BBNB_s = list( BUGSdist = "dNmixture_BBNB_s(lambda, theta, prob, s, Nmin, Nmax, len)", @@ -468,11 +621,29 @@ pqAvail = FALSE )), verbose = F ) + registerDistributions(list( + dNmixtureAD_BBNB_s = list( + BUGSdist = "dNmixtureAD_BBNB_s(lambda, theta, prob, s, Nmin, Nmax, len)", + Rdist = "dNmixtureAD_BBNB_s(lambda, theta, prob, s, Nmin, Nmax, len)", + discrete = TRUE, + types = c('value = double(1)', + 'lambda = double()', + 'prob = double()', + 's = double()', + 'theta = double()', + 'Nmin = double(0, default = -1)', + 'Nmax = double(0, default = -1)', + 'len = double()' + ), + mixedSizes = FALSE, + pqAvail = FALSE + )), verbose = F + ) registerDistributions(list( dNmixture_BBNB_oneObs = list( - BUGSdist = "dNmixture_BBNB_oneObs(lambda, theta, prob, s, Nmin, Nmax, len)", - Rdist = "dNmixture_BBNB_oneObs(lambda, theta, prob, s, Nmin, Nmax, len)", + BUGSdist = "dNmixture_BBNB_oneObs(lambda, theta, prob, s, Nmin, Nmax)", + Rdist = "dNmixture_BBNB_oneObs(lambda, theta, prob, s, Nmin, Nmax)", discrete = TRUE, types = c('value = double()', 'lambda = double()', @@ -480,8 +651,24 @@ 'prob = double()', 's = double()', 'Nmin = double(0, default = -1)', - 'Nmax = double(0, default = -1)', - 'len = double()' + 
'Nmax = double(0, default = -1)' + ), + mixedSizes = FALSE, + pqAvail = FALSE + )), verbose = F + ) + registerDistributions(list( + dNmixtureAD_BBNB_oneObs = list( + BUGSdist = "dNmixtureAD_BBNB_oneObs(lambda, theta, prob, s, Nmin, Nmax)", + Rdist = "dNmixtureAD_BBNB_oneObs(lambda, theta, prob, s, Nmin, Nmax)", + discrete = TRUE, + types = c('value = double()', + 'lambda = double()', + 'theta = double()', + 'prob = double()', + 's = double()', + 'Nmin = double(0, default = -1)', + 'Nmax = double(0, default = -1)' ), mixedSizes = FALSE, pqAvail = FALSE @@ -489,15 +676,15 @@ ) registerDistributions(list( - dBetaBinom = list( - BUGSdist = "dBetaBinom(N, shape1, shape2)", - Rdist = "dBetaBinom(N, shape1, shape2)", + dBetaBinom_v = list( + BUGSdist = "dBetaBinom_v(N, shape1, shape2, len)", + Rdist = "dBetaBinom_v(N, shape1, shape2, len)", discrete = TRUE, types = c('value = double(1)', 'N = double()', 'shape1 = double(1)', 'shape2 = double(1)', - 'log = double()' + 'len = double()' ), mixedSizes = FALSE, pqAvail = FALSE @@ -505,15 +692,15 @@ ) registerDistributions(list( - dBetaBinom_One = list( - BUGSdist = "dBetaBinom_One(N, shape1, shape2)", - Rdist = "dBetaBinom_One(N, shape1, shape2)", + dBetaBinom_s = list( + BUGSdist = "dBetaBinom_s(N, shape1, shape2, len)", + Rdist = "dBetaBinom_s(N, shape1, shape2, len)", discrete = TRUE, - types = c('value = double()', + types = c('value = double(1)', 'N = double()', 'shape1 = double()', 'shape2 = double()', - 'log = double()' + 'len = double()' ), mixedSizes = FALSE, pqAvail = FALSE diff --git a/inst/AD_test_utils.R b/inst/AD_test_utils.R new file mode 100644 index 0000000..03394c6 --- /dev/null +++ b/inst/AD_test_utils.R @@ -0,0 +1,1575 @@ +# source(system.file(file.path('tests', 'testthat', 'test_utils.R'), package = 'nimble')) + +# An environment for storing some defaults for AD testing. +# Having this allows one to change these values globally. 
+ADtestEnv <- new.env() +resetTols <- function() { + ADtestEnv$RRrelTol <<- c(1e-8, 1e-3, 1e-2) + ADtestEnv$RCrelTol <<- c(1e-15, 1e-8, 1e-3) + ADtestEnv$CCrelTol <<- rep(sqrt(.Machine$double.eps), 3) +} +resetTols() + +##################### +## knownFailure utils +##################### + +## test_param_name: an AD test parameterization name as produced by +## make_op_param() +## +## returns: a length 2 character vector with the op and args +## +split_test_param_name <- function(test_param_name) { + first_space <- regexpr(' ', test_param_name) + op <- substr(test_param_name, 1, first_space - 1) + args <- substr(test_param_name, first_space + 1, nchar(test_param_name)) + return(c(op, args)) +} + +is_failure <- function(test_param_name, failure_name, knownFailures = list()) { + if (length(knownFailures) == 0) return(FALSE) + op_and_args <- split_test_param_name(test_param_name) + op <- op_and_args[1] + args <- op_and_args[2] + if (!is.null(knownFailures[[op]])) { + return( + isTRUE(knownFailures[[op]][[args]][[failure_name]]) || + isTRUE(knownFailures[[op]][['*']][[failure_name]]) + ) + } + return(FALSE) +} + +## test_param_name: an AD test parameterization name as produced by +## make_op_param() +## knownFailures: a list of known test failures, e.g. 
in the format of +## AD_knownFailures +## +## returns: TRUE if the test param is known to lead to a compilation failure and +## FALSE otherwise +## +is_compilation_failure <- function(test_param_name, knownFailures = list()) { + return(is_failure(test_param_name, 'compilation', knownFailures)) +} + +is_segfault_failure <- function(test_param_name, knownFailures = list()) { + return(is_failure(test_param_name, 'segfault', knownFailures)) +} + +is_method_failure <- function(test_param_name, method_name, + output_name = c('value', 'jacobian', 'hessian'), + knownFailures = list()) { + if (length(knownFailures) == 0) return(FALSE) + output_name <- match.arg(output_name) + op_and_args <- split_test_param_name(test_param_name) + op <- op_and_args[1] + args <- op_and_args[2] + if (!is.null(knownFailures[[op]])) { + this_arg_fails <- all_args_fail <- FALSE + if (!is.null(knownFailures[[op]][[args]])) + this_arg_fails <- identical( + knownFailures[[op]][[args]][[output_name]], + method_name + ) + if (!is.null(knownFailures[[op]][['*']])) + all_args_fail <- identical( + knownFailures[[op]][['*']][[output_name]], + method_name + ) + return(this_arg_fails || all_args_fail) + } + return(FALSE) +} + +all_equal_ignore_zeros <- function(v1, v2, ...) { + if(length(v1) != length(v2)) return(FALSE) + zv1 <- as.logical(v1 == 0) ## as.logical will strip any names + zv2 <- as.logical(v2 == 0) + nzboth <- !(zv1 & zv2) + if(sum(nzboth)==0) return(FALSE) ## This is used to validate expected discrepancies except when they are zero, so if all are zero, we default to that there were discrepancies, which allows the test to pass. + all.equal(v1[nzboth], v2[nzboth], ...) 
+} + +######################## +## Main AD testing utils +######################## + +test_AD2_oneCall <- function(Robj, Cobj, + recordArgs, + testArgs, + order, + wrt = NULL, + check.equality = TRUE, + # In normal usage, the tols are set from test_AD2 and + # these defaults are not used + RRrelTol = c(1e-8, 1e-3, 1e-2, 1e-14), + RCrelTol = c(1e-15, 1e-8, 1e-3, 1e-14), + CCrelTol = c(rep(sqrt(.Machine$double.eps), 3), 1e-14), + Rmodel = NULL, Cmodel = NULL, + recordInits = NULL, testInits = NULL, + nodesToChange = NULL) { + resRecord <- list() + resTest <- list() + + useRmodel <- !is.null(Rmodel) + useCmodel <- !is.null(Cmodel) + + RRabsThresh <- 0 + RCabsthresh <- 0 + CCabsThresh <- 0 + if(length(RRrelTol) > 3) RRabsThresh <- RRrelTol[4] + if(length(RCrelTol) > 3) RCabsThresh <- RCrelTol[4] + if(length(CCrelTol) > 3) CCabsThresh <- CCrelTol[4] + + if(is.null(names(RRrelTol))) names(RRrelTol)[1:3] <- c("0", "1", "2") + if(is.null(names(RCrelTol))) names(RCrelTol)[1:3] <- c("0", "1", "2") + if(is.null(names(CCrelTol))) names(CCrelTol)[1:3] <- c("0", "1", "2") + + setModelInits <- function(m, inits) { + for(v in names(inits)) { + m[[v]] <- inits[[v]] + } + m$calculate() + } + + changeModelInits <- function(m, inits, nodesToChange) { + for(node in nodesToChange) { # This will not include constant nodes and can include like 'x[2:3]' + if(all.vars(parse(text = node))[1] %in% names(inits)) + eval(parse(text = paste0("m$", node, " <- inits$", node))) + } + m$calculate() + } + + do_one_call <- function(fun, argList) { + eval( as.call( c(fun, argList) ) ) + } + + do_one_set <- function(method, order, metaLevel = 0, name, + doR, doC, tapingLevel = c(1, 0, 2), fixedOrder = FALSE) { + # tapingLevel: 0 for the (run) function itself, 1 for a tape of run, 2 for a tape of a tape of run + # metaLevel: 0 if order 0 from the tape is order 0 result. 1 if order 0 gives order 1 result due to double taping. 2 if order 0 gives order 2 result due to double taping. 
+ # fixedOrder: FALSE is return object is result of nimDerivs call. + # 0, 1, or 2 if return object is the value, jacobian or hessian directly. + if(tapingLevel == 0) order <- 0 + if(isFALSE(fixedOrder)) + metaOrder <- order[order >= metaLevel] - metaLevel # e.g. for metaLevel 1, count 1 as 0 + else + metaOrder <- fixedOrder + # Follow construction is because Robj[[method]] doesn't work until + # Robj$run (if method == "run") has been done. It's a flaw in reference classes + Rfun <- eval( parse( text = paste0("Robj$", method), keep.source = FALSE)[[1]]) + Cfun <- eval( parse( text = paste0("Cobj$", method), keep.source = FALSE)[[1]]) + extraArgs <- list() + if(tapingLevel >= 1) { + extraArgs$wrt <- wrt_all + # If there is a tape and no fixed order, the order arg is needed + if(isFALSE(fixedOrder)) + extraArgs$order <- metaOrder + extraArgs$reset <- TRUE + } + if(tapingLevel == 2) { + extraArgs$innerWrt = wrt_all + } + argList <- c(recordArgs, extraArgs) + if(doR) { + if(useRmodel) setModelInits(Rmodel, recordInits) + RansRecord <- do_one_call(Rfun, argList) + } + if(doC) { + if(useCmodel) setModelInits(Cmodel, recordInits) + CansRecord <- do_one_call(Cfun, argList) + } + if(tapingLevel >= 1) + extraArgs$reset <- FALSE + + argList <- c(testArgs, extraArgs) + if(doR) { + if(useRmodel) changeModelInits(Rmodel, testInits, nodesToChange) + RansTest <- do_one_call(Rfun, argList) + } + if(doC) { + if(useCmodel) changeModelInits(Cmodel, testInits, nodesToChange) + CansTest <- do_one_call(Cfun, argList) + } + + pieces <- c("0"="value", "1"="jacobian", "2"="hessian") + rName <- paste0("R", name) + cName <- paste0("C", name) + + if(!isFALSE(fixedOrder)) { + oChar <- as.character(fixedOrder) # Assume fixedOrder given directly (no need to adjust for metaOrder) + if(doR) { + resRecord[[oChar]][[rName]] <<- RansRecord + resTest[[oChar]][[rName]] <<- RansTest + } + if(doC) { + resRecord[[oChar]][[cName]] <<- CansRecord + resTest[[oChar]][[cName]] <<- CansTest + } + } else { + 
for(o in as.character(metaOrder)) { + oChar <- as.character(as.numeric(o) + metaLevel) + if((metaLevel == 1) & o == '1') { + argsLength <- length(wrt_all) + reorder_jac_jac <- function(x) { + outLength <- length(x) / (argsLength*argsLength) + aperm(array(as.numeric(x), c(outLength, argsLength, argsLength)), c(2, 3, 1)) + } + } + if(doR) { + if((metaLevel == 1) & o == '1') { + resRecord[[oChar]][[rName]] <<- reorder_jac_jac(RansRecord[[ pieces[[o]] ]]) + resTest[[oChar]][[rName]] <<- reorder_jac_jac(RansRecord[[ pieces[[o]] ]]) + } else { + resRecord[[oChar]][[rName]] <<- RansRecord[[ pieces[[o]] ]] + resTest[[oChar]][[rName]] <<- RansTest[[ pieces[[o]] ]] + } + } + if(doC) { + if((metaLevel == 1) & o == '1') { + resRecord[[oChar]][[cName]] <<- reorder_jac_jac(CansRecord[[ pieces[[o]] ]]) + resTest[[oChar]][[cName]] <<- reorder_jac_jac(CansTest[[ pieces[[o]] ]]) + } else { + resRecord[[oChar]][[cName]] <<- CansRecord[[ pieces[[o]] ]] + resTest[[oChar]][[cName]] <<- CansTest[[ pieces[[o]] ]] + } + } + } + } + } + + len_record <- sum(unlist(lapply(recordArgs, length))) + len_test <- sum(unlist(lapply(testArgs, length))) + if(len_record != len_test) { + stop('lengths of recordArgs and testArgs must match.') + } + + if(is.null(wrt)) + wrt_all <- 1:len_record + else + wrt_all <- wrt + + do_one_set("run", name = "run", + doR = TRUE, doC = TRUE, tapingLevel = 0, fixedOrder = 0) + do_one_set("derivsRun", order = order, metaLevel = 0, name = "derivsRun", + doR = TRUE, doC = TRUE, tapingLevel = 1, fixedOrder = FALSE) + do_one_set("value", name = "value", + doR = FALSE, doC = TRUE, tapingLevel = 1, fixedOrder = 0) + do_one_set("jac", name = "jacR1", + doR = FALSE, doC = TRUE, tapingLevel = 1, fixedOrder = 1) + do_one_set("jac2", name = "jacF1", + doR = FALSE, doC = TRUE, tapingLevel = 1, fixedOrder = 1) + do_one_set("hess", name = "hess", + doR = FALSE, doC = TRUE, tapingLevel = 1, fixedOrder = 2) + do_one_set("derivsValue", order = 0:2, name = "derivsValue", + doR = FALSE, 
doC = TRUE, tapingLevel = 2, fixedOrder = FALSE) + do_one_set("derivsJac", order = 1:2, name = "derivsJacR1", metaLevel = 1, + doR = FALSE, doC = TRUE, tapingLevel = 2, fixedOrder = FALSE) + do_one_set("derivsJac2", order = 1:2, name = "derivsJacF1", metaLevel = 1, + doR = FALSE, doC = TRUE, tapingLevel = 2, fixedOrder = FALSE) + do_one_set("derivsHess", order = 2, name = "derivsHess", metaLevel = 2, + doR = FALSE, doC = TRUE, tapingLevel = 2, fixedOrder = FALSE) + + all_equal_list <- function(first, others, tol, + abs_threshold, info = "") { + pass <- TRUE + for(i in seq_along(others)) { + pass <- pass && nim_all_equal(as.numeric(first), as.numeric(others[[i]]), + tol, abs_threshold = abs_threshold, + verbose = TRUE, info = info) + } + pass + } + + if(check.equality) { + pass <- TRUE + for(res in list(resRecord, resTest)) + for(o in as.character(order)) { + ansSet <- res[[o]] + splitAnsSet <- split(ansSet, + c("C", "R")[as.integer(grepl("^R", names(ansSet)))+1]) + RansSet <- splitAnsSet[["R"]] + CansSet <- splitAnsSet[["C"]] + if(pass) + if(length(RansSet) > 1) { + pass <- pass && all_equal_list(RansSet[[1]], RansSet[-1], tol = RRrelTol[[o]], + abs_threshold = RRabsThresh, + info = paste0("(RR order ", o,")")) + if(!pass) { + cat(paste('Some R-to-R derivatives do not match for order',o)) + # browser() + } + } + if(pass) + if(length(RansSet) > 0 && length(CansSet) > 0) { + pass <- pass && all_equal_list(RansSet[[1]], CansSet, tol = RCrelTol[[o]], + abs_threshold = RCabsThresh, + info = paste0("(RC order ", o,")")) + if(!pass) { + cat(paste('Some C-to-R derivatives to not match for order',o)) + browser() + } + } + if(pass) + if(length(CansSet) > 1) { + pass <- pass && all_equal_list(CansSet[[1]], CansSet[-1], CCrelTol[[o]], + abs_threshold = CCabsThresh, + info = paste0("(CC order ", o, ")")) + if(!pass) { + cat(paste('Some C-to-C derivatives to not match for order',o)) + browser() + } + } + } + expect_true(pass) + } + list(resRecord = resRecord, resTest = resTest) 
+} + +test_AD2 <- function(param, dir = file.path(tempdir(), "nimble_generatedCode"), + control = list(), verbose = nimbleOptions('verbose'), + catch_failures = FALSE, seed = NULL, + nimbleProject_name = '', return_compiled_nf = FALSE, + knownFailures = list()) { + if (!is.null(param$debug) && param$debug) browser() + if (verbose) cat(paste0("### Testing ", param$name, "\n")) + + ## by default, reset the seed for every test + if(is.numeric(param[['seed']])) set.seed(param[['seed']]) + else if(is.numeric(seed)) set.seed(seed) + + nf <- nimbleFunction( + setup = function() {}, + run = param$run, + methods = param$methods, + buildDerivs = param$buildDerivs + ) + Robj <- nf() + temporarilyAssignInGlobalEnv(Robj) + + if(!is.null(param$inputs)) { + inputsRecord <- param$inputs[[1]] + inputsTest <- param$inputs[[2]] + } else { + ## input_gen_funs might be generalized. + opParam <- param$opParam + if (is.null(param$input_gen_funs) || is.null(names(param$input_gen_funs))) + if (length(param$input_gen_funs) <= 1) { + inputsRecord <- lapply(opParam$args, arg_type_2_input, + input_gen_fun = param$input_gen_funs, size = param$size) + inputsTest <- lapply(opParam$args, arg_type_2_input, + input_gen_fun = param$input_gen_funs, size = param$size) + } + else + stop( + 'input_gen_funs of length greater than 1 must have names', + call. 
= FALSE + ) + else { + inputsRecord <- sapply( + names(opParam$args), + function(name) + arg_type_2_input(opParam$args[[name]], input_gen_fun = param$input_gen_funs[[name]], + size = param$size[[name]]), + simplify = FALSE + ) + inputsTest <- sapply( + names(opParam$args), + function(name) + arg_type_2_input(opParam$args[[name]], input_gen_fun = param$input_gen_funs[[name]], + size = param$size[[name]]), + simplify = FALSE + ) + } + } + ## + ## generate inputs that depend on the other inputs + ## + is_fun <- sapply(inputsRecord, is.function) + inputsRecord[is_fun] <- lapply( + inputsRecord[is_fun], function(fun) { + eval(as.call(c(fun, inputsRecord[names(formals(fun))]))) + } + ) + is_fun <- sapply(inputsTest, is.function) + inputsTest[is_fun] <- lapply( + inputsTest[is_fun], function(fun) { + eval(as.call(c(fun, inputsTest[names(formals(fun))]))) + } + ) + + # Currently only whole arguments supported. + # Hence all this does is create indices that skip over non-wrt args + lens <- lapply(inputsRecord, length) + if(!is.null(param$wrt_args)) { + nextInd <- 1 + lenInds <- lapply(lens, + function(x) { + ans <- nextInd:(nextInd + x - 1); nextInd <<- nextInd + x; ans}) + wrt <- unlist(lenInds[param$wrt_args]) + } else { + wrt <- 1:(sum(unlist(lens))) + } + + if (is_segfault_failure(param$name, knownFailures)) { + if (verbose) cat("## Skipping the rest of test before compilation", + "due to known segmentation fault\n") + return(invisible(NULL)) + } + + if (!is.null(param$dir)) dir <- param$dir + compilation_fails <- is_compilation_failure(param$name, knownFailures) + Cobj <- param$Cobj ## user provided compiled nimbleFunction? 
+ if (is.null(Cobj)) { + if (verbose) cat("## Compiling nimbleFunction \n") + Cobj <- wrap_if_true(compilation_fails, expect_error, { + compileNimble( + Robj, dirName = dir, projectName = nimbleProject_name, + control = control + ) + }, wrap_in_try = isTRUE(catch_failures)) + } + if (isTRUE(catch_failures) && inherits(Cobj, 'try-error')) { + warning( + paste0( + 'The test of ', opParam$name, + ' failed to compile.\n' + ), + call. = FALSE, + immediate. = TRUE + ) + ## stop the test here because it didn't compile + return(invisible(NULL)) + } else if (compilation_fails) { + if (verbose) cat("## Compilation failed, as expected \n") + return(invisible(NULL)) + } else { + RRrelTol <- param$RRrelTol + RCrelTol <- param$RCrelTol + CCrelTol <- param$CCrelTol + if(is.null(RRrelTol)) RRrelTol <- ADtestEnv$RRrelTol + if(is.null(RCrelTol)) RCrelTol <- ADtestEnv$RCrelTol + if(is.null(CCrelTol)) CCrelTol <- ADtestEnv$CCrelTol + + res <- try(test_AD2_oneCall(Robj = Robj, Cobj = Cobj, + recordArgs = inputsRecord, testArgs = inputsTest, + order = 0:2, + wrt = wrt, + RRrelTol = RRrelTol, + RCrelTol = RCrelTol, + CCrelTol = CCrelTol)) + if (inherits(res, 'try-error')) { + msg <- paste( + 'Something failed in test', opParam$name, '.\n' + ) + if (isTRUE(catch_failures)) ## continue to compilation + warning(msg, call. = FALSE, immediate. = TRUE) + else + stop(msg, call. = FALSE) ## throw an error here + } + + } + if(!compilation_fails) { + nimble:::clearCompiled(Cobj) + } + list(res = res) +} +## Take a test parameterization created by make_AD_test() or +## make_distribution_fun_AD_test(), generate a random input, and test for +## matching nimDerivs outputs from uncompiled and compiled versions of a +## nimbleFunction. 
+## +## param: an test parameterization generated by make_AD_test() or +## make_distribution_fun_AD_test() +## dir: passed to compileNimble() as the dirName argument +## control: passed to compileNimble() as the control argument +## verbose: if TRUE, print messages while testing +## catch_failures: if TRUE, don't stop testing when a testthat expect_* +## fails +## seed: seed to use in set.seed() before generating random inputs +## nimbleProject_name: passed to compileNimble() as the projectName argument +## return_compiled_nf: if TRUE, don't call clearCompiled() and include the +## compiled nimbleFunction instance in the output +## +## returns: a list with the randomly generated input and possibly the compiled +## nimbleFunction instance, or NULL if the test has a known compilation +## failure +test_AD <- function(param, dir = file.path(tempdir(), "nimble_generatedCode"), + control = list(), verbose = nimbleOptions('verbose'), + catch_failures = FALSE, seed = 0, + nimbleProject_name = '', return_compiled_nf = FALSE, + knownFailures = list()) { + if (!is.null(param$debug) && param$debug) browser() + if (verbose) cat(paste0("### Testing ", param$name, "\n")) + + ## by default, reset the seed for every test + if (is.numeric(seed)) set.seed(seed) + + ## TODO: use nClass instead? + nf <- nimbleFunction( + setup = function() {}, + run = param$run, + methods = param$methods, + buildDerivs = param$buildDerivs + ) + nfInst <- nf() + temporarilyAssignInGlobalEnv(nfInst) + + ## + ## generate inputs for the nimbleFunction methods + ## + opParam <- param$opParam + if (is.null(param$input_gen_funs) || is.null(names(param$input_gen_funs))) + if (length(param$input_gen_funs) <= 1) + input <- lapply(opParam$args, arg_type_2_input, param$input_gen_funs) + else + stop( + 'input_gen_funs of length greater than 1 must have names', + call. 
= FALSE + ) + else { + input <- sapply( + names(opParam$args), + function(name) + arg_type_2_input(opParam$args[[name]], param$input_gen_funs[[name]]), + simplify = FALSE + ) + } + ## + ## generate inputs that depend on the other inputs + ## + is_fun <- sapply(input, is.function) + input[is_fun] <- lapply( + input[is_fun], function(fun) { + eval(as.call(c(fun, input[names(formals(fun))]))) + } + ) + + if (is_segfault_failure(param$name, knownFailures)) { + if (verbose) cat("## Skipping the rest of test before compilation", + "due to known segmentation fault\n") + return(invisible(NULL)) + } + + ## + ## compile the nimbleFunction + ## + if (!is.null(param$dir)) dir <- param$dir + compilation_fails <- is_compilation_failure(param$name, knownFailures) + CnfInst <- param$CnfInst ## user provided compiled nimbleFunction? + if (is.null(CnfInst)) { + if (verbose) cat("## Compiling nimbleFunction \n") + CnfInst <- wrap_if_true(compilation_fails, expect_error, { + compileNimble( + nfInst, dirName = dir, projectName = nimbleProject_name, + control = control + ) + }, wrap_in_try = isTRUE(catch_failures)) + } + if (isTRUE(catch_failures) && inherits(CnfInst, 'try-error')) { + warning( + paste0( + 'The test of ', opParam$name, + ' failed to compile.\n', CnfInst[1] + ), + call. = FALSE, + immediate. 
= TRUE + ) + ## stop the test here because it didn't compile + return(invisible(NULL)) + } else if (compilation_fails) { + if (verbose) cat("## Compilation failed, as expected \n") + } else { + ## + ## call R versions of nimbleFunction methods with generated input + ## + if (verbose) cat("## Calling R versions of nimbleFunction methods\n") + Rderivs <- try( + sapply(names(param$methods), function(method) { + do.call( + ## can't access nfInst[[method]] until $ has been used :( + eval(substitute(nfInst$method, list(method = as.name(method)))), input + ) + }, USE.NAMES = TRUE), silent = TRUE + ) + if (inherits(Rderivs, 'try-error')) { + msg <- paste( + 'Calling R version of test', opParam$name, + 'resulted in an error:\n', Rderivs[1] + ) + if (isTRUE(catch_failures)) ## continue to compilation + warning(msg, call. = FALSE, immediate. = TRUE) + else + stop(msg, call. = FALSE) ## throw an error here + } + + ## + ## call compiled nimbleFunction methods with generated input + ## + Cderivs <- sapply(names(param$methods), function(method) { + do.call( + ## same issue as with Rderivs + eval(substitute(CnfInst$method, list(method = as.name(method)))), + input + ) + }, USE.NAMES = TRUE) + if ('log' %in% names(opParam$args)) { + input2 <- input + input2$log <- as.numeric(!input$log) + Rderivs2 <- try( + sapply(names(param$methods), function(method) { + do.call(nfInst[[method]], input2) + }, USE.NAMES = TRUE), silent = TRUE + ) + Cderivs2 <- sapply(names(param$methods), function(method) { + do.call(CnfInst[[method]], input2) + }, USE.NAMES = TRUE) + } + + ## + ## loop over test methods (each with a different wrt arg) + ## + ## set expect_equal tolerances + tol1 <- if (is.null(param$tol1)) 1e-8 else param$tol1 + tol2 <- if (is.null(param$tol2)) 1e-6 else param$tol2 + tol3 <- if (is.null(param$tol3)) 1e-4 else param$tol3 + for (method_name in names(param$methods)) { + info <- paste0( + param$name, ', wrt = ', + paste0(param$wrts[[method_name]], collapse = ', ') + ) + if (verbose) 
{ + cat(paste0( + "## Testing ", method_name, ': ', + paste0(param$wrts[[method_name]], collapse = ', '), + '\n' + )) + } + ## + ## test values + ## + value_test_fails <- is_method_failure( + param$name, method_name, 'value', knownFailures + ) + value_test <- wrap_if_true(value_test_fails, expect_failure, { + if (verbose) cat("## Checking values\n") + expect_equal( + Cderivs[[method_name]]$value, + Rderivs[[method_name]]$value, + tolerance = tol1, info = info + ) + if ('log' %in% names(opParam$args)) { + if (verbose) cat("## Checking log behavior for values\n") + expect_equal( + Cderivs2[[method_name]]$value, + Rderivs2[[method_name]]$value, + tolerance = tol1, info = info + ) + expect_false(isTRUE(all.equal( + Rderivs[[method_name]]$value, + Rderivs2[[method_name]]$value, + tolerance = tol1)), info = info + ) + expect_false(isTRUE(all.equal( + Cderivs[[method_name]]$value, + Cderivs2[[method_name]]$value, + tolerance = tol1)), info = info + ) + } + }, wrap_in_try = isTRUE(catch_failures)) + if (isTRUE(catch_failures) && inherits(value_test, 'try-error')) { + warning( + paste0( + 'There was something wrong with the values of ', + opParam$name, ' with wrt = c(', + paste0(param$wrts[[method_name]], collapse = ', '), ').\n', + value_test[1] + ), + call. = FALSE, + immediate. 
= TRUE + ) + } else if (value_test_fails) { + if (verbose) { + cat(paste0( + "## As expected, test of values failed for ", method_name, ' with wrt: ', + paste0(param$wrts[[method_name]], collapse = ', '), + '\n' + )) + } + ## stop testing after an expected failure + break + } + ## + ## test jacobians + ## + jacobian_test_fails <- is_method_failure( + param$name, method_name, 'jacobian', knownFailures + ) + jacobian_test <- wrap_if_true(jacobian_test_fails, expect_failure, { + if (verbose) cat("## Checking jacobians\n") + expect_equal( + Cderivs[[method_name]]$jacobian, + Rderivs[[method_name]]$jacobian, + tolerance = tol2, info = info + ) + if ('log' %in% names(opParam$args)) { + if (verbose) cat("## Checking log behavior for jacobians\n") + expect_equal( + Cderivs2[[method_name]]$jacobian, + Rderivs2[[method_name]]$jacobian, + tolerance = tol2, info = info + ) + expect_false(isTRUE(all_equal_ignore_zeros( + Rderivs[[method_name]]$jacobian, + Rderivs2[[method_name]]$jacobian, + tolerance = tol2)), info = info + ) + expect_false(isTRUE(all_equal_ignore_zeros( + Cderivs[[method_name]]$jacobian, + Cderivs2[[method_name]]$jacobian, + tolerance = tol2)), info = info + ) + } + }, wrap_in_try = isTRUE(catch_failures)) + if (isTRUE(catch_failures) && inherits(jacobian_test, 'try-error')) { + warning( + paste0( + 'There was something wrong with the jacobian of ', + opParam$name, ' with wrt = c(', + paste0(param$wrts[[method_name]], collapse = ', '), ').\n', + jacobian_test[1] + ), + call. = FALSE, + immediate. 
= TRUE + ) + } else if (jacobian_test_fails) { + if (verbose) { + cat(paste0( + "## As expected, test of jacobian failed for ", method_name, ' with wrt: ', + paste0(param$wrts[[method_name]], collapse = ', '), + '\n' + )) + } + ## stop testing after an expected failure + break + } + ## + ## test hessians + ## + hessian_test_fails <- is_method_failure( + param$name, method_name, 'hessian', knownFailures + ) + hessian_test <- wrap_if_true(hessian_test_fails, expect_failure, { + if (verbose) cat("## Checking hessians\n") + expect_equal( + Cderivs[[method_name]]$hessian, + Rderivs[[method_name]]$hessian, + tolerance = tol3, info = info + ) + if ('log' %in% names(opParam$args)) { + if (verbose) cat("## Checking log behavior for hessians\n") + expect_equal( + Cderivs2[[method_name]]$hessian, + Rderivs2[[method_name]]$hessian, + tolerance = tol3, info = info + ) + expect_false(isTRUE(all_equal_ignore_zeros( + Rderivs[[method_name]]$hessian, + Rderivs2[[method_name]]$hessian, + tolerance = tol3)), info = info + ) + expect_false(isTRUE(all_equal_ignore_zeros( + Cderivs[[method_name]]$hessian, + Cderivs2[[method_name]]$hessian, + tolerance = tol3)), info = info + ) + } + }, wrap_in_try = isTRUE(catch_failures)) + if (isTRUE(catch_failures) && inherits(hessian_test, 'try-error')) { + warning( + paste0( + 'There was something wrong with the hessian of ', + opParam$name, ' with wrt = c(', + paste0(param$wrts[[method_name]], collapse = ', '), ').\n', + hessian_test[1] + ), + call. = FALSE, + immediate. 
= TRUE + ) + } else if (hessian_test_fails) { + if (verbose) { + cat(paste0( + "## As expected, test of hessian failed for ", method_name, ' with wrt: ', + paste0(param$wrts[[method_name]], collapse = ', '), + '\n' + )) + } + ## stop testing after an expected failure + break + } + } + } + if (verbose) cat("### Test successful \n\n") + if (return_compiled_nf) + invisible(list(CnfInst = CnfInst, input = input)) + else if(!compilation_fails) { + nimble:::clearCompiled(CnfInst) + invisible(list(input = input)) + } + invisible(NULL) +} + +test_AD_batch <- function(batch, dir = file.path(tempdir(), "nimble_generatedCode"), + control = list(), verbose = nimbleOptions('verbose'), + catch_failures = FALSE, seed = 0, + nimbleProject_name = '', knownFailures = list(), + testFun = test_AD) { + ## could try to do something more clever here, like putting the entire batch + ## into one giant nimbleFunction generator so we only have to compile once + ## (perhaps conditional on having no knownFailures in the entire batch) + lapply( + batch, testFun, + dir, control, verbose, catch_failures, seed, + nimbleProject_name, FALSE, knownFailures + ) + invisible(NULL) +} + +######################################### +## AD test parameterization builder utils +######################################### + +## Takes a named list of `argTypes` and returns a list of character +## vectors, each of which is valid as the `wrt` argument of `nimDerivs()`. +## Each argument on its own and all combinations of the arguments will +## always be included, and then make_wrt will try to create up to `n_random` +## additional character vectors with random combinations of the arguments +## and indexing of those arguments when possible (i.e. for non-scalar args). +## n_arg_reps determines how many times an argument can be used in a given +## wrt character vector. By default, any argument will appear only 1 time. 
+make_wrt <- function(argTypes, n_random = 10, n_arg_reps = 1) { + + ## always include each arg on its own, and all combinations of the args + wrts <- as.list(names(argTypes)) + if (length(argTypes) > 1) + for (m in 2:length(argTypes)) { + this_combn <- combn(names(argTypes), m) + wrts <- c( + wrts, + unlist(apply(this_combn, 2, list), recursive = FALSE) + ) + } + + argSymbols <- lapply( + argTypes, function(argType) + add_missing_size(nimble:::argType2symbol(argType)) + ) + + while (n_random > 0) { + n_random <- n_random - 1 + n <- sample(1:length(argTypes), 1) # how many of the args to use? + ## grab a random subset of the args of length n + args <- sample(argSymbols, n) + ## may repeat an arg up to n_arg_reps times + reps <- sample(1:n_arg_reps, length(args), replace = TRUE) + this_wrt <- c() + for (i in 1:length(args)) { + while (reps[i] > 0) { + reps[i] <- reps[i] - 1 + ## coin flip determines whether to index vectors/matrices + use_indexing <- sample(c(TRUE, FALSE), 1) + if (use_indexing && args[[i]]$nDim > 0) { + rand_row <- sample(1:args[[i]]$size[1], size = 1) + ## another coin flip determines whether to use : in indexing or not + use_colon <- sample(c(TRUE, FALSE), 1) + if (use_colon && rand_row < args[[i]]$size[1]) { + end_row <- rand_row + + sample(1:(args[[i]]$size[1] - rand_row), size = 1) + rand_row <- paste0(rand_row, ':', end_row) + } + index <- rand_row + if (args[[i]]$nDim == 2) { + rand_col <- sample(1:args[[i]]$size[2], size = 1) + ## one more coin flip to subscript second dimension + use_colon_again <- sample(c(TRUE, FALSE), 1) + if (use_colon_again && rand_col < args[[i]]$size[2]) { + end_col <- rand_col + + sample(1:(args[[i]]$size[2] - rand_col), size = 1) + rand_col <- paste0(rand_col, ':', end_col) + } + index <- paste0(index, ',', rand_col) + } + this_wrt <- c(this_wrt, paste0(names(args)[i], '[', index, ']')) + } + ## if first coin flip was FALSE, just + ## use the arg name without indexing + else this_wrt <- c(this_wrt, names(args)[i]) 
+ } + } + if (!is.null(this_wrt)) wrts <- c(wrts, list(unique(this_wrt))) + } + unique(wrts) +} + +## A version 2 of make_AD_test. +# +# Construct nimbleFunction with following methods: +# value - simply calculate the function, the core of everything. +# derivsValue - get any derivatives of value. +# value2 - get the value from derivsValue. can be used for double (meta) taping. +# jac - get the jacobian from derivsValue via reverse order 1. can double tape +# jac2 - get the jacobian from derivsValue via forward order 1 (same as en route to hessian). can double tape +# hess - get the hessian from derivsValue. can double tape. +# derivsJac - get any derivatives of jac +# derivsJac2 - get any derivatives of jac2 +# derivsHess - get any derivatives of hess. +# +# For later testing (notation is "requested orders" --> "actual orders") +# e.g. requesting order 0 from derivsJac gives order 1 of the actual function (value). +# run: gives 0 +# derivsRun: 0, 1, 2 --> 0, 1, 2 +# value: gives 0 (tests F0) +# jac: gives 1 (tests F0R1) +# jac2: gives 1 (tests F0F1) +# hess: gives 2 (tests F0F1R2) +# derivsValue: 0, 1, 2 --> 0, 1, 2 (tests meta-taped F0 for later derivs) +# derivsJac: 0, 1 --> 1, 2 (tests meta-taped F0R1 for later derivs) +# derivsJac2: 0, 1 --> 1, 2 (tests meta-taped F0F1 for later derivs) +# derivsHess: 0 --> 2 (tests meta-taped F0F0R2 for later derivs) +make_AD_test2 <- function(op, argTypes, wrt_args = NULL, + input_gen_funs = NULL, more_args = NULL, seed = 0, + outer_code = NULL, inner_codes = NULL, + size = NULL, inputs = NULL, + includeModelArgs = FALSE) { + if(!is.list(op)) { + opParam <- make_op_param(op, argTypes, more_args = more_args, + outer_code = outer_code, inner_codes = inner_codes) + } else { + opParam <- op + } + run <- gen_runFunCore(opParam) + + if(seed == 0) seed <- round(runif(1, 1, 10000)) + + modelArg <- NULL + updateNodesArg <- NULL + constantNodesArg <- NULL + if(isTRUE(includeModelArgs)) { + # If this is used, then some custom setup 
code + # will need to be inserted to create model, updateNodes, and constantNodes + modelArg <- as.name("model") + updateNodesArg <- as.name("updateNodes") + constantNodesArg <- as.name("constantNodes") + } + + + ## Make a set of methods. These need to be constructed + ## so that they can have different argument names, numbers, and types. + args_formals <- lapply(argTypes, function(argType) { + parse(text = argType)[[1]] + }) + if (is.null(names(args_formals))) + names(args_formals) <- paste0('arg', 1:length(args_formals)) + + runCall <- parse(text = paste0("run(", paste(names(args_formals), collapse=","), ")"), + keep.source = FALSE)[[1]] + + derivsRun <- eval(substitute( + function(#arg1 = double(1) etc to be inserted + wrt = double(1), + order = integer(1), + reset = logical(0, default = FALSE)) { + ans <- nimDerivs(RUNCALL, wrt = wrt, order = order, reset = reset, + model = MODELARG, updateNodes = UPDATENODESARG, + constantNodes = CONSTANTNODESARG) + return(ans) + returnType(ADNimbleList()) + }, + list(RUNCALL = runCall, MODELARG = modelArg, + UPDATENODESARG = updateNodesArg, + CONSTANTNODESARG = constantNodesArg) + )) + attributes(derivsRun)$srcref <- NULL + formals(derivsRun) <- c(args_formals, formals(derivsRun)) + + value <- eval(substitute( + function(#arg1 = double(1) etc to be inserted + wrt = double(1), + reset = logical(0, default = FALSE)) { + ans <- nimDerivs(RUNCALL, wrt = wrt, order = 0, reset = reset, + model = MODELARG, updateNodes = UPDATENODESARG, + constantNodes = CONSTANTNODESARG) + d1 <- dim(ans$value)[1] + res <- numeric(length = d1) + for(i in 1:d1) + res[i] <- ans$value[i] + return(res) + returnType(double(1)) + }, + list(RUNCALL = runCall, MODELARG = modelArg, + UPDATENODESARG = updateNodesArg, + CONSTANTNODESARG = constantNodesArg) + )) + attributes(value)$srcref <- NULL + formals(value) <- c(args_formals, formals(value)) + + jac <- eval(substitute( + function(#arg1 = double(1) etc to be inserted + wrt = double(1), + reset = logical(0, 
default = FALSE)) { + ans <- nimDerivs(RUNCALL, wrt = wrt, order = 1, reset = reset, + model = MODELARG, updateNodes = UPDATENODESARG, + constantNodes = CONSTANTNODESARG) + d1 <- dim(ans$jacobian)[1] + d2 <- dim(ans$jacobian)[2] + res <- numeric(length = d1*d2) + for(i in 1:d1) + for(j in 1:d2) + res[i + (j-1)*d1] <- ans$jacobian[i, j] + return(res) + returnType(double(1)) + }, + list(RUNCALL = runCall, MODELARG = modelArg, + UPDATENODESARG = updateNodesArg, + CONSTANTNODESARG = constantNodesArg) + )) + attributes(jac)$srcref <- NULL + formals(jac) <- c(args_formals, formals(jac)) + + jac2 <- eval(substitute( + function(#arg1 = double(1) etc to be inserted + wrt = double(1), + reset = logical(0, default = FALSE)) { + # Because this gets order 2, the order 1 comes from forward 1 + ans <- nimDerivs(RUNCALL, wrt = wrt, order = 1:2, reset = reset, + model = MODELARG, updateNodes = UPDATENODESARG, + constantNodes = CONSTANTNODESARG) + d1 <- dim(ans$jacobian)[1] + d2 <- dim(ans$jacobian)[2] + res <- numeric(length = d1*d2) + for(i in 1:d1) + for(j in 1:d2) + res[i + (j-1)*d1] <- ans$jacobian[i, j] + return(res) + returnType(double(1)) + }, + list(RUNCALL = runCall, MODELARG = modelArg, + UPDATENODESARG = updateNodesArg, + CONSTANTNODESARG = constantNodesArg) + )) + attributes(jac2)$srcref <- NULL + formals(jac2) <- c(args_formals, formals(jac2)) + + hess <- eval(substitute( + function(#arg1 = double(1) etc to be inserted + wrt = double(1), + reset = logical(0, default = FALSE)) { + ans <- nimDerivs(RUNCALL, wrt = wrt, order = 2, reset = reset, + model = MODELARG, updateNodes = UPDATENODESARG, + constantNodes = CONSTANTNODESARG) + d1 <- dim(ans$hessian)[1] + d2 <- dim(ans$hessian)[2] + d3 <- dim(ans$hessian)[3] + res <- numeric(length = d1*d2*d3) + # There was a bug in pulling out results in a single line. + # It is unrelated to AD so considered separate. 
+ for(i in 1:d1) + for(j in 1:d2) + for(k in 1:d3) + res[i + (j-1)*d1 + (k-1)*d1*d2] <- ans$hessian[i, j, k] + return(res) + returnType(double(1)) + }, + list(RUNCALL = runCall, MODELARG = modelArg, + UPDATENODESARG = updateNodesArg, + CONSTANTNODESARG = constantNodesArg) + )) + attributes(hess)$srcref <- NULL + formals(hess) <- c(args_formals, formals(hess)) + + valueCall <- parse(text = paste0("value(", + paste(c(names(args_formals), + "wrt = innerWrt", + "reset=reset"), collapse=","), + ")"), + keep.source = FALSE)[[1]] + + derivsValue <- eval(substitute( + function(#arg1 = double(1) etc to be inserted + innerWrt = double(1), + wrt = double(1), + order = integer(1), + reset = logical(0, default = FALSE) ) { + ans <- nimDerivs(VALUECALL, wrt = wrt, order = order, reset = reset, + model = MODELARG, updateNodes = UPDATENODESARG, + constantNodes = CONSTANTNODESARG) + return(ans) + returnType(ADNimbleList()) + }, + list(VALUECALL = valueCall, MODELARG = modelArg, + UPDATENODESARG = updateNodesArg, + CONSTANTNODESARG = constantNodesArg) + )) + attributes(derivsValue)$srcref <- NULL + formals(derivsValue) <- c(args_formals, formals(derivsValue)) + + + jacCall <- parse(text = paste0("jac(", + paste(c(names(args_formals), + "wrt = innerWrt", + "reset=reset"), collapse=","), + ")"), + keep.source = FALSE)[[1]] + + derivsJac <- eval(substitute( + function(#arg1 = double(1) etc to be inserted + innerWrt = double(1), + wrt = double(1), + order = integer(1), + reset = logical(0, default = FALSE) ) { + ans <- nimDerivs(JACCALL, wrt = wrt, order = order, reset = reset, + model = MODELARG, updateNodes = UPDATENODESARG, + constantNodes = CONSTANTNODESARG) + return(ans) + returnType(ADNimbleList()) + }, + list(JACCALL = jacCall, MODELARG = modelArg, + UPDATENODESARG = updateNodesArg, + CONSTANTNODESARG = constantNodesArg) + )) + attributes(derivsJac)$srcref <- NULL + formals(derivsJac) <- c(args_formals, formals(derivsJac)) + + jac2Call <- parse(text = paste0("jac2(", + 
paste(c(names(args_formals), + "wrt = innerWrt", + "reset=reset"), collapse=","), + ")"), + keep.source = FALSE)[[1]] + + derivsJac2 <- eval(substitute( + function(#arg1 = double(1) etc to be inserted + innerWrt = double(1), + wrt = double(1), + order = integer(1), + reset = logical(0, default = FALSE) ) { + ans <- nimDerivs(JAC2CALL, wrt = wrt, order = order, reset = reset, + model = MODELARG, updateNodes = UPDATENODESARG, + constantNodes = CONSTANTNODESARG) + return(ans) + returnType(ADNimbleList()) + }, + list(JAC2CALL = jac2Call, MODELARG = modelArg, + UPDATENODESARG = updateNodesArg, + CONSTANTNODESARG = constantNodesArg) + )) + attributes(derivsJac2)$srcref <- NULL + formals(derivsJac2) <- c(args_formals, formals(derivsJac2)) + + + hessCall <- parse(text = paste0("hess(", + paste(c(names(args_formals), + "wrt = innerWrt", + "reset=reset"), collapse=","), + ")"), + keep.source = FALSE)[[1]] + derivsHess <- eval(substitute( + function(#arg1 = double(1) etc to be inserted + innerWrt = double(1), + wrt = double(1), + order = integer(1), + reset = logical(0, default = FALSE) ) { + ans <- nimDerivs(HESSCALL, wrt = wrt, order = order, reset = reset, + model = MODELARG, updateNodes = UPDATENODESARG, + constantNodes = CONSTANTNODESARG) + return(ans) + returnType(ADNimbleList()) + }, + list(HESSCALL = hessCall, MODELARG = modelArg, + UPDATENODESARG = updateNodesArg, + CONSTANTNODESARG = constantNodesArg) + )) + attributes(derivsHess)$srcref <- NULL + formals(derivsHess) <- c(args_formals, formals(derivsHess)) + + methods <- list(derivsRun = derivsRun, + value = value, + jac = jac, + jac2 = jac2, + hess = hess, + derivsValue = derivsValue, + derivsJac = derivsJac, + derivsJac2 = derivsJac2, + derivsHess = derivsHess + ) + + list( + name = opParam$name, + opParam = opParam, + run = run, + methods = methods, + buildDerivs = list(run = list() + , + value = list(ignore = c('wrt', 'i')), + jac = list(ignore = c('wrt', 'i', 'j')), + jac2 = list(ignore = c('wrt', 'i', 'j')), + 
hess = list(ignore = c('wrt', 'i', 'j', 'k')) + ), + wrt_args = wrt_args, + input_gen_funs = input_gen_funs, + size = size, + seed = seed, + inputs = inputs + ) +} + + +model_calculate_test_pieces <- make_AD_test2( + op = list( + expr = quote( { + values(model, derivNodes) <<- arg1 + out <- model$calculate(calcNodes) + return(out) + }), + args = list(arg1 = quote(double(1))), + outputType = quote(double()) + ), + argTypes = list(arg1 = "double(1)"), + includeModelArgs = TRUE) + +model_calculate_test <- nimbleFunction( + setup = function(model, nodesList) { + derivNodes <- nodesList$derivNodes + updateNodes <- nodesList$updateNodes + constantNodes <- nodesList$constantNodes + calcNodes <- nodesList$calcNodes + nNodes <- length(derivNodes) + }, + run = model_calculate_test_pieces$run, + methods = model_calculate_test_pieces$methods, + buildDerivs = model_calculate_test_pieces$buildDerivs +) + +setup_update_and_constant_nodes_for_tests <- function(model, + derivNodes, + forceConstantNodes = character(), + forceUpdateNodes = character()) { + ## "update" means "CppAD dynamic" + ## derivNodes <- model$expandNodeNames(derivNodes) # do not do this because do not want vector node names + nNodes <- length(derivNodes) + calcNodes <- model$getDependencies(derivNodes) + ucNodes <- makeModelDerivsInfo(model, derivNodes, calcNodes, dataAsConstantNodes = TRUE) + updateNodes <- ucNodes$updateNodes + constantNodes <- ucNodes$constantNodes + updateNodes <- setdiff(updateNodes, forceConstantNodes) # remove forceConstants from updates + constantNodes <- setdiff(constantNodes, forceUpdateNodes) # remove forceUpdates from constants + constantNodes <- union(constantNodes, forceConstantNodes) # add forceConstants to constants + updateNodes <- union(updateNodes, forceUpdateNodes) # add forceUpdates to updates + list(derivNodes = derivNodes, updateNodes = updateNodes, + constantNodes = constantNodes, calcNodes = calcNodes) +} + +model_calculate_test_case <- function(Rmodel, Cmodel, + 
deriv_nf, nodesList, + v1, v2 , + order, + varValues = list(), + varValues2 = list(), ...) { + # This sets up an instance of a checker fxn + Rfxn <- deriv_nf( Rmodel, nodesList) + Cfxn <- compileNimble(Rfxn, project = Rmodel) + + test_AD2_oneCall(Rfxn, Cfxn, + recordArgs = v1, testArgs = v2, + order = order, + Rmodel = Rmodel, Cmodel = Cmodel, + recordInits = varValues, testInits = varValues2, + nodesToChange = c(nodesList$updateNodes), + ...) +} + +## Make a test parameterization to be used by test_AD. This method is primarily +## used by make_AD_test_batch() and make_distribution_fun_AD_test(). +## +## op: Character string, the operator that will be the focus of the +## test. +## argTypes: Character vector of argType strings that, when parsed, can be +## passed to argType2symbol. If named, the names will as the formals +## of the nimbleFunction generator's run method and other methods. +## If not, formals are generated as arg1, arg2, etc. +## wrt_args: Optional character vector of args to use in make_wrt(). If NULL, +## assumes that all the arguments should be used. +## input_gen_funs: A list of input generation functions which is simply passed to +## the output list. Should be NULL (use defaults found in +## arg_type_2_input()), length 1 (use same input gen mechanism for each +## argType, or a named list with names from among the argType names +## (possibly the sequentially generated names). This will be NULL +## when bulk generating the test params using make_AD_test_batch and +## added later via modify_on_match(). Used in the call to +## make_AD_test() in make_distribution_fun_AD_test(). +## more_args: A named list of additional fixed arguments to use in the +## generated operator call. E.g., if op = 'dnorm', +## argTypes = c('double(1, 5)', 'double(0)'), and +## more_args = list(log = 1), the call to make_op_param will include +## the expression dnorm(arg1, arg2, log = 1). +## seed: A seed to use in set.seed(). 
+## +## returns: A list with the following elements: +## name: character string, the operator and args +## opParam: result from make_op_param +## run: result from calling gen_runFunCore with opParam +## methods: a list of nimbleFunction method expressions that are +## the calls to nimDerivs() of the run method, each with +## a different wrt argument +## buildDerivs: list('run') +## wrts: a list of character vectors, each of which is the wrt +## argument for the corresponding method in methods +## input_gen_funs: A list of random input generation functions to be +## used by arg_type_2_input(). +make_AD_test <- function(op, argTypes, wrt_args = NULL, + input_gen_funs = NULL, more_args = NULL, seed = 0, ...) { + ## set the seed for make_wrt + if (is.numeric(seed)) set.seed(seed) + opParam <- make_op_param(op, argTypes, more_args) + + run <- gen_runFunCore(opParam) + method <- function() { + {} + returnType(ADNimbleList()) + return(outList) + } + formals_list <- lapply(argTypes, function(argType) { + parse(text = argType)[[1]] + }) + if (is.null(names(formals_list))) + names(formals_list) <- paste0('arg', 1:length(formals_list)) + formals(method) <- formals_list + + body(method)[[2]] <- + substitute( + outList <- derivs(this_call), + list( + this_call = as.call( + c(quote(run), lapply(names(formals_list), as.name)) + ) + ) + ) + + methods <- list() + + if (is.null(wrt_args)) wrt_args_filter <- rep(TRUE, length(argTypes)) + else wrt_args_filter <- wrt_args + wrts <- make_wrt(opParam$args[wrt_args_filter]) + for (i in seq_along(wrts)) { + method_i <- paste0('method', i) + methods[[method_i]] <- method + body(methods[[method_i]])[[2]][[3]][['wrt']] <- wrts[[i]] + } + + if (is.null(wrt_args) || all(names(opParam$args) %in% wrt_args)) { + method_no_wrt <- paste0('method', length(methods) + 1) + methods[[method_no_wrt]] <- method + wrts <- c(wrts, 'no wrt') + } + + names(wrts) <- names(methods) + + ## method_all_wrt <- paste0('method', length(wrt) + 2) + ## 
wrts[[method_all_wrt]] <- paste0('wrt ', paste0(wrt, collapse = ', ')) + ## methods[[method_all_wrt]] <- methods[[method_no_wrt]] <- method + ## body(methods[[method_all_wrt]])[[2]][[3]][['wrt']] <- wrt + + list( + name = opParam$name, + opParam = opParam, + run = run, + methods = methods, + buildDerivs = 'run', + wrts = wrts, + input_gen_funs = input_gen_funs + ) +} + +## ops: character vector of operator names +## argTypes: list of character vectors of argTypes +## e.g. for a binary operator: +## list( +## c('double(1, 4)', 'double(0)'), +## c('double(1, 4)', 'double(1, 4)') +## ) +make_AD_test_batch <- function(ops, argTypes, seed = 0, + maker = make_AD_test, + outer_code = NULL, inner_codes = NULL) { + opTests <- vector(mode = 'list', length = length(ops) * length(argTypes)) + for(i in seq_along(ops)) { + for(j in seq_along(argTypes)) { + iOut <- (i-1) * length(argTypes) + j + opTests[[iOut]] <- maker(op = ops[[i]], argTypes = argTypes[[j]], + seed = seed, outer_code = outer_code, inner_codes = inner_codes) + } + } + + ## opTests <- unlist( + ## recursive = FALSE, + ## x = lapply( + ## ops, + ## function(x) { + ## mapply( + ## maker, + ## argTypes = argTypes, + ## MoreArgs = list(op = x, seed = seed, + ## outer_code = outer_code, inner_codes = inner_codes), + ## SIMPLIFY = FALSE + ## ) + ## }) + ## ) + names(opTests) <- sapply(opTests, `[[`, 'name') + invisible(opTests) +} + +## Takes an element of distn_params list and returns a list of AD test +## parameterizations, each of which test_AD can use. +## +## distn_param: Probability distribution parameterization, which +## must have the following fields/subfields: +## - name +## - variants +## - args +## - rand_variate +## - type +## Additional args must also have type field. +## more_args: Passed to make_op_param(). 
+## +make_distribution_fun_AD_test <- function(distn_param, maker = make_AD_test) { + distn_name <- distn_param$name + ops <- sapply(distn_param$variants, paste0, distn_name, simplify = FALSE) + + rand_variate_idx <- which(names(distn_param$args) == 'rand_variate') + + argTypes <- sapply(distn_param$variants, function(variant) { + op <- paste0(variant, distn_name) + rand_variate_type <- distn_param$args$rand_variate$type + first_argType <- switch( + variant, + d = rand_variate_type, + p = rand_variate_type, + q = c('double(0)')##, 'double(1, 4)') + ) + first_arg_name <- switch(variant, d = 'x', p = 'q', q = 'p') + ## need this complicated expand.grid call here because the argTypes might be + ## character vectors (e.g. if rand_variate_type is c("double(0)", "double(1, 4)") + ## then we create a test where we sample a scalar from the support and + ## another where we sample a vector of length 4 from the support + grid <- eval(as.call(c( + expand.grid, list(first_argType), + lapply(distn_param$args, `[[`, 'type')[-rand_variate_idx] + ))) + argTypes <- as.list(data.frame(t(grid), stringsAsFactors=FALSE)) + lapply(argTypes, function(v) { + names(v) <- c( + first_arg_name, + names(distn_param$args[-rand_variate_idx]) + ) + v + }) + }, simplify = FALSE) + for(i in seq_along(distn_param$args)) { + if(is.null(distn_param$args[[i]]$size)) + distn_param$args[[i]]$size <- lapply(distn_param$args[[i]]$type, + function(type) { + if(is.character(type)) + type <- parse(text = type, keep.source=FALSE)[[1]] + add_missing_size(nimble:::argType2symbol(type))$size + }) + } + argSizes <- sapply(distn_param$variants, function(variant) { + rand_variate_size <- distn_param$args$rand_variate$size + first_argSize <- switch( + variant, + d = rand_variate_size, + p = rand_variate_size, + q = 1 + ) + first_arg_name <- switch(variant, d = 'x', p = 'q', q = 'p') + grid <- eval(as.call(c( + expand.grid, list(first_argSize), + lapply(distn_param$args, `[[`, 'size')[-rand_variate_idx] + ))) + 
argSizes <- as.list(data.frame(t(grid), stringsAsFactors=FALSE)) + lapply(argSizes, function(v) { + names(v) <- c( + first_arg_name, + names(distn_param$args[-rand_variate_idx]) + ) + v + }) + }, simplify = FALSE) + input_gen_funs <- lapply(distn_param$args[-rand_variate_idx], `[[`, 'input_gen_fun') + input_gen_funs_list <- sapply(distn_param$variants, function(variant) { + arg1_input_gen_fun <- switch( + variant, + d = list(x = distn_param$args$rand_variate$input_gen_fun), + p = list(q = distn_param$args$rand_variate$input_gen_fun), + q = list(p = runif) + ) + c(arg1_input_gen_fun, input_gen_funs) + }, simplify = FALSE) + + op_params <- unlist(lapply(distn_param$variants, + function(variant) { + ans <- list() + for(i in seq_along(argTypes[[variant]])) { + these_argTypes <- argTypes[[variant]][[i]] + these_argSizes <- argSizes[[variant]][[i]] + wrt_args = intersect( + distn_param$wrt, names(these_argTypes) + ) + new_ans <- maker( + ops[[variant]], these_argTypes, + wrt_args = wrt_args, + input_gen_funs = input_gen_funs_list[[variant]], + more_args = distn_param$more_args[[variant]], + size = these_argSizes + ) + ans[[length(ans)+1]]<-new_ans + } + ans + }), recursive = FALSE) + + ## op_params <- unlist( + ## lapply( + ## distn_param$variants, function(variant) { + ## lapply( + ## argTypes[[variant]], + ## function(these_argTypes) { + ## wrt_args = intersect( + ## distn_param$wrt, names(these_argTypes) + ## ) + ## maker( + ## ops[[variant]], these_argTypes, + ## wrt_args = wrt_args, + ## input_gen_funs = input_gen_funs_list[[variant]], + ## more_args = distn_param$more_args[[variant]] + ## ) + ## } + ## ) + ## } + ## ), recursive = FALSE + ## ) + names(op_params) <- sapply(op_params, `[[`, 'name') + return(op_params) +} + +############################# +## input generation functions +############################# + +## arg_size comes from arg$size where arg is a symbolBasic object +gen_pos_def_matrix <- function(arg_size) { + m <- arg_size[1] ## assumes matrix 
argType is square + if(length(arg_size) == 1) # single length defined as arg_size = m^2 + m <- sqrt(m) + mat <- diag(m) + mat[lower.tri(mat, diag = TRUE)] <- runif(m*(m + 1)/2) + mat %*% t(mat) +} diff --git a/inst/CITATION b/inst/CITATION index 200451f..f006ec7 100644 --- a/inst/CITATION +++ b/inst/CITATION @@ -1,5 +1,8 @@ -citHeader("In published work that uses or mentions nimbleEcology, please cite - the package (Goldstein et al. 2020), including the version used. When using dCJS, dHMM and/or dDHMM, please also cite Turek et al. (2016). When using dOcc, dDynOcc and/or dNmixture, please also cite Ponisio et al. (2020).") +citHeader("In published work that uses or mentions nimbleEcology, please cite the +package (Goldstein et al. 2020), including the version used. When using cases of dCJS, dHMM + and/or dDHMM, please also cite Turek et al. (2016). When using cases of dOcc and/or dDynOcc, +please also cite Ponisio et al. (2020). When using cases of dNmixture, please also cite +Goldstein and de Valpine (2022).") year <- sub("-.*", "", meta$Date) note <- sprintf("{R} package version %s", meta$Version) @@ -46,3 +49,15 @@ bibentry( textVersion = "Ponisio, L., P. de Valpine, N. Michaud, and D. Turek. 2020. One size does not fit all: Customizing MCMC methods for hierarchical models using NIMBLE. Ecology and Evolution 10: 2385ā€“2416. ." ) +bibentry( + bibtype = "article", + title = "Comparing {N-mixture} Models and {GLMMs} for Relative Abundance Estimation in a Citizen Science Dataset", + journal = "Scientific Reports", + volume = "12", + pages = "12276", + year = "2022", + author = c(person("B.R.", "Goldstein"), + person("P.", "de Valpine")), + doi = "10.1038/s41598-022-16368-z", + textVersion = "Goldstein, B.R., and P. de Valpine. 2022. Comparing N-mixture Models and GLMMs for Relative Abundance Estimation in a Citizen Science Dataset. Scientific Reports 12: 12276. 
" +) diff --git a/inst/test_utils.R b/inst/test_utils.R new file mode 100644 index 0000000..7097c59 --- /dev/null +++ b/inst/test_utils.R @@ -0,0 +1,2984 @@ +require(testthat) +require(methods) +require(nimble) + +## These make it clear that error messages are expected. +if(FALSE) { + expect_failure <- function(...) { + cat('BEGIN expected failure:\n', file = stderr()) + testthat:::expect_failure(...) + argList <- list(...) + cat(argList$info) + cat('\nEND expected failure.\n', file = stderr()) + } +} + + +## Mark tests that are know to fail with `if(RUN_FAILING_TESTS)`. +## By default these tests will not be run, but we will occasionally clean up by running them with +## At the moment only used in test-optim.R; otherwise we have +## mechanisms in various test files for wrapping failing tests +## in expect_failure. +## $ RUN_FAILING_TESTS=1 Rscript test-my-stuff.R +RUN_FAILING_TESTS <- (nchar(Sys.getenv('RUN_FAILING_TESTS')) != 0) + +## We can get problems on Windows with system(cmd) if cmd contains +## paths with spaces in directory names. Solution is to make a cmd +## string with only local names (to avoid spaces) and indicate what +## directory it should be done in. setwd(dir) should handle dir with +## paths with spaces ok. +system.in.dir <- function(cmd, dir = '.') { + curDir <- getwd() + on.exit(setwd(curDir), add = TRUE) + setwd(dir) + if(.Platform$OS.type == "windows") + shell(shQuote(cmd)) + else + system(cmd) +} + +## This sets up sink to also capture messages (in particular warnings). +sink_with_messages <- function(file, ...) { + sinkfile <- file(file, open = 'wt') + sink(sinkfile) + sink(sinkfile, type = 'message') +} + +## This is useful for working around scoping issues with nimbleFunctions using other nimbleFunctions. 
+temporarilyAssignInGlobalEnv <- function(value, replace = FALSE) { + name <- deparse(substitute(value)) + assign(name, value, envir = .GlobalEnv) + if(!replace) { + rmCommand <- substitute(remove(name, envir = .GlobalEnv)) + do.call('on.exit', list(rmCommand, add = TRUE), envir = parent.frame()) + } +} + +withTempProject <- function(code) { + code <- substitute(code) + project <- nimble:::nimbleProjectClass() + nimbleOptions(nimbleProjectForTesting = project) + on.exit({ + ## messages are suppressed by test_that, so "assign('.check', 1, globalenv())" can be used as a way to verify this code was called + .project <- nimbleOptions('nimbleProjectForTesting') + nimbleOptions(nimbleProject = NULL) ## clear this before clearCompiled step, in case clearCompiled() itself fails + .project$clearCompiled() + }, add = TRUE) + eval(code) +} + +expect_compiles <- function(..., info = NULL, link = FALSE, forceO1 = TRUE) { + oldSCBL <- nimbleOptions('stopCompilationBeforeLinking') + nimbleOptions(stopCompilationBeforeLinking = !link) + oldForceO1 <- nimbleOptions('forceO1') + nimbleOptions(forceO1 = forceO1) + on.exit({ + assign('.check', 1, globalenv()) + nimbleOptions(stopCompilationBeforeLinking = oldSCBL) + nimbleOptions(forceO1 = oldForceO1) + }, add = TRUE) + if(!link) { + ans <- try(compileNimble(...)) ## expecting a thrown error + expect_identical(as.character(ans), 'Error : safely stopping before linking\n', info = info) + } else { + ans <- compileNimble(...) 
+ ans + } +} + +gen_runFunCore <- function(input) { + runFun <- function() {} + formalsList <- input$args + if(is.null(formalsList)) formalsList <- list() + if(is.null(names(formalsList))) + if(length(formalsList) > 0) + names(formalsList) <- paste0('arg', seq_along(input$args)) + formals(runFun) <- formalsList + tmp <- quote({}) + tmp[[2]] <- input$expr + tmp[[3]] <- if(is.null(input[['return']])) + quote(return(out)) + else + input[['return']] + tmp[[4]] <- substitute(returnType(OUT), list(OUT = input$outputType)) + body(runFun) <- tmp + return(runFun) +} + +## Indexes the names of a list of input lists for test_coreRfeature +indexNames <- function(x) { + i <- 1 + lapply(x, function(z) {z$name <- paste(i, z$name); i <<- i + 1; z}) +} + +test_coreRfeature_batch <- function(input_batch, info = '', verbose = nimbleOptions('verbose'), dirName = NULL) { + test_coreRfeature_batch_internal(input_batch, verbose = verbose, dirName) +} + +test_coreRfeature_batch_internal <- function(input_batch, verbose = nimbleOptions('verbose'), dirName = NULL) { ## a lot like test_math but a bit more flexible + names(input_batch) <- paste0('batch_case_', seq_along(input_batch)) + runFuns <- lapply(input_batch, gen_runFunCore) + nfR <- nimbleFunction(setup = TRUE, methods = runFuns)() + + nfC <- compileNimble(nfR, dirName = dirName) + + for(i in seq_along(input_batch)) { + input <- input_batch[[i]] + if(verbose) cat("### Testing", input$name, "###\n") + test_that(input$name, { + funName <- names(input_batch)[i] + if(is.null(input$expectWarnings)) + input$expectWarnings <- list() + nArgs <- length(input$args) + evalEnv <- new.env() + eval(input$setArgVals, envir = evalEnv) + savedArgs <- as.list(evalEnv) + seedToUse <- if(is.null(input[['seed']])) 31415927 else input[['seed']] + set.seed(seedToUse) + wrap_if_matches("R eval", names(input$expectWarnings), expect_warning, { + eval(input$expr, envir = evalEnv) + }) + savedOutputs <- as.list(evalEnv) + list2env(savedArgs, envir = evalEnv) + 
## with R ref classes, lookup of methods via `[[` does not work until it has been done via `$` + ## force it via `$` here to allow simpler syntax below + forceFind <- eval(substitute(nfR$FUNNAME, list(FUNNAME = as.name(funName)))) + forceFind <- eval(substitute(nfC$FUNNAME, list(FUNNAME = as.name(funName)))) + if(nArgs == 5) { + set.seed(seedToUse) + wrap_if_matches("R run", names(input$expectWarnings), expect_warning, { + out_nfR = nfR[[funName]](evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4, evalEnv$arg5)}) + list2env(savedArgs, envir = evalEnv) + set.seed(seedToUse) + out_nfC = nfC[[funName]](evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4, evalEnv$arg5) + } + if(nArgs == 4) { + set.seed(seedToUse) + wrap_if_matches("R run", names(input$expectWarnings), expect_warning, { + out_nfR = nfR[[funName]](evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4) + }) + list2env(savedArgs, envir = evalEnv) + set.seed(seedToUse) + out_nfC = nfC[[funName]](evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4) + } + if(nArgs == 3) { + set.seed(seedToUse) + wrap_if_matches("R run", names(input$expectWarnings), expect_warning, { + out_nfR = nfR[[funName]](evalEnv$arg1, evalEnv$arg2, evalEnv$arg3) + }) + list2env(savedArgs, envir = evalEnv) + set.seed(seedToUse) + out_nfC = nfC[[funName]](evalEnv$arg1, evalEnv$arg2, evalEnv$arg3) + } + if(nArgs == 2) { + set.seed(seedToUse) + wrap_if_matches("R run", names(input$expectWarnings), expect_warning, { + out_nfR = nfR[[funName]](evalEnv$arg1, evalEnv$arg2) + }) + list2env(savedArgs, envir = evalEnv) + set.seed(seedToUse) + out_nfC = nfC[[funName]](evalEnv$arg1, evalEnv$arg2) + } + if(nArgs == 1) { + set.seed(seedToUse) + wrap_if_matches("R run", names(input$expectWarnings), expect_warning, { + out_nfR = nfR[[funName]](evalEnv$arg1) + }) + list2env(savedArgs, envir = evalEnv) + set.seed(seedToUse) + out_nfC = nfC[[funName]](evalEnv$arg1) + } + if(nArgs == 0) { + set.seed(seedToUse) + wrap_if_matches("R run", 
names(input$expectWarnings), expect_warning, { + out_nfR = nfR[[funName]]() + }) + list2env(savedArgs, envir = evalEnv) + set.seed(seedToUse) + out_nfC = nfC[[funName]]() + } + out <- savedOutputs$out + ## clear any attributes except dim + dimOut <- attr(out, 'dim') + dimOutR <- attr(out_nfR, 'dim') + dimOutC <- attr(out_nfC, 'dim') + attributes(out) <- attributes(out_nfR) <- attributes(out_nfC) <- NULL + if(!is.null(input[['storage.mode']])) + storage.mode(out) <- storage.mode(out_nfR) <- storage.mode(out_nfC) <- input[['storage.mode']] + attr(out, 'dim') <- dimOut + attr(out_nfR, 'dim') <- dimOutR + attr(out_nfC, 'dim') <- dimOutC + checkEqual <- input[['checkEqual']] + if(is.null(checkEqual)) checkEqual <- FALSE + if(is.null(input[['return']])) { ## use default 'out' object + if(!checkEqual) { + expect_identical(class(out), class(out_nfC), info = paste('iden tmp test of class', class(out), class(out_nfC))) + expect_identical(dim(out), dim(out_nfC), info = 'iden test of dim') + expect_identical(round(out, 10), round(out_nfC, 10), info='iden test of round') + expect_identical(out, out_nfR, info = "Identical test of coreRfeature (direct R vs. R nimbleFunction)") + wh <- which.max(abs(out - out_nfC)) + expect_identical(out, out_nfC, info = paste("Identical test of coreRfeature (direct R vs. C++ nimbleFunction)", system('gcc --version', intern = T)[1], ' ', sprintf("%0.20f", out[wh]), " ", sprintf("%0.20f", out_nfC[wh]))) + } else { + expect_equal(out, out_nfR, info = "Equal test of coreRfeature (direct R vs. R nimbleFunction)") + expect_equal(out, out_nfC, info = "Equal test of coreRfeature (direct R vs. C++ nimbleFunction)") + } + } else { ## not using default return(out), so only compare out_nfR to out_nfC + if(!checkEqual) { + expect_identical(out_nfC, out_nfR, info = "Identical test of coreRfeature (compiled vs. uncompied nimbleFunction)") + } else { + expect_identical(out_nfC, out_nfR, info = "Equal test of coreRfeature (compiled vs. 
uncompiled nimbleFunction")) + } + } + }) + } + ## unload DLL as R doesn't like to have too many loaded + if(.Platform$OS.type != 'windows') nimble:::clearCompiled(nfR) ##dyn.unload(project$cppProjects[[1]]$getSOName()) + invisible(NULL) +} + +test_coreRfeature <- function(input, verbose = nimbleOptions('verbose'), dirName = NULL) { + test_that(input$name, { + test_coreRfeature_internal(input, verbose, dirName) + }) +} + +test_coreRfeature_internal <- function(input, verbose = nimbleOptions('verbose'), dirName = NULL) { ## a lot like test_math but a bit more flexible + if(verbose) cat("### Testing", input$name, "###\n") + runFun <- gen_runFunCore(input) + nfR <- nimbleFunction(run = runFun) + ## This try is safe because failure is caught by expect_equal below + + expectedCompilerError <- FALSE + if(!is.null(input[['expectedCompilerError']])) + if(isTRUE(input[['expectedCompilerError']])) + expectedCompilerError <- TRUE + if(!expectedCompilerError) { + nfC <- compileNimble(nfR, dirName = dirName) + } else { + expect_error(nfC <- compileNimble(nfR, dirName = dirName)) + return() + } + + nArgs <- length(input$args) + evalEnv <- new.env() + eval(input$setArgVals, envir = evalEnv) + savedArgs <- as.list(evalEnv) + seedToUse <- if(is.null(input[['seed']])) 31415927 else input[['seed']] + set.seed(seedToUse) + eval(input$expr, envir = evalEnv) + savedOutputs <- as.list(evalEnv) + list2env(savedArgs, envir = evalEnv) + if(nArgs == 5) { + set.seed(seedToUse) + out_nfR = nfR(evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4, evalEnv$arg5) + list2env(savedArgs, envir = evalEnv) + set.seed(seedToUse) + out_nfC = nfC(evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4, evalEnv$arg5) + } + if(nArgs == 4) { + set.seed(seedToUse) + out_nfR = nfR(evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4) + list2env(savedArgs, envir = evalEnv) + set.seed(seedToUse) + out_nfC = nfC(evalEnv$arg1, evalEnv$arg2, evalEnv$arg3, evalEnv$arg4) + } + + if(nArgs == 3) { + 
set.seed(seedToUse) + out_nfR = nfR(evalEnv$arg1, evalEnv$arg2, evalEnv$arg3) + list2env(savedArgs, envir = evalEnv) + set.seed(seedToUse) + out_nfC = nfC(evalEnv$arg1, evalEnv$arg2, evalEnv$arg3) + } + if(nArgs == 2) { + set.seed(seedToUse) + out_nfR = nfR(evalEnv$arg1, evalEnv$arg2) + list2env(savedArgs, envir = evalEnv) + set.seed(seedToUse) + out_nfC = nfC(evalEnv$arg1, evalEnv$arg2) + } + if(nArgs == 1) { + set.seed(seedToUse) + out_nfR = nfR(evalEnv$arg1) + list2env(savedArgs, envir = evalEnv) + set.seed(seedToUse) + out_nfC = nfC(evalEnv$arg1) + } + if(nArgs == 0) { + set.seed(seedToUse) + out_nfR = nfR() + list2env(savedArgs, envir = evalEnv) + set.seed(seedToUse) + out_nfC = nfC() + } + out <- savedOutputs$out + ## clearn any attributes except dim + dimOut <- attr(out, 'dim') + dimOutR <- attr(out_nfR, 'dim') + dimOutC <- attr(out_nfC, 'dim') + attributes(out) <- attributes(out_nfR) <- attributes(out_nfC) <- NULL + attr(out, 'dim') <- dimOut + attr(out_nfR, 'dim') <- dimOutR + attr(out_nfC, 'dim') <- dimOutC + checkEqual <- input[['checkEqual']] + if(is.null(checkEqual)) checkEqual <- FALSE + if(is.null(input[['return']])) { ## use default 'out' object + if(!checkEqual) { + expect_identical(out, out_nfR, info = paste0("FOO Identical test of coreRfeature (direct R vs. R nimbleFunction): ", input$name)) + expect_identical(out, out_nfC, info = paste0("FOO Identical test of coreRfeature (direct R vs. C++ nimbleFunction): ", input$name)) + } else { + expect_equal(out, out_nfR, info = paste0("Equal test of coreRfeature (direct R vs. R nimbleFunction): ", input$name) ) + expect_equal(out, out_nfC, info = paste0("Equal test of coreRfeature (direct R vs. C++ nimbleFunction): ", input$name)) + } + } else { ## not using default return(out), so only compare out_nfR to out_nfC + if(!checkEqual) { + expect_identical(out_nfC, out_nfR, info = paste0("Identical test of coreRfeature (compiled vs. 
uncompiled nimbleFunction): ", input$name)) + } else { + expect_equal(out_nfC, out_nfR, info = paste0("Equal test of coreRfeature (compiled vs. uncompiled nimbleFunction): ", input$name)) + } + } + # unload DLL as R doesn't like to have too many loaded + if(.Platform$OS.type != 'windows') nimble:::clearCompiled(nfR) ##dyn.unload(project$cppProjects[[1]]$getSOName()) + invisible(NULL) +} + +gen_runFun <- function(param, logicalArgs, returnType = "double") { + runFun <- function() {} + types <- rep('double', length(param$inputDim)) + types[logicalArgs] <- 'logical' + types <- paste0(types, '(', param$inputDim, ')') + formalsList <- lapply(types, function(x) parse(text = x)[[1]]) + names(formalsList) <- paste0('arg', seq_along(param$inputDim)) + formals(runFun) <- formalsList + tmp <- quote({}) + tmp[[2]] <- param$expr + tmp[[3]] <- quote(return(out)) + tmp[[4]] <- parse(text = paste0("returnType(", returnType, "(", param$outputDim, "))"))[[1]] + body(runFun) <- tmp + return(runFun) +} + +make_input <- function(dim, size = 3, logicalArg) { + if(!logicalArg) rfun <- rnorm else rfun <- function(n) { rbinom(n, 1, .5) } + if(dim == 0) return(rfun(1)) + if(dim == 1) return(rfun(size)) + if(dim == 2) return(matrix(rfun(size^2), size)) + stop("not set for dimension greater than 2") +} + +wrap_if_matches <- function(pattern, string, wrapper, expr) { + if (!is.null(pattern) && any(grepl(paste0('^', pattern, '$'), string))) { + wrapper(expr) + } else { + expr + } +} + +wrap_if_true <- function(test, wrapper, expr, wrap_in_try = FALSE) { + wrap <- if (isTRUE(wrap_in_try)) + function(x) try(x, silent = TRUE) + else identity + if (isTRUE(test)) wrap(wrapper(expr)) else wrap(expr) +} + +## This is a parametrized test, where `param` is a list with names: +## param$name - A descriptive test name. +## param$expr - A quoted expression `quote(out <- some_function_of(arg1, arg2, ...))`. +## param$Rcode - Optional R version of expr. 
+## param$inputDim - A vector of dimensions of the input `arg`s. +## param$outputDim - The dimension of the output `out. +## param$xfail - Optional regular expression of tests that are expected to fail. +test_math <- function(param, caseName, verbose = nimbleOptions('verbose'), size = 3, dirName = NULL) { + info <- paste0(caseName, ': ', param$name) + ## in some cases, expect_error does not suppress error messages (I believe this has + ## to do with how we trap errors in compilation), so make sure user realizes expectation + if('knownFailureReport' %in% names(param) && param$knownFailureReport) + cat("\nBegin expected error message:\n") + test_that(info, { + ## wrap_if_matches(param$xfail, paste0(info, ': compiles and runs'), expect_error, { + test_math_internal(param, info, verbose, size, dirName) + ## }) + }) + if('knownFailureReport' %in% names(param) && param$knownFailureReport) + cat("End expected error message.\n") + invisible(NULL) +} + +test_math_internal <- function(param, info, verbose = nimbleOptions('verbose'), size = 3, dirName = NULL) { + if(verbose) cat("### Testing", param$name, "###\n") + nArgs <- length(param$inputDim) + logicalArgs <- rep(FALSE, nArgs) + if("logicalArgs" %in% names(param)) + logicalArgs <- param$logicalArgs + returnType <- "double" + if("returnType" %in% names(param)) + returnType <- param$returnType + + runFun <- gen_runFun(param, logicalArgs, returnType) + wrap_if_matches(param$expectWarnings, "builds", expect_warning, { + nfR <- nimbleFunction( + run = runFun) + }) + + info <- paste0(info, ": compiles") + ## need expect_error not expect_failure(expect_something()) because otherwise + ## R error will stop execution + wrap_if_matches(param$knownFailure, info, expect_error, { + nfC <- compileNimble(nfR, dirName = dirName) + + arg1 <- make_input(param$inputDim[1], size = size, logicalArgs[1]) + if(nArgs > 1) + arg2 <- make_input(param$inputDim[2], size = size, logicalArgs[2]) + if(nArgs > 2) + arg3 <- make_input(param$inputDim[3], 
size = size, logicalArgs[3]) + if(nArgs > 3) + stop("test_math not set up for >3 args yet") + + if("Rcode" %in% names(param)) { + eval(param$Rcode) + } else { + eval(param$expr) + } + info <- paste0(info, ": runs") + wrap_if_matches(param$knownFailure, info, expect_failure, { + if(nArgs == 3) { + expect_silent(out_nfR <- nfR(arg1, arg2, arg3)) + expect_silent(out_nfC <- nfC(arg1, arg2, arg3)) + } + if(nArgs == 2) { + expect_silent(out_nfR <- nfR(arg1, arg2)) + expect_silent(out_nfC <- nfC(arg1, arg2)) + } + if(nArgs == 1) { + expect_silent(out_nfR <- nfR(arg1)) + expect_silent(out_nfC <- nfC(arg1)) + } + + attributes(out) <- attributes(out_nfR) <- attributes(out_nfC) <- NULL + + infoR <- paste0(info, ": R vs Nimble DSL") + wrap_if_matches(param$knownFailure, infoR, expect_failure, { + expect_equal(out, out_nfR, info = infoR) + }) + infoC <- paste0(info, ": R vs Nimble Cpp") + wrap_if_matches(param$knownFailure, infoC, expect_failure, { + expect_equal(out, out_nfC, info = infoC) + }) + }) + # Unload DLL as R doesn't like to have too many loaded. + if(.Platform$OS.type != 'windows') nimble:::clearCompiled(nfR) + }) + invisible(NULL) +} + + +### Function for testing MCMC called from test_mcmc.R + +test_mcmc <- function(example, model, data = NULL, inits = NULL, ..., name = NULL, knownFailures = list(), expectWarnings = list(), avoidNestedTest = FALSE) { + ## imitate processing test_mcmc_internal just to get a name for the test_that description + if(is.null(name)) { + if(!missing(example)) { + name <- example + } else { + if(is.character(model)) { + name <- model + } else { + name <- 'unnamed case' + } + } + } + name <- basename(name) ## name could be a pathed directory including tempdir(), which would change every time and hence appear as errors in line-by-line comparison with the gold file. 
So for futher purposes we use only the file name + ## `missing(example)` does not work inside the test_that + if(!missing(example)) { + ## classic-bugs example specified by name + dir = nimble:::getBUGSexampleDir(example) + if(missing(model)) model <- example + modelKnown <- TRUE + } else { + dir = "" + modelKnown <- !missing(model) + } + + if(avoidNestedTest) { ## sometimes test_mcmc is called from within a test_that; this avoids report of empty test as of testthat v2.0.0 + expect_true(modelKnown, 'Neither BUGS example nor model code supplied.') + Rmodel <- readBUGSmodel(model, data = data, inits = inits, dir = dir, useInits = TRUE, + check = FALSE) + test_mcmc_internal(Rmodel, ..., name = name, knownFailures = knownFailures, expectWarnings = expectWarnings) + } else { + test_that(name, { + expect_true(modelKnown, 'Neither BUGS example nor model code supplied.') + Rmodel <- readBUGSmodel(model, data = data, inits = inits, dir = dir, useInits = TRUE, + check = FALSE) + test_mcmc_internal(Rmodel, ..., name = name, knownFailures = knownFailures, expectWarnings = expectWarnings) + }) + } +} + + +test_mcmc_internal <- function(Rmodel, ##data = NULL, inits = NULL, + verbose = nimbleOptions('verbose'), numItsR = 5, numItsC = 1000, + basic = TRUE, exactSample = NULL, results = NULL, resultsTolerance = NULL, + numItsC_results = numItsC, + resampleData = FALSE, + topLevelValues = NULL, seed = 0, mcmcControl = NULL, samplers = NULL, removeAllDefaultSamplers = FALSE, + doR = TRUE, doCpp = TRUE, returnSamples = FALSE, name = NULL, knownFailures = list(), expectWarnings = list()) { + # There are three modes of testing: + # 1) basic = TRUE: compares R and C MCMC values and, if requested by passing values in 'exactSample', will compare results to actual samples (you'll need to make sure the seed matches what was used to generate those samples) + # 2) if you pass 'results', it will compare MCMC output to known posterior summaries within tolerance specified in resultsTolerance + # 
3) resampleData = TRUE: runs initial MCMC to get top level nodes then simulates from the rest of the model, including data, to get known parameter values, and fits to the new data, comparing parameter estimates from MCMC with the known parameter values + + # samplers can be given individually for each node of interest or as a vector of nodes for univariate samplers or list of vectors of nodes for multivariate samplers + # e.g., + # multiple univar samplers: samplers(type = 'RW', target = c('mu', 'x')) + # single multivar sampler: samplers(type = "RW_block", target = c('x[1]', 'x[2]')) + # single multivar sampler: samplers(type = "RW_block", target = 'x') + # multiple multivar samplers: samplers(type = "RW_block", target = list('x', c('theta', 'mu'))) + + setSampler <- function(var, conf) { + currentTargets <- sapply(conf$samplerConfs, function(x) x$target) + # remove already defined scalar samplers + inds <- which(unlist(var$target) %in% currentTargets) + conf$removeSamplers(inds, print = FALSE) + # look for cases where one is adding a blocked sampler specified on a variable and should remove scalar samplers for constituent nodes + currentTargets <- sapply(conf$samplerConfs, function(x) x$target) + inds <- which(sapply(unlist(var$target), function(x) Rmodel$expandNodeNames(x)) %in% currentTargets) + conf$removeSamplers(inds, print = FALSE) + + if(is.list(var$target) && length(var$target) == 1) var$target <- var$target[[1]] + if(length(var$target) == 1 || (var$type %in% c("RW_block", "RW_PF_block", "RW_llFunction_block") && !is.list(var$target))) + tmp <- conf$addSampler(type = var$type, target = var$target, control = var$control, print = FALSE) else tmp <- sapply(var$target, function(x) conf$addSampler(type = var$type, target = x, control = var$control, print = FALSE)) + } + + wrap_if_matches('nameOK', names(knownFailures), expect_failure, { + expect_false(is.null(name), info = 'name argument NULL') + }) + + ## leaving this message permanently on for now + 
cat("===== Starting MCMC test for ", name, ". =====\n", sep = "") ## for log file, for comparison to gold file + system(paste0("echo \"===== Starting MCMC test for ", name, ". =====\n\"", sep = "")) ## for travis log file, so it knows the process is not dead after 10 minutes of silence (message() does not work) + + if(doCpp) { + Cmodel <- compileNimble(Rmodel) + } + if(!is.null(mcmcControl)) mcmcConf <- configureMCMC(Rmodel, control = mcmcControl) else mcmcConf <- configureMCMC(Rmodel) + if(removeAllDefaultSamplers) mcmcConf$removeSamplers() + + if(!is.null(samplers)) { + sapply(samplers, setSampler, mcmcConf) + cat("Setting samplers to:\n") + print(mcmcConf$getSamplers()) + } + + vars <- Rmodel$getDependencies(Rmodel$getNodeNames(topOnly = TRUE, stochOnly = TRUE), stochOnly = TRUE, includeData = FALSE, downstream = TRUE) + vars <- unique(nimble:::removeIndexing(vars)) + mcmcConf$addMonitors(vars, print = FALSE) + + Rmcmc <- buildMCMC(mcmcConf) + if(doCpp) { + Cmcmc <- compileNimble(Rmcmc, project = Rmodel) + } + + if(basic) { + ## do short runs and compare R and C MCMC output + if(doR) { + set.seed(seed) + R_samples <- NULL + ## need expect_error not expect_failure(expect_something()) because otherwise + ## R error will stop execution + wrap_if_matches('R MCMC', names(knownFailures), expect_error, { + RmcmcOut <- Rmcmc$run(numItsR) + RmvSample <- nfVar(Rmcmc, 'mvSamples') + R_samples <- as.matrix(RmvSample) + }) + } + if(doCpp) { + set.seed(seed) + Cmcmc$run(numItsC) + CmvSample <- nfVar(Cmcmc, 'mvSamples') + C_samples <- as.matrix(CmvSample) + ## for some reason columns in different order in CmvSample... 
+ if(doR) + C_subSamples <- C_samples[seq_len(numItsR), attributes(R_samples)$dimnames[[2]], drop = FALSE] + } + if(doR && doCpp && !is.null(R_samples)) { + wrap_if_matches('R C samples match', names(knownFailures), expect_failure, { + expect_equal(R_samples, C_subSamples, info = paste("R and C posterior samples are not equal")) + }) + } + + if(doCpp) { + if(!is.null(exactSample)) { + for(varName in names(exactSample)) + wrap_if_matches('C samples match known samples', names(knownFailures), expect_failure, { + expect_equal(round(C_samples[seq_along(exactSample[[varName]]), varName], 8), + round(exactSample[[varName]], 8), + info = paste0("Equality of compiled MCMC samples and known exact samples for variable ", varName))}) + } + } + + summarize_posterior <- function(vals) + return(c(mean = mean(vals), sd = sd(vals), quantile(vals, .025), quantile(vals, .975))) + + if(doCpp) { + start <- round(numItsC / 2) + 1 + try(print(apply(C_samples[start:numItsC, , drop = FALSE], 2, summarize_posterior))) + } + } + + ## assume doR and doCpp from here down + if(!is.null(results)) { + ## do (potentially) longer run and compare results to inputs given + set.seed(seed) + Cmcmc$run(numItsC_results) + CmvSample <- nfVar(Cmcmc, 'mvSamples') + postBurnin <- (round(numItsC_results/2)+1):numItsC_results + C_samples <- as.matrix(CmvSample)[postBurnin, , drop = FALSE] + for(metric in names(results)) { + if(!metric %in% c('mean', 'median', 'sd', 'var', 'cov')) + stop("Results input should be named list with the names indicating the summary metrics to be assessed, from amongst 'mean', 'median', 'sd', 'var', and 'cov'.") + if(metric != 'cov') { + postResult <- apply(C_samples, 2, metric) + for(varName in names(results[[metric]])) { + samplesNames <- dimnames(C_samples)[[2]] + if(!grepl("[", varName, fixed = TRUE)) + samplesNames <- gsub("\\[.*\\]", "", samplesNames) + matched <- which(varName == samplesNames) + diff <- abs(postResult[matched] - results[[metric]][[varName]]) + for(ind in 
seq_along(diff)) { + strInfo <- ifelse(length(diff) > 1, paste0("[", ind, "]"), "") + wrap_if_matches(paste('MCMC match to known posterior:', varName, metric, ind), names(knownFailures), expect_failure, { + expect_true(diff[ind] < resultsTolerance[[metric]][[varName]][ind], + info = paste("Test of MCMC result against known posterior for :", metric, "(", varName, strInfo, ")")) + }) + } + } + } else { # 'cov' + for(varName in names(results[[metric]])) { + matched <- grep(varName, dimnames(C_samples)[[2]], fixed = TRUE) + postResult <- cov(C_samples[ , matched]) + # next bit is on vectorized form of matrix so a bit awkward + diff <- c(abs(postResult - results[[metric]][[varName]])) + for(ind in seq_along(diff)) { + strInfo <- ifelse(length(diff) > 1, paste0("[", ind, "]"), "") + wrap_if_matches(paste('MCMC match to known posterior:', varName, 'cov', ind), names(knownFailures), expect_failure, { + expect_true(diff[ind] < resultsTolerance[[metric]][[varName]][ind], + info = paste("Test of MCMC result against known posterior for:", metric, "(", varName, ")", strInfo)) + }) + } + } + } + } + } + if(returnSamples) { + if(exists('CmvSample')) + returnVal <- as.matrix(CmvSample) + } else returnVal <- NULL + + if(resampleData) { + topNodes <- Rmodel$getNodeNames(topOnly = TRUE, stochOnly = TRUE) + topNodesElements <- Rmodel$getNodeNames(topOnly = TRUE, stochOnly = TRUE, + returnScalarComponents = TRUE) + if(is.null(topLevelValues)) { + postBurnin <- (round(numItsC/2)):numItsC + if(is.null(results) && !basic) { + # need to generate top-level node values so do a basic run + set.seed(seed) + Cmcmc$run(numItsC) + CmvSample <- nfVar(Cmcmc, 'mvSamples') + C_samples <- as.matrix(CmvSample)[postBurnin, ] + } + topLevelValues <- as.list(apply(C_samples[ , topNodesElements, drop = FALSE], 2, mean)) + } + if(!is.list(topLevelValues)) { + topLevelValues <- as.list(topLevelValues) + if(sort(names(topLevelValues)) != sort(topNodesElements)) + stop("Values not provided for all top level 
nodes; possible name mismatch") + } + sapply(topNodesElements, function(x) Cmodel[[x]] <- topLevelValues[[x]]) + # check this works as side effect + nontopNodes <- Rmodel$getDependencies(topNodes, self = FALSE, includeData = TRUE, downstream = TRUE, stochOnly = FALSE) + nonDataNodesElements <- Rmodel$getDependencies(topNodes, self = TRUE, includeData = FALSE, downstream = TRUE, stochOnly = TRUE, returnScalarComponents = TRUE) + dataVars <- unique(nimble:::removeIndexing(Rmodel$getDependencies(topNodes, dataOnly = TRUE, downstream = TRUE))) + set.seed(seed) + Cmodel$resetData() + simulate(Cmodel, nontopNodes) + + dataList <- list() + for(var in dataVars) { + dataList[[var]] <- values(Cmodel, var) + if(Cmodel$modelDef$varInfo[[var]]$nDim > 1) + dim(dataList[[var]]) <- Cmodel$modelDef$varInfo[[var]]$maxs + } + Cmodel$setData(dataList) + + trueVals <- values(Cmodel, nonDataNodesElements) + names(trueVals) <- nonDataNodesElements + set.seed(seed) + Cmcmc$run(numItsC_results) + CmvSample <- nfVar(Cmcmc, 'mvSamples') + + postBurnin <- (round(numItsC_results/2)):numItsC + C_samples <- as.matrix(CmvSample)[postBurnin, nonDataNodesElements, drop = FALSE] + interval <- apply(C_samples, 2, quantile, c(.025, .975)) + interval <- interval[ , names(trueVals)] + covered <- trueVals <= interval[2, ] & trueVals >= interval[1, ] + coverage <- sum(covered) / length(nonDataNodesElements) + tolerance <- 0.15 + cat("Coverage for ", name, " is", coverage*100, "%.\n") + miscoverage <- abs(coverage - 0.95) + ## always print for purpose of goldfile + # if(miscoverage > tolerance || verbose) { + cat("True values with 95% posterior interval:\n") + print(cbind(trueVals, t(interval), covered)) + # } + wrap_if_matches('coverage', names(knownFailures), expect_failure, { + expect_true(miscoverage < tolerance, + info = paste("Test of MCMC coverage on known parameter values for:", name)) + }) + + } + + cat("===== Finished MCMC test for ", name, ". 
=====\n", sep = "") + + if(doCpp) { + if(.Platform$OS.type != "windows") { + nimble:::clearCompiled(Rmodel) + } + } + return(returnVal) +} + + +test_filter <- function(example, model, data = list(), inits = list(), + verbose = nimbleOptions('verbose'), numItsR = 3, numItsC = 10000, + basic = TRUE, exactSample = NULL, results = NULL, resultsTolerance = NULL, + numItsC_results = numItsC, + seed = 0, filterType = NULL, latentNodes = NULL, filterControl = NULL, + doubleCompare = FALSE, filterType2 = NULL, + doR = TRUE, doCpp = TRUE, returnSamples = FALSE, name = NULL, dirName = NULL, + knownFailures = list()) { + ## There are two modes of testing: + ## 1) basic = TRUE: compares R and C Particle Filter likelihoods and sampled states + ## 2) if you pass 'results', it will compare Filter output to known latent state posterior summaries, top-level parameter posterior summaries, + ## and likelihoods within tolerance specified in resultsTolerance. Results are compared for both weighted and unweighted samples. + ## filterType determines which filter to use for the model. Valid options are: "bootstrap", "auxiliary", "LiuWest", "ensembleKF" + ## filterControl specifies options to filter function, such as saveAll = TRUE/FALSE. 
+ + if(is.null(name)) { + if(!missing(example)) { + name <- example + } else { + if(is.character(model)) { + name <- model + } else { + name <- 'unnamed case' + } + } + } + + ## keep this outside of test_that as use of missing within test_that triggers error with "'missing' can + ## only be used for arguments" + if(!missing(example)) { + ## classic-bugs example specified by name + dir <- getBUGSexampleDir(example) + if(missing(model)) model <- example + } else { + ## code, data and inits specified directly where 'model' contains the code + example = deparse(substitute(model)) + if(missing(model)) stop("Neither BUGS example nor model code supplied.") + dir <- "" + } + returnVal <- NULL + + cat("===== Starting Filter test for ", name, " using ", filterType, ". =====\n", sep = "") + + test_that(name, { + Rmodel <- readBUGSmodel(model, dir = dir, data = data, inits = inits, useInits = TRUE, check = FALSE) + if(doCpp) { + Cmodel <- compileNimble(Rmodel, dirName = dirName) + if(verbose) cat('done compiling model\n') + } + if(verbose) cat("Building filter\n") + if(filterType == "bootstrap"){ + if(!is.null(filterControl)) Rfilter <- buildBootstrapFilter(Rmodel, nodes = latentNodes, control = filterControl) + else Rfilter <- buildBootstrapFilter(Rmodel, nodes = latentNodes, control = list(saveAll = TRUE, thresh = 0)) + } + if(filterType == "auxiliary"){ + if(!is.null(filterControl)) Rfilter <- buildAuxiliaryFilter(Rmodel, nodes = latentNodes, control = filterControl) + else Rfilter <- buildAuxiliaryFilter(Rmodel, nodes = latentNodes, control = list(saveAll = TRUE)) + } + if(filterType == "LiuWest"){ + if(!is.null(filterControl)) Rfilter <- buildLiuWestFilter(Rmodel, nodes = latentNodes, control = filterControl) + else Rfilter <- buildLiuWestFilter(Rmodel, nodes = latentNodes, control = list(saveAll = TRUE)) + } + if(filterType == "ensembleKF"){ + if(!is.null(filterControl)) Rfilter <- buildEnsembleKF(Rmodel, nodes = latentNodes, control = filterControl) + else Rfilter <- 
buildEnsembleKF(Rmodel, nodes = latentNodes, control = list(saveAll = TRUE)) + } + saveAll <- TRUE + if(!is.null(filterControl) && exists('saveAll', filterControl)) + saveAll <- filterControl$saveAll + + if(doCpp) { + Cfilter <- compileNimble(Rfilter, project = Rmodel, dirName = dirName) + } + + if(basic) { + ## do short runs and compare R and C filter output + if(doR) { + set.seed(seed) + RfilterOut <- Rfilter$run(numItsR) + if(filterType == "ensembleKF"){ + RmvSample <- nfVar(Rfilter, 'mvSamples') + R_samples <- as.matrix(RmvSample) + } + else{ + RmvSample <- nfVar(Rfilter, 'mvWSamples') + RmvSample2 <- nfVar(Rfilter, 'mvEWSamples') + R_samples <- as.matrix(RmvSample) + R_samples2 <- as.matrix(RmvSample2) + if(filterType != 'LiuWest'){ + R_ESS <- Rfilter$returnESS() + } + } + } + if(doCpp) { + set.seed(seed) + CfilterOut <- Cfilter$run(numItsR) + if(filterType == "ensembleKF"){ + CmvSample <- nfVar(Cfilter, 'mvSamples') + C_samples <- as.matrix(CmvSample) + C_subSamples <- C_samples[, attributes(R_samples)$dimnames[[2]], drop = FALSE] + } + else{ + CmvSample <- nfVar(Cfilter, 'mvWSamples') + CmvSample2 <- nfVar(Cfilter, 'mvEWSamples') + C_samples <- as.matrix(CmvSample) + C_samples2 <- as.matrix(CmvSample2) + C_subSamples <- C_samples[, attributes(R_samples)$dimnames[[2]], drop = FALSE] + C_subSamples2 <- C_samples2[, attributes(R_samples2)$dimnames[[2]], drop = FALSE] + if(filterType != 'LiuWest'){ + C_ESS <- Cfilter$returnESS() + for(i in seq_along(C_ESS)){ + wrap_if_matches('C ESS >= 0', names(knownFailures), expect_failure, { + expect_gte(C_ESS[i], 0) + }) + wrap_if_matches('C ESS <= numIts', names(knownFailures), expect_failure, { + expect_lte(C_ESS[i], numItsR) + }) + } + } + } + ## for some reason columns in different order in CmvSample... 
+ } + if(doR && doCpp && !is.null(R_samples)) { + ## context(paste0("testing ", example," ", filterType, " filter")) + if(filterType == "ensembleKF"){ + expect_equal(R_samples, C_subSamples, info = paste("R and C posterior samples are not equal")) + } + else{ + expect_equal(R_samples, C_subSamples, info = paste("R and C weighted posterior samples are not equal")) + expect_equal(R_samples2, C_subSamples2, info = paste("R and C equally weighted posterior samples are not equal")) + expect_equal(RfilterOut, CfilterOut, info = paste("R and C log likelihood estimates are not equal")) + if(filterType != 'LiuWest'){ + wrap_if_matches('R C ESS match', names(knownFailures), expect_failure, { + expect_equal(R_ESS, C_ESS, info = paste("R and C ESS are not equal")) + }) + } + } + } + + if(doCpp) { + if(!is.null(exactSample)) { + for(varName in names(exactSample)) + expect_equal(round(C_samples[seq_along(exactSample[[varName]]), varName], 8), round(exactSample[[varName]], 8), info = paste0("filter result does not match known samples for: ", varName)) + } + } + + summarize_posterior <- function(vals) + return(c(mean = mean(vals), sd = sd(vals), quantile(vals, .025), quantile(vals, .975))) + + if(doCpp) { + ## if(verbose) { + ## try(print(apply(C_samples[, , drop = FALSE], 2, summarize_posterior))) + ## } + } + } + + ## assume doR and doCpp from here down + if(!is.null(results)) { + ## do (potentially) longer run and compare results to inputs given + set.seed(seed) + Cll <- Cfilter$run(numItsC_results) + for(wMetric in c(TRUE, FALSE)){ + weightedOutput <- 'unweighted' + if(filterType == "ensembleKF") + CfilterSample <- nfVar(Cfilter, 'mvSamples') + else{ + if(wMetric){ + CfilterSample <- nfVar(Cfilter, 'mvWSamples') + weightedOutput <-"weighted" + } + else + CfilterSample <- nfVar(Cfilter, 'mvEWSamples') + } + + C_samples <- as.matrix(CfilterSample)[, , drop = FALSE] + if(weightedOutput == "weighted"){ + wtIndices <- grep("^wts\\[", dimnames(C_samples)[[2]]) + C_weights <- 
as.matrix(C_samples[,wtIndices, drop = FALSE]) + C_samples <- as.matrix(C_samples[,-wtIndices, drop = FALSE]) + } + latentNames <- Rmodel$expandNodeNames(latentNodes, sort = TRUE, returnScalarComponents = TRUE) + if(weightedOutput == "weighted"){ + samplesToWeightsMatch <- rep(dim(C_weights)[2], dim(C_samples)[2]) + latentIndices <- match(latentNames, dimnames(C_samples)[[2]]) + latentSampLength <- length(latentNames) + if(!saveAll) { ## added without careful checking; may not be robust + latentIndices <- latentIndices[!is.na(latentIndices)] + latentSampLength <- 1 + } + latentDim <- latentSampLength/dim(C_weights)[2] + samplesToWeightsMatch[latentIndices] <- rep(1:dim(C_weights)[2], each = latentDim ) + } + for(metric in names(results)) { + if(!metric %in% c('mean', 'median', 'sd', 'var', 'cov', 'll')) + stop("Results input should be named list with the names indicating the summary metrics to be assessed, from amongst 'mean', 'median', 'sd', 'var', 'cov', and 'll'.") + if(!(metric %in% c('cov', 'll'))) { + if(weightedOutput == "weighted"){ + postResult <- sapply(1:dim(C_samples)[2], weightedMetricFunc, metric = metric, weights = C_weights, samples = C_samples, samplesToWeightsMatch) + } + else + postResult <- apply(C_samples, 2, metric) + for(varName in names(results[[metric]])) { + samplesNames <- dimnames(C_samples)[[2]] + if(!grepl(varName, "[", fixed = TRUE)) + samplesNames <- gsub("\\[.*\\]", "", samplesNames) + matched <- which(varName == samplesNames) + if(!saveAll) { ## added without careful checking; may not be robust + diff <- abs(postResult[matched] - results[[metric]][[varName]][length(results[[metric]][[varName]])]) + } else { + diff <- abs(postResult[matched] - results[[metric]][[varName]]) + } + for(ind in seq_along(diff)) { + strInfo <- ifelse(length(diff) > 1, paste0("[", ind, "]"), "") + expect_lt(diff[ind], resultsTolerance[[metric]][[varName]][ind], + label = paste0("filter posterior result against known posterior for:", weightedOutput, metric, 
"(", varName, strInfo, ")")) + } + } + } else if (metric == 'cov' ) { + for(varName in names(results[[metric]])) { + matched <- grep(varName, dimnames(C_samples)[[2]], fixed = TRUE) + ## if(weightedOutput == "weighted") + ## postResult <- cov.wt(C_samples[, matched], wt = ) #weighted covariance not currently implemented + ## else + postResult <- cov(C_samples[ , matched]) + ## next bit is on vectorized form of matrix so a bit awkward + diff <- c(abs(postResult - results[[metric]][[varName]])) + for(ind in seq_along(diff)) { + strInfo <- ifelse(length(diff) > 1, paste0("[", ind, "]"), "") + expect_lt(diff[ind], resultsTolerance[[metric]][[varName]][ind], + label = paste0("filter posterior result against known posterior for", example, ":", metric, "(", varName, ")", strInfo)) + } + } + } + else { # ll (log likelihood) + diff <- abs(Cll - results[[metric]][[1]][1]) + expect_lt(diff, resultsTolerance[[metric]][[1]][1], label = paste0("filter log-likelihood result against known log-likelihood for", example, ":", metric)) + } + } + } + } + try(print(apply(as.matrix(C_samples), 2, summarize_posterior))) ## print summaries of equally weighted samples + if(returnSamples) { + if(exists('CmvSample')) + returnVal <- as.matrix(CmvSample) + } + if(doCpp) { + if(.Platform$OS.type != 'windows') + nimble:::clearCompiled(Rmodel) + } + + }) + cat("===== Finished ", filterType, " filter test for ", name, ". =====\n", sep = "") + + return(returnVal) +} + +## Testing for correct behavior of different resampling methods +## used within PFs. +## samplerName - A string with the name of +## the resampling function to be tested. +## wtsList - A list, where each element is a vector of weights to use for +## testing, given as input to the resampler functions. +## reps - An integer, the number of repetitions to conduct. +## +## For each provided set of weights in wtsList, the test will produce +## 'reps' number of samples according to those weights. 
For each set of +## samples, the number of times each element was sampled is recorded. These +## recorded counts are averaged over the 'reps' number of samples, and then +## the averages are compared to the expected count of each element +## (i.e. wts*length(wts)). + +test_resampler <- function(samplerName, wtsList, reps = 500, creps = reps){ + n <- sapply(wtsList, function(x){return(length(x))}) + output <- lapply(n, function(x){return(numeric(x))}) + avgCounts <- output + samplerFunction <- getFromNamespace(samplerName, 'nimble')() + for(rep in 1:reps){ + counts <- list() + for(i in 1:length(wtsList)){ + output[[i]] <- samplerFunction$run(wtsList[[i]]) + counts[[i]] <- numeric(length(output[[i]])) + for(j in 1:n[i]){ + counts[[i]][j] <- length(which(output[[i]] == j)) + } + avgCounts[[i]] <- avgCounts[[i]] + counts[[i]] + } + } + expectedValue <- list(length(wtsList)) + for(i in 1:length(wtsList)){ + avgCounts[[i]] <- avgCounts[[i]]/reps + expectedValue[[i]] <- n[i]*(wtsList[[i]]/sum(wtsList[[i]])) + diffVec <- abs(expectedValue[[i]] - avgCounts[[i]]) + for(j in 1:n[i]){ + test_that(paste0("Test of accurate samples for uncompiled resampling + method ", samplerName, ", weight set ", i, + ", weight number ", j), + expect_lt(diffVec[j], sqrt(expectedValue[[i]][j]) + .01)) + } + } + avgCounts <- lapply(n, function(x){return(numeric(x))}) + compiledSamplerFunction <- compileNimble(samplerFunction) + for(rep in 1:reps){ + counts <- list() + for(i in 1:length(wtsList)){ + output[[i]] <- compiledSamplerFunction$run(wtsList[[i]]) + counts[[i]] <- numeric(length(output[[i]])) + for(j in 1:n[i]){ + counts[[i]][j] <- length(which(output[[i]] == j)) + } + avgCounts[[i]] <- avgCounts[[i]] + counts[[i]] + } + } + expectedValue <- list(length(wtsList)) + for(i in 1:length(wtsList)){ + avgCounts[[i]] <- avgCounts[[i]]/reps + expectedValue[[i]] <- n[i]*(wtsList[[i]]/sum(wtsList[[i]])) + diffVec <- abs(expectedValue[[i]] - avgCounts[[i]]) + for(j in 1:n[i]){ + 
test_that(paste0("Test of accurate samples for compiled resampling + method ", samplerName, ", weight set ", i, + ", weight number ", j), + expect_lt(diffVec[j], sqrt(expectedValue[[i]][j]) + .01)) + } + } +} + + + + +weightedMetricFunc <- function(index, samples, weights, metric, samplesToWeightsMatch){ + samples <- samples[,index] + weights <- exp(weights[,samplesToWeightsMatch[index]])/sum(exp(weights[,samplesToWeightsMatch[index]])) + if(metric == "median"){ + ord <- order(samples) + weights <- weights[ord] + samples <- samples[ord] + sumWts <- 0 + while(sumWts < .5){ + sumWts <- sumWts + weights[1] + weights <- weights[-1] + } + return(samples[length(samples)-length(weights)]) + } + wtMean <- weighted.mean(samples, weights) + if(metric == "mean"){ + return(wtMean) + } + wtVar <- sum(weights*(samples - wtMean)^2) + if(metric == "var"){ + return(wtVar) + } + if(metric == "sd"){ + return(sqrt(wtVar)) + } +} + +test_size <- function(input, verbose = nimbleOptions('verbose')) { + if(is.null(input$expectPassWithConst)) input$expectPassWithConst <- input$expectPass + if(is.null(input$knownProblem)) input$knownProblem <- FALSE + if(is.null(input$knownProblemWithConst)) input$knownProblemWithConst <- input$knownProblem + if(is.null(input$expectWarn)) input$expectWarn <- FALSE + if(is.null(input$expectWarnWithConst)) input$expectWarnWithConst <- input$expectWarn + + if(verbose) cat("### Testing", input$name, " with RHS variable ###\n") + code <- quote({ + m <- nimbleModel(code = input$expr, data = input$data, inits = input$inits) + calculate(m) ## Calculates from scratch. + calculate(m) ## Uses cached value. 
+ }) + message = paste(input$name, 'with RHS variable', ifelse(input$expectPass, 'works', 'fails'), 'as expected') + if (input$knownProblem) message = paste(message, 'marked as KNOWN ISSUE') + if(xor(input$expectPass, input$knownProblem)) { + if(input$expectWarn) { + test_that(message, expect_warning(eval(code))) + } else test_that(message, expect_silent(eval(code))) + } else { + test_that(message, expect_error(suppressWarnings(eval(code)))) + } + + if(verbose) cat("### Testing", input$name, "with RHS constant ###\n") + code <- quote({ + m <- nimbleModel(code = input$expr, data = input$data, constants = input$inits) + calculate(m) ## Calculates from scratch. + calculate(m) ## Uses cached value. + }) + message = paste(input$name, 'with RHS constant', ifelse(input$expectPassWithConst, 'works', 'fails'), 'as expected') + if (input$knownProblemWithConst) message = paste(message, 'marked as KNOWN ISSUE') + if(xor(input$expectPassWithConst, input$knownProblemWithConst)) { + if(input$expectWarnWithConst) { + test_that(message, expect_warning(eval(code))) + } else test_that(message, expect_silent(eval(code))) + } else { + ## As of testthat 3.0, warnings bubble up so need to deal with them by suppression. 
+ test_that(message, expect_error(suppressWarnings(eval(code)))) + } + + invisible(NULL) +} + +# could redo test_size to always expect specific error, but not taking time to do that now +test_size_specific_error <- function(input, verbose = nimbleOptions('verbose')) { + if(verbose) cat("### Testing", input$name, "###\n") + test_that(paste0("Test 1 of size/dimension check: ", input$name), { + expect_error(nimbleModel(code = input$expr, data = input$data, inits = input$inits), + regexp = input$correctErrorMsg, info = paste("Result does not match", input$expectPass)) + }) + + invisible(NULL) +} + + +test_getParam <- function(distCall, dist = NULL) { + distCallText <- deparse(distCall) + test_that(distCallText, { + gpScalar <- nimbleFunction( + setup = function(model, node, param) {}, + run = function() { + ans1 <- model$getParam(node, param) + ans2 <- getParam(model, node, param) ## to become model$getParam(node, param) + if(ans1 != ans2) stop('oops, ans1 != ans2') + return(ans1) + returnType(double()) + }) + if(is.null(dist)) dist <- nimble:::getDistributionInfo(as.character(distCall[[1]])) else dist <- nimble:::getDistributionInfo(dist) + code <- substitute({x ~ DISTCALL}, list(DISTCALL = distCall)) + m <- nimbleModel( code = code ) + cm <- compileNimble(m) + gpFuns <- list() + expectedResults <- list() + altParams <- dist$altParams + altParamNames <- names(altParams) + distCallText <- deparse(distCall) + + reqdArgs <- dist$reqdArgs ## these are canonical + exprs <- dist$exprs + alts <- dist$alts + providedArgs <- names(distCall) + providedArgs <- providedArgs[providedArgs != ""] + whichExpr <- NULL + ## figure out which way arguments were provided in distCall + for(i in seq_along(exprs)) { + if(all(providedArgs %in% alts[[i]])) whichExpr <- i + } + if(is.null(whichExpr)) { + if(all(providedArgs %in% reqdArgs)) whichExpr <- 0 + } + expect_equal(is.null(whichExpr), FALSE, 'args not found') + + + ## exprs give expressions for calculating reqdArgs from alts + + ## 
altParams give expressions for calculating individual alts from reqdArgs + + ## put reqd in evalEnv, which means using exprs for the alts as needed + ## if testing on something provided, grab what was provided. + ## if testing on something not provided, if it is reqd then use it directly + ## otherwise calculate it from altParams + + evalEnv <- new.env() + + for(i in seq_along(distCall)) { + if(names(distCall)[i] != "") assign(names(distCall)[i], distCall[[i]], envir = evalEnv) + } + if(whichExpr > 0) { ## what was provided was not canonical + for(i in seq_along(exprs[[whichExpr]])) { + assign(names(exprs[[whichExpr]])[i], eval(exprs[[whichExpr]][[i]], envir = evalEnv), envir = evalEnv) + } + } + + ## check recovery of alternative param names from what was provided + for(i in seq_along(altParamNames)) { + gpFuns[[i]] <- gpScalar(m, 'x', altParamNames[i]) + if(altParamNames[i] %in% providedArgs) ## it was provided so simply eval the name + expectedResults[[i]] <- eval(as.name(altParamNames[i]), envir = evalEnv) + else ## it wasn't provided so eval the expression to calculate it from reqdArgs + expectedResults[[i]] <- eval(altParams[[i]], envir = evalEnv) + expect_equal(gpFuns[[i]]$run(), expectedResults[[i]], info = paste('error in uncompiled use', + altParamNames[i])) + expect_equal(m$getParam('x', altParamNames[i]), expectedResults[[i]], + info = paste('error in R getParam', altParamNames[i])) + expect_equal(cm$getParam('x', altParamNames[i]), expectedResults[[i]], + info = paste('error in C getParam', altParamNames[i])) + } + + resultsNames <- altParamNames + nextI <- length(expectedResults)+1 + for(i in seq_along(reqdArgs)) { + gpFuns[[nextI]] <- gpScalar(m, 'x', reqdArgs[i]) + expectedResults[[nextI]] <- eval(as.name(reqdArgs[i]), envir = evalEnv) ## it was already calculated into evalEnv above + expect_equal(gpFuns[[nextI]]$run(), expectedResults[[nextI]], + info = paste('error in uncompiled reqd', reqdArgs[i])) + expect_equal(m$getParam('x', reqdArgs[i]), 
expectedResults[[nextI]], + info = paste('error in R model reqd', reqdArgs[i])) + expect_equal(cm$getParam('x', reqdArgs[i]), expectedResults[[nextI]], + info = paste('error in C model reqd', reqdArgs[i])) + resultsNames[nextI] <- reqdArgs[i] + nextI <- nextI + 1 + } + + compiled <- do.call('compileNimble', c(list(m), gpFuns, list(resetFunctions = TRUE))) + for(i in seq_along(expectedResults)) { + expect_equal(compiled[[i+1]]$run(), expectedResults[[i]], + info = paste('error in compiled', resultsNames[i])) + } + if(.Platform$OS.type != 'windows') nimble:::clearCompiled(m) + }) + invisible(NULL) +} + + +test_getBound <- function(model, cmodel, test, node, bnd, truth, info) { + test_that(paste0("getBound test: ", info), { + rtest <- test(model, node, bnd) + project <- nimble:::nimbleProjectClass(NULL, name = 'foo') + ctest <- compileNimble(rtest, project = project) + + out1 <- model$getBound(node, bnd) + out2 <- getBound(model, node, bnd) + out3 <- cmodel$getBound(node, bnd) + out4 <- getBound(cmodel, node, bnd) + nfOutput <- rtest$run() + cnfOutput <- ctest$run() + + expect_equal(truth, out1, info = paste0("mismatch of true bound with getBound result: ", info)) + expect_equal(out1, out2, info = paste0("function vs. method getBound call mismatch for uncompiled model with: ", info)) + expect_equal(out1, out2, info = paste0("function vs. method getBound call mismatch for compiled model with: ", info)) + expect_equal(out1, out3, info = paste0("uncompiled vs. compiled getBound call mismatch with: ", info)) + expect_equal(out1, nfOutput[1], info = paste0("direct vs. nimbleFunction getBound call mismatch for uncompiled model with: ", info)) + expect_equal(nfOutput[1], nfOutput[2], info = paste0("function vs. method getBound call mismatch for uncompiled nimbleFunction with: ", info)) + expect_equal(out3, cnfOutput[1], info = paste0("direct vs. 
nimbleFunction getBound call mismatch for compiled model with: ", info)) + expect_equal(cnfOutput[1], cnfOutput[2], info = paste0("function vs. method getBound call mismatch for compiled nimbleFunction with: ", info)) + # if(.Platform$OS.type != 'windows') dyn.unload(project$cppProjects[[1]]$getSOName()) + }) + invisible(NULL) +} + +## Nick's version of a nf that embeds deriv of model calculate +testCompiledModelDerivsNimFxn <- nimbleFunction( + setup = function(model, calcNodes, wrtNodes, order){ + }, + run = function(){ + ansList <- nimDerivs(model$calculate(calcNodes), wrt = wrtNodes, order = order) + returnType(ADNimbleList()) + return(ansList) + } +) + +## Chris' version of a nf that embeds deriv of model calculate +derivsNimbleFunction <- nimbleFunction( + setup = function(model, calcNodes, wrt) {}, + run = function(x = double(1), + order = double(1), + reset = logical(0, default = FALSE)) { + values(model, wrt) <<- x + ans <- nimDerivs(model$calculate(calcNodes), wrt = wrt, order = order, reset = reset) + return(ans) + returnType(ADNimbleList()) + } ## don't need buildDerivs if call nimDerivs directly, but would if just have model$calc in nf +) + +## nf for double-taping +derivsNimbleFunctionMeta <- nimbleFunction( + setup = function(model, calcNodes, wrt, reset = FALSE) { + innerWrtVec <- seq_along(model$expandNodeNames(wrt, returnScalarComponents = TRUE)) + d <- length(innerWrtVec) + derivsInfo <- makeModelDerivsInfo(model, wrt, calcNodes) + updateNodes <- derivsInfo$updateNodes + constantNodes <- derivsInfo$constantNodes + }, + run = function(x = double(1)) { + values(model, wrt) <<- x + ans <- model$calculate(calcNodes) + return(ans) + returnType(double()) + }, + methods = list( + ## inner first-order deriv + derivs1Run = function(x = double(1)) { + ans <- nimDerivs(run(x), wrt = innerWrtVec, order = 1, reset = reset, + updateNodes = updateNodes, constantNodes = constantNodes, model = model) + return(ans$jacobian[1,]) + returnType(double(1)) + }, + ## 
inner second-order deriv + derivs2Run = function(x = double(1)) { + ans <- nimDerivs(run(x), wrt = innerWrtVec, order = 2, reset = reset, + updateNodes = updateNodes, constantNodes = constantNodes, model = model) + ## not clear why can't do ans$hessian[,,1] with double(2) returnType + return(ans$hessian) + returnType(double(3)) + }, + ## outer arbitrary-order deriv calling inner first order + metaDerivs1Run = function(x = double(1), + order = double(1), + reset = logical(0, default = FALSE)) { + wrtVec <- 1:length(x) + if(length(wrtVec) != d) stop("inner and outer wrt mismatch") + ans <- nimDerivs(derivs1Run(x), wrt = wrtVec, order = order, reset = reset, + updateNodes = updateNodes, constantNodes = constantNodes, model = model) + return(ans) + returnType(ADNimbleList()) + }, + ## outer arbitrary-order deriv calling inner second order + metaDerivs2Run = function(x = double(1), + order = double(1), + reset = logical(0, default = FALSE)) { + wrtVec <- 1:length(x) + if(length(wrtVec) != d) stop("inner and outer wrt mismatch") + ans <- nimDerivs(derivs2Run(x), wrt = wrtVec, order = order, reset = reset, + updateNodes = updateNodes, constantNodes = constantNodes, model = model) + return(ans) + returnType(ADNimbleList()) + } + ), + buildDerivs = list(run = list(), + derivs1Run = list(), + derivs2Run = list()) +) + + +derivsNimbleFunctionParamTransform <- nimbleFunction( + setup = function(model, calcNodes, wrt) { + wrtNodesAsScalars <- model$expandNodeNames(wrt, returnScalarComponents = TRUE) + my_parameterTransform <- parameterTransform(model, wrtNodesAsScalars) + d <- my_parameterTransform$getTransformedLength() + nimDerivs_wrt <- 1:d + derivsInfo <- makeModelDerivsInfo(model, wrt, calcNodes) + updateNodes <- derivsInfo$updateNodes + constantNodes <- derivsInfo$constantNodes + }, + run = function(x = double(1), + order = double(1), + reset = logical(0, default = FALSE)) { + transformed_x <- my_parameterTransform$transform(x) # transform(x) + ans <- 
nimDerivs(inverseTransformStoreCalculate(transformed_x), order = order, wrt = nimDerivs_wrt, + model = model, updateNodes = updateNodes, + constantNodes = constantNodes, reset = reset) + + return(ans) + returnType(ADNimbleList()) + }, + methods = list( + inverseTransformStoreCalculate = function(transformed_x = double(1)) { + values(model, wrt) <<- my_parameterTransform$inverseTransform(transformed_x) + lp <- model$calculate(calcNodes) + returnType(double()) + return(lp) + } + ), buildDerivs = 'inverseTransformStoreCalculate' +) + + +derivsNimbleFunctionParamTransformMeta <- nimbleFunction( + setup = function(model, calcNodes, wrt, reset = FALSE) { + wrtNodesAsScalars <- model$expandNodeNames(wrt, returnScalarComponents = TRUE) + my_parameterTransform <- parameterTransform(model, wrtNodesAsScalars) + d <- my_parameterTransform$getTransformedLength() + nimDerivs_wrt <- 1:d + derivsInfo <- makeModelDerivsInfo(model, wrt, calcNodes) + updateNodes <- derivsInfo$updateNodes + constantNodes <- derivsInfo$constantNodes + }, + ## formerly inverseTransformStoreCalculate + run = function(transformed_x = double(1)) { + values(model, wrt) <<- my_parameterTransform$inverseTransform(transformed_x) + lp <- model$calculate(calcNodes) + returnType(double()) + return(lp) + }, + methods = list( + derivs1Run = function(transformed_x = double(1)) { + if(length(transformed_x) != d) stop("mismatch of x and wrtVec") + ans <- nimDerivs(run(transformed_x), wrt = nimDerivs_wrt, order = 1, reset = reset, + updateNodes = updateNodes, constantNodes = constantNodes, model = model) + return(ans$jacobian[1,]) + returnType(double(1)) + }, + derivs2Run = function(transformed_x = double(1)) { + if(length(transformed_x) != d) stop("mismatch of x and wrtVec") + ans <- nimDerivs(run(transformed_x), wrt = nimDerivs_wrt, order = 2, reset = reset, + updateNodes = updateNodes, constantNodes = constantNodes, model = model) + ## not clear why can't do ans$hessian[,,1] with double(2) returnType + 
return(ans$hessian) + returnType(double(3)) + }, + metaDerivs1Run = function(x = double(1), + order = double(1), + reset = logical(0, default = FALSE)) { + transformed_x <- my_parameterTransform$transform(x) + if(length(transformed_x) != d) stop("mismatch of x and wrtVec") + ans <- nimDerivs(derivs1Run(transformed_x), wrt = nimDerivs_wrt, order = order, reset = reset, + updateNodes = updateNodes, constantNodes = constantNodes, model = model) + return(ans) + returnType(ADNimbleList()) + }, + ## outer arbitrary-order deriv calling inner second order + metaDerivs2Run = function(x = double(1), + order = double(1), + reset = logical(0, default = FALSE)) { + transformed_x <- my_parameterTransform$transform(x) + if(length(transformed_x) != d) stop("mismatch of x and wrtVec") + ans <- nimDerivs(derivs2Run(transformed_x), wrt = nimDerivs_wrt, order = order, reset = reset, + updateNodes = updateNodes, constantNodes = constantNodes, model = model) + return(ans) + returnType(ADNimbleList()) + } + ), + buildDerivs = list(run = list(), + derivs1Run = list(), + derivs2Run = list()) +) + + + + +## For use with R-based derivative of compiled model calculate when useFasterRderivs is TRUE. +calcNodesForDerivs <- nimbleFunction( + setup = function(model, calcNodes, wrt){ + }, + run = function(x = double(1)){ + values(model, wrt) <<- x + ans <- calculate(model, calcNodes) + return(ans) + returnType(double()) + }, + buildDerivs = 'run') + + +## Tests taking derivatives of calls to model$calculate(nodes) (or equivalently calculate(model, nodes)) +## Arguments: +## model: The uncompiled nimbleModel object to use in the call to calculate(model, nodes). +## name: The name of the model being tested. +## calcNodeNames: A list, each element of which should be a character vector. List elements +## will be iterated through, and each element will be used as the 'nodes' argument +## in the call to calculate(model, nodes). +## wrt: A list, each element of which should be a character vector. 
List elements will be iterated +## through, and each element will be used as the 'wrt' argument in a call to nimDerivs(calculate(model, nodes), wrt) +## testR: A logical argument. If TRUE, the R version of nimDerivs will be checked for correct derivative calculations. +## This is accomplished by comparing derivatives calculated using the chain rule to derivatives of a function that +## wraps a call to calculate(model, nodes). +## testCompiled: A logical argument. Currently only checks whether the model can compile. +## tolerance: A numeric argument, the tolerance to use when comparing wrapperDerivs to chainRuleDerivs. +## verbose: A logical argument. Currently serves no purpose. +test_ADModelCalculate_nick <- function(model, name = NULL, calcNodeNames = NULL, wrt = NULL, order = c(0,1,2), + testCompiled = TRUE, tolerance = .001, verbose = TRUE, gc = FALSE){ + temporarilyAssignInGlobalEnv(model) + + if(testCompiled){ + expect_message(cModel <- compileNimble(model)) + } + for(i in seq_along(calcNodeNames)){ + for(j in seq_along(wrt)){ + test_that(paste('R derivs of calculate function work for model', name, ', for calcNodes ', i, + 'and wrt ', j), { + wrapperDerivs <- nimDerivs(model$calculate(calcNodeNames[[i]]), wrt = wrt[[j]], order = order) + if(testCompiled){ + print(calcNodeNames[[i]]) + print(wrt[[j]]) + testFunctionInstance <- testCompiledModelDerivsNimFxn(model, calcNodeNames[[i]], wrt[[j]], order) + if(gc) gc() + expect_message(ctestFunctionInstance <- compileNimble(testFunctionInstance, project = model, resetFunctions = TRUE)) + if(gc) gc() + cDerivs <- ctestFunctionInstance$run() + if(0 %in% order) expect_equal(wrapperDerivs$value, cDerivs$value, tolerance = tolerance) + if(1 %in% order) expect_equal(wrapperDerivs$jacobian, cDerivs$jacobian, tolerance = tolerance) + if(2 %in% order) expect_equal(wrapperDerivs$hessian, cDerivs$hessian, tolerance = tolerance) + } + }) + } + } +} + +## Chris' version of test_ADModelCalculate +## By default test a standardized 
set of {wrt, calcNodes} pairs representing common use cases (MAP, max lik, EB), +## unless user provides 'wrt' and 'calcNodes'. +test_ADModelCalculate <- function(model, name = 'unknown', x = 'given', xNew = NULL, calcNodes = NULL, wrt = NULL, + newUpdateNodes = NULL, newConstantNodes = NULL, + relTol = c(1e-15, 1e-8, 1e-3, 1e-3), absTolThreshold = 0, useFasterRderivs = FALSE, useParamTransform = FALSE, + checkDoubleTape = TRUE, checkCompiledValuesIdentical = TRUE, checkDoubleUncHessian = TRUE, + doAllUncHessian = TRUE, seed = 1, verbose = FALSE, debug = FALSE){ + if(!is.null(seed)) + set.seed(seed) + initsHandling <- x + xNewIn <- xNew + ## Save model state so can restore for later use cases below. + mv <- modelValues(model) + nodes <- model$getNodeNames() + nimCopy(model, mv, nodes, nodes, rowTo = 1, logProb = TRUE) + + if(is.null(wrt) && is.null(calcNodes)) { + + ## HMC/MAP use case + if(verbose) cat("============================================\ntesting HMC/MAP-based scenario\n--------------------------------------------\n") + calcNodes <- model$getNodeNames() + wrt <- model$getNodeNames(stochOnly = TRUE, includeData = FALSE) + tmp <- values(model, wrt) + ## Hopefully values in (0,1) will always be legitimate for our test models. 
+ if(initsHandling %in% c('given','prior')) { + x <- tmp + } else if(initsHandling == 'random') { + x <- runif(length(tmp)) + } + if(initsHandling == 'prior') { + model$simulate(wrt) + xNew <- values(model, wrt) + nimCopy(mv, model, nodes, nodes, row = 1, logProb = TRUE) + } else xNew <- runif(length(tmp)) + if(!is.null(xNewIn)) { + wrtScalars <- model$expandNodeNames(wrt, returnScalarComponents = TRUE) + for(nm in names(xNewIn)) { + wh <- grep(paste0("(^",nm,"$)|(^",nm,"\\[)"), wrtScalars) + xNew[wh] <- c(xNewIn[[nm]]) + } + } + try(test_ADModelCalculate_internal(model, name = name, x = x, xNew = xNew, calcNodes = calcNodes, wrt = wrt, + newUpdateNodes = newUpdateNodes, newConstantNodes = newConstantNodes, + savedMV = mv, relTol = relTol, absTolThreshold = absTolThreshold, + useFasterRderivs = useFasterRderivs, useParamTransform = useParamTransform, + checkDoubleTape = checkDoubleTape, + checkCompiledValuesIdentical = checkCompiledValuesIdentical, + checkDoubleUncHessian = checkDoubleUncHessian, doAllUncHessian = doAllUncHessian, + verbose = verbose, debug = debug)) + ## max. lik. use case + if(!is.null(seed)) ## There has weirdness where whether a test fails or passes modifies the RNG state, affecting the next call to test_ADModelCalculate_internal. 
+ set.seed(seed+1) + if(verbose) cat("============================================\ntesting ML-based scenario\n--------------------------------------------\n") + nimCopy(mv, model, nodes, nodes, row = 1, logProb = TRUE) + calcNodes <- model$getNodeNames() + topNodes <- model$getNodeNames(topOnly = TRUE, stochOnly = TRUE) + latentNodes <- model$getNodeNames(latentOnly = TRUE, stochOnly = TRUE, includeData = FALSE) + calcNodes <- calcNodes[!calcNodes %in% c(topNodes, latentNodes)] # should be data + deterministic + wrt <- model$getNodeNames(stochOnly = TRUE, includeData = FALSE) # will include hyps if present, but derivs wrt those should be zero + tmp <- values(model, wrt) + if(initsHandling %in% c('given','prior')) { + x <- tmp + } else if(initsHandling == 'random') { + x <- runif(length(tmp)) + } + if(initsHandling == 'prior') { + model$simulate(wrt) + xNew <- values(model, wrt) + nimCopy(mv, model, nodes, nodes, row = 1, logProb = TRUE) + } else xNew <- runif(length(tmp)) + if(!is.null(xNewIn)) { + wrtScalars <- model$expandNodeNames(wrt, returnScalarComponents = TRUE) + for(nm in names(xNewIn)) { + wh <- grep(paste0("(^",nm,"$)|(^",nm,"\\[)"), wrtScalars) + xNew[wh] <- c(xNewIn[[nm]]) + } + } + try(test_ADModelCalculate_internal(model, name = name, x = x, xNew = xNew, calcNodes = calcNodes, wrt = wrt, + newUpdateNodes = newUpdateNodes, newConstantNodes = newConstantNodes, + savedMV =mv, relTol = relTol, absTolThreshold = absTolThreshold, + useFasterRderivs = useFasterRderivs, useParamTransform = useParamTransform, + checkDoubleTape = checkDoubleTape, + checkCompiledValuesIdentical = checkCompiledValuesIdentical, + checkDoubleUncHessian = checkDoubleUncHessian, doAllUncHessian = doAllUncHessian, + verbose = verbose, debug = debug)) + + if(!is.null(seed)) ## There has weirdness where whether a test fails or passes modifies the RNG state, affecting the next call to test_ADModelCalculate_internal. 
+ set.seed(seed+2) + ## modular HMC/MAP use case + if(verbose) cat("============================================\ntesting HMC/MAP partial-based scenario\n--------------------------------------------\n") + nimCopy(mv, model, nodes, nodes, row = 1, logProb = TRUE) + calcNodes <- model$getNodeNames() + wrt <- model$getNodeNames(stochOnly = TRUE, includeData = FALSE) + wrtIdx <- sample(seq_along(wrt), round(length(wrt)/2), replace = FALSE) + ## sample full wrt in case there are constraints built in, then subset wrt + if(!length(wrtIdx)) + wrtIdx <- seq_along(wrt) + tmp <- values(model, wrt) + if(initsHandling %in% c('given','prior')) { + x <- tmp + } else if(initsHandling == 'random') { + x <- runif(length(tmp)) + } + if(initsHandling == 'prior') { + model$simulate(wrt) + xNew <- values(model, wrt) + } else xNew <- runif(length(tmp)) + if(!is.null(xNewIn)) { + wrtScalars <- model$expandNodeNames(wrt, returnScalarComponents = TRUE) + for(nm in names(xNewIn)) { + wh <- grep(paste0("(^",nm,"$)|(^",nm,"\\[)"), wrtScalars) + xNew[wh] <- c(xNewIn[[nm]]) + } + } + wrtSub <- wrt[wrtIdx] + ## get correct subset of x, xNew + values(model, wrt) <- x + x <- values(model, wrtSub) + values(model, wrt) <- xNew + xNew <- values(model, wrtSub) + nimCopy(mv, model, nodes, nodes, row = 1, logProb = TRUE) + try(test_ADModelCalculate_internal(model, name = name, x = x, xNew = xNew, calcNodes = calcNodes, wrt = wrtSub, + newUpdateNodes = newUpdateNodes, newConstantNodes = newConstantNodes, + savedMV = mv, relTol = relTol, absTolThreshold = absTolThreshold, + useFasterRderivs = useFasterRderivs, useParamTransform = useParamTransform, + checkDoubleTape = checkDoubleTape, + checkCompiledValuesIdentical = checkCompiledValuesIdentical, + checkDoubleUncHessian = checkDoubleUncHessian, doAllUncHessian = doAllUncHessian, + verbose = verbose, debug = debug)) + + if(!is.null(seed)) ## There has weirdness where whether a test fails or passes modifies the RNG state, affecting the next call to 
test_ADModelCalculate_internal. + set.seed(seed+3) + ## conditional max. lik. use case + if(verbose) cat("============================================\ntesting ML partial-based scenario\n--------------------------------------------\n") + nimCopy(mv, model, nodes, nodes, row = 1, logProb = TRUE) + calcNodes <- model$getNodeNames() + topNodes <- model$getNodeNames(topOnly = TRUE, stochOnly = TRUE) + latentNodes <- model$getNodeNames(latentOnly = TRUE, stochOnly = TRUE, includeData = FALSE) + calcNodes <- calcNodes[!calcNodes %in% c(topNodes, latentNodes)] # should be data + deterministic + wrt <- model$getNodeNames(stochOnly = TRUE, includeData = FALSE) + wrtIdx <- sample(seq_along(wrt), round(length(wrt)/2), replace = FALSE) + ## sample full wrt in case there are constraints built in, then subset wrt + if(!length(wrtIdx)) + wrtIdx <- seq_along(wrt) + tmp <- values(model, wrt) + if(initsHandling %in% c('given','prior')) { + x <- tmp + } else if(initsHandling == 'random') { + x <- runif(length(tmp)) + } + if(initsHandling == 'prior') { + model$simulate(wrt) + xNew <- values(model, wrt) + } else xNew <- runif(length(tmp)) + if(!is.null(xNewIn)) { + wrtScalars <- model$expandNodeNames(wrt, returnScalarComponents = TRUE) + for(nm in names(xNewIn)) { + wh <- grep(paste0("(^",nm,"$)|(^",nm,"\\[)"), wrtScalars) + xNew[wh] <- c(xNewIn[[nm]]) + } + } + wrtSub <- wrt[wrtIdx] + ## get correct subset of x, xNew + values(model, wrt) <- x + x <- values(model, wrtSub) + values(model, wrt) <- xNew + xNew <- values(model, wrtSub) + nimCopy(mv, model, nodes, nodes, row = 1, logProb = TRUE) + try(test_ADModelCalculate_internal(model, name = name, x = x, xNew = xNew, calcNodes = calcNodes, wrt = wrtSub, + newUpdateNodes = newUpdateNodes, newConstantNodes = newConstantNodes, + savedMV = mv, relTol = relTol, absTolThreshold = absTolThreshold, + useFasterRderivs = useFasterRderivs, useParamTransform = useParamTransform, + checkDoubleTape = checkDoubleTape, + checkCompiledValuesIdentical = 
checkCompiledValuesIdentical, + checkDoubleUncHessian = checkDoubleUncHessian, doAllUncHessian = doAllUncHessian, + verbose = verbose, debug = debug)) + + if(!is.null(seed)) ## There has weirdness where whether a test fails or passes modifies the RNG state, affecting the next call to test_ADModelCalculate_internal. + set.seed(seed+4) + ## empirical Bayes use case (though not actually integrating over any latent nodes) + if(verbose) cat('============================================\ntesting EB-based scenario\n--------------------------------------------\n') + nimCopy(mv, model, nodes, nodes, row = 1, logProb = TRUE) + calcNodes <- model$getNodeNames() + topNodes <- model$getNodeNames(topOnly = TRUE, stochOnly = TRUE) + calcNodes <- calcNodes[!calcNodes %in% topNodes] # EB doesn't use hyperpriors + wrt <- model$getNodeNames(stochOnly = TRUE, includeData = FALSE) + tmp <- values(model, wrt) + if(initsHandling %in% c('given','prior')) { + x <- tmp + } else if(initsHandling == 'random') { + x <- runif(length(tmp)) + } + if(initsHandling == 'prior') { + model$simulate(wrt) + xNew <- values(model, wrt) + nimCopy(mv, model, nodes, nodes, row = 1, logProb = TRUE) + } else xNew <- runif(length(tmp)) + if(!is.null(xNewIn)) { + wrtScalars <- model$expandNodeNames(wrt, returnScalarComponents = TRUE) + for(nm in names(xNewIn)) { + wh <- grep(paste0("(^",nm,"$)|(^",nm,"\\[)"), wrtScalars) + xNew[wh] <- c(xNewIn[[nm]]) + } + } + try(test_ADModelCalculate_internal(model, name = name, x = x, xNew = xNew, calcNodes = calcNodes, wrt = wrt, + newUpdateNodes = newUpdateNodes, newConstantNodes = newConstantNodes, + savedMV = mv, relTol = relTol, absTolThreshold = absTolThreshold, + useFasterRderivs = useFasterRderivs, useParamTransform = useParamTransform, + checkDoubleTape = checkDoubleTape, + checkCompiledValuesIdentical = checkCompiledValuesIdentical, + checkDoubleUncHessian = checkDoubleUncHessian, doAllUncHessian = doAllUncHessian, + verbose = verbose, debug = debug)) + } else { + 
if(is.null(calcNodes)) calcNodes <- model$getNodeNames() + if(is.null(wrt)) wrt <- model$getNodeNames(stochOnly = TRUE, includeData = FALSE) + ## Apply test to user-provided sets of nodes + tmp <- values(model, wrt) + if(initsHandling %in% c('given','prior')) { + x <- tmp + } else if(initsHandling == 'random') { + x <- runif(length(tmp)) + } + if(initsHandling == 'prior') { + model$simulate(wrt) + xNew <- values(model, wrt) + nimCopy(mv, model, nodes, nodes, row = 1, logProb = TRUE) + } else xNew <- runif(length(tmp)) + if(!is.null(xNewIn)) { + wrtScalars <- model$expandNodeNames(wrt, returnScalarComponents = TRUE) + for(nm in names(xNewIn)) { + wh <- grep(paste0("(^",nm,"$)|(^",nm,"\\[)"), wrtScalars) + xNew[wh] <- c(xNewIn[[nm]]) + } + } + try(test_ADModelCalculate_internal(model, name = name, x = x, xNew = xNew, calcNodes = calcNodes, wrt = wrt, + newUpdateNodes = newUpdateNodes, newConstantNodes = newConstantNodes, + relTol = relTol, absTolThreshold = absTolThreshold, + useFasterRderivs = useFasterRderivs, useParamTransform = useParamTransform, + checkDoubleTape = checkDoubleTape, + checkCompiledValuesIdentical = checkCompiledValuesIdentical, + checkDoubleUncHessian = checkDoubleUncHessian, doAllUncHessian = doAllUncHessian, + verbose = verbose, debug = debug)) + } +} + + +## This does the core assessment, by default running with various sets of order values to be able to assess +## forward and backward mode and to assess whether values in the model are updated. 
+test_ADModelCalculate_internal <- function(model, name = 'unknown', xOrig = NULL, xNew = NULL, + calcNodes = NULL, wrt = NULL, savedMV = NULL, + newUpdateNodes = NULL, newConstantNodes = NULL, + relTol = c(1e-15, 1e-8, 1e-3, 1e-3), absTolThreshold = 0, useFasterRderivs = FALSE, + useParamTransform = FALSE, checkDoubleTape = TRUE, + checkCompiledValuesIdentical = TRUE, checkDoubleUncHessian = TRUE, + doAllUncHessian = TRUE, + verbose = FALSE, debug = FALSE){ + + saved_edition <- edition_get() + local_edition(3) + on.exit(local_edition(saved_edition)) + + test_that(paste0("Derivatives of calculate for model ", name), { + if(exists('paciorek') && paciorek == 0) browser() + if(is.null(calcNodes)) + calcNodes <- model$getNodeNames() + + nodes <- model$getNodeNames() + if(!(exists('CobjectInterface', model) && !is(model$CobjectInterface, 'uninitializedField'))) { + cModel <- compileNimble(model) + } else { + cModel <- eval(quote(model$CobjectInterface)) + nimCopy(savedMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + } + tmpMV <- modelValues(model) + + if(is.null(wrt)) wrt <- calcNodes + wrt <- model$expandNodeNames(wrt, unique = FALSE) + discrete <- sapply(wrt, function(x) model$isDiscrete(x)) + wrt <- wrt[is.na(discrete) | !discrete] # NAs from deterministic nodes, which in most cases should be continuous + + otherNodes <- model$getNodeNames() + otherNodes <- otherNodes[!otherNodes %in% wrt] + + checkSquareMatrix <- function(node) { + val <- model[[node]] + d <- dim(val) + if(!is.null(d) && length(d) == 2 && d[1] == d[2]) return(TRUE) else return(FALSE) + } + + derivsInfo <- makeModelDerivsInfo(model, wrt, calcNodes) + updateNodes <- derivsInfo$updateNodes + constantNodes <- derivsInfo$constantNodes + + updateNodesDeps <- model$getDependencies(updateNodes) + constantNodesDeps <- model$getDependencies(constantNodes) + + if(useParamTransform) { + rDerivs <- derivsNimbleFunctionParamTransform(model, calcNodes = calcNodes, wrt = wrt) + } else rDerivs <- 
derivsNimbleFunction(model, calcNodes = calcNodes, wrt = wrt) + + ## Seem to need resetFunctions because of use of my_parameterTransform twice in paramTransform case. + cDerivs <- compileNimble(rDerivs, project = model, resetFunctions = TRUE) + + if(checkDoubleTape) { + if(useParamTransform) { + rDerivsMeta <- derivsNimbleFunctionParamTransformMeta(model, calcNodes = calcNodes, wrt = wrt) + rDerivsMetaReset <- derivsNimbleFunctionParamTransformMeta(model, calcNodes = calcNodes, wrt = wrt, reset = TRUE) + } else { + rDerivsMeta <- derivsNimbleFunctionMeta(model, calcNodes = calcNodes, wrt = wrt) + rDerivsMetaReset <- derivsNimbleFunctionMeta(model, calcNodes = calcNodes, wrt = wrt, reset = TRUE) + } + cDerivsMeta <- compileNimble(rDerivsMeta, project = model, resetFunctions = TRUE) + cDerivsMetaReset <- compileNimble(rDerivsMetaReset, project = model, resetFunctions = TRUE) + } + + if(useFasterRderivs) { + ## Set up a nf so R derivs use a model calculate that is done fully in compiled code (cModel$calculate loops over nodes in R) + if(useParamTransform) { + ## Need wrapper so that we are calling nimDerivs on a function call and not a nf method + wrapper <- function(x) { + cDerivs$inverseTransformStoreCalculate(x) + } + } else { + rCalcNodes <- calcNodesForDerivs(model, calcNodes = calcNodes, wrt = wrt) + cCalcNodes <- compileNimble(rCalcNodes, project = model) + + ## temporarilyAssignInGlobalEnv(cCalcNodes) + + ## Need wrapper so that we are calling nimDerivs on a function call and not a nf method + wrapper <- function(x) { + cCalcNodes$run(x) + } + } + if(checkDoubleTape) { + wrapperMeta1 <- function(x) { + ans <- nimDerivs(wrapper(x), order = 1, reset = FALSE) + return(ans$jacobian[1, ]) + } + wrapperMeta1Reset <- function(x) { + ans <- nimDerivs(wrapper(x), order = 1, reset = TRUE) + return(ans$jacobian[1, ]) + } + wrapperMeta2 <- function(x) { + ans <- nimDerivs(wrapper(x), order = 2, reset = FALSE) + return(ans$hessian) + } + wrapperMeta2Reset <- function(x) { 
+ ans <- nimDerivs(wrapper(x), order = 2, reset = TRUE) + return(ans$hessian) + } + } + } + + xList <- list(xOrig) + if(!is.null(xNew)) { + xList[[2]] <- xNew + xList[[3]] <- xNew + } + + for(case in 1:2) { + for(idx in seq_along(xList)) { + if(exists('paciorek') && paciorek == idx) browser() + if(verbose) { + if(case == 1) { + if(idx == 1) { + cat("Testing initial wrt values with initial constantNodes\n") + cat(" Using wrt: ", wrt, "\n") + } + if(idx == 2) cat("Testing new wrt values with initial constantNodes\n") + if(idx == 3 && length(updateNodes)) { + cat("Testing new updateNode values with initial constantNodes\n") + cat(" Using updateNodes: ", updateNodes, "\n") + } + } else { + if(idx == 1 && length(constantNodes)) { + cat("Testing initial wrt values with new constantNodes\n") + cat(" Using constantNodes: ", constantNodes, "\n") + } + if(idx == 2 && length(constantNodes)) cat("Testing new wrt values with new constantNodes\n") + if(idx == 3 && length(constantNodes) && length(updateNodes)) cat("Testing new updateNode values with new constantNodes\n") + } + } + + if(idx == 3) { + if(length(updateNodes)) { + values(model, updateNodes) <- runif(length(updateNodes)) + values(cModel, updateNodes) <- values(model, updateNodes) + ## Overwrite for tricky cases (p.d. matrices, integers, etc.) + if(!is.null(newUpdateNodes)) { + for(nm in names(newUpdateNodes)) { + ## Expansion is needed so that fill by element, which retains dimensionality in uncompiled model. 
+ values(model, model$expandNodeNames(nm, returnScalarComponents = TRUE)) <- newUpdateNodes[[nm]] + values(cModel, nm) <- values(model, nm) + } + } + model$calculate(updateNodesDeps) + cModel$calculate(updateNodesDeps) + } else next + } + + reset <- FALSE + if(case == 2) { + reset <- TRUE + if(length(constantNodes)) { + values(model, constantNodes) <- runif(length(constantNodes)) + values(cModel, constantNodes) <- values(model, constantNodes) + + if(!is.null(newConstantNodes)) { + for(nm in names(newConstantNodes)) { + ## Expansion is needed so that fill by element, which retains dimensionality in uncompiled model. + values(model, model$expandNodeNames(nm, returnScalarComponents = TRUE)) <- newConstantNodes[[nm]] + values(cModel, nm) <- values(model, nm) + } + } + model$calculate(constantNodesDeps) + cModel$calculate(constantNodesDeps) + } else next + } + + x <- xList[[idx]] + + nimCopy(model, tmpMV, nodes, nodes, row = 1, logProb = TRUE) + ## Ensure both models are consistent. + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + + rWrt_orig <- cWrt_orig <- values(model, wrt) + + ## Store current logProb and non-wrt values to check that order=c(1,2) doesn't change them. + ## Don't calculate model as want to assess possibility model is out-of-state. 
+ ## model$calculate() + ## cModel$calculate() + + rLogProb_orig <- cLogProb_orig <- model$getLogProb(calcNodes) + rVals_orig <- cVals_orig <- values(model, otherNodes) + + if(useFasterRderivs) { + inputx <- x + if(useParamTransform) + inputx <- rDerivs$my_parameterTransform$transform(x) + + + rOutput012 <- nimDerivs(wrapper(inputx), order = 0:2, reset = reset) + rVals012 <- values(cModel, otherNodes) + rLogProb012 <- cModel$getLogProb(calcNodes) + rWrt012 <- values(cModel, wrt) + + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + rOutput01 <- nimDerivs(wrapper(inputx), order = 0:1, reset = reset) + rLogProb01 <- cModel$getLogProb(calcNodes) + rVals01 <- values(cModel, otherNodes) + rWrt01 <- values(cModel, wrt) + + if(doAllUncHessian) { + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + rOutput12 <- nimDerivs(wrapper(inputx), order = 1:2, model = cModel, reset = reset) + rVals12 <- values(cModel, otherNodes) + rLogProb12 <- cModel$getLogProb(calcNodes) + rWrt12 <- values(cModel, wrt) + + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + rOutput02 <- nimDerivs(wrapper(inputx), order = c(0,2), reset = reset) + rVals02 <- values(cModel, otherNodes) + rLogProb02 <- cModel$getLogProb(calcNodes) + rWrt02 <- values(cModel, wrt) + } + + if(checkDoubleTape) { + ## Note that because inner deriv is order 1 or 2, don't expect model to be updated, + ## so need to do this before 01, 012 cases below. 
+ nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + if(reset) { + rOutput1d <- nimDerivs(wrapperMeta1Reset(inputx), order = 0, model = cModel, reset = reset) + } else rOutput1d <- nimDerivs(wrapperMeta1(inputx), order = 0, model = cModel, reset = reset) + rVals1d <- values(cModel, otherNodes) + rLogProb1d <- cModel$getLogProb(calcNodes) + rWrt1d <- values(cModel, wrt) + + if(doAllUncHessian) { + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + if(reset) { + rOutput2d <- nimDerivs(wrapperMeta2Reset(inputx), order = 0, model = cModel, reset = reset) + } else rOutput2d <- nimDerivs(wrapperMeta2(inputx), order = 0, model = cModel, reset = reset) + rVals2d <- values(cModel, otherNodes) + rLogProb2d <- cModel$getLogProb(calcNodes) + rWrt2d <- values(cModel, wrt) + } + + if(checkDoubleUncHessian) { + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + if(reset) { + rOutput2d11 <- nimDerivs(wrapperMeta1Reset(inputx), order = 1, model = cModel, reset = reset) + } else rOutput2d11 <- nimDerivs(wrapperMeta1(inputx), order = 1, model = cModel, reset = reset) + + rVals2d11 <- values(cModel, otherNodes) + rLogProb2d11 <- cModel$getLogProb(calcNodes) + rWrt2d11 <- values(cModel, wrt) + } + } + + rLogProb_new <- wrapper(inputx) + rVals_new <- values(cModel, otherNodes) + + ## now reset cModel for use in compiled derivs + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + + } else { + + rOutput012 <- rDerivs$run(x, 0:2, reset = reset) + rVals012 <- values(model, otherNodes) + rLogProb012 <- model$getLogProb(calcNodes) + rWrt012 <- values(model, wrt) + + nimCopy(tmpMV, model, nodes, nodes, row = 1, logProb = TRUE) + rOutput01 <- rDerivs$run(x, 0:1, reset = reset) + rLogProb01 <- model$getLogProb(calcNodes) + rVals01 <- values(model, otherNodes) + rWrt01 <- values(model, wrt) + + if(doAllUncHessian) { + nimCopy(tmpMV, model, nodes, nodes, row = 1, logProb = TRUE) + rOutput12 <- rDerivs$run(x, 1:2, reset = reset) + rVals12 <- 
values(model, otherNodes) + rLogProb12 <- model$getLogProb(calcNodes) + rWrt12 <- values(model, wrt) + + nimCopy(tmpMV, model, nodes, nodes, row = 1, logProb = TRUE) + rOutput02 <- rDerivs$run(x, c(0,2), reset = reset) + rVals02 <- values(model, otherNodes) + rLogProb02 <- model$getLogProb(calcNodes) + rWrt02 <- values(model, wrt) + } + + if(checkDoubleTape) { + ## Note that because inner deriv is order 1 or 2, don't expect model to be updated. + nimCopy(tmpMV, model, nodes, nodes, row = 1, logProb = TRUE) + if(reset) { + rOutput1d <- rDerivsMetaReset$metaDerivs1Run(x = x, order = 0, reset = reset) + } else rOutput1d <- rDerivsMeta$metaDerivs1Run(x = x, order = 0, reset = reset) + rVals1d <- values(model, otherNodes) + rLogProb1d <- model$getLogProb(calcNodes) + rWrt1d <- values(model, wrt) + + if(doAllUncHessian) { + nimCopy(tmpMV, model, nodes, nodes, row = 1, logProb = TRUE) + if(reset) { + rOutput2d <- rDerivsMetaReset$metaDerivs2Run(x = x, order = 0, reset = reset) + } else rOutput2d <- rDerivsMeta$metaDerivs2Run(x = x, order = 0, reset = reset) + rVals2d <- values(model, otherNodes) + rLogProb2d <- model$getLogProb(calcNodes) + rWrt2d <- values(model, wrt) + } + + if(checkDoubleUncHessian) { + nimCopy(tmpMV, model, nodes, nodes, row = 1, logProb = TRUE) + if(reset) { + rOutput2d11 <- rDerivsMetaReset$metaDerivs1Run(x = x, order = 1, reset = reset) + } else rOutput2d11 <- rDerivsMeta$metaDerivs1Run(x = x, order = 1, reset = reset) + rVals2d11 <- values(model, otherNodes) + rLogProb2d11 <- model$getLogProb(calcNodes) + rWrt2d11 <- values(model, wrt) + } + } + + values(model, wrt) <- x + rLogProb_new <- model$calculate(calcNodes) + rVals_new <- values(model, otherNodes) + } + + ## Without useParamTransform, wrt should be updated, because assignment into model is outside nimDerivs call, + ## but with useParamTransform they should not, + cOutput12 <- cDerivs$run(x, 1:2, reset = reset) + cVals12 <- values(cModel, otherNodes) + cLogProb12 <- 
cModel$getLogProb(calcNodes) + cWrt12 <- values(cModel, wrt) + + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + cOutput01 <- cDerivs$run(x, 0:1, reset = reset) + cVals01 <- values(cModel, otherNodes) + cLogProb01 <- cModel$getLogProb(calcNodes) + cWrt01 <- values(cModel, wrt) + + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + cOutput012 <- cDerivs$run(x, 0:2, reset = reset) + cVals012 <- values(cModel, otherNodes) + cLogProb012 <- cModel$getLogProb(calcNodes) + cWrt012 <- values(cModel, wrt) + + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + cOutput02 <- cDerivs$run(x, c(0,2), reset = reset) + cVals02 <- values(cModel, otherNodes) + cLogProb02 <- cModel$getLogProb(calcNodes) + cWrt02 <- values(cModel, wrt) + + if(checkDoubleTape) { + ## Note that because inner deriv is order 1 or 2, don't expect model to be updated. + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + if(reset) { + cOutput1d <- cDerivsMetaReset$metaDerivs1Run(x = x, order = 0, reset = reset) + } else cOutput1d <- cDerivsMeta$metaDerivs1Run(x = x, order = 0, reset = reset) + cVals1d <- values(cModel, otherNodes) + cLogProb1d <- cModel$getLogProb(calcNodes) + cWrt1d <- values(cModel, wrt) + + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + if(reset) { + cOutput2d <- cDerivsMetaReset$metaDerivs2Run(x = x, order = 0, reset = reset) + } else cOutput2d <- cDerivsMeta$metaDerivs2Run(x = x, order = 0, reset = reset) + cVals2d <- values(cModel, otherNodes) + cLogProb2d <- cModel$getLogProb(calcNodes) + cWrt2d <- values(cModel, wrt) + + nimCopy(tmpMV, cModel, nodes, nodes, row = 1, logProb = TRUE) + if(reset) { + cOutput2d11 <- cDerivsMetaReset$metaDerivs1Run(x = x, order = 1, reset = reset) + } else cOutput2d11 <- cDerivsMeta$metaDerivs1Run(x = x, order = 1, reset = reset) + cVals2d11 <- values(cModel, otherNodes) + cLogProb2d11 <- cModel$getLogProb(calcNodes) + cWrt2d11 <- values(cModel, wrt) + } + + values(cModel, wrt) <- x + 
cLogProb_new <- cModel$calculate(calcNodes) + cVals_new <- values(cModel, otherNodes) + + ## Check results ## + + if(checkCompiledValuesIdentical) { + expect_fun <- expect_identical + } else expect_fun <- nim_expect_equal + + ## Check that only requested orders provided. + + expect_identical(length(rOutput012$value), 1L) + expect_identical(length(rOutput01$value), 1L) + if(doAllUncHessian) { + expect_identical(length(rOutput12$value), 0L) + expect_identical(length(rOutput02$value), 1L) + } + expect_identical(length(cOutput01$value), 1L) + expect_identical(length(cOutput12$value), 0L) + expect_identical(length(cOutput012$value), 1L) + expect_identical(length(cOutput02$value), 1L) + + expect_gte(length(rOutput012$jacobian), 1) + expect_gte(length(rOutput01$jacobian), 1) + if(doAllUncHessian) { + expect_gte(length(rOutput12$jacobian), 1) + expect_identical(length(rOutput02$jacobian), 0L) + } + expect_gte(length(cOutput01$jacobian), 1) + expect_gte(length(cOutput12$jacobian), 1) + expect_gte(length(cOutput012$jacobian), 1) + expect_identical(length(cOutput02$jacobian), 0L) + + expect_gte(length(rOutput012$hessian), 1) + expect_identical(length(rOutput01$hessian), 0L) + if(doAllUncHessian) { + expect_gte(length(rOutput12$hessian), 1) + expect_gte(length(rOutput02$hessian), 1) + } + expect_identical(length(cOutput01$hessian), 0L) + expect_gte(length(cOutput12$hessian), 1) + expect_gte(length(cOutput012$hessian), 1) + expect_gte(length(cOutput02$hessian), 1) + + if(checkDoubleTape) { + expect_gte(length(rOutput1d$value), 1) + expect_identical(length(rOutput1d$jacobian), 0L) + expect_identical(length(rOutput1d$hessian), 0L) + expect_gte(length(cOutput1d$value), 1) + expect_identical(length(cOutput1d$jacobian), 0L) + expect_identical(length(cOutput1d$hessian), 0L) + + if(doAllUncHessian) { + expect_gte(length(rOutput2d$value), 1) + expect_identical(length(rOutput2d$jacobian), 0L) + expect_identical(length(rOutput2d$hessian), 0L) + } + expect_gte(length(cOutput2d$value), 1) 
+ expect_identical(length(cOutput2d$jacobian), 0L) + expect_identical(length(cOutput2d$hessian), 0L) + + if(checkDoubleUncHessian) { + expect_identical(length(rOutput2d11$value), 0L) + expect_gte(length(rOutput2d11$jacobian), 1) + expect_identical(length(rOutput2d11$hessian), 0L) + } + expect_identical(length(cOutput2d11$value), 0L) + expect_gte(length(cOutput2d11$jacobian), 1) + expect_identical(length(cOutput2d11$hessian), 0L) + } + + ## 0th order 'derivative' + expect_identical(rOutput01$value, rLogProb_new) + expect_identical(rOutput012$value, rLogProb_new) + if(doAllUncHessian) + expect_identical(rOutput02$value, rLogProb_new) + expect_fun(cOutput01$value, cLogProb_new) + expect_fun(cOutput012$value, cLogProb_new) + expect_fun(cOutput02$value, cLogProb_new) + + nim_expect_equal(rOutput01$value, cOutput01$value, tolerance = relTol[1], abs_threshold = absTolThreshold) + nim_expect_equal(rOutput012$value, cOutput012$value, tolerance = relTol[1], abs_threshold = absTolThreshold) + if(doAllUncHessian) + nim_expect_equal(rOutput02$value, cOutput02$value, tolerance = relTol[1], abs_threshold = absTolThreshold) + + if(FALSE) { ## not relevant if using nim_expect_equal + ## expect_equal (via waldo::compare and waldo::num_equal) uses absolute tolerance if 'y' value <= tolerance. + ## Consider creating a nim_equal operator that checks if max(abs(x-y)/abs(y)) > tolerance using + ## relative tolerance unless y == 0. 
+ if(mean(abs(cOutput01$value)) <= relTol[1]) + warning("Using absolute tolerance for 01$value comparison.") + if(mean(abs(cOutput012$value)) <= relTol[1]) + warning("Using absolute tolerance for 012$value comparison.") + if(mean(abs(cOutput02$value)) <= relTol[1]) + warning("Using absolute tolerance for 02$value comparison.") + } + + expect_identical(sum(is.na(rOutput01$value)), 0L, info = "NAs found in uncompiled 0th derivative") + expect_identical(sum(is.na(cOutput01$value)), 0L, info = "NAs found in compiled 0th derivative") + expect_identical(sum(is.na(rOutput012$value)), 0L, info = "NAs found in uncompiled 0th derivative") + expect_identical(sum(is.na(cOutput012$value)), 0L, info = "NAs found in compiled 0th derivative") + if(doAllUncHessian) + expect_identical(sum(is.na(rOutput02$value)), 0L, info = "NAs found in uncompiled 0th derivative") + expect_identical(sum(is.na(cOutput02$value)), 0L, info = "NAs found in compiled 0th derivative") + + ## 1st derivative + nim_expect_equal(rOutput01$jacobian, cOutput01$jacobian, tolerance = relTol[2], abs_threshold = absTolThreshold) + expect_identical(sum(is.na(rOutput01$jacobian)), 0L, info = "NAs found in uncompiled 1st derivative") + expect_identical(sum(is.na(cOutput01$jacobian)), 0L, info = "NAs found in compiled 1st derivative") + + if(doAllUncHessian) { + nim_expect_equal(rOutput12$jacobian, cOutput12$jacobian, tolerance = relTol[2], abs_threshold = absTolThreshold) + expect_identical(sum(is.na(rOutput12$jacobian)), 0L, info = "NAs found in uncompiled 1st derivative") + } + expect_identical(sum(is.na(cOutput12$jacobian)), 0L, info = "NAs found in compiled 1st derivative") + + nim_expect_equal(rOutput012$jacobian, cOutput012$jacobian, tolerance = relTol[2], abs_threshold = absTolThreshold) + expect_identical(sum(is.na(rOutput012$jacobian)), 0L, info = "NAs found in uncompiled 1st derivative") + expect_identical(sum(is.na(cOutput012$jacobian)), 0L, info = "NAs found in compiled 1st derivative") + + if(FALSE) { ## 
not relevant if using nim_expect_equal + if(mean(abs(cOutput01$jacobian)) <= relTol[2]) + warning("Using absolute tolerance for 01$jacobian comparison.") + if(mean(abs(cOutput12$jacobian)) <= relTol[2]) + warning("Using absolute tolerance for 12$jacobian comparison.") + if(mean(abs(cOutput012$jacobian)) <= relTol[2]) + warning("Using absolute tolerance for 012$jacobian comparison.") + } + + ## explicit comparison of first derivs; + ## both of these are reverse mode because 2nd order reverse also invokes first order reverse + expect_identical(cOutput01$jacobian, cOutput012$jacobian) + + expect_identical(cOutput12$jacobian, cOutput012$jacobian) + + if(checkDoubleTape) { + nim_expect_equal(rOutput1d$value, cOutput1d$value, tolerance = relTol[2], abs_threshold = absTolThreshold) + expect_identical(sum(is.na(rOutput1d$value)), 0L, info = "NAs found in uncompiled double-taped 1st derivative") + expect_identical(sum(is.na(cOutput1d$value)), 0L, info = "NAs found in compiled double-taped 1st derivative") + + ## explicit comparison to single-taped result + expect_fun(cOutput1d$value, c(cOutput012$jacobian)) + + if(FALSE) { ## not relevant if using nim_expect_equal + if(mean(abs(cOutput1d$value)) <= relTol[2]) + warning("Using absolute tolerance for 1d$value comparison.") + } + } + + ## 2nd derivative + if(doAllUncHessian) { + nim_expect_equal(rOutput12$hessian, cOutput12$hessian, tolerance = relTol[3], abs_threshold = absTolThreshold) + expect_identical(sum(is.na(rOutput12$hessian)), 0L, info = "NAs found in uncompiled 2nd derivative") + } + expect_identical(sum(is.na(cOutput12$hessian)), 0L, info = "NAs found in compiled 2nd derivative") + + nim_expect_equal(rOutput012$hessian, cOutput012$hessian, tolerance = relTol[3], abs_threshold = absTolThreshold) + expect_identical(sum(is.na(rOutput012$hessian)), 0L, info = "NAs found in uncompiled 2nd derivative") + expect_identical(sum(is.na(cOutput012$hessian)), 0L, info = "NAs found in compiled 2nd derivative") + + 
if(doAllUncHessian) { + nim_expect_equal(rOutput02$hessian, cOutput02$hessian, tolerance = relTol[3], abs_threshold = absTolThreshold) + expect_identical(sum(is.na(rOutput02$hessian)), 0L, info = "NAs found in uncompiled 2nd derivative") + } + expect_identical(sum(is.na(cOutput02$hessian)), 0L, info = "NAs found in compiled 2nd derivative") + + if(FALSE) { ## not relevant if using nim_expect_equal + if(mean(abs(cOutput12$hessian)) <= relTol[3]) + warning("Using absolute tolerance for 12$hessian comparison.") + if(mean(abs(cOutput012$hessian)) <= relTol[3]) + warning("Using absolute tolerance for 012$hessian comparison.") + if(mean(abs(cOutput02$hessian)) <= relTol[3]) + warning("Using absolute tolerance for 02$hessian comparison.") + } + + expect_identical(cOutput12$hessian, cOutput012$hessian) + expect_identical(cOutput02$hessian, cOutput012$hessian) + + + if(checkDoubleTape) { + if(doAllUncHessian) { + nim_expect_equal(rOutput2d$value, cOutput2d$value, tolerance = relTol[3], abs_threshold = absTolThreshold) + expect_identical(sum(is.na(rOutput2d$value)), 0L, info = "NAs found in uncompiled double-taped 2nd derivative") + } + expect_identical(sum(is.na(cOutput2d$value)), 0L, info = "NAs found in compiled double-taped 2nd derivative") + + if(checkDoubleUncHessian) { + nim_expect_equal(rOutput2d11$jacobian, cOutput2d11$jacobian, tolerance = relTol[4], abs_threshold = absTolThreshold) + expect_identical(sum(is.na(rOutput2d11$jacobian)), 0L, info = "NAs found in uncompiled double-taped 2nd derivative") + } + expect_identical(sum(is.na(cOutput2d11$jacobian)), 0L, info = "NAs found in compiled double-taped 2nd derivative") + + ## explicit comparison to single-taped result + ## Not clear why 2d$value not identical to 012$hessian + nim_expect_equal(cOutput2d$value, c(cOutput012$hessian), tolerance = 1e-15, abs_threshold = absTolThreshold) + if(length(cOutput2d11$jacobian) == 1) cOutput2d11$jacobian <- c(cOutput2d11$jacobian) + expect_fun(cOutput2d11$jacobian, 
cOutput012$hessian[,,1]) + + if(FALSE) { ## not relevant if using nim_expect_equal + if(mean(abs(cOutput2d$value)) <= relTol[3]) + warning("Using absolute tolerance for 2d$value comparison.") + if(mean(abs(cOutput2d11$jacobian)) <= relTol[4]) + warning("Using absolute tolerance for 2d11$jacobian comparison.") + } + } + + ## wrt values should equal original wrt values if order !=0 or doubleTape + ## because setting of wrt in model is done within nimDerivs call, so should obey our rules about when model state is altered. + + expect_identical(rWrt01, x) + ## We provide the model to nimDerivs, so expect restoration except for non-paramTransform + ## and non-fasterRderivs, where assignment is outside nimDerivs. + if(doAllUncHessian) { + if(!useParamTransform && !useFasterRderivs) { + expect_identical(rWrt12, x) + } else expect_identical(rWrt12, rWrt_orig) + } + expect_identical(rWrt012, x) + expect_identical(cWrt01, x) + if(!useParamTransform) { + ## Assignment to model is outside nimDerivs call, so expect change. + expect_identical(cWrt12, x) + } else expect_identical(cWrt12, cWrt_orig) + expect_identical(cWrt012, x) + + if(checkDoubleTape) { + expect_identical(rWrt1d, rWrt_orig) + if(doAllUncHessian) + expect_identical(rWrt2d, rWrt_orig) + if(checkDoubleUncHessian) + expect_identical(rWrt2d11, rWrt_orig) + expect_identical(cWrt1d, cWrt_orig) + expect_identical(cWrt2d, cWrt_orig) + expect_identical(cWrt2d11, cWrt_orig) + } + + ## Also, should we take otherNodes and break into those that are in calcNodes and those not? + ## Those in not in calcNodes should never be changed. So maybe nothing to check. 
+ + ## model state - when order 0 is included, logProb and determistic nodes should be updated; otherwise not + expect_identical(rLogProb01, rLogProb_new) + expect_identical(rVals01, rVals_new) + expect_identical(rLogProb012, rLogProb_new) + expect_identical(rVals012, rVals_new) + if(doAllUncHessian) { + expect_identical(rLogProb02, rLogProb_new) + expect_identical(rVals02, rVals_new) + expect_identical(rLogProb12, rLogProb_orig) + expect_identical(rVals12, rVals_orig) + } + + if(checkDoubleTape) { + ## Double tapes here don't have order = 0 in inner tape, so model should not be updated since I do pass model into nimDerivs. + expect_identical(rLogProb1d, rLogProb_orig) + if(doAllUncHessian) + expect_identical(rLogProb2d, rLogProb_orig) + if(checkDoubleUncHessian) + expect_identical(rLogProb2d11, rLogProb_orig) + expect_identical(rVals1d, rVals_orig) + if(doAllUncHessian) + expect_identical(rVals2d, rVals_orig) + if(checkDoubleUncHessian) + expect_identical(rVals2d11, rVals_orig) + } + + ## Not clear if next check should be expect_identical (in many cases they are identical); + ## Check with PdV whether values from taped model could get into the compiled model. + + expect_fun(cLogProb01, cLogProb_new) + expect_fun(cVals01, cVals_new) + expect_fun(cLogProb012, cLogProb_new) + expect_fun(cVals012, cVals_new) + expect_fun(cLogProb02, cLogProb_new) + expect_fun(cVals02, cVals_new) + + expect_fun(cLogProb12, cLogProb_orig) + expect_fun(cVals12, cVals_orig) + + if(checkDoubleTape) { + ## Double tapes here don't have order = 0 in inner tape, so model should not be updated. 
+ expect_fun(cLogProb1d, cLogProb_orig) + expect_fun(cVals1d, cVals_orig) + expect_fun(cLogProb2d, cLogProb_orig) + expect_fun(cVals2d, cVals_orig) + expect_fun(cLogProb2d11, cLogProb_orig) + expect_fun(cVals2d11, cVals_orig) + } + } + } + }) +} + + +## Makes random vectors of wrt elements, following James Duncan's code +make_wrt <- function(argTypes, n_random = 10, allCombinations = FALSE) { + ## always include each arg on its own, and all combinations of the args + ## Note that for models with a large number of variables this might turn out to be too much. + wrts <- as.list(names(argTypes)) + if(allCombinations) { + if (length(argTypes) > 1) + for (m in 2:length(argTypes)) { + this_combn <- combn(names(argTypes), m) + wrts <- c( + wrts, + unlist(apply(this_combn, 2, list), recursive = FALSE) + ) + } + } + + while (n_random > 0) { + n_random <- n_random - 1 + n <- sample(1:length(argTypes), 1) # how many of the args to use? + ## grab a random subset of the args of length n + args <- sample(argTypes, n) + ## may repeat an arg up to 2 times + reps <- sample(1:2, length(args), replace = TRUE) + argSymbols <- lapply(args, nimble:::argType2symbol) + this_wrt <- c() + for (i in 1:length(args)) { + while (reps[i] > 0) { + reps[i] <- reps[i] - 1 + ## coin flip determines whether to index vectors/matrices + use_indexing <- sample(c(TRUE, FALSE), 1) + if (use_indexing && argSymbols[[i]]$nDim > 0) { + rand_row <- sample(1:argSymbols[[i]]$size[1], size = 1) + ## another coin flip determines whether to use : in indexing or not + use_colon <- sample(c(TRUE, FALSE), 1) + if (use_colon && rand_row < argSymbols[[i]]$size[1]) { + end_row <- rand_row + + sample(1:(argSymbols[[i]]$size[1] - rand_row), size = 1) + rand_row <- paste0(rand_row, ':', end_row) + } + index <- rand_row + if (argSymbols[[i]]$nDim == 2) { + rand_col <- sample(1:argSymbols[[i]]$size[2], size = 1) + ## one more coin flip to subscript second dimension + use_colon_again <- sample(c(TRUE, FALSE), 1) + if 
(use_colon_again && rand_col < argSymbols[[i]]$size[2]) { + end_col <- rand_col + + sample(1:(argSymbols[[i]]$size[2] - rand_col), size = 1) + rand_col <- paste0(rand_col, ':', end_col) + } + index <- paste0(index, ',', rand_col) + } + this_wrt <- c(this_wrt, paste0(names(args)[i], '[', index, ']')) + } + ## if first coin flip was FALSE, just + ## use the arg name without indexing + else this_wrt <- c(this_wrt, names(args)[i]) + } + } + if (!is.null(this_wrt)) wrts <- c(wrts, list(unique(this_wrt))) + } + wrts <- unique(wrts) +} + + +makeADDistributionTestList <- function(distnList){ + argsList <- lapply(distnList$args, function(x){ + return(x) + }) + ansList <- list(args = argsList, + expr = substitute(out <- nimDerivs(METHODEXPR, wrt = WRT, order = c(0,1,2)), + list(METHODEXPR = as.call(c(list(quote(method1)), + lapply(names(distnList$args), + function(x){return(parse(text = x)[[1]])}))), + WRT = if(is.null(distnList$WRT)) names(distnList$args) else distnList$WRT + )), + outputType = quote(ADNimbleList()) + ) + return(ansList) +} + +makeADDistributionMethodTestList <- function(distnList){ + argsList <- lapply(distnList$args, function(x){ + return(x) + }) + argsValsList <- list() + for(iArg in seq_along(distnList$args)){ + argsValsList[[names(distnList$args)[iArg]]] <- parse(text = names(distnList$args)[iArg])[[1]] + } + ansList <- list(args = argsList, + expr = substitute({out <- numeric(2); + out[1] <- DISTNEXPR; + out[2] <- LOGDISTNEXPR;}, + list(DISTNEXPR = as.call(c(list(parse(text = distnList$distnName)[[1]]), + argsValsList, + list(log = FALSE))), + LOGDISTNEXPR = as.call(c(list(parse(text = distnList$distnName)[[1]]), + argsValsList, + list(log = TRUE))) + + )), + outputType = quote(double(1, 2)) + ) + return(ansList) +} + +testADDistribution <- function(ADfunGen, argsList, name, debug = FALSE){ + ADfun <- ADfunGen() + CADfun <- compileNimble(ADfun) + for(iArg in seq_along(argsList)){ + iOrdersToCheck <- argsList[[iArg]][['ordersToCheck']] + 
if(is.null(iOrdersToCheck)) iOrdersToCheck <- 0:2 ## check all orders if not specified + else argsList[[iArg]][['ordersToCheck']] <- NULL + RfunCallList <- c(list(quote(ADfun$run)), argsList[[iArg]]) + CfunCallList <- c(list(quote(CADfun$run)), argsList[[iArg]]) + RderivsList <- eval(as.call(RfunCallList)) + CderivsList <- eval(as.call(CfunCallList)) + argValsText <- paste(sapply(names(argsList[[iArg]]), + function(x){return(paste(x, " = ", + paste(argsList[[iArg]][[x]], collapse = ', ')))}), collapse = ', ') + if(is.logical(debug) && debug == TRUE) browser() + else if(is.numeric(debug) && debug == iArg) browser() + if(0 %in% iOrdersToCheck) + expect_equal(RderivsList$value, CderivsList$value, tolerance = 1e-6, + info = paste("Values of", name , "not equal for arguments: ", + argValsText, '.')) + else + print(paste("Skipping check of R and C++ `value` equality for ", + name, " with arguments: ", argValsText )) + if(1 %in% iOrdersToCheck) + expect_equal(RderivsList$jacobian, CderivsList$jacobian, tolerance = 1e-2, + info = paste("Jacobians of", name , "not equal for arguments: ", + argValsText, '.')) + else + print(paste("Skipping check of R and C++ `jacobian` equality for ", + name, " with arguments: ", argValsText )) + if(2 %in% iOrdersToCheck) + expect_equal(RderivsList$hessian, CderivsList$hessian, tolerance = 1e-2, + info = paste("Hessians of", name , "not equal for arguments: ", + argValsText, '.')) + else + print(paste("Skipping check of R and C++ `hessian` equality for ", + name, " with arguments: ", argValsText )) + } + nimble:::clearCompiled(CADfun) +} + +expandNames <- function(var, ...) { + tmp <- as.matrix(expand.grid(...)) + indChars <- apply(tmp, 1, paste0, collapse=', ') + ## Use of sort here only puts names in same order as NIMBLE if have no double-digit indexes. 
+ sort(paste0(var, "[", indChars, "]")) +} + +test_dynamic_indexing_model <- function(param) { + test_that(param$case, test_dynamic_indexing_model_internal(param)) + invisible(NULL) +} + +test_dynamic_indexing_model_internal <- function(param) { + if(!is.null(param$expectError) && param$expectError) { + expect_error(m <- nimbleModel(param$code, dimensions = param$dims, inits = param$inits, data = param$data, constants = param$constants), param$expectErrorMsg, info = "expected error not generated") + } else { + m <- nimbleModel(param$code, dimensions = param$dims, inits = param$inits, data = param$data, constants = param$constants) + expect_true(inherits(m, 'modelBaseClass'), info = "problem creating model") + for(i in seq_along(param$expectedDeps)) + expect_identical(m$getDependencies(param$expectedDeps[[i]]$parent, stochOnly = TRUE), + param$expectedDeps[[i]]$result, info = paste0("dependencies don't match expected in dependency of ", param$expectedDeps[[i]]$parent)) + cm <- compileNimble(m) + expect_true(is.Cmodel(cm), info = "compiled model object improperly formed") + expect_identical(calculate(m), calculate(cm), info = "problem with R vs. C calculate with initial indexes") + for(i in seq_along(param$validIndexes)) { + for(j in seq_along(param$validIndexes[[i]]$var)) { + m[[param$validIndexes[[i]]$var[j]]] <- param$validIndexes[[i]]$value[j] + cm[[param$validIndexes[[i]]$var[j]]] <- param$validIndexes[[i]]$value[j] + } + expect_true(is.numeric(calculate(cm)), 1, info = paste0("problem with C calculate with valid indexes, case: ", i)) + expect_true(is.numeric(calculateDiff(cm)), info = paste0("problem with C calculateDiff with valid indexes, case: ", i)) + expect_identical(calculate(m), calculate(cm), info = paste0("problem with R vs. 
C calculate with valid indexes, case: ", i)) + deps <- m$getDependencies(param$invalidIndexes[[i]]$var, self = FALSE) + expect_true(is.null(simulate(cm, deps, includeData = TRUE)), info = paste0("problem with C simulate with valid indexes, case: ", i)) + ## Reset values so can models have same values for comparisons in later iterations. + cm$setInits(param$inits) + cm$setData(param$data) + cm$calculate() + } + for(i in seq_along(param$invalidIndexes)) { + for(j in seq_along(param$invalidIndexes[[i]]$var)) { + m[[param$invalidIndexes[[i]]$var[j]]] <- param$invalidIndexes[[i]]$value[j] + + cm[[param$invalidIndexes[[i]]$var[j]]] <- param$invalidIndexes[[i]]$value[j] + } + expect_output(out <- calculate(m), "Dynamic index out of bounds", info = paste0("problem with lack of warning in R calculate with non-NA invalid indexes, case: ", i)) + expect_equal(out, NaN, info = paste0("problem with lack of NaN in R calculate with non-NA invalid indexes, case: ", i)) + expect_output(out <- calculate(cm), "Dynamic index out of bounds", info = paste0("problem with lack of warning in C calculate with invalid indexes, case: ", i)) + expect_equal(out, NaN, info = paste0("problem with lack of NaN in C calculate with invalid indexes, case: ", i)) + expect_output(out <- calculateDiff(cm), "Dynamic index out of bounds", info = paste0("problem with lack of warning in C calculateDiff with invalid indexes, case: ", i)) + expect_equal(out, NaN, info = paste0("problem with lack of NaN in C calculateDiff with invalid indexes, case: ", i)) + deps <- m$getDependencies(param$invalidIndexes[[i]]$var, self = FALSE) + expect_output(simulate(cm, deps, includeData = TRUE), "Dynamic index out of bounds", info = paste0("problem with lack of warning in C simulate with invalid indexes, case: ", i)) + expect_true(sum(is.nan(values(cm, deps))) >= 1, info = paste0("problem with lack of NaN in C simulate with invalid indexes, case: ", i)) + } + if(.Platform$OS.type != "windows") { + nimble:::clearCompiled(m) + 
} + } + invisible(NULL) +} + + +## utilities for saving test output to a reference file +## and making the test a comparison of the file +clearOldOutput <- function(filename) { + if(file.exists(filename)) file.remove(filename) +} + +appendOutput <- function(filename, case, caseName, casePrefix = "") { + outputConnection <- file(filename, open = 'at') + writeLines(caseName, con = outputConnection) + outputAns <- lapply(case, function(x) writeLines(paste0(casePrefix, paste(x, collapse = " ")), con = outputConnection)) + close(outputConnection) +} + +writeOutput <- function(cases, filename) { + clearOldOutput(filename) + for(i in seq_along(cases)) appendOutput(filename, cases[[i]], names(cases)[i], casePrefix = paste0(i,": ")) +} + +stripTestPlacementWarning <- function(lines) { + ## deal with Placing tests in `inst/tests/` is deprecated warning + ## as it doesn't seem entirely predictable when/where it appears + coreLines <- grep("^Placing tests in", lines) + addedLines <- lines[coreLines-1] == "In addition: Warning message:" + totalLines <- c(coreLines-addedLines, coreLines) + if(length(totalLines)) + return(lines[-totalLines]) else return(lines) +} + +stripTestsPassedMessage <- function(lines) { + stripLines <- grep("^Test passed", lines) + if(length(stripLines)) + return(lines[-stripLines]) else return(lines) +} + + +compareFilesByLine <- function(trialResults, correctResults, main = "") { + trialResults <- stripTestsPassedMessage(stripTestPlacementWarning(trialResults)) + correctResults <- stripTestPlacementWarning(correctResults) + expect_equal(length(trialResults), length(correctResults)) + + linesToTest <- min(length(trialResults), length(correctResults)) + mapply(function(lineno, trialLine, correctLine) { + expect_identical(trialLine, correctLine) + }, 1:linesToTest, trialResults, correctResults) + invisible(NULL) +} + +compareFilesUsingDiff <- function(trialFile, correctFile, main = "") { + if(main == "") main <- paste0(trialFile, ' and ', correctFile, ' do 
not match\n') + diffOutput <- system2('diff', c(trialFile, correctFile), stdout = TRUE) + test_that(paste0(main, paste0(diffOutput, collapse = '\n')), + expect_true(length(diffOutput) == 0) + ) + invisible(NULL) +} + +## Create a nimbleFunction parametrization to be passed to gen_runFunCore(). +## +## op: An operator string. +## argTypes: A character vector of argTypes (e.g. "double(0)"). +## If this is a named vector, then the names will be +## interpreted as the formals to the constructed op call. +## more_args: A named list, e.g. list(log = 1), that will be added +## as a formal to the output expr but not part of args. +## +## outer_code: quoted code such as quote(Y^2) to be used with Y +## substituted as the operation result. E.g., if the +## operation is `arg1 + arg2`, this would replace it with +## (arg1 + arg2)^2. +## +## inner_codes: list of quoted code such as quote(X^2) to be used with +## X substituted as the corresponding argument. The list +## must be as long as the number of arguments, with NULL +## entries indicating no substitution. E.g. with +## list(NULL, quote(X^2)), `arg1 + arg2` would be changed +## to `arg1 + arg2^2`. 
+make_op_param <- function(op, argTypes, more_args = NULL, + outer_code = NULL, inner_codes = NULL) { + arg_names <- names(argTypes) + + if (is.null(arg_names)) { + arg_names <- paste0('arg', 1:length(argTypes)) + op_args <- lapply(arg_names, as.name) + } else { + op_args <- sapply(arg_names, as.name, simplify = FALSE) + } + + args_string <- paste0(arg_names, ' = ', argTypes, collapse = ' ') + name <- paste(op, args_string) + + # Add inner funs around arguments + if(!is.null(inner_codes)) { + for(i in seq_along(op_args)) { + if(!is.null(inner_codes[[i]])) { + op_args[[i]] <- eval(substitute( + substitute(INNER_CODE, + list(X = op_args[[i]])), + list(INNER_CODE = inner_codes[[i]])) + ) + } + } + } + + this_call <- as.call(c( + substitute(FOO, list(FOO = as.name(op))), + op_args, more_args + )) + + if(is.null(outer_code)) { + expr <- substitute( + out <- THIS_CALL, + list(THIS_CALL = this_call) + ) + } else { + expr <- eval(substitute( + substitute( + out <- OUTER_CODE, + list(Y = this_call)), + list(OUTER_CODE = outer_code)) + ) + } + + argTypesList <- as.list(argTypes) + names(argTypesList) <- arg_names + argTypesList <- lapply(argTypesList, function(arg) { + parse(text = arg)[[1]] + }) + + list( + name = name, + expr = expr, + args = argTypesList, + outputType = parse(text = return_type_string(op, argTypes))[[1]] + ) +} + +## Takes an operator and its input types as a character vector and +## creates a string representing the returnType for the operation. +## +## op: An operator string +## argTypes: A character vector of argTypes (e.g. "double(0)". +## +return_type_string <- function(op, argTypes) { + + ## multivariate distributions ops. 
These do not support recycling rule behavior, so the return type is always double(0) + mvdist_ops <- names(nimble:::sizeCalls)[nimble:::sizeCalls == 'sizeScalarRecurseAllowMaps'] + if(op %in% mvdist_ops) + return("double(0)") + + ## see ops handled by eigenize_recyclingRuleFunction in genCpp_eigenization.R + recycling_rule_ops <- c( + nimble:::scalar_distribution_dFuns, + nimble:::scalar_distribution_pFuns, + nimble:::scalar_distribution_qFuns, + nimble:::scalar_distribution_rFuns, + paste0(c('d', 'q', 'p', 'r'), 't'), + paste0(c('d', 'q', 'p', 'r'), 'exp'), + 'bessel_k' + ) + + returnTypeCode <- nimble:::returnTypeHandling[[op]] + + if (is.null(returnTypeCode)) + if (!op %in% recycling_rule_ops) + return(argTypes[1]) + else returnTypeCode <- 1 + + scalarTypeString <- switch( + returnTypeCode, + 'double', ## 1 + 'integer', ## 2 + 'logical' ## 3 + ) + + args <- lapply( + argTypes, function(argType) + nimble:::argType2symbol(parse(text = argType)[[1]]) + ) + + if (is.null(scalarTypeString)) ## returnTypeCode is 4 or 5 + scalarTypeString <- + if (length(argTypes) == 1) + if (returnTypeCode == 5 && args[[1]]$type == 'logical') 'integer' + else args[[1]]$type + else if (length(argTypes) == 2) { + aot <- nimble:::arithmeticOutputType(args[[1]]$type, args[[2]]$type) + if (returnTypeCode == 5 && aot == 'logical') 'integer' + else aot + } + + reductionOperators <- c( + nimble:::reductionUnaryOperators, + nimble:::matrixSquareReductionOperators, + nimble:::reductionBinaryOperatorsEither, + 'dmulti' + ) + + nDim <- if (op %in% reductionOperators) 0 + else max(sapply(args, `[[`, 'nDim')) + + if (nDim > 2) + stop( + 'Testing does not currently support args with nDim > 2', + call. = FALSE + ) + + sizes <- if (nDim == 0) 1 + else if (length(argTypes) == 1) args[[1]]$size + else if (op %in% nimble:::matrixMultOperators) { + if (!length(argTypes) == 2) + stop( + paste0( + 'matrixMultOperators should only have 2 args but got ', + length(argTypes) + ), call. 
= FALSE + ) + c(args[[1]]$size[1], args[[2]]$size[2]) + } else if (nDim == 2) { + ## one arg is a matrix but this is not matrix multiplication + ## so assume that the first arg with nDim > 1 + has_right_nDim <- sapply(args, function(arg) arg$nDim == nDim) + args[has_right_nDim][[1]]$size + } else { + ## nDim is 1 so either recycling rule or simple vector operator + max((sapply(args, `[[`, 'size'))) + } + + size_string <- if(all(is.na(sizes))) + '' + else if (nDim > 0) + paste0(', c(', paste(sizes, collapse = ', '), ')') + else '' + + return(paste0(scalarTypeString, '(', nDim, size_string, ')')) +} + +## Takes an argSymbol and if argSymbol$size is NA adds default sizes. +add_missing_size <- function(argSymbol, vector_size = 3, matrix_size = c(3, 4)) { + if (any(is.na(argSymbol$size))) { + if (argSymbol$nDim == 1) + argSymbol$size <- vector_size + else if (argSymbol$nDim == 2) + argSymbol$size <- matrix_size + } + invisible(argSymbol) +} + +arg_type_2_input <- function(argType, input_gen_fun = NULL, size = NULL, return_function = FALSE) { + argSymbol <- add_missing_size( + nimble:::argType2symbol(argType) + ) + type <- argSymbol$type + nDim <- argSymbol$nDim + if(is.null(size)) + size <- argSymbol$size + + if (is.null(input_gen_fun)) + input_gen_fun <- switch( + type, + "double" = function(arg_size) rnorm(prod(arg_size)), + "integer" = function(arg_size) rgeom(prod(arg_size), 0.5), + "logical" = function(arg_size) + sample(c(TRUE, FALSE), prod(arg_size), replace = TRUE) + ) + if(is.character(input_gen_fun)) { + new_fun <- function(size) {} + body(new_fun) <- parse(text = input_gen_fun, keep.source = FALSE)[[1]] + input_gen_fun <- new_fun + } + ans <- function() { + arg <- switch( + nDim + 1, + input_gen_fun(1), ## nDim is 0 + input_gen_fun(prod(size)), ## nDim is 1 + matrix(input_gen_fun(prod(size)), nrow = size[1], ncol = size[2]), ## nDim is 2 + array(input_gen_fun(prod(size)), dim = size) ## nDim is 3 + ) + if(is.null(arg)) + stop('Something went wrong while 
making test input.', call.=FALSE) + arg + } + if(return_function) + return(ans) + ans() +} + +modify_on_match <- function(x, pattern, key, value, env = parent.frame(), ...) { + ## Modify any elements of a named list that match pattern. + ## + ## @param x A named list of lists. + ## @param pattern A regex pattern to compare with `names(x)`. + ## @param key The key to modify in any lists whose names match `pattern`. + ## @param value The new value for `key`. + ## @param env The environment in which to modify `x`. + ## @param ... Additional arguments for `grepl`. + for (name in names(x)) { + if (grepl(pattern, name, ...)) { + eval(substitute(x[[name]][[key]] <- value), env) + } + } +} + +nim_all_equal <- function(x, y, tolerance = .Machine$double.eps^0.5, abs_threshold = 0, xlab = NULL, ylab = NULL, verbose = FALSE, info = "") { + if(is.null(xlab)) + xlab <- deparse1(substitute(x)) + if(is.null(ylab)) + ylab <- deparse1(substitute(y)) + + denom <- abs(y) + ## Use absolute tolerance for sufficiently small values. + ## This is necessary for y values exactly zero. + ## In some cases (such as derivatives very near zero and affected by floating point errors) + ## we also need to use absolute tolerance. 
+ denom[denom <= abs_threshold] <- 1 + rel_diff <- abs((x-y)/denom) + result <- rel_diff < tolerance + all_result <- all(result) + if(verbose) { + if(!all_result) { + ord <- order(rel_diff, decreasing = TRUE) + wh <- 1:min(length(rel_diff), 5) + report <- cbind(x[ord[wh]], y[ord[wh]], rel_diff[ord[wh]]) + report <- report[report[,3] > tolerance, ] + cat("\n******************\n") + cat("Detected some values out of relative tolerance ", info, ": ", xlab, " ", ylab, ".\n") + print(report) + cat("******************\n") + } + } + all_result +} + +nim_expect_equal <- function(x, y, tolerance = .Machine$double.eps^0.5, abs_threshold = 0) { + xlab <- deparse1(substitute(x)) + ylab <- deparse1(substitute(y)) + expect_true(nim_all_equal(x, y, xlab = xlab, ylab = ylab, tolerance = tolerance, abs_threshold = abs_threshold, verbose = TRUE)) +} diff --git a/man/dBetaBinom.Rd b/man/dBetaBinom.Rd index ac49f2a..ff71c9e 100644 --- a/man/dBetaBinom.Rd +++ b/man/dBetaBinom.Rd @@ -3,25 +3,26 @@ \name{dBetaBinom} \alias{dBetaBinom} \alias{nimBetaFun} -\alias{dBetaBinom_One} -\alias{rBetaBinom} -\alias{rBetaBinom_One} +\alias{dBetaBinom_v} +\alias{dBetaBinom_s} +\alias{rBetaBinom_v} +\alias{rBetaBinom_s} \title{A beta binomial distribution and beta function for use in \code{nimble} models} \usage{ nimBetaFun(a, b, log) -dBetaBinom(x, N, shape1, shape2, log = 0) +dBetaBinom_v(x, N, shape1, shape2, len, log = 0) -dBetaBinom_One(x, N, shape1, shape2, log = 0) +dBetaBinom_s(x, N, shape1, shape2, len, log = 0) -rBetaBinom(n, N, shape1, shape2) +rBetaBinom_v(n, N, shape1, shape2, len) -rBetaBinom_One(n, N, shape1, shape2) +rBetaBinom_s(n, N, shape1, shape2, len) } \arguments{ -\item{a}{shape1 argument of the beta function nimBetaFun.} +\item{a}{shape1 argument of the beta function.} -\item{b}{shape2 argument of the beta function nimBetaFun.} +\item{b}{shape2 argument of the beta function.} \item{log}{TRUE or 1 to return log probability. 
FALSE or 0 to return probability.} @@ -30,46 +31,49 @@ probability.} \item{N}{number of trials, sometimes called "size".} -\item{shape1}{shape1 parameter of the beta-binomial distribution.} +\item{shape1}{shape1 parameter of the beta distribution.} -\item{shape2}{shape2 parameter of the beta-binomial distribution.} +\item{shape2}{shape2 parameter of the beta distribution.} + +\item{len}{length of \code{x}.} \item{n}{number of random draws, each returning a vector of length \code{len}. Currently only \code{n = 1} is supported, but the argument exists for standardization of "\code{r}" functions.} } \description{ -\code{dBetaBinom} and \code{dBetaBinom_One} provide a beta binomial +\code{dBetaBinom_v} and \code{dBetaBinom_s} provide a beta binomial distribution that can be used directly from R or in \code{nimble} models. These are also used by beta binomial variations of dNmixture distributions. \code{nimBetaFun} is the beta function. } \details{ -These nimbleFunctions provide distributions that can be - used directly in R or in \code{nimble} hierarchical models (via - \code{\link[nimble]{nimbleCode}} and - \code{\link[nimble]{nimbleModel}}). They were originally written for - the beta binomial N-mixture extensions. +These nimbleFunctions provide distributions that can be used + directly in R or in \code{nimble} hierarchical models (via + \code{\link[nimble]{nimbleCode}} and \code{\link[nimble]{nimbleModel}}). + They are used by the beta-binomial variants of the N-mixture distributions + (\code{\link{dNmixture}}). -The beta binomial distribution is equivalent to a binomial distribution whose -probability is itself a beta distributed random variable. +The beta binomial is the marginal distribution of a binomial distribution whose +probability follows a beta distribution. The probability mass function of the beta binomial is \code{choose(N, x) * B(x + shape1, N - x + shape2) / B(shape1, shape2)}, where \code{B(shape1, shape2)} is the beta function. 
-The beta binomial distribution is provided in two forms. \code{dBetaBinom} and -\code{rBetaBinom} are used when \code{x} is a vector (i.e. \code{length(x) > 1}), -in which case the parameters \code{alpha} and \code{beta} must also be vectors. -When \code{x} is scalar, \code{dBetaBinom_One} and \code{rBetaBinom_One} are -used. +\code{nimBetaFun(shape1, shape2)} calculates \code{B(shape1, shape2)}. + +The beta binomial distribution is provided in two forms. \code{dBetaBinom_v} and +is when \code{shape1} and \code{shape2} are vectors. +\code{dBetaBinom_s} is used when \code{shape1} and \code{shape2} are scalars. +In both cases, \code{x} is a vector. } \examples{ -# Calculate a beta binomial probability -dBetaBinom(x = c(4, 0, 0, 3), N = 10, +# Calculate a beta binomial probability with different shape1 and shape2 for each x[i] +dBetaBinom_v(x = c(4, 0, 0, 3), N = 10, shape1 = c(0.5, 0.5, 0.3, 0.5), shape2 = c(0.2, 0.4, 1, 1.2)) -# Same for case with one observation -dBetaBinom_One(x = 3, N = 10, shape1 = 0.5, shape2 = 0.5, log = TRUE) +# or with constant shape1 and shape2 +dBetaBinom_s(x = c(4, 0, 0, 3), N = 10, shape1 = 0.5, shape2 = 0.5, log = TRUE) } \seealso{ For beta binomial N-mixture models, see \code{\link{dNmixture}}. diff --git a/man/dCJS.Rd b/man/dCJS.Rd index b20692e..ae42bd1 100644 --- a/man/dCJS.Rd +++ b/man/dCJS.Rd @@ -125,6 +125,21 @@ If both survival and capture probabilities are time-dependent, use and so on for each combination of time-dependent and time-independent parameters. } +\section{Notes for use with automatic differentiation}{ + + +The \code{dCJS_**} distributions should all work for models and algorithms +that use nimble's automatic differentiation (AD) system. In that system, +some kinds of values are "baked in" (cannot be changed) to the AD calculations +from the first call, unless and until the AD calculations are reset. 
For +the \code{dCJS_**} distributions, the lengths of vector inputs and the data +(\code{x}) values themselves are baked in. These can be different for different +iterations through a for loop (or nimble model declarations with different indices, +for example), but the lengths and data values for each specific iteration +will be "baked in" after the first call. \bold{In other words, it is assumed that +\code{x} are data and are not going to change.} +} + \examples{ # Set up constants and initial values for defining the model dat <- c(1,1,0,0,0) # A vector of observations diff --git a/man/dDHMM.Rd b/man/dDHMM.Rd index be53a4d..5e66ec2 100644 --- a/man/dDHMM.Rd +++ b/man/dDHMM.Rd @@ -133,6 +133,17 @@ If the observation probabilities are time-dependent, one would use: \code{observedStates[1:T] ~ dDHMMo(initStates[1:S], observationProbs[1:S, 1:O, 1:T], transitionProbs[1:S, 1:S, 1:(T-1)], 1, T)} + +The \code{dDHMM[o]} distributions should work for models and algorithms that +use nimble's automatic differentiation (AD) system. In that system, some +kinds of values are "baked in" (cannot be changed) to the AD calculations +from the first call, unless and until the AD calculations are reset. For the +\code{dDHMM[o]} distributions, the sizes of the inputs and the data (\code{x}) +values themselves are baked in. These can be different for different +iterations through a for loop (or nimble model declarations with different +indices, for example), but the sizes and data values for each specific +iteration will be "baked in" after the first call. 
\bold{In other words, it +is assumed that \code{x} are data and are not going to change.} } \examples{ # Set up constants and initial values for defining the model diff --git a/man/dDynOcc.Rd b/man/dDynOcc.Rd index 9dc50c7..b6ba762 100644 --- a/man/dDynOcc.Rd +++ b/man/dDynOcc.Rd @@ -210,6 +210,22 @@ If the colonization probabilities are time-dependent, one would use: probPersist = persistence_prob, probColonize = colonization_prob[1:(T-1)], p = p[1:T, 1:O])} } +\section{Notes for use with automatic differentiation}{ + + +The \code{dDynOcc_***} distributions should all work for models and +algorithms that use nimble's automatic differentiation (AD) system. In that +system, some kinds of values are "baked in" (cannot be changed) to the AD +calculations from the first call, unless and until the AD calculations are +reset. For the \code{dDynOcc_***} distributions, the lengths or dimensions of +vector and/or matrix inputs and the \code{start} and \code{end} values +themselves are baked in. These can be different for different iterations +through a for loop (or nimble model declarations with different indices, for +example), but the for each specific iteration will be "baked in" after the +first call. \bold{It is safest if one can assume that \code{x} are data and +are not going to change.} +} + \examples{ # Set up constants and initial values for defining the model x <- matrix(c(0,0,0,0, diff --git a/man/dHMM.Rd b/man/dHMM.Rd index 997ac3f..3227481 100644 --- a/man/dHMM.Rd +++ b/man/dHMM.Rd @@ -134,6 +134,21 @@ These nimbleFunctions provide distributions that can be used \code{observedStates[1:T] ~ dHMMo(initStates[1:S], observationProbs[1:S, 1:O, 1:T], transitionProbs[1:S, 1:S], 1, T)} } +\section{Notes for use with automatic differentiation}{ + + +The \code{dHMM[o]} distributions should work for models and algorithms that +use nimble's automatic differentiation (AD) system. 
In that system, some +kinds of values are "baked in" (cannot be changed) to the AD calculations +from the first call, unless and until the AD calculations are reset. For the +\code{dHMM[o]} distributions, the sizes of the inputs and the data (\code{x}) +values themselves are baked in. These can be different for different +iterations through a for loop (or nimble model declarations with different +indices, for example), but the sizes and data values for each specific +iteration will be "baked in" after the first call. \bold{In other words, it +is assumed that \code{x} are data and are not going to change.} +} + \examples{ # Set up constants and initial values for defining the model len <- 5 # length of dataset diff --git a/man/dNmixture.Rd b/man/dNmixture.Rd index 414f14c..76c5966 100644 --- a/man/dNmixture.Rd +++ b/man/dNmixture.Rd @@ -14,7 +14,7 @@ \alias{dNmixture_BBP_oneObs} \alias{dNmixture_BBNB_v} \alias{dNmixture_BBNB_s} -\alias{rNmixture_BBNB_oneObs} +\alias{dNmixture_BBNB_oneObs} \alias{rNmixture_BNB_v} \alias{rNmixture_BNB_s} \alias{rNmixture_BNB_oneObs} @@ -23,8 +23,8 @@ \alias{rNmixture_BBP_oneObs} \alias{rNmixture_BBNB_v} \alias{rNmixture_BBNB_s} -\alias{dNmixture_BBNB_oneObs} -\title{N-mixture distribution for use in \code{nimble} models} +\alias{rNmixture_BBNB_oneObs} +\title{dNmixture distribution for use in \code{nimble} models} \usage{ dNmixture_v(x, lambda, prob, Nmin = -1, Nmax = -1, len, log = 0) @@ -38,63 +38,45 @@ dNmixture_BNB_v(x, lambda, theta, prob, Nmin = -1, Nmax = -1, len, log = 0) dNmixture_BNB_s(x, lambda, theta, prob, Nmin = -1, Nmax = -1, len, log = 0) -dNmixture_BNB_oneObs( - x, - lambda, - theta, - prob, - Nmin = -1, - Nmax = -1, - len, - log = 0 -) +dNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin = -1, Nmax = -1, log = 0) dNmixture_BBP_v(x, lambda, prob, s, Nmin = -1, Nmax = -1, len, log = 0) dNmixture_BBP_s(x, lambda, prob, s, Nmin = -1, Nmax = -1, len, log = 0) -dNmixture_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax = -1, 
len, log = 0) +dNmixture_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax = -1, log = 0) dNmixture_BBNB_v(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len, log = 0) dNmixture_BBNB_s(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len, log = 0) -dNmixture_BBNB_oneObs( - x, - lambda, - theta, - prob, - s, - Nmin = -1, - Nmax = -1, - len, - log = 0 -) +dNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1, log = 0) rNmixture_BNB_v(n, lambda, theta, prob, Nmin = -1, Nmax = -1, len) rNmixture_BNB_s(n, lambda, theta, prob, Nmin = -1, Nmax = -1, len) -rNmixture_BNB_oneObs(n, lambda, theta, prob, Nmin = -1, Nmax = -1, len) +rNmixture_BNB_oneObs(n, lambda, theta, prob, Nmin = -1, Nmax = -1) rNmixture_BBP_v(n, lambda, prob, s, Nmin = -1, Nmax = -1, len) rNmixture_BBP_s(n, lambda, prob, s, Nmin = -1, Nmax = -1, len) -rNmixture_BBP_oneObs(n, lambda, prob, s, Nmin = -1, Nmax = -1, len) +rNmixture_BBP_oneObs(n, lambda, prob, s, Nmin = -1, Nmax = -1) rNmixture_BBNB_v(n, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len) rNmixture_BBNB_s(n, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len) -rNmixture_BBNB_oneObs(n, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len) +rNmixture_BBNB_oneObs(n, lambda, theta, prob, s, Nmin = -1, Nmax = -1) } \arguments{ \item{x}{vector of integer counts from a series of sampling occasions.} \item{lambda}{expected value of the Poisson distribution of true abundance} -\item{prob}{detection probability (scalar for \code{dNmixture_s}, vector for \code{dNmixture_v}).} +\item{prob}{detection probability (scalar for \code{dNmixture_s}, vector for +\code{dNmixture_v}).} \item{Nmin}{minimum abundance to sum over for the mixture probability. Set to -1 to select automatically (not available for beta binomial variations; see @@ -106,20 +88,20 @@ Details).} \item{len}{The length of the x vector} -\item{log}{TRUE or 1 to return log probability. FALSE or 0 to return probability.} +\item{log}{TRUE or 1 to return log probability. 
FALSE or 0 to return +probability.} \item{n}{number of random draws, each returning a vector of length -\code{len}. Currently only \code{n = 1} is supported, but the -argument exists for standardization of "\code{r}" functions.} +\code{len}. Currently only \code{n = 1} is supported, but the argument +exists for standardization of "\code{r}" functions.} -\item{theta}{abundance overdispersion parameter required for negative binomial -(*NB) N-mixture models. theta is parameterized such that variance of -the negative binomial variable x is \code{lambda^2 * theta + lambda}} +\item{theta}{abundance overdispersion parameter required for negative +binomial (*NB) N-mixture models. The negative binomial is parameterized +such that variance of x is \code{lambda^2 * theta + lambda}} \item{s}{detection overdispersion parameter required for beta binomial (BB*) -N-mixture models. s is parameterized such that variance of the beta -binomial variable x is \code{V(x) = N \* prob \* (1-prob) \* (N + -s) / (s + 1)}} +N-mixture models. The beta binomial is parameterized such that variance of +x is \code{V(x) = N * prob * (1-prob) * (N + s) / (s + 1)}} } \value{ For \code{dNmixture_s} and \code{dNmixture_v}: the probability (or likelihood) or log @@ -128,7 +110,11 @@ probability of observation vector \code{x}. For \code{rNmixture_s} and \code{rNmixture_v}: a simulated detection history, \code{x}. } \description{ -\code{dNmixture_s} and \code{dNmixture_v} provide Poisson-Binomial mixture distributions of abundance ("N-mixture") for use in \code{nimble} models. Overdispersion alternatives are also provided. +\code{dNmixture_s} and \code{dNmixture_v} provide Poisson-Binomial mixture +distributions of abundance ("N-mixture") for use in \code{nimble} models. +Overdispersion alternatives using the negative binomial distribution (for +the abundance submodel) and the beta binomial distribution (for the detection +submodel) are also provided. 
} \details{ These nimbleFunctions provide distributions that can be @@ -159,12 +145,6 @@ These distributions allow you to replace the Poisson (P) abundance distribution with the negative binomial (NB) and the binomial (B) detection distribution with the beta binomial (BB). -\strong{NOTE: These variants should work but are considered to be in development. -Their function names, parameter names, and implementations are subject to -change. Use with caution while this message is present. Please contact the -authors on the nimble-users listserv if you have any questions. dNmixture_v -and dNmixture_s are \emph{not} considered to be in development.} - Binomial-negative binomial: BNB N-mixture models use a binomial distribution for detection and a negative binomial distribution for abundance with scalar overdispersion parameter \code{theta} (0-Inf). We parameterize such that the @@ -179,8 +159,8 @@ the site (so both x and prob are scalar). Beta-binomial-Poisson: BBP N-mixture uses a beta binomial distribution for detection probabilities and a Poisson distribution for abundance. The beta binomial distribution has scalar overdispersion parameter s (0-Inf). We -parameterize such that the variance of the beta binomial is \code{N \* prob -\* (1-prob) \* (N + s) / (s + 1)}, with greater s indicating less variance +parameterize such that the variance of the beta binomial is \code{N * prob +* (1-prob) * (N + s) / (s + 1)}, with greater s indicating less variance (greater-than-binomial relatedness between observations at the site) and s -> 0 indicating the binomial. The BBP is available in three suffixed forms: \code{dNmixture_BBP_v} is used if \code{prob} varies between observations, @@ -214,8 +194,9 @@ Otherwise one can set both to -1, in which case values for \code{Nmin} and marginal distributions of each count, using the minimum over counts of the former and the maximum over counts of the latter. -The summation over N uses the efficient method given by Meehan et al. (2020). 
-See Appendix B for the algorithm. +The summation over N uses the efficient method given by Meehan et al. (2020, +see Appendix B) for the basic Poisson-Binomial case, extended for the +overdispersion cases in Goldstein and de Valpine (2022). These are \code{nimbleFunction}s written in the format of user-defined distributions for NIMBLE's extension of the BUGS model language. More @@ -257,6 +238,14 @@ If the observation probabilities are visit-independent, one would use: prob[i], Nmin, Nmax, T)} } +\section{Notes for use with automatic differentiation}{ + + +The N-mixture distributions are the only ones in \code{nimbleEcology} for which +one must use different versions when AD support is needed. See +\code{\link{dNmixtureAD}}. +} + \examples{ # Set up constants and initial values for defining the model len <- 5 # length of dataset @@ -290,9 +279,13 @@ D. Turek, P. de Valpine and C. J. Paciorek. 2016. Efficient Markov chain Monte Carlo sampling for hierarchical hidden Markov models. Environmental and Ecological Statistics 23:549ā€“564. DOI 10.1007/s10651-016-0353-z -Meehan, T. D., Michel, N. L., & Rue, H. (2020). Estimating Animal Abundance +Meehan, T. D., Michel, N. L., & Rue, H. 2020. Estimating Animal Abundance with N-Mixture Models Using the Rā€”INLA Package for R. Journal of Statistical Software, 95(2). https://doi.org/10.18637/jss.v095.i02 + +Goldstein, B.R., and P. de Valpine. 2022. Comparing N-mixture Models and +GLMMs for Relative Abundance Estimation in a Citizen Science Dataset. +Scientific Reports 12: 12276. 
DOI:10.1038/s41598-022-16368-z } \seealso{ For occupancy models dealing with detection/nondetection data, diff --git a/man/dNmixtureAD.Rd b/man/dNmixtureAD.Rd new file mode 100644 index 0000000..2f0b77a --- /dev/null +++ b/man/dNmixtureAD.Rd @@ -0,0 +1,167 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dNmixtureAD.R +\name{dNmixtureAD} +\alias{dNmixtureAD} +\alias{dNmixtureAD_s} +\alias{dNmixtureAD_v} +\alias{rNmixtureAD_s} +\alias{rNmixtureAD_v} +\alias{dNmixtureAD_BNB_v} +\alias{dNmixtureAD_BNB_s} +\alias{dNmixtureAD_BNB_oneObs} +\alias{dNmixtureAD_BBP_v} +\alias{dNmixtureAD_BBP_s} +\alias{dNmixtureAD_BBP_oneObs} +\alias{dNmixtureAD_BBNB_v} +\alias{dNmixtureAD_BBNB_s} +\alias{rNmixtureAD_BBNB_oneObs} +\alias{rNmixtureAD_BNB_v} +\alias{rNmixtureAD_BNB_s} +\alias{rNmixtureAD_BNB_oneObs} +\alias{rNmixtureAD_BBP_v} +\alias{rNmixtureAD_BBP_s} +\alias{rNmixtureAD_BBP_oneObs} +\alias{rNmixtureAD_BBNB_v} +\alias{rNmixtureAD_BBNB_s} +\alias{dNmixtureAD_BBNB_oneObs} +\title{N-mixture distributions with AD support for use in \code{nimble} models} +\usage{ +dNmixtureAD_v(x, lambda, prob, Nmin = -1, Nmax = -1, len, log = 0) + +dNmixtureAD_s(x, lambda, prob, Nmin = -1, Nmax = -1, len, log = 0) + +rNmixtureAD_v(n, lambda, prob, Nmin, Nmax, len) + +rNmixtureAD_s(n, lambda, prob, Nmin, Nmax, len) + +dNmixtureAD_BNB_v(x, lambda, theta, prob, Nmin = -1, Nmax = -1, len, log = 0) + +dNmixtureAD_BNB_s(x, lambda, theta, prob, Nmin = -1, Nmax = -1, len, log = 0) + +dNmixtureAD_BNB_oneObs(x, lambda, theta, prob, Nmin = -1, Nmax = -1, log = 0) + +rNmixtureAD_BNB_oneObs(n, lambda, theta, prob, Nmin = -1, Nmax = -1) + +dNmixtureAD_BBP_v(x, lambda, prob, s, Nmin = -1, Nmax = -1, len, log = 0) + +dNmixtureAD_BBP_s(x, lambda, prob, s, Nmin = -1, Nmax = -1, len, log = 0) + +dNmixtureAD_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax = -1, log = 0) + +dNmixtureAD_BBNB_v( + x, + lambda, + theta, + prob, + s, + Nmin = -1, + Nmax = -1, + len, + log = 0 +) + 
+dNmixtureAD_BBNB_s( + x, + lambda, + theta, + prob, + s, + Nmin = -1, + Nmax = -1, + len, + log = 0 +) + +dNmixtureAD_BBNB_oneObs( + x, + lambda, + theta, + prob, + s, + Nmin = -1, + Nmax = -1, + log = 0 +) + +rNmixtureAD_BNB_v(n, lambda, theta, prob, Nmin = -1, Nmax = -1, len) + +rNmixtureAD_BNB_s(n, lambda, theta, prob, Nmin = -1, Nmax = -1, len) + +rNmixtureAD_BNB_oneObs(n, lambda, theta, prob, Nmin = -1, Nmax = -1) + +rNmixtureAD_BBP_v(n, lambda, prob, s, Nmin = -1, Nmax = -1, len) + +rNmixtureAD_BBP_s(n, lambda, prob, s, Nmin = -1, Nmax = -1, len) + +rNmixtureAD_BBP_oneObs(n, lambda, prob, s, Nmin = -1, Nmax = -1) + +rNmixtureAD_BBNB_v(n, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len) + +rNmixtureAD_BBNB_s(n, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len) + +rNmixtureAD_BBNB_oneObs(n, lambda, theta, prob, s, Nmin = -1, Nmax = -1) +} +\arguments{ +\item{x}{vector of integer counts from a series of sampling occasions.} + +\item{lambda}{expected value of the Poisson distribution of true abundance} + +\item{prob}{detection probability (scalar for \code{dNmixture_s}, vector for \code{dNmixture_v}).} + +\item{Nmin}{minimum abundance to sum over for the mixture probability. Must be provided.} + +\item{Nmax}{maximum abundance to sum over for the mixture probability. Must be provided.} + +\item{len}{The length of the x vector} + +\item{log}{TRUE or 1 to return log probability. FALSE or 0 to return probability.} + +\item{n}{number of random draws, each returning a vector of length +\code{len}. Currently only \code{n = 1} is supported, but the +argument exists for standardization of "\code{r}" functions.} + +\item{theta}{abundance overdispersion parameter required for negative binomial +(*NB) N-mixture models. theta is parameterized such that variance of +the negative binomial variable x is \code{lambda^2 * theta + lambda}} + +\item{s}{detection overdispersion parameter required for beta binomial (BB*) +N-mixture models. 
s is parameterized such that variance of the beta +binomial variable x is \code{V(x) = N \* prob \* (1-prob) \* (N + +s) / (s + 1)}} +} +\value{ +The probability (or likelihood) or log probability of an observation + vector \code{x}. +} +\description{ +\code{dNmixtureAD_s} and \code{dNmixtureAD_v} provide Poisson-Binomial +mixture distributions of abundance ("N-mixture") for use in \code{nimble} +models when automatic differentiation may be needed by an algorithm. +Overdispersion alternatives are also provided. +} +\details{ +These nimbleFunctions provide distributions that can be + used directly in R or in \code{nimble} hierarchical models (via + \code{\link[nimble]{nimbleCode}} and + \code{\link[nimble]{nimbleModel}}). + +See \code{\link{dNmixture}} for more information about the N-mixture +distributions. + +The versions here can be used in models that will be used by algorithms that +use nimble's system for automatic differentiation (AD). The primary +difference is that \code{Nmin} and \code{Nmax} must be provided. There are no +automatic defaults for these. + +In the AD system some kinds of values are "baked in" (cannot be changed) to +the AD calculations from the first call, unless and until the AD calculations +are reset. For all variants of the \code{dNmixtureAD} distributions, the +sizes of the inputs as well as \code{Nmin} and \code{Nmax} are baked in. +These can be different for different iterations through a for loop (or nimble +model declarations with different indices, for example), but the sizes and +\code{Nmin} and \code{Nmax} values for each specific iteration will be +"baked in" after the first call. 
+} +\author{ +Ben Goldstein, Lauren Ponisio, and Perry de Valpine +} diff --git a/man/dNmixture_steps.Rd b/man/dNmixture_steps.Rd new file mode 100644 index 0000000..cf8e04f --- /dev/null +++ b/man/dNmixture_steps.Rd @@ -0,0 +1,100 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{dNmixture_steps} +\alias{dNmixture_steps} +\alias{nimNmixPois_logFac} +\alias{dNmixture_BNB_steps} +\alias{dNmixture_BBP_steps} +\alias{dNmixture_BBNB_steps} +\title{Internal helper nimbleFunctions for dNmixture distributions} +\usage{ +nimNmixPois_logFac(numN, ff, max_index = -1) + +dNmixture_steps( + x, + lambda, + Nmin, + Nmax, + sum_log_one_m_prob, + sum_log_dbinom, + usingAD = FALSE +) + +dNmixture_BNB_steps( + x, + lambda, + theta, + Nmin, + Nmax, + sum_log_one_m_prob, + sum_log_dbinom, + usingAD = FALSE +) + +dNmixture_BBP_steps( + x, + beta_m_x, + lambda, + s, + Nmin, + Nmax, + sum_log_dbetabinom, + usingAD = FALSE +) + +dNmixture_BBNB_steps( + x, + beta_m_x, + lambda, + theta, + s, + Nmin, + Nmax, + sum_log_dbetabinom, + usingAD = FALSE +) +} +\arguments{ +\item{numN}{number of indices in the truncated sum for the N-mixture.} + +\item{ff}{a derived vector of units calculated partway through the fast +N-mixture algorithm.} + +\item{max_index}{possibly the index of the max contribution to the summation. +For AD cases this is set by heuristic. 
For non-AD cases it is -1 and will +be determined automatically.} + +\item{x}{x from dNmixture distributions} + +\item{lambda}{lambda from dNmixture distributions} + +\item{Nmin}{start of summation over N} + +\item{Nmax}{end of summation over N} + +\item{sum_log_one_m_prob}{sum(log(1-prob)) from relevant dNmixture cases} + +\item{sum_log_dbinom}{sum(log(dbinom(...))) from relevant dNmixture cases} + +\item{usingAD}{TRUE if called from one of the dNmixtureAD distributions} + +\item{theta}{theta from relevant dNmixture distributions} + +\item{beta_m_x}{beta-x from relevant dNmixture cases} + +\item{s}{s from relevant dNmixture distributions} + +\item{sum_log_dbetabinom}{sum(log(dBetaBinom(...))) from relevant dNmixture +cases} +} +\description{ +None of these functions should be called directly. +} +\details{ +These are helper functions for the N-mixture calculations. They + don't have an interpretation outside of that context and are not intended + to be called directly. +} +\seealso{ +\code{\link{dNmixture}} +} diff --git a/man/dOcc.Rd b/man/dOcc.Rd index dcb50eb..6c92ddf 100644 --- a/man/dOcc.Rd +++ b/man/dOcc.Rd @@ -104,6 +104,20 @@ If the detection probabilities are time-dependent, use: \code{detections[i, 1:T] ~ dOcc_v(occupancyProbability, detectionProbability[1:T], len = T)} } +\section{Notes for use with automatic differentiation}{ + + +The \code{dOcc_*} distributions should all work for models and algorithms +that use nimble's automatic differentiation (AD) system. In that system, some +kinds of values are "baked in" (cannot be changed) to the AD calculations +from the first call, unless and until the AD calculations are reset. For the +\code{dOcc_*} distributions, the lengths of vector inputs are baked in. These +can be different for different iterations through a for loop (or nimble model +declarations with different indices, for example), but the lengths for each +specific iteration will be "baked in" after the first call. 
\bold{It is +safest if one can assume that \code{x} are data and are not going to change.} +} + \examples{ # Set up constants and initial values for defining the model dat <- c(1,1,0,0) # A vector of observations diff --git a/man/nimNmixPois_logFac.Rd b/man/nimNmixPois_logFac.Rd deleted file mode 100644 index 3f5f9d1..0000000 --- a/man/nimNmixPois_logFac.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/utils.R -\name{nimNmixPois_logFac} -\alias{nimNmixPois_logFac} -\title{Helper function for fast N-mixture calculation} -\usage{ -nimNmixPois_logFac(numN, ff) -} -\arguments{ -\item{numN}{first argument for helper function nimNmixPois_logFac, -representing the number of indices in the truncated sum for the N-mixture.} - -\item{ff}{second argument for helper function nimNmixPois_logFac, a derived -vector of units calculated partway through the fast N-mixture algorithm.} -} -\description{ -Helper function for fast N-mixture calculation -} -\details{ -This is a helper function for the fast N-mixture calculation. It - runs an iterative calculation present in all N-mixture varieties. It - doesn't have an interpretation outside of that context. 
-} -\seealso{ -\code{\link{dNmixture}} -} diff --git a/run_tests.R b/run_tests.R index 76c02c2..e148a0e 100644 --- a/run_tests.R +++ b/run_tests.R @@ -65,6 +65,7 @@ runTest <- function(test, logToFile = FALSE, runViaTestthat = TRUE) { name <- gsub('test-(.*)\\.R', '\\1', test) script <- paste0('library(methods);', 'library(testthat);', + # 'devtools::install_github("nimble-dev/nimble/packages/nimble", ref = "ADoak");', 'library(nimble);', 'library(nimbleEcology);', 'tryCatch(test_package("nimbleEcology", "^', name, '$",', diff --git a/tests/testthat/.DS_Store b/tests/testthat/.DS_Store new file mode 100644 index 0000000..5008ddf Binary files /dev/null and b/tests/testthat/.DS_Store differ diff --git a/tests/testthat/test-AD.R b/tests/testthat/test-AD.R new file mode 100644 index 0000000..3089fc6 --- /dev/null +++ b/tests/testthat/test-AD.R @@ -0,0 +1,1836 @@ +# Testing examples: + +# install nimble from CRAN: +#devtools::install_cran("nimble", force = TRUE) +# devtools::install_github("nimble-dev/nimble", ref = "devel", subdir = "packages/nimble") +# install nimbleEcology from branch Nmixture-AD: devtools::install_github("nimble-dev/nimbleEcology", ref = "Nmixture-AD") + +# load nimble's testing tools +#library(nimble) +library(nimbleEcology) +source(system.file('test_utils.R', package = 'nimbleEcology')) +source(system.file('AD_test_utils.R', package = 'nimbleEcology')) +# source("../nimble/packages/nimble/tests/testthat/test_utils.R") +# source("../nimble/packages/nimble/tests/testthat/AD_test_utils.R") + +EDopt <- nimbleOptions("enableDerivs") +BMDopt <- nimbleOptions("buildModelDerivs") +nimbleOptions(enableDerivs = TRUE) +nimbleOptions(buildModelDerivs = TRUE) + +test_that("dOcc works with AD", +{ +##################### +#### dOcc_s case #### + + dat <- c(1,1,0,0) # A vector of observations + probOcc <- 0.6 + probDetect <- 0.4 + + nc <- nimbleCode({ + x[1:4] ~ dOcc_s(probOcc, probDetect, len = 4) + probOcc ~ dunif(0,1) + probDetect ~ dunif(0,1) + }) + + Rmodel 
<- nimbleModel(nc, data = list(x = dat), + inits = list(probOcc = probOcc, + probDetect = probDetect), + buildDerivs=TRUE) + + Cmodel <- compileNimble(Rmodel) + + nodesList_case1 <- + setup_update_and_constant_nodes_for_tests(Rmodel, c('probOcc', 'probDetect')) + v1_case1 <- list(arg1 = c(0.6, 0.4)) # taping values for probOcc and probDetect + v2_case1 <- list(arg1 = c(0.65, 0.35)) # testing values for probOcc and probDetect + RCrelTol = c(1e-15, 1e-8, 1e-3, 1e-14) + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2) # lots of output numbers with no warning messages means it passes. + +##################### +#### dOcc_v case #### + + x <- c(1,0,1,1,0) + probOcc <- 0.4 + probDetect <- c(0.7, 0.3, 0.5, 0.7, 0.25) + + probOcc2 <- 0.5 + probDetect2 <- c(0.77, 0.39, 0.52, 0.78, 0.32) + + + nc <- nimbleCode({ + x[1:5] ~ dOcc_v(probOcc, probDetect[1:5], len = 5) + for (i in 1:5) { + probDetect[i] ~ dunif(0,1) + } + probOcc ~ dunif(0,1) + }) + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(probOcc = probOcc, + probDetect = probDetect), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, + c('probOcc', Rmodel$expandNodeNames('probDetect[1:5]'))) + v1_case1 <- list(arg1 = c(probOcc, probDetect)) # taping values for probOcc and probDetect + v2_case1 <- list(arg1 = c(probOcc2, probDetect2)) # testing values for probOcc and probDetect + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2) +}) + +test_that ("dNmixture works with AD", { +########################## +#### dNmixture_s case #### + + x <- c(7, 7, 6, 9, 10) + lambda <- 15 + prob <- 0.7 + + lambda2 <- 18 + prob2 <- 0.5 + + + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_s(lambda, prob, + Nmin = 0, Nmax = 100, len = 5) + prob ~ dunif(0, 1) + lambda ~ dunif(0, 100) + }) + + Rmodel 
<- nimbleModel(nc, data = list(x = x), + inits = list(prob = prob, + lambda = lambda), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('prob', 'lambda')) + v1_case1 <- list(arg1 = c(prob, lambda)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(prob2, lambda2)) # testing values for prob and lambda + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2, RCrelTol = c(2e-15, 1e-8, 1e-3, 1e-14)) + +########################## +#### dNmixture_BNB_s case #### + + x <- c(7, 7, 6, 9, 10) + lambda <- 15 + prob <- 0.7 + theta <- 1.1 + + lambda2 <- 18 + prob2 <- 0.5 + theta2 <- 1.2 + + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_BNB_s(lambda, prob, theta = theta, + Nmin = 0, Nmax = 100, len = 5) + prob ~ dunif(0, 1) + lambda ~ dunif(0, 100) + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(prob = prob, + lambda = lambda, + theta = theta), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('prob', 'lambda', 'theta')) + v1_case1 <- list(arg1 = c(prob, lambda, theta)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(prob2, lambda2, theta2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2, RCrelTol = c(2e-15, 1e-8, 1e-3, 1e-14)) + +############################## +#### dNmixture_BBP_s case #### + + x <- c(7, 7, 6, 9, 10) + lambda <- 15 + prob <- 0.7 + s <- 0.93 + + lambda2 <- 18 + prob2 <- 0.5 + s2 <- 1.111 + + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_BBP_s(lambda, prob, s = s, + Nmin = 0, Nmax = 100, len = 5) + prob ~ dunif(0, 1) + lambda ~ dunif(0, 100) + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + inits 
= list(prob = prob, + lambda = lambda, + s = s), + buildDerivs = TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('prob', 'lambda', 's')) + v1_case1 <- list(arg1 = c(prob, lambda, s)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(prob2, lambda2, s2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2, RCrelTol = c(2e-15, 1e-8, 1e-3, 1e-14)) + +############################## +#### dNmixture_BBNB_s case #### + + x <- c(7, 7, 6, 9, 10) + lambda <- 15 + prob <- 0.7 + theta <- 1.1 + s <- 0.93 + + lambda2 <- 18 + prob2 <- 0.5 + theta2 <- 1.2 + s2 <- 1.111 + + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_BBNB_s(lambda, prob, theta = theta, s = s, + Nmin = 0, Nmax = 100, len = 5) + prob ~ dunif(0, 1) + lambda ~ dunif(0, 100) + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(prob = prob, + lambda = lambda, + theta = theta, s = s), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('prob', 'lambda', 'theta', 's')) + v1_case1 <- list(arg1 = c(prob, lambda, theta, s)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(prob2, lambda2, theta2, s2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2, RCrelTol = c(2e-15, 1e-8, 1e-3, 1e-14)) + +########################## +#### dNmixture_v case #### + + x <- c(7, 7, 6, 9, 10) + lambda <- 15 + prob <- c(0.6, 0.6, 0.4, 0.9, 0.8) + + lambda2 <- 18 + prob2 <- c(0.65, 0.65, 0.45, 0.95, 0.85) + + + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_v(lambda, prob[1:5], + Nmin = 0, Nmax = 100, len = 5) + for (i in 1:5) { + prob[i] ~ dunif(0, 1) + } + lambda ~ 
dunif(0, 100) + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(prob = prob, + lambda = lambda), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('prob', 'lambda')) + v1_case1 <- list(arg1 = c(prob, lambda)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(prob2, lambda2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2, RCrelTol = c(2e-15, 1e-8, 1e-3, 1e-14)) + +############################## +#### dNmixture_BNB_v case #### + + x <- c(7, 7, 6, 9, 10) + lambda <- 15 + prob <- c(0.6, 0.6, 0.4, 0.9, 0.8) + theta = 1.1 + + + lambda2 <- 18 + prob2 <- c(0.65, 0.65, 0.45, 0.95, 0.85) + theta2 <- 1.2 + + + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_BNB_v(lambda, prob[1:5], theta = theta, + Nmin = 0, Nmax = 100, len = 5) + for (i in 1:5) { + prob[i] ~ dunif(0, 1) + } + lambda ~ dunif(0, 100) + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(prob = prob, + lambda = lambda, + theta = theta), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('prob', 'lambda', 'theta')) + v1_case1 <- list(arg1 = c(prob, lambda, theta)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(prob2, lambda2, theta2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2, RCrelTol = c(2e-15, 1e-8, 1e-3, 1e-14)) + +############################## +#### dNmixture_BBP_v case #### + + x <- c(7, 7, 6, 9, 10) + lambda <- 15 + prob <- c(0.6, 0.6, 0.4, 0.9, 0.8) + s <- 0.9 + + + lambda2 <- 18 + prob2 <- c(0.65, 0.65, 0.45, 0.95, 0.85) + s2 <- 1.2 + + + nc <- nimbleCode({ + x[1:5] ~ 
dNmixtureAD_BBP_v(lambda, prob[1:5], s = s, + Nmin = 0, Nmax = 100, len = 5) + for (i in 1:5) { + prob[i] ~ dunif(0, 1) + } + lambda ~ dunif(0, 100) + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(prob = prob, + lambda = lambda, + s = s), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('prob', 'lambda', 's')) + v1_case1 <- list(arg1 = c(prob, lambda, s)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(prob2, lambda2, s2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2, RCrelTol = c(2e-15, 1e-8, 1e-3, 1e-14)) + +############################## +#### dNmixture_BBNB_v case #### + + x <- c(7, 7, 6, 9, 10) + lambda <- 15 + prob <- c(0.6, 0.6, 0.4, 0.9, 0.8) + theta <- 1.1 + s <- 0.9 + + + lambda2 <- 18 + prob2 <- c(0.65, 0.65, 0.45, 0.95, 0.85) + theta2 <- 1.2 + s2 <- 1.2 + + + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_BBNB_v(lambda, prob[1:5], theta = theta, s = s, + Nmin = 0, Nmax = 100, len = 5) + for (i in 1:5) { + prob[i] ~ dunif(0, 1) + } + lambda ~ dunif(0, 100) + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(prob = prob, + lambda = lambda, + theta = theta, s = s), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('prob', 'lambda', 'theta', 's')) + v1_case1 <- list(arg1 = c(prob, lambda, theta, s)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(prob2, lambda2, theta2, s2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2, RCrelTol = c(2e-15, 1e-8, 1e-3, 1e-14)) + +########################## +#### dNmixture_BNB_oneObs case #### 
+ + x <- 8 + lambda <- 15 + prob <- 0.7 + theta <- 1.1 + + lambda2 <- 18 + prob2 <- 0.66 + theta2 <- 1.4 + + nc <- nimbleCode({ + x ~ dNmixtureAD_BNB_oneObs(lambda, prob, theta = theta, + Nmin = 0, Nmax = 100) + prob ~ dunif(0, 1) + lambda ~ dunif(0, 100) + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(prob = prob, theta = theta, + lambda = lambda), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('prob', 'lambda', 'theta')) + v1_case1 <- list(arg1 = c(prob, lambda, theta)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(prob2, lambda2, theta2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2, RCrelTol = c(2e-15, 1e-8, 1e-3, 1e-14)) + +########################## +#### dNmixture_BBP_oneObs case #### + + x <- 8 + lambda <- 15 + prob <- 0.7 + s <- 0.8 + + lambda2 <- 18 + prob2 <- 0.66 + s <- 1.1 + + nc <- nimbleCode({ + x ~ dNmixtureAD_BBP_oneObs(lambda, prob, s = s, + Nmin = 0, Nmax = 100) + prob ~ dunif(0, 1) + lambda ~ dunif(0, 100) + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(prob = prob, s=s, + lambda = lambda), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('prob', 'lambda', 's')) + v1_case1 <- list(arg1 = c(prob, lambda, s2)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(prob2, lambda2, s2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2, RCrelTol = c(2e-15, 1e-8, 1e-3, 1e-14)) + +########################## +#### dNmixture_BBNB_oneObs case #### + + x <- 8 + lambda <- 15 + prob <- 0.7 + theta <- 1.1 + s <- 0.8 + + 
lambda2 <- 18 + prob2 <- 0.66 + theta2 <- 1.4 + s <- 1.1 + + nc <- nimbleCode({ + x ~ dNmixtureAD_BBNB_oneObs(lambda, prob, theta = theta, s = s, + Nmin = 0, Nmax = 100) + prob ~ dunif(0, 1) + lambda ~ dunif(0, 100) + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(prob = prob, theta = theta, s=s, + lambda = lambda), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('prob', 'lambda', 'theta', 's')) + v1_case1 <- list(arg1 = c(prob, lambda, theta, s2)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(prob2, lambda2, theta2, s2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2, RCrelTol = c(2e-15, 1e-8, 1e-3, 1e-14)) +}) + + +test_that("dCJS works with AD", { +###################### +#### dCJS_ss case #### + + x <- c(1, 0, 1, 0, 0, 0) + probSurvive <- 0.8 + probCapture <- 0.4 + + probSurvive2 <- 0.7 + probCapture2 <- 0.2 + + + nc <- nimbleCode({ + x[1:6] ~ dCJS_ss(probSurvive, probCapture, len = 6) + probSurvive ~ dunif(0, 1) + probCapture ~ dunif(0, 1) + }) + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(probSurvive = probSurvive, + probCapture = probCapture), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('probSurvive', 'probCapture')) + v1_case1 <- list(arg1 = c(probSurvive, probCapture)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(probSurvive2, probCapture2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, + model_calculate_test, nodesList_case1, + v1_case1, v2_case1, + 0:2) + + +###################### +#### dCJS_sv case #### + + x <- c(1, 0, 1, 0, 0, 0) + probSurvive <- 0.8 + probCapture <- c(1, 
0.5, 0.5, 0.4, 0.3, 0.4) + + probSurvive2 <- 0.7 + probCapture2 <- c(1, 0.6, 0.7, 0.4, 0.2, 0.2) + + + nc <- nimbleCode({ + x[1:6] ~ dCJS_sv(probSurvive, probCapture[1:6], len = 6) + for (i in 1:6) { + probCapture[i] ~ dunif(0, 1) + } + probSurvive ~ dunif(0, 1) + + }) + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(probSurvive = probSurvive, + probCapture = probCapture), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c('probSurvive', Rmodel$expandNodeNames('probCapture[2:6]'))) + v1_case1 <- list(arg1 = c(probSurvive, probCapture[2:6])) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(probSurvive2, probCapture2[2:6])) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2) + + +###################### +#### dCJS_vs case #### + + x <- c(1, 0, 1, 0, 0, 0) + probSurvive <- c(0.8, 0.5, 0.3, 0.9, 0.9) + probCapture <- 0.5 + + probSurvive2 <- c(0.7, 0.55, 0.32, 0.8, 0.1) + probCapture2 <- 0.7 + + + nc <- nimbleCode({ + x[1:6] ~ dCJS_vs(probSurvive[1:5], probCapture, len = 6) + for (i in 1:5) { + probSurvive[i] ~ dunif(0, 1) + } + probCapture ~ dunif(0, 1) + + }) + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(probSurvive = probSurvive, + probCapture = probCapture), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c(Rmodel$expandNodeNames('probSurvive[1:5]'), 'probCapture')) + v1_case1 <- list(arg1 = c(probSurvive, probCapture)) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(probSurvive2, probCapture2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + 
nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2) + +###################### +#### dCJS_vv case #### + + x <- c(1, 0, 1, 0, 0, 0) + probSurvive <- c(0.8, 0.5, 0.3, 0.9, 0.9) + probCapture <- c(1, 0.5, 0.5, 0.4, 0.3, 0.4) + + probSurvive2 <- c(0.7, 0.55, 0.32, 0.8, 0.1) + probCapture2 <- c(-10, 0.6, 0.7, 0.4, 0.2, 0.2) + + + nc <- nimbleCode({ + x[1:6] ~ dCJS_vv(probSurvive[1:5], probCapture[1:6], len = 6) + for (i in 1:5) { + probSurvive[i] ~ dunif(0, 1) + } + for (i in 1:6) { + probCapture[i] ~ dunif(0, 1) + } + + }) + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list(probSurvive = probSurvive, + probCapture = probCapture), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c(Rmodel$expandNodeNames('probSurvive[1:5]'), + Rmodel$expandNodeNames('probCapture[2:6]'))) + v1_case1 <- list(arg1 = c(probSurvive, probCapture[2:6])) # taping values for prob and lambda + v2_case1 <- list(arg1 = c(probSurvive2, probCapture2[2:6])) # testing values for prob and lambda + # v1_case1 <- list(arg1 = c(probSurvive, probCapture)) # taping values for prob and lambda + # v2_case1 <- list(arg1 = c(probSurvive2, probCapture2)) # testing values for prob and lambda + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2) +}) + +test_that("dHMM works with AD", { + ###################### + #### dHMM case #### + + x <- c(1, 1, 1, 2, 2) + + init <- c(0.4, 0.2, 0.4) + probObs <- t(array( + c(0.9, 0.1, + 0.1, 0.9, + 0.8, 0.2), + c(2, 3))) + + probTrans <- t(array( + c(0.3, 0.4, 0.2, + 0.1, 0.1, 0.8, + 0.05, 0.05, 0.9), + c(3,3))) + + init2 <- c(0.6, 0.1, 0.3) + probObs2 <- t(array( + c(0.9, 0.1, + 0.1, 0.9, + 0.7, 0.3), + c(2, 3))) + + probTrans2 <- t(array( + c(0.4, 0.4, 0.2, + 0.05, 0.25, 0.7, + 0.05, 0.15, 0.8), + c(3,3))) + + nc <- 
nimbleCode({ + x[1:5] ~ dHMM(init[1:3], probObs = probObs[1:3,1:2], + probTrans = probTrans[1:3, 1:3], len = 5, checkRowSums = 0) + for (i in 1:2) { + init[i] ~ dunif(0, 1) + } + init[3] <- 1 - init[1] - init[2] + + for (i in 1:3) { + probObs[i, 1] ~ dunif(0, 1) + probObs[i, 2] <- 1 - probObs[i, 1] + probTrans[i, 1] ~ dunif(0, 1) + probTrans[i, 2] ~ dunif(0, 1 - probTrans[i, 1]) + probTrans[i, 3] <- 1 - probTrans[i, 1] - probTrans[i, 2] + } + + }) + Rmodel <- nimbleModel(nc, data = list(x = x), + inits = list( + init = init, + probObs = probObs, + probTrans = probTrans + ), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c(Rmodel$expandNodeNames('init[1:3]'), + Rmodel$expandNodeNames('probObs[1:3, 1:2]'), + Rmodel$expandNodeNames('probTrans[1:3, 1:3]') + )) + v1_case1 <- list(arg1 = c(init[1:3], probObs[1:3, 1:2], probTrans[1:3, 1:3])) + v2_case1 <- list(arg1 = c(init2[1:3], probObs2[1:3, 1:2], probTrans2[1:3, 1:3])) + + stuff <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2) + ####### + + # { + # ###################### + # # #### dHMM with 0s in transition matrix case #### + # + # x <- c(1, 1, 1, 2, 2) + # + # init <- c(0.4, 0.2, 0.4) + # probObs <- t(array( + # c(0.9, 0.1, + # 0.1, 0.9, + # 0.8, 0.2), + # c(2, 3))) + # + # probTrans <- t(array( + # c(0.3, 0.4, 0.2, + # 0.1, 0.1, 0.8, + # 0.05, 0.05, 0.9), + # c(3,3))) + # + # init2 <- c(0.6, 0.1, 0.3) + # probObs2 <- t(array( + # c(1, 0, + # 0, 1, + # 0.7, 0.3), + # c(2, 3))) + # + # probTrans2 <- t(array( + # c(0.4, 0.4, 0.2, + # 0, 0.3, 0.7, + # 0, 0, 1), + # c(3,3))) + # + # nc <- nimbleCode({ + # x[1:5] ~ dHMM(init[1:3], probObs = probObs[1:3,1:2], + # probTrans = probTrans[1:3, 1:3], len = 5, checkRowSums = 0) + # for (i in 1:2) { + # init[i] ~ dunif(0, 1) + # } + # init[3] <- 1 - 
init[1] - init[2] + # + # for (i in 1:3) { + # probObs[i, 1] ~ dunif(0, 1) + # probObs[i, 2] <- 1 - probObs[i, 1] + # probTrans[i, 1] ~ dunif(0, 1) + # probTrans[i, 2] ~ dunif(0, 1 - probTrans[i, 1]) + # probTrans[i, 3] <- 1 - probTrans[i, 1] - probTrans[i, 2] + # } + # + # }) + # Rmodel <- nimbleModel(nc, data = list(x = x), + # inits = list( + # init = init, + # probObs = probObs, + # probTrans = probTrans + # ), + # buildDerivs=TRUE) + # Rmodel$calculate() + # + # Cmodel <- compileNimble(Rmodel) + # Cmodel$calculate() + # + # nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c(Rmodel$expandNodeNames('init[1:3]'), + # Rmodel$expandNodeNames('probObs[1:3, 1:2]'), + # Rmodel$expandNodeNames('probTrans[1:3, 1:3]') + # )) + # v1_case1 <- list(arg1 = c(init[1:3], probObs[1:3, 1:2], probTrans[1:3, 1:3])) + # v2_case1 <- list(arg1 = c(init2[1:3], probObs2[1:3, 1:2], probTrans2[1:3, 1:3])) + # + # model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + # nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + # order = 0:2) + # } + ####### + + ###################### + #### dHMMo case #### + + x <- c(1, 1, 1, 2, 2) + + init <- c(0.4, 0.2, 0.4) + probObs <- array( + c(0.95, 0.05, 0.8, 0.05, 0.95, 0.2, + 0.95, 0.05, 0.8, 0.05, 0.95, 0.2, + 0.9, 0.05, 0.8, 0.1, 0.95, 0.2, + 0.95, 0.1, 0.8, 0.05, 0.9, 0.2, + 0.95, 0.05, 0.7, 0.05, 0.95, 0.3), + c(3, 2, 5)) + + probTrans <- t(array( + c(0.3, 0.4, 0.2, + 0.1, 0.1, 0.8, + 0.05, 0.05, 0.9), + c(3,3))) + + init2 <- c(0.6, 0.1, 0.3) + probObs2 <- array( + c(0.6, 0.05, 0.8, 0.4, 0.95, 0.2, + 0.8, 0.05, 0.6, 0.2, 0.95, 0.4, + 0.9, 0.05, 0.8, 0.1, 0.95, 0.2, + 0.9, 0.1, 0.8, 0.1, 0.9, 0.2, + 0.95, 0.05, 0.4, 0.05, 0.95, 0.6), + c(3, 2, 5)) + + probTrans2 <- t(array( + c(0.4, 0.4, 0.2, + 0.05, 0.25, 0.7, + 0.05, 0.15, 0.8), + c(3,3))) + + nc <- nimbleCode({ + x[1:5] ~ dHMMo(init[1:3], probObs = probObs[1:3,1:2,1:5], + probTrans = probTrans[1:3, 1:3], len = 5, checkRowSums = 0) + for (i in 
1:2) { + init[i] ~ dunif(0, 1) + } + init[3] <- 1 - init[1] - init[2] + + for (i in 1:3) { + for (j in 1:5) { + probObs[i, 1, j] ~ dunif(0, 1) + probObs[i, 2, j] <- 1 - probObs[i, 1, j] + } + probTrans[i, 1] ~ dunif(0, 1) + probTrans[i, 2] ~ dunif(0, 1 - probTrans[i, 1]) + probTrans[i, 3] <- 1 - probTrans[i, 1] - probTrans[i, 2] + } + + }) + + # capture <- capture_warning( + # The warning is due to getParam for nDim > 2 + Rmodel <- suppressWarnings(nimbleModel(nc, data = list(x = x), + inits = list( + init = init, + probObs = probObs, + probTrans = probTrans + ), + buildDerivs=TRUE) + ) + + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c(Rmodel$expandNodeNames('init[1:3]'), + Rmodel$expandNodeNames('probObs[1:3, 1:2, 1:5]'), + Rmodel$expandNodeNames('probTrans[1:3, 1:3]') + )) + v1_case1 <- list(arg1 = c(init[1:3], probObs[1:3, 1:2, 1:5], probTrans[1:3, 1:3])) + v2_case1 <- list(arg1 = c(init2[1:3], probObs2[1:3, 1:2, 1:5], probTrans2[1:3, 1:3])) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2) + ####### +}) + +test_that("dDHMM works with AD", { + ###################### + #### dDHMM case #### + + x <- c(1, 1, 1, 2, 2) + + init <- c(0.4, 0.2, 0.4) + probObs <- t(array( + c(0.9, 0.1, + 0.1, 0.9, + 0.8, 0.2), + c(2, 3))) + + probTrans <- array( + c(0.6, 0.05, 0.05, 0.3, 0.65, 0.25, 0.1, 0.3, 0.7, + 0.6, 0.05, 0.05, 0.2, 0.65, 0.05, 0.2, 0.3, 0.9, + 0.6, 0.05, 0.05, 0.2, 0.65, 0.05, 0.2, 0.3, 0.9, + 0.6, 0.05, 0.20, 0.3, 0.65, 0.05, 0.1, 0.3, 0.75 + ), + c(3,3,4)) + + init2 <- c(0.6, 0.1, 0.3) + probObs2 <- t(array( + c(0.9, 0.1, + 0.1, 0.9, + 0.7, 0.3), + c(2, 3))) + probTrans2 <- array( + c(0.5, 0.05, 0.02, 0.4, 0.65, 0.28, 0.1, 0.3, 0.7, + 0.5, 0.05, 0.02, 0.3, 0.75, 0.08, 0.2, 0.2, 0.9, + 0.6, 0.05, 0.05, 0.2, 0.75, 0.05, 0.2, 0.2, 0.9, + 0.6, 0.05, 
0.20, 0.3, 0.65, 0.05, 0.1, 0.3, 0.75 + ), + c(3,3,4)) + + nc <- nimbleCode({ + x[1:5] ~ dDHMM(init[1:3], probObs = probObs[1:3,1:2], + probTrans = probTrans[1:3, 1:3, 1:4], len = 5, checkRowSums = 0) + for (i in 1:2) { + init[i] ~ dunif(0, 1) + } + init[3] <- 1 - init[1] - init[2] + + for (i in 1:3) { + probObs[i, 1] ~ dunif(0, 1) + probObs[i, 2] <- 1 - probObs[i, 1] + for (k in 1:4) { + probTrans[i, 1, k] ~ dunif(0, 1) + probTrans[i, 2, k] ~ dunif(0, 1 - probTrans[i, 1, k]) + probTrans[i, 3, k] <- 1 - probTrans[i, 1, k] - probTrans[i, 2, k] + } + } + + }) + + # capture <- capture_warning( + Rmodel <- suppressWarnings(nimbleModel(nc, data = list(x = x), + inits = list( + init = init, + probObs = probObs, + probTrans = probTrans + ), + buildDerivs=TRUE) + ) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c(Rmodel$expandNodeNames('init[1:3]'), + Rmodel$expandNodeNames('probObs[1:3, 1:2]'), + Rmodel$expandNodeNames('probTrans[1:3, 1:3, 1:4]') + )) + v1_case1 <- list(arg1 = c(init[1:3], probObs[1:3, 1:2], probTrans[1:3, 1:3, 1:4])) + v2_case1 <- list(arg1 = c(init2[1:3], probObs2[1:3, 1:2], probTrans2[1:3, 1:3, 1:4])) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2, + RCrelTol = c(3e-15, 1e-8, 2e-3, 1e-14)) + ####### + + + ###################### + #### dDHMMo case #### + + x <- c(1, 1, 1, 2, 2) + + init <- c(0.4, 0.2, 0.4) + probObs <- array( + c(0.95, 0.05, 0.8, 0.05, 0.95, 0.2, + 0.95, 0.05, 0.8, 0.05, 0.95, 0.2, + 0.9, 0.05, 0.8, 0.1, 0.95, 0.2, + 0.95, 0.1, 0.8, 0.05, 0.9, 0.2, + 0.95, 0.05, 0.7, 0.05, 0.95, 0.3), + c(3, 2, 5)) + + probTrans <- array( + c(0.6, 0.05, 0.05, 0.3, 0.65, 0.25, 0.1, 0.3, 0.7, + 0.6, 0.05, 0.05, 0.2, 0.65, 0.05, 0.2, 0.3, 0.9, + 0.6, 0.05, 0.05, 0.2, 0.65, 0.05, 0.2, 0.3, 0.9, + 0.6, 0.05, 0.20, 0.3, 0.65, 0.05, 0.1, 0.3, 
0.75 + ), + c(3,3,4)) + + init2 <- c(0.6, 0.1, 0.3) + probObs2 <- array( + c(0.6, 0.05, 0.8, 0.4, 0.95, 0.2, + 0.8, 0.05, 0.6, 0.2, 0.95, 0.4, + 0.9, 0.05, 0.8, 0.1, 0.95, 0.2, + 0.9, 0.1, 0.8, 0.1, 0.9, 0.2, + 0.95, 0.05, 0.4, 0.05, 0.95, 0.6), + c(3, 2, 5)) + + probTrans2 <- array( + c(0.5, 0.05, 0.02, 0.4, 0.65, 0.28, 0.1, 0.3, 0.7, + 0.5, 0.05, 0.02, 0.3, 0.75, 0.08, 0.2, 0.2, 0.9, + 0.6, 0.05, 0.05, 0.2, 0.75, 0.05, 0.2, 0.2, 0.9, + 0.6, 0.05, 0.20, 0.3, 0.65, 0.05, 0.1, 0.3, 0.75 + ), + c(3,3,4)) + + nc <- nimbleCode({ + x[1:5] ~ dDHMMo(init[1:3], probObs = probObs[1:3,1:2,1:5], + probTrans = probTrans[1:3, 1:3, 1:4], len = 5, checkRowSums = 0) + for (i in 1:2) { + init[i] ~ dunif(0, 1) + } + init[3] <- 1 - init[1] - init[2] + + for (i in 1:3) { + for (j in 1:5) { + probObs[i, 1, j] ~ dunif(0, 1) + probObs[i, 2, j] <- 1 - probObs[i, 1, j] + } + for (k in 1:4) { + probTrans[i, 1, k] ~ dunif(0, 1) + probTrans[i, 2, k] ~ dunif(0, 1 - probTrans[i, 1, k]) + probTrans[i, 3, k] <- 1 - probTrans[i, 1, k] - probTrans[i, 2, k] + } + } + + }) + # capture <- capture_warning( + Rmodel <- suppressWarnings(nimbleModel(nc, data = list(x = x), + inits = list( + init = init, + probObs = probObs, + probTrans = probTrans + ), + buildDerivs=TRUE) + ) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c(Rmodel$expandNodeNames('init[1:3]'), + Rmodel$expandNodeNames('probObs[1:3, 1:2, 1:5]'), + Rmodel$expandNodeNames('probTrans[1:3, 1:3, 1:4]') + )) + v1_case1 <- list(arg1 = c(init[1:3], probObs[1:3, 1:2, 1:5], probTrans[1:3, 1:3, 1:4])) + v2_case1 <- list(arg1 = c(init2[1:3], probObs2[1:3, 1:2, 1:5], probTrans2[1:3, 1:3, 1:4])) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2, + RCrelTol = c(1e-14, 1e-8, 2e-3, 2e-14)) + ####### +}) + + +test_that("dDynOcc works with AD", { 
+###################### +#### dDynOcc_vvm case #### + # ADtestEnv$RCrelTol sets tolerance + # [1] value [2] first order [3] second order + # can also look at ADtestEnv$CCrelTol + + x <- matrix(c(0,0,NA,0, + 1,1,1,0, + 0,0,0,0, + 0,0,1,0, + 0,0,0,NA), nrow = 4) + start <- c(1,1,2,1) + end <- c(5,5,5,4) + + init <- 0.7 + probPersist <- c(0.4, 0.4, 0.1) + probColonize <- c(0.4, 0.2, 0.1) + p <- matrix(rep(c(0.8, 0.7, 0.8, 0.8, 0.9), each = 4), nrow = 4, byrow =TRUE) + + init2 <- 0.9 + probPersist2 <- c(0.5, 0.55, 0.2) + probColonize2 <- c(0.3, 0.3, 0.6) + p2 <- matrix(rep(c(0.7, 0.5, 0.3, 0.8, 0.66), each = 4), nrow = 4, byrow =TRUE) + + + nc <- nimbleCode({ + x[1:4, 1:5] ~ dDynOcc_vvm(init, + probPersist[1:3], + probColonize[1:3], + p[1:4,1:5], + start[1:4], end[1:4]) + + init ~ dunif(0, 1) + for (i in 1:3) { + probColonize[i] ~ dunif(0, 1) + probPersist[i] ~ dunif(0, 1) + } + for (i in 1:4) { + for (j in 1:5) { + p[i, j] ~ dunif(0, 1) + } + } + + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + constants = list(start = start, end = end), + inits = list( + init = init, + p = p, + probColonize = probColonize, + probPersist = probPersist + ), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c( + "init", + Rmodel$expandNodeNames('p[1:4, 1:5]'), + Rmodel$expandNodeNames('probColonize[1:3]'), + Rmodel$expandNodeNames('probPersist[1:3]') + )) + v1_case1 <- list(arg1 = c(init, p, probColonize, probPersist)) + v2_case1 <- list(arg1 = c(init, p2, probColonize2, probPersist2)) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2, + RCrelTol = c(ADtestEnv$RCrelTol[1], 1e-6, + 0.004, 1e-7)) + +###################### +#### dDynOcc_vsm case #### + + x <- matrix(c(0,0,NA,0, + 1,1,1,0, + 0,0,0,0, + 0,0,1,0, + 0,0,0,NA), nrow = 4) + start <- c(1,1,2,1) 
+ end <- c(5,5,5,4) + + init <- 0.7 + probPersist <- c(0.2, 0.1, 0.3) + probColonize <- 0.4 + p <- matrix(rep(c(0.8, 0.7, 0.8, 0.7, 0.9), each = 4), nrow = 4, byrow =TRUE) + + init2 <- 0.9 + probPersist2 <- c(0.4, 0.4, 0.1) + probColonize2 <- 0.6 + p2 <- matrix(rep(c(0.7, 0.5, 0.3, 0.8, 0.66), each = 4), nrow = 4, byrow =TRUE) + + + nc <- nimbleCode({ + x[1:4, 1:5] ~ dDynOcc_vsm(init, + probPersist[1:3], + probColonize, + p[1:4,1:5], + start[1:4], end[1:4]) + + init ~ dunif(0, 1) + probColonize ~ dunif(0, 1) + for (i in 1:3) { + probPersist[i] ~ dunif(0, 1) + } + for (i in 1:4) { + for (j in 1:5) { + p[i, j] ~ dunif(0, 1) + } + } + + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + constants = list(start = start, end = end), + inits = list( + init = init, + p = p, + probColonize = probColonize, + probPersist = probPersist + ), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c( + "init", + Rmodel$expandNodeNames('p[1:4, 1:5]'), + Rmodel$expandNodeNames('probColonize'), + Rmodel$expandNodeNames('probPersist[1:3]') + )) + v1_case1 <- list(arg1 = c(init, + p, + probColonize, + probPersist + )) + v2_case1 <- list(arg1 = c(init2, p2, + probColonize2, + probPersist2 + )) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2, + RCrelTol = c(ADtestEnv$RCrelTol[1], 4e-7, 0.014, 1e-6)) + #0.004, 1e-7)) + + +###################### +#### dDynOcc_svm case #### + + x <- matrix(c(0,0,NA,0, + 1,1,1,0, + 0,0,0,0, + 0,0,1,0, + 0,0,0,NA), nrow = 4) + start <- c(1,1,2,1) + end <- c(5,5,5,4) + + init <- 0.7 + probPersist <- 0.4 + probColonize <- c(0.4, 0.2, 0.1) + p <- matrix(rep(c(0.8, 0.7, 0.8, 0.8, 0.9), each = 4), nrow = 4, byrow =TRUE) + + init2 <- 0.9 + probPersist2 <- 0.6 + probColonize2 <- c(0.4, 0.2, 0.1) + p2 <- matrix(rep(c(0.7, 0.5, 0.3, 0.8, 
0.66), each = 4), nrow = 4, byrow =TRUE) + + + nc <- nimbleCode({ + x[1:4, 1:5] ~ dDynOcc_svm(init, + probPersist, + probColonize[1:3], + p[1:4,1:5], + start[1:4], end[1:4]) + + init ~ dunif(0, 1) + for (i in 1:3) { + probColonize[i] ~ dunif(0, 1) + } + probPersist ~ dunif(0, 1) + for (i in 1:4) { + for (j in 1:5) { + p[i, j] ~ dunif(0, 1) + } + } + + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + constants = list(start = start, end = end), + inits = list( + init = init, + p = p, + probColonize = probColonize, + probPersist = probPersist + ), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c( + "init", + Rmodel$expandNodeNames('p[1:4, 1:5]'), + Rmodel$expandNodeNames('probColonize[1:3]'), + Rmodel$expandNodeNames('probPersist') + )) + v1_case1 <- list(arg1 = c(init, p, probColonize, probPersist)) + v2_case1 <- list(arg1 = c(init, p2, probColonize2, probPersist2)) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2, + RCrelTol = c(ADtestEnv$RCrelTol[1], 2e-7, 1e-3, 2e-6)) + +###################### +#### dDynOcc_vvv case #### + + x <- matrix(c(0,0,NA,0, + 1,1,1,0, + 0,0,0,0, + 0,0,1,0, + 0,0,0,NA), nrow = 4) + start <- c(1,1,2,1) + end <- c(5,5,5,4) + + init <- 0.7 + probPersist <- c(0.4, 0.4, 0.1) + probColonize <- c(0.4, 0.2, 0.1) + p <- c(0.8, 0.7, 0.8, 0.8) + + init2 <- 0.9 + probPersist2 <- c(0.4, 0.4, 0.1) + probColonize2 <- c(0.4, 0.2, 0.1) + p2 <- c(0.7, 0.5, 0.3, 0.8) + + + nc <- nimbleCode({ + x[1:4, 1:5] ~ dDynOcc_vvv(init, + probPersist[1:3], + probColonize[1:3], + p[1:4], + start[1:4], end[1:4]) + + init ~ dunif(0, 1) + for (i in 1:3) { + probColonize[i] ~ dunif(0, 1) + probPersist[i] ~ dunif(0, 1) + } + for (i in 1:4) { + p[i] ~ dunif(0, 1) + } + + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + constants = 
list(start = start, end = end), + inits = list( + init = init, + p = p, + probColonize = probColonize, + probPersist = probPersist + ), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c( + "init", + Rmodel$expandNodeNames('p[1:4]'), + Rmodel$expandNodeNames('probColonize[1:3]'), + Rmodel$expandNodeNames('probPersist[1:3]') + )) + v1_case1 <- list(arg1 = c(init, p, probColonize, probPersist)) + v2_case1 <- list(arg1 = c(init, p2, probColonize2, probPersist2)) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2, + RCrelTol = c(ADtestEnv$RCrelTol[1], 1e-7, 1e-3, 1e-14)) + + +###################### +#### dDynOcc_vsv case #### + + x <- matrix(c(0,0,NA,0, + 1,1,1,0, + 0,0,0,0, + 0,0,1,0, + 0,0,0,NA), nrow = 4) + start <- c(1,1,2,1) + end <- c(5,5,5,4) + + init <- 0.7 + probPersist <- c(0.4, 0.4, 0.1) + probColonize <- 0.5 + p <- c(0.8, 0.7, 0.8, 0.8) + + init2 <- 0.9 + probPersist2 <- c(0.4, 0.4, 0.1) + probColonize2 <- 0.7 + p2 <- c(0.7, 0.5, 0.3, 0.8) + + + nc <- nimbleCode({ + x[1:4, 1:5] ~ dDynOcc_vsv(init, + probPersist[1:3], + probColonize, + p[1:4], + start[1:4], end[1:4]) + + init ~ dunif(0, 1) + probColonize ~ dunif(0, 1) + for (i in 1:3) { + probPersist[i] ~ dunif(0, 1) + } + for (i in 1:4) { + p[i] ~ dunif(0, 1) + } + + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + constants = list(start = start, end = end), + inits = list( + init = init, + p = p, + probColonize = probColonize, + probPersist = probPersist + ), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c( + "init", + Rmodel$expandNodeNames('p[1:4]'), + Rmodel$expandNodeNames('probColonize'), + Rmodel$expandNodeNames('probPersist[1:3]') + )) + 
v1_case1 <- list(arg1 = c(init, p, probColonize, probPersist)) + v2_case1 <- list(arg1 = c(init, p2, probColonize2, probPersist2)) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2, + RCrelTol = c(ADtestEnv$RCrelTol[1], 1e-7, + ADtestEnv$RCrelTol[3], 1e-14)) + # 0.014, 1e-6)) + + +###################### +#### dDynOcc_svv case #### + + x <- matrix(c(0,0,NA,0, + 1,1,1,0, + 0,0,0,0, + 0,0,1,0, + 0,0,0,NA), nrow = 4) + start <- c(1,1,2,1) + end <- c(5,5,5,4) + + init <- 0.7 + probPersist <- 0.5 + probColonize <- c(0.4, 0.2, 0.1) + p <- c(0.8, 0.7, 0.8, 0.8) + + init2 <- 0.9 + probPersist2 <- 0.5 + probColonize2 <- c(0.4, 0.2, 0.1) + p2 <- c(0.7, 0.5, 0.3, 0.8) + + + nc <- nimbleCode({ + x[1:4, 1:5] ~ dDynOcc_svv(init, + probPersist, + probColonize[1:3], + p[1:4], + start[1:4], end[1:4]) + + init ~ dunif(0, 1) + for (i in 1:3) { + probColonize[i] ~ dunif(0, 1) + } + probPersist ~ dunif(0, 1) + for (i in 1:4) { + p[i] ~ dunif(0, 1) + } + + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + constants = list(start = start, end = end), + inits = list( + init = init, + p = p, + probColonize = probColonize, + probPersist = probPersist + ), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c( + "init", + Rmodel$expandNodeNames('p[1:4]'), + Rmodel$expandNodeNames('probColonize[1:3]'), + Rmodel$expandNodeNames('probPersist') + )) + v1_case1 <- list(arg1 = c(init, p, probColonize, probPersist)) + v2_case1 <- list(arg1 = c(init, p2, probColonize2, probPersist2)) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2) + + +###################### +#### dDynOcc_ssv case #### + + x <- matrix(c(0,0,NA,0, + 1,1,1,0, + 0,0,0,0, + 0,0,1,0, + 0,0,0,NA), 
nrow = 4) + start <- c(1,1,2,1) + end <- c(5,5,5,4) + + init <- 0.7 + probPersist <- 0.5 + probColonize <- 0.4 + p <- c(0.8, 0.7, 0.8, 0.8) + + init2 <- 0.9 + probPersist2 <- 0.5 + probColonize2 <- 0.8 + p2 <- c(0.7, 0.5, 0.3, 0.8) + + + nc <- nimbleCode({ + x[1:4, 1:5] ~ dDynOcc_ssv(init, + probPersist, + probColonize, + p[1:4], + start[1:4], end[1:4]) + + init ~ dunif(0, 1) + probColonize ~ dunif(0, 1) + probPersist ~ dunif(0, 1) + for (i in 1:4) { + p[i] ~ dunif(0, 1) + } + + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + constants = list(start = start, end = end), + inits = list( + init = init, + p = p, + probColonize = probColonize, + probPersist = probPersist + ), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c( + "init", + Rmodel$expandNodeNames('p[1:4]'), + Rmodel$expandNodeNames('probColonize'), + Rmodel$expandNodeNames('probPersist') + )) + v1_case1 <- list(arg1 = c(init, p, probColonize, probPersist)) + v2_case1 <- list(arg1 = c(init, p2, probColonize2, probPersist2)) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2) + +###################### +#### dDynOcc_vvs case #### + + x <- matrix(c(0,0,NA,0, + 1,1,1,0, + 0,0,0,0, + 0,0,1,0, + 0,0,0,NA), nrow = 4) + start <- c(1,1,2,1) + end <- c(5,5,5,4) + + init <- 0.7 + probPersist <- c(0.4, 0.4, 0.1) + probColonize <- c(0.4, 0.2, 0.1) + p <- c(0.8) + + init2 <- 0.9 + probPersist2 <- c(0.4, 0.4, 0.1) + probColonize2 <- c(0.4, 0.2, 0.1) + p2 <- c(0.7) + + + nc <- nimbleCode({ + x[1:4, 1:5] ~ dDynOcc_vvs(init, + probPersist[1:3], + probColonize[1:3], + p, + start[1:4], end[1:4]) + + init ~ dunif(0, 1) + for (i in 1:3) { + probColonize[i] ~ dunif(0, 1) + probPersist[i] ~ dunif(0, 1) + } + p ~ dunif(0, 1) + + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + constants = 
list(start = start, end = end), + inits = list( + init = init, + p = p, + probColonize = probColonize, + probPersist = probPersist + ), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c( + "init", "p", + Rmodel$expandNodeNames('probColonize[1:3]'), + Rmodel$expandNodeNames('probPersist[1:3]') + )) + v1_case1 <- list(arg1 = c(init, p, probColonize, probPersist)) + v2_case1 <- list(arg1 = c(init, p2, probColonize2, probPersist2)) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2) + + +###################### +#### dDynOcc_vss case #### + + x <- matrix(c(0,0,NA,0, + 1,1,1,0, + 0,0,0,0, + 0,0,1,0, + 0,0,0,NA), nrow = 4) + start <- c(1,1,2,1) + end <- c(5,5,5,4) + + init <- 0.7 + probPersist <- c(0.4, 0.4, 0.1) + probColonize <- 0.2 + p <- c(0.8) + + init2 <- 0.9 + probPersist2 <- c(0.4, 0.4, 0.1) + probColonize2 <- 0.8 + p2 <- c(0.7) + + + nc <- nimbleCode({ + x[1:4, 1:5] ~ dDynOcc_vss(init, + probPersist[1:3], + probColonize, + p, + start[1:4], end[1:4]) + + init ~ dunif(0, 1) + probColonize ~ dunif(0, 1) + for (i in 1:3) { + probPersist[i] ~ dunif(0, 1) + } + p ~ dunif(0, 1) + + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + constants = list(start = start, end = end), + inits = list( + init = init, + p = p, + probColonize = probColonize, + probPersist = probPersist + ), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c( + "init", "p", + Rmodel$expandNodeNames('probColonize'), + Rmodel$expandNodeNames('probPersist[1:3]') + )) + v1_case1 <- list(arg1 = c(init, p, probColonize, probPersist)) + v2_case1 <- list(arg1 = c(init, p2, probColonize2, probPersist2)) + + res <- model_calculate_test_case(Rmodel, Cmodel, 
deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2, + RCrelTol = c(ADtestEnv$RCrelTol[1], ADtestEnv$RCrelTol[2], + ADtestEnv$RCrelTol[3], 2e-14)) + + +###################### +#### dDynOcc_svs case #### + + x <- matrix(c(0,0,NA,0, + 1,1,1,0, + 0,0,0,0, + 0,0,1,0, + 0,0,0,NA), nrow = 4) + start <- c(1,1,2,1) + end <- c(5,5,5,4) + + init <- 0.7 + probPersist <- 0.3 + probColonize <- c(0.4, 0.2, 0.1) + p <- c(0.8) + + init2 <- 0.9 + probPersist2 <- 0.44 + probColonize2 <- c(0.4, 0.2, 0.1) + p2 <- c(0.7) + + + nc <- nimbleCode({ + x[1:4, 1:5] ~ dDynOcc_svs(init, + probPersist, + probColonize[1:3], + p, + start[1:4], end[1:4]) + + init ~ dunif(0, 1) + for (i in 1:3) { + probColonize[i] ~ dunif(0, 1) + } + probPersist ~ dunif(0, 1) + p ~ dunif(0, 1) + + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + constants = list(start = start, end = end), + inits = list( + init = init, + p = p, + probColonize = probColonize, + probPersist = probPersist + ), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c( + "init", "p", + Rmodel$expandNodeNames('probColonize[1:3]'), + Rmodel$expandNodeNames('probPersist') + )) + v1_case1 <- list(arg1 = c(init, p, probColonize, probPersist)) + v2_case1 <- list(arg1 = c(init, p2, probColonize2, probPersist2)) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2) + +###################### +#### dDynOcc_sss case #### + + x <- matrix(c(0,0,NA,0, + 1,1,1,0, + 0,0,0,0, + 0,0,1,0, + 0,0,0,NA), nrow = 4) + start <- c(1,1,2,1) + end <- c(5,5,5,4) + + init <- 0.7 + probPersist <- 0.3 + probColonize <- 0.6 + p <- c(0.8) + + init2 <- 0.9 + probPersist2 <- 0.44 + probColonize2 <- 0.7 + p2 <- c(0.7) + + + nc <- nimbleCode({ + x[1:4, 1:5] ~ dDynOcc_sss(init, + probPersist, + 
probColonize, + p, + start[1:4], end[1:4]) + + init ~ dunif(0, 1) + probColonize ~ dunif(0, 1) + probPersist ~ dunif(0, 1) + p ~ dunif(0, 1) + + }) + + Rmodel <- nimbleModel(nc, data = list(x = x), + constants = list(start = start, end = end), + inits = list( + init = init, + p = p, + probColonize = probColonize, + probPersist = probPersist + ), + buildDerivs=TRUE) + Rmodel$calculate() + + Cmodel <- compileNimble(Rmodel) + Cmodel$calculate() + + nodesList_case1 <- setup_update_and_constant_nodes_for_tests(Rmodel, c( + "init", "p", + Rmodel$expandNodeNames('probColonize'), + Rmodel$expandNodeNames('probPersist') + )) + v1_case1 <- list(arg1 = c(init, p, probColonize, probPersist)) + v2_case1 <- list(arg1 = c(init, p2, probColonize2, probPersist2)) + + res <- model_calculate_test_case(Rmodel, Cmodel, deriv_nf = model_calculate_test, + nodesList = nodesList_case1, v1 = v1_case1, v2 = v2_case1, + order = 0:2) +}) + +# reset options before finishing +nimbleOptions(enableDerivs = EDopt) +nimbleOptions(buildModelDerivs = BMDopt) diff --git a/tests/testthat/test-BetaBinom.R b/tests/testthat/test-BetaBinom.R index 4e8b2ef..8ecf811 100644 --- a/tests/testthat/test-BetaBinom.R +++ b/tests/testthat/test-BetaBinom.R @@ -1,13 +1,14 @@ -test_that("dBetaBinom works", +test_that("dBetaBinom_v works", { # Uncompiled calculation x <- c(4, 2, 8, 0, 3) N <- 10 shape1 <- c(0.1, 2.2, 1.4, 0.4, 0.9) shape2 <- c(0.3, 0.5, 0.1, 4, 1.2) - probX <- dBetaBinom(x, N, shape1, shape2) + probX <- dBetaBinom_v(x, N, shape1, shape2) + len <- 5 # Manually calculate the correct answer correctProbX <- prod( @@ -18,21 +19,35 @@ test_that("dBetaBinom works", expect_equal(probX, correctProbX) # Uncompiled log probability - lProbX <- dBetaBinom(x, N, shape1, shape2, log = TRUE) + lProbX <- dBetaBinom_v(x, N, shape1, shape2, log = TRUE) lCorrectProbX <- log(correctProbX) expect_equal(lProbX, lCorrectProbX) # Compilation and compiled calculations - CdBetaBinom <- compileNimble(dBetaBinom) - CprobX <- 
CdBetaBinom(x, N, shape1, shape2) + call_dBetaBinom_v <- nimbleFunction( + name = "call_dBetaBinom_v", + run = function(x = double(1), + N = double(), + shape1 = double(1), + shape2 = double(1), + len = double(), + log = integer(0, default = 0)) { + return(dBetaBinom_v(x, N, shape1, shape2, len, log)) + returnType(double()) + } + ) + + # Compilation and compiled calculations + CdBetaBinom_v <- compileNimble(call_dBetaBinom_v) + CprobX <- CdBetaBinom_v(x, N, shape1, shape2, len = len) expect_equal(CprobX, probX) - ClProbX <- CdBetaBinom(x, N, shape1, shape2, log = TRUE) + ClProbX <- CdBetaBinom_v(x, N, shape1, shape2, len, log = TRUE) expect_equal(ClProbX, lProbX) # Use in Nimble model nc <- nimbleCode({ - x[1:5] ~ dBetaBinom(N, shape1[1:5], shape2[1:5]) + x[1:5] ~ dBetaBinom_v(N, shape1[1:5], shape2[1:5], len=5) N ~ dpois(10) for (i in 1:5) { @@ -72,12 +87,12 @@ test_that("dBetaBinom works", nSim <- 10 xSim <- array(NA, dim = c(nSim, length(x))) for(i in 1:nSim) - xSim[i,] <- rBetaBinom(1, N = N, shape1 = shape1, shape2 = shape2) + xSim[i,] <- rBetaBinom_v(1, N = N, shape1 = shape1, shape2 = shape2, len=len) set.seed(1) - CrBetaBinom <- compileNimble(rBetaBinom) + CrBetaBinom_v <- compileNimble(rBetaBinom_v) CxSim <- array(NA, dim = c(nSim, length(x))) for(i in 1:nSim) - CxSim[i,] <- CrBetaBinom(1, N = N, shape1 = shape1, shape2 = shape2) + CxSim[i,] <- CrBetaBinom_v(1, N = N, shape1 = shape1, shape2 = shape2, len=len) expect_equal(xSim, CxSim) simNodes <- m$getDependencies(c('shape1', 'shape2', 'N'), self = FALSE) @@ -100,14 +115,15 @@ test_that("dBetaBinom works", -test_that("dBetaBinom_One works", +test_that("dBetaBinom_s works", { # Uncompiled calculation - x <- c(4) + x <- c(4, 2, 8, 0, 3) N <- 10 shape1 <- c(0.1) shape2 <- c(0.3) - probX <- dBetaBinom_One(x, N, shape1, shape2) + len <- 5 + probX <- dBetaBinom_s(x, N, shape1, shape2, len) # Manually calculate the correct answer correctProbX <- prod( @@ -118,21 +134,35 @@ test_that("dBetaBinom_One works", 
expect_equal(probX, correctProbX) # Uncompiled log probability - lProbX <- dBetaBinom_One(x, N, shape1, shape2, log = TRUE) + lProbX <- dBetaBinom_s(x, N, shape1, shape2, log = TRUE) lCorrectProbX <- log(correctProbX) expect_equal(lProbX, lCorrectProbX) # Compilation and compiled calculations - CdBetaBinom_One <- compileNimble(dBetaBinom_One) - CprobX <- CdBetaBinom_One(x, N, shape1, shape2) + call_dBetaBinom_s <- nimbleFunction( + name = "call_dBetaBinom_s", + run = function(x = double(1), + N = double(), + shape1 = double(), + shape2 = double(), + len = double(), + log = integer(0, default = 0)) { + return(dBetaBinom_s(x, N, shape1, shape2, len, log)) + returnType(double()) + } + ) + + # Compilation and compiled calculations + CdBetaBinom_s <- compileNimble(call_dBetaBinom_s) + CprobX <- CdBetaBinom_s(x, N, shape1, shape2, len) expect_equal(CprobX, probX) - ClProbX <- CdBetaBinom_One(x, N, shape1, shape2, log = TRUE) + ClProbX <- CdBetaBinom_s(x, N, shape1, shape2, len,log = TRUE) expect_equal(ClProbX, lProbX) # Use in Nimble model nc <- nimbleCode({ - x ~ dBetaBinom_One(N, shape1, shape2) + x[1:5] ~ dBetaBinom_s(N, shape1, shape2,len=5) N ~ dpois(10) shape1 ~ dunif(0,10) shape2 ~ dunif(0,10) @@ -150,7 +180,7 @@ test_that("dBetaBinom_One works", expect_equal(CMlProbX, lProbX) # Test imputing value for all NAs - xNA <- c(NA) + xNA <- c(NA, NA, NA, NA, NA) mNA <- nimbleModel(nc, data = list(x = xNA), inits = list(N = N, shape1 = shape1, shape2 = shape2)) mNAConf <- configureMCMC(mNA) @@ -162,19 +192,19 @@ test_that("dBetaBinom_One works", cmNA$mNA_MCMC$run(10) # Did the imputed values come back? 
- expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x"]))) + expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[2]"]))) # Test simulation code set.seed(1) nSim <- 10 xSim <- array(NA, dim = c(nSim, length(x))) for(i in 1:nSim) - xSim[i,] <- rBetaBinom_One(1, N = N, shape1 = shape1, shape2 = shape2) + xSim[i,] <- rBetaBinom_s(1, N = N, shape1 = shape1, shape2 = shape2, len) set.seed(1) - CrBetaBinom_One <- compileNimble(rBetaBinom_One) + CrBetaBinom_s <- compileNimble(rBetaBinom_s) CxSim <- array(NA, dim = c(nSim, length(x))) for(i in 1:nSim) - CxSim[i,] <- CrBetaBinom_One(1, N = N, shape1 = shape1, shape2 = shape2) + CxSim[i,] <- CrBetaBinom_s(1, N = N, shape1 = shape1, shape2 = shape2, len) expect_equal(xSim, CxSim) simNodes <- m$getDependencies(c('shape1', 'shape2', 'N'), self = FALSE) diff --git a/tests/testthat/test-CJS.R b/tests/testthat/test-CJS.R index b649965..3b2de30 100644 --- a/tests/testthat/test-CJS.R +++ b/tests/testthat/test-CJS.R @@ -2,413 +2,453 @@ # ----------------------------------------------------------------------------- # 0. Load -# Set the context for testthat -context("Testing dCJS-related functions.") - # ----------------------------------------------------------------------------- # 1. Test dCJS_ss # dCJS_ss is used in the case that probSurvive and probCapture are both # scalar values. 
test_that("dCJS_ss works", - { - # Uncompiled calculation - x <- c(1, 0, 1, 0, 0) - probSurvive <- 0.6 - probCapture <- 0.4 - probX <- dCJS_ss(x, probSurvive, probCapture, len = 5) - # Manually calculate the correct answer - correctProbX <- probSurvive * (1 - probCapture) * - probSurvive * (probCapture) * - (probSurvive^2 * (1 - probCapture)^2 + - probSurvive * (1 - probCapture) * (1 - probSurvive) + - (1 - probSurvive)) - - expect_equal(probX, correctProbX) - - # Uncompiled log probability - lProbX <- dCJS_ss(x, probSurvive, probCapture, log = TRUE, len = 5) - lCorrectProbX <- log(correctProbX) - expect_equal(lProbX, lCorrectProbX) - - # Compilation and compiled calculations - CdCJS_ss <- compileNimble(dCJS_ss) - CprobX <- CdCJS_ss(x, probSurvive, probCapture, len = 5) - expect_equal(CprobX, probX) - - ClProbX <- CdCJS_ss(x, probSurvive, probCapture, log = TRUE, len = 5) - expect_equal(ClProbX, lProbX) - - # Use in Nimble model - nc <- nimbleCode({ - x[1:5] ~ dCJS_ss(probSurvive, probCapture, len = 5) - probSurvive ~ dunif(0,1) - probCapture ~ dunif(0,1) - }) - m <- nimbleModel(nc, data = list(x = x), - inits = list(probSurvive = probSurvive, - probCapture = probCapture)) - m$calculate() - MlProbX <- m$getLogProb("x") - expect_equal(MlProbX, lProbX) - - # Compiled model - cm <- compileNimble(m) - cm$calculate() - CMlProbX <- cm$getLogProb("x") - expect_equal(CMlProbX, lProbX) - - # Test imputing value for all NAs - xNA <- c(NA, NA, NA, NA, NA) - mNA <- nimbleModel(nc, data = list(x = xNA), - inits = list(probSurvive = probSurvive, - probCapture = probCapture)) - mNAConf <- configureMCMC(mNA) - mNAConf$addMonitors('x') - mNA_MCMC <- buildMCMC(mNAConf) - cmNA <- compileNimble(mNA, mNA_MCMC) - - set.seed(0) - cmNA$mNA_MCMC$run(10) - - # Did the imputed values come back? 
- expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[2]"]))) - - # Test simulation code - set.seed(1) - nSim <- 10 - xSim <- array(NA, dim = c(nSim, length(x))) - for(i in 1:nSim) - xSim[i,] <- rCJS_ss(1, probSurvive, probCapture, len = length(x)) - set.seed(1) - CrCJS_ss <- compileNimble(rCJS_ss) - CxSim <- array(NA, dim = c(nSim, length(x))) - for(i in 1:nSim) - CxSim[i,] <- CrCJS_ss(1, probSurvive, probCapture, len = length(x)) - expect_identical(xSim, CxSim) - - simNodes <- m$getDependencies(c('probSurvive', 'probCapture'), self = FALSE) - mxSim <- array(NA, dim = c(nSim, length(x))) - set.seed(1) - for(i in 1:nSim) { - m$simulate(simNodes, includeData = TRUE) - mxSim[i,] <- m$x - } - expect_identical(mxSim, xSim) - - CmxSim <- array(NA, dim = c(nSim, length(x))) - set.seed(1) - for(i in 1:nSim) { - cm$simulate(simNodes, includeData = TRUE) - CmxSim[i,] <- cm$x - } - expect_identical(CmxSim, mxSim) - }) +{ + # Uncompiled calculation + x <- c(1, 0, 1, 0, 0) + probSurvive <- 0.6 + probCapture <- 0.4 + probX <- dCJS_ss(x, probSurvive, probCapture, len = 5) + # Manually calculate the correct answer + correctProbX <- probSurvive * (1 - probCapture) * + probSurvive * (probCapture) * + (probSurvive^2 * (1 - probCapture)^2 + + probSurvive * (1 - probCapture) * (1 - probSurvive) + + (1 - probSurvive)) + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dCJS_ss(x, probSurvive, probCapture, log = TRUE, len = 5) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Compilation and compiled calculations + call_dCJS_ss <- nimbleFunction( + run = function(x = double(1), + probSurvive = double(), + probCapture = double(), + len = integer(0, default = 0), + log = integer(0, default = 0)) { + return(dCJS_ss(x, probSurvive, probCapture, len, log)) + returnType(double()) + } + ) + CdCJS_ss <- compileNimble(call_dCJS_ss) + CprobX <- CdCJS_ss(x, probSurvive, probCapture, len = 5) + expect_equal(CprobX, probX) + + 
ClProbX <- CdCJS_ss(x, probSurvive, probCapture, log = TRUE, len = 5) + expect_equal(ClProbX, lProbX) + + # Use in Nimble model + nc <- nimbleCode({ + x[1:5] ~ dCJS_ss(probSurvive, probCapture, len = 5) + probSurvive ~ dunif(0,1) + probCapture ~ dunif(0,1) + }) + m <- nimbleModel(nc, data = list(x = x), + inits = list(probSurvive = probSurvive, + probCapture = probCapture)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- c(NA, NA, NA, NA, NA) + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(probSurvive = probSurvive, + probCapture = probCapture)) + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(0) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? + expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[2]"]))) + + # Test simulation code + set.seed(1) + nSim <- 10 + xSim <- array(NA, dim = c(nSim, length(x))) + for(i in 1:nSim) + xSim[i,] <- rCJS_ss(1, probSurvive, probCapture, len = length(x)) + set.seed(1) + CrCJS_ss <- compileNimble(rCJS_ss) + CxSim <- array(NA, dim = c(nSim, length(x))) + for(i in 1:nSim) + CxSim[i,] <- CrCJS_ss(1, probSurvive, probCapture, len = length(x)) + expect_identical(xSim, CxSim) + + simNodes <- m$getDependencies(c('probSurvive', 'probCapture'), self = FALSE) + mxSim <- array(NA, dim = c(nSim, length(x))) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i,] <- m$x + } + expect_identical(mxSim, xSim) + + CmxSim <- array(NA, dim = c(nSim, length(x))) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i,] <- cm$x + } + expect_identical(CmxSim, mxSim) +}) # ----------------------------------------------------------------------------- # 2. 
Test dCJS_sv # dCJS_sv is used in the case where survival probability is a scalar and # capture probability is a vector. test_that("dCJS_sv works", - { - # Uncompiled calculation - x <- c(1, 0, 1, 0, 0) - probSurvive <- 0.6 - probCapture <- c(1, 0.25, 0.6, 0.4, 0.8) - probX <- dCJS_sv(x, probSurvive, probCapture) - # Manually calculate the correct answer - correctProbX <- probSurvive * (1 - probCapture[2]) * - probSurvive * (probCapture[3]) * - (probSurvive^2 * (1 - probCapture[4]) * (1 - probCapture[5]) + - probSurvive * (1 - probCapture[4]) * (1 - probSurvive) + - (1 - probSurvive)) - - expect_equal(probX, correctProbX) - - # Uncompiled log probability - lProbX <- dCJS_sv(x, probSurvive, probCapture, log = TRUE) - lCorrectProbX <- log(correctProbX) - expect_equal(lProbX, lCorrectProbX) - - # Compilation and compiled calculations - CdCJS_sv <- compileNimble(dCJS_sv) - CprobX <- CdCJS_sv(x, probSurvive, probCapture) - expect_equal(CprobX, probX) - - ClProbX <- CdCJS_sv(x, probSurvive, probCapture, log = TRUE) - expect_equal(ClProbX, lProbX) - - - # Use in Nimble model - nc <- nimbleCode({ - x[1:5] ~ dCJS_sv(probSurvive, probCapture[1:5], len = 5) - probSurvive ~ dunif(0,1) - for (i in 1:5) { - probCapture[i] ~ dunif(0,1) - } - }) - m <- nimbleModel(nc, data = list(x = x), - inits = list(probSurvive = probSurvive, - probCapture = probCapture)) - m$calculate() - MlProbX <- m$getLogProb("x") - expect_equal(MlProbX, lProbX) - - # Compiled model - cm <- compileNimble(m) - cm$calculate() - CMlProbX <- cm$getLogProb("x") - expect_equal(CMlProbX, lProbX) - - # Test imputing value for all NAs - xNA <- c(NA, NA, NA, NA, NA) - mNA <- nimbleModel(nc, data = list(x = xNA), - inits = list(probSurvive = probSurvive, - probCapture = probCapture)) - mNAConf <- configureMCMC(mNA) - mNAConf$addMonitors('x') - mNA_MCMC <- buildMCMC(mNAConf) - cmNA <- compileNimble(mNA, mNA_MCMC) - - set.seed(10) - cmNA$mNA_MCMC$run(5) - - # Did the imputed values come back? 
- expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) - - # Test simulation code - set.seed(1) - nSim <- 10 - xSim <- array(NA, dim = c(nSim, length(x))) - for(i in 1:nSim) - xSim[i,] <- rCJS_sv(1, probSurvive, probCapture, len = length(x)) - set.seed(1) - CrCJS_sv <- compileNimble(rCJS_sv) - CxSim <- array(NA, dim = c(nSim, length(x))) - for(i in 1:nSim) - CxSim[i,] <- CrCJS_sv(1, probSurvive, probCapture, len = length(x)) - expect_identical(xSim, CxSim) - - simNodes <- m$getDependencies(c('probSurvive', 'probCapture'), self = FALSE) - mxSim <- array(NA, dim = c(nSim, length(x))) - set.seed(1) - for (i in 1:nSim) { - m$simulate(simNodes, includeData = TRUE) - mxSim[i,] <- m$x - } - expect_identical(mxSim, xSim) - - CmxSim <- array(NA, dim = c(nSim, length(x))) - set.seed(1) - for (i in 1:nSim) { - cm$simulate(simNodes, includeData = TRUE) - CmxSim[i,] <- cm$x - } - expect_identical(CmxSim, mxSim) - }) +{ + # Uncompiled calculation + x <- c(1, 0, 1, 0, 0) + probSurvive <- 0.6 + probCapture <- c(1, 0.25, 0.6, 0.4, 0.8) + probX <- dCJS_sv(x, probSurvive, probCapture) + # Manually calculate the correct answer + correctProbX <- probSurvive * (1 - probCapture[2]) * + probSurvive * (probCapture[3]) * + (probSurvive^2 * (1 - probCapture[4]) * (1 - probCapture[5]) + + probSurvive * (1 - probCapture[4]) * (1 - probSurvive) + + (1 - probSurvive)) + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dCJS_sv(x, probSurvive, probCapture, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Compilation and compiled calculations + call_dCJS_sv <- nimbleFunction( + run = function(x = double(1), + probSurvive = double(), + probCapture = double(1), + len = integer(0, default = 0), + log = integer(0, default = 0)) { + return(dCJS_sv(x, probSurvive, probCapture, len, log)) + returnType(double()) + } + ) + + CdCJS_sv <- compileNimble(call_dCJS_sv) + CprobX <- CdCJS_sv(x, probSurvive, probCapture) + 
expect_equal(CprobX, probX) + + ClProbX <- CdCJS_sv(x, probSurvive, probCapture, log = TRUE) + expect_equal(ClProbX, lProbX) + + + # Use in Nimble model + nc <- nimbleCode({ + x[1:5] ~ dCJS_sv(probSurvive, probCapture[1:5], len = 5) + probSurvive ~ dunif(0,1) + for (i in 1:5) { + probCapture[i] ~ dunif(0,1) + } + }) + m <- nimbleModel(nc, data = list(x = x), + inits = list(probSurvive = probSurvive, + probCapture = probCapture)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- c(NA, NA, NA, NA, NA) + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(probSurvive = probSurvive, + probCapture = probCapture)) + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(10) + cmNA$mNA_MCMC$run(5) + + # Did the imputed values come back? 
+ expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) + + # Test simulation code + set.seed(1) + nSim <- 10 + xSim <- array(NA, dim = c(nSim, length(x))) + for(i in 1:nSim) + xSim[i,] <- rCJS_sv(1, probSurvive, probCapture, len = length(x)) + set.seed(1) + CrCJS_sv <- compileNimble(rCJS_sv) + CxSim <- array(NA, dim = c(nSim, length(x))) + for(i in 1:nSim) + CxSim[i,] <- CrCJS_sv(1, probSurvive, probCapture, len = length(x)) + expect_identical(xSim, CxSim) + + simNodes <- m$getDependencies(c('probSurvive', 'probCapture'), self = FALSE) + mxSim <- array(NA, dim = c(nSim, length(x))) + set.seed(1) + for (i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i,] <- m$x + } + expect_identical(mxSim, xSim) + + CmxSim <- array(NA, dim = c(nSim, length(x))) + set.seed(1) + for (i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i,] <- cm$x + } + expect_identical(CmxSim, mxSim) +}) # ----------------------------------------------------------------------------- # 3. Test dCJS_vs # dCJS_vs is used in the case where survival probability is a vector and # capture probability is a scalar. 
test_that("dCJS_vs works", - { - # Uncompiled calculation - x <- c(1, 0, 1, 0, 0) - probSurvive <- c(0.8, 0.45, 0.4, 0.7) - probCapture <- 0.6 - probX <- dCJS_vs(x, probSurvive, probCapture) - # Manually calculate the correct answer - correctProbX <- probSurvive[1] * (1 - probCapture) * - probSurvive[2] * probCapture * - (probSurvive[3] * probSurvive[4] * (1 - probCapture)^2 + - probSurvive[3] * (1 - probCapture) * (1 - probSurvive[4]) + - (1 - probSurvive[3])) - - expect_equal(probX, correctProbX) - - # Uncompiled log probability - lProbX <- dCJS_vs(x, probSurvive, probCapture, log = TRUE) - lCorrectProbX <- log(correctProbX) - expect_equal(lProbX, lCorrectProbX) - - # Compilation and compiled calculations - CdCJS_vs <- compileNimble(dCJS_vs) - CprobX <- CdCJS_vs(x, probSurvive, probCapture) - expect_equal(CprobX, probX) - - ClProbX <- CdCJS_vs(x, probSurvive, probCapture, log = TRUE) - expect_equal(ClProbX, lProbX) - - # Use in Nimble model - nc <- nimbleCode({ - x[1:5] ~ dCJS_vs(probSurvive[1:4], probCapture, len = 5) - probCapture ~ dunif(0,1) - for (i in 1:4) { - probSurvive[i] ~ dunif(0,1) - } - }) - m <- nimbleModel(nc, data = list(x = x), - inits = list(probSurvive = probSurvive, - probCapture = probCapture)) - m$calculate() - MlProbX <- m$getLogProb("x") - expect_equal(MlProbX, lProbX) - - # Compiled model - cm <- compileNimble(m) - cm$calculate() - CMlProbX <- cm$getLogProb("x") - expect_equal(CMlProbX, lProbX) - - # Test imputing value for all NAs - xNA <- c(NA, NA, NA, NA, NA) - mNA <- nimbleModel(nc, data = list(x = xNA), - inits = list(probSurvive = probSurvive, - probCapture = probCapture)) - mNAConf <- configureMCMC(mNA) - mNAConf$addMonitors('x') - mNA_MCMC <- buildMCMC(mNAConf) - cmNA <- compileNimble(mNA, mNA_MCMC) - - set.seed(5) - cmNA$mNA_MCMC$run(10) - - # Did the imputed values come back? 
- expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) - - # Test simulation code - set.seed(1) - nSim <- 10 - xSim <- array(NA, dim = c(nSim, length(x))) - for(i in 1:nSim) - xSim[i,] <- rCJS_vs(1, probSurvive, probCapture, len = length(x)) - set.seed(1) - CrCJS_vs <- compileNimble(rCJS_vs) - CxSim <- array(NA, dim = c(nSim, length(x))) - for(i in 1:nSim) - CxSim[i,] <- CrCJS_vs(1, probSurvive, probCapture, len = length(x)) - expect_identical(xSim, CxSim) - - simNodes <- m$getDependencies(c('probSurvive', 'probCapture'), self = FALSE) - mxSim <- array(NA, dim = c(nSim, length(x))) - set.seed(1) - for(i in 1:nSim) { - m$simulate(simNodes, includeData = TRUE) - mxSim[i,] <- m$x - } - expect_identical(mxSim, xSim) - - CmxSim <- array(NA, dim = c(nSim, length(x))) - set.seed(1) - for(i in 1:nSim) { - cm$simulate(simNodes, includeData = TRUE) - CmxSim[i,] <- cm$x - } - expect_identical(CmxSim, mxSim) - }) +{ + # Uncompiled calculation + x <- c(1, 0, 1, 0, 0) + probSurvive <- c(0.8, 0.45, 0.4, 0.7) + probCapture <- 0.6 + probX <- dCJS_vs(x, probSurvive, probCapture) + # Manually calculate the correct answer + correctProbX <- probSurvive[1] * (1 - probCapture) * + probSurvive[2] * probCapture * + (probSurvive[3] * probSurvive[4] * (1 - probCapture)^2 + + probSurvive[3] * (1 - probCapture) * (1 - probSurvive[4]) + + (1 - probSurvive[3])) + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dCJS_vs(x, probSurvive, probCapture, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Compilation and compiled calculations + + call_dCJS_vs <- nimbleFunction( + run = function(x = double(1), + probSurvive = double(1), + probCapture = double(), + len = integer(0, default = 0), + log = integer(0, default = 0)) { + return(dCJS_vs(x, probSurvive, probCapture, len, log)) + returnType(double()) + } + ) + + CdCJS_vs <- compileNimble(call_dCJS_vs) + CprobX <- CdCJS_vs(x, probSurvive, probCapture) + 
expect_equal(CprobX, probX) + + ClProbX <- CdCJS_vs(x, probSurvive, probCapture, log = TRUE) + expect_equal(ClProbX, lProbX) + + # Use in Nimble model + nc <- nimbleCode({ + x[1:5] ~ dCJS_vs(probSurvive[1:4], probCapture, len = 5) + probCapture ~ dunif(0,1) + for (i in 1:4) { + probSurvive[i] ~ dunif(0,1) + } + }) + m <- nimbleModel(nc, data = list(x = x), + inits = list(probSurvive = probSurvive, + probCapture = probCapture)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- c(NA, NA, NA, NA, NA) + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(probSurvive = probSurvive, + probCapture = probCapture)) + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(5) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? 
+ expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) + + # Test simulation code + set.seed(1) + nSim <- 10 + xSim <- array(NA, dim = c(nSim, length(x))) + for(i in 1:nSim) + xSim[i,] <- rCJS_vs(1, probSurvive, probCapture, len = length(x)) + set.seed(1) + CrCJS_vs <- compileNimble(rCJS_vs) + CxSim <- array(NA, dim = c(nSim, length(x))) + for(i in 1:nSim) + CxSim[i,] <- CrCJS_vs(1, probSurvive, probCapture, len = length(x)) + expect_identical(xSim, CxSim) + + simNodes <- m$getDependencies(c('probSurvive', 'probCapture'), self = FALSE) + mxSim <- array(NA, dim = c(nSim, length(x))) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i,] <- m$x + } + expect_identical(mxSim, xSim) + + CmxSim <- array(NA, dim = c(nSim, length(x))) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i,] <- cm$x + } + expect_identical(CmxSim, mxSim) +}) # ----------------------------------------------------------------------------- # 4. 
Test dCJS_vv test_that("dCJS_vv works", - { - ## Uncompiled calculation - x <- c(1, 0,1,0,0) - probSurvive <- c(0.6, 0.5, 0.4, 0.55) - probCapture <- c(1, 0.45, 0.5, 0.55, 0.6) - len <- 5 - probX <- dCJS_vv(x, probSurvive, probCapture, len) - - correctProbX <- - probSurvive[1] * (1 - probCapture[2]) * - probSurvive[2] * (probCapture[3]) * - ((probSurvive[3] * (1 - probCapture[4]) * - probSurvive[4] * (1 - probCapture[5])) + - (probSurvive[3] * (1 - probCapture[4]) * - (1 - probSurvive[4])) + - (1 - probSurvive[3])) - - expect_equal(probX, correctProbX) - - ## log Prob - lProbX <- dCJS_vv(x, probSurvive, probCapture, log = TRUE) - lCorrectProbX <- log(correctProbX) - expect_equal(lProbX, lCorrectProbX) - - ## Compiles - CdCJS_vv <- compileNimble(dCJS_vv) - CprobX <- CdCJS_vv(x, probSurvive, probCapture) - expect_equal(CprobX, probX) - - ClProbX <- CdCJS_vv(x, probSurvive, probCapture, len = 5, log = TRUE) - expect_equal(ClProbX, lProbX) - - ## Use in model - nc <- nimbleCode({ - x[1:5] ~ dCJS_vv(probSurvive[1:4], probCapture[1:5], len = 5) - for (i in 1:4) { - probSurvive[i] ~ dunif(0,1) - probCapture[i] ~ dunif(0,1) - } - }) - m <- nimbleModel(nc, data = list(x = x), - inits = list(probSurvive = probSurvive, - probCapture = probCapture)) - m$calculate() - MlProbX <- m$getLogProb("x") - expect_equal(MlProbX, lProbX) - - cm <- compileNimble(m) - cm$calculate() - CMlProbX <- cm$getLogProb("x") - expect_equal(CMlProbX, lProbX) - - # Test imputing value for all NAs - xNA <- c(NA, NA, NA, NA, NA) - mNA <- nimbleModel(nc, data = list(x = xNA), - inits = list(probSurvive = probSurvive, - probCapture = probCapture)) - mNAConf <- configureMCMC(mNA) - mNAConf$addMonitors('x') - mNA_MCMC <- buildMCMC(mNAConf) - cmNA <- compileNimble(mNA, mNA_MCMC) - - set.seed(5) - cmNA$mNA_MCMC$run(10) - - # Did the imputed values come back? 
- expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) - - # Test simulation code - set.seed(1) - nSim <- 10 - xSim <- array(NA, dim = c(nSim, length(x))) - for(i in 1:nSim) - xSim[i,] <- rCJS_vv(1, probSurvive, probCapture, len = length(x)) - set.seed(1) - CrCJS_vv <- compileNimble(rCJS_vv) - CxSim <- array(NA, dim = c(nSim, length(x))) - for(i in 1:nSim) - CxSim[i,] <- CrCJS_vv(1, probSurvive, probCapture, len = length(x)) - expect_identical(xSim, CxSim) - - simNodes <- m$getDependencies(c('probSurvive', 'probCapture'), self = FALSE) - mxSim <- array(NA, dim = c(nSim, length(x))) - set.seed(1) - for(i in 1:nSim) { - m$simulate(simNodes, includeData = TRUE) - mxSim[i,] <- m$x - } - expect_identical(mxSim, xSim) - - CmxSim <- array(NA, dim = c(nSim, length(x))) - set.seed(1) - for(i in 1:nSim) { - cm$simulate(simNodes, includeData = TRUE) - CmxSim[i,] <- cm$x - } - expect_identical(CmxSim, mxSim) +{ + ## Uncompiled calculation + x <- c(1, 0,1,0,0) + probSurvive <- c(0.6, 0.5, 0.4, 0.55) + probCapture <- c(1, 0.45, 0.5, 0.55, 0.6) + len <- 5 + probX <- dCJS_vv(x, probSurvive, probCapture, len) + + correctProbX <- + probSurvive[1] * (1 - probCapture[2]) * + probSurvive[2] * (probCapture[3]) * + ((probSurvive[3] * (1 - probCapture[4]) * + probSurvive[4] * (1 - probCapture[5])) + + (probSurvive[3] * (1 - probCapture[4]) * + (1 - probSurvive[4])) + + (1 - probSurvive[3])) + + expect_equal(probX, correctProbX) + + ## log Prob + lProbX <- dCJS_vv(x, probSurvive, probCapture, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + ## Compiles + call_dCJS_vv <- nimbleFunction( + run = function(x = double(1), + probSurvive = double(1), + probCapture = double(1), + len = integer(0, default = 0), + log = integer(0, default = 0)) { + return(dCJS_vv(x, probSurvive, probCapture, len, log)) + returnType(double()) + } + ) + CdCJS_vv <- compileNimble(call_dCJS_vv) + CprobX <- CdCJS_vv(x, probSurvive, probCapture) + 
expect_equal(CprobX, probX) + + ClProbX <- CdCJS_vv(x, probSurvive, probCapture, len = 5, log = TRUE) + expect_equal(ClProbX, lProbX) + + ## Use in model + nc <- nimbleCode({ + x[1:5] ~ dCJS_vv(probSurvive[1:4], probCapture[1:5], len = 5) + for (i in 1:4) { + probSurvive[i] ~ dunif(0,1) + probCapture[i] ~ dunif(0,1) + } }) + m <- nimbleModel(nc, data = list(x = x), + inits = list(probSurvive = probSurvive, + probCapture = probCapture)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- c(NA, NA, NA, NA, NA) + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(probSurvive = probSurvive, + probCapture = probCapture)) + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(5) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? 
+ expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) + + # Test simulation code + set.seed(1) + nSim <- 10 + xSim <- array(NA, dim = c(nSim, length(x))) + for(i in 1:nSim) + xSim[i,] <- rCJS_vv(1, probSurvive, probCapture, len = length(x)) + set.seed(1) + CrCJS_vv <- compileNimble(rCJS_vv) + CxSim <- array(NA, dim = c(nSim, length(x))) + for(i in 1:nSim) + CxSim[i,] <- CrCJS_vv(1, probSurvive, probCapture, len = length(x)) + expect_identical(xSim, CxSim) + + simNodes <- m$getDependencies(c('probSurvive', 'probCapture'), self = FALSE) + mxSim <- array(NA, dim = c(nSim, length(x))) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i,] <- m$x + } + expect_identical(mxSim, xSim) + + CmxSim <- array(NA, dim = c(nSim, length(x))) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i,] <- cm$x + } + expect_identical(CmxSim, mxSim) +}) test_that("dCJS errors", { @@ -464,10 +504,53 @@ test_that("dCJS errors", { ### Compiled errors - CdCJS_ss <- compileNimble(dCJS_ss) - CdCJS_sv <- compileNimble(dCJS_sv) - CdCJS_vs <- compileNimble(dCJS_vs) - CdCJS_vv <- compileNimble(dCJS_vv) + call_dCJS_ss <- nimbleFunction( + run = function(x = double(1), + probSurvive = double(), + probCapture = double(), + len = integer(0, default = 0), + log = integer(0, default = 0)) { + return(dCJS_ss(x, probSurvive, probCapture, len, log)) + returnType(double()) + } + ) + call_dCJS_sv <- nimbleFunction( + run = function(x = double(1), + probSurvive = double(), + probCapture = double(1), + len = integer(0, default = 0), + log = integer(0, default = 0)) { + return(dCJS_sv(x, probSurvive, probCapture, len, log)) + returnType(double()) + } + ) + call_dCJS_vs <- nimbleFunction( + run = function(x = double(1), + probSurvive = double(1), + probCapture = double(), + len = integer(0, default = 0), + log = integer(0, default = 0)) { + return(dCJS_vs(x, probSurvive, probCapture, len, log)) + returnType(double()) + } + ) 
+ call_dCJS_vv <- nimbleFunction( + run = function(x = double(1), + probSurvive = double(1), + probCapture = double(1), + len = integer(0, default = 0), + log = integer(0, default = 0)) { + return(dCJS_vv(x, probSurvive, probCapture, len, log)) + returnType(double()) + } + ) + + + + CdCJS_ss <- compileNimble(call_dCJS_ss) + CdCJS_sv <- compileNimble(call_dCJS_sv) + CdCJS_vs <- compileNimble(call_dCJS_vs) + CdCJS_vv <- compileNimble(call_dCJS_vv) expect_error( CdCJS_ss(x = c(1,0,1,0,0), probCapture = 0.4, probSurvive = 0.5, len = 3) diff --git a/tests/testthat/test-DHMM.R b/tests/testthat/test-DHMM.R index 90ad085..3ca0a25 100644 --- a/tests/testthat/test-DHMM.R +++ b/tests/testthat/test-DHMM.R @@ -2,9 +2,6 @@ # ----------------------------------------------------------------------------- # 0. Load -# Set the context for testthat -context("Testing dDHMM-related functions.") - # ----------------------------------------------------------------------------- # 1. Test dDHMM, distribution for Dynamic Hidden Markov Model test_that("Testing dDHMM", { @@ -76,8 +73,17 @@ test_that("Testing dDHMM", { expect_equal(lProbX1, log(correctProbX1)) expect_equal(lProbX2, log(correctProbX2)) + call_dDHMM <- nimbleFunction( + run = function(x=double(1), init=double(1), probObs=double(2), + probTrans=double(3), len=integer(0,default=0), + checkRowSums = integer(0,default=1), + log=integer(0, default=0)) { + return(dDHMM(x,init,probObs,probTrans,len,checkRowSums,log)) + returnType(double()) + }) + # Repeat for the compiled function - CdDHMM <- compileNimble(dDHMM) + CdDHMM <- compileNimble(call_dDHMM) CprobX1 <- CdDHMM(x = x1, init = init, probObs = probObs, probTrans = probTrans, len = len, log = F) @@ -94,9 +100,9 @@ test_that("Testing dDHMM", { probTrans = probTrans[1:3, 1:3, 1:4], len = 5, checkRowSums = 1) }) - m <- nimbleModel(nc, data = list(x = x1), + m <- suppressWarnings(nimbleModel(nc, data = list(x = x1), inits = list(init = init, probObs = probObs, - probTrans = probTrans)) 
+ probTrans = probTrans))) # Calculate probability of x from the model m$calculate() MlProbX <- m$getLogProb("x") @@ -232,7 +238,16 @@ test_that("Testing dDHMMo", { expect_equal(lProbX2, log(correctProbX2)) # Repeat for the compiled function - CdDHMMo <- compileNimble(dDHMMo) + call_dDHMMo <- nimbleFunction( + run = function(x=double(1), init=double(1), probObs=double(3), + probTrans=double(3), len=integer(0,default=0), + checkRowSums = integer(0,default=1), + log=integer(0, default=0)) { + return(dDHMMo(x,init,probObs,probTrans,len,checkRowSums,log)) + returnType(double()) + }) + + CdDHMMo <- compileNimble(call_dDHMMo) CprobX1 <- CdDHMMo(x = x1, init = init, probObs = probObs, probTrans = probTrans, len = len, log = F) @@ -249,9 +264,9 @@ test_that("Testing dDHMMo", { probTrans = probTrans[1:3, 1:3, 1:4], len = 5, checkRowSums = 1) }) - m <- nimbleModel(nc, data = list(x = x1), + m <- suppressWarnings(nimbleModel(nc, data = list(x = x1), inits = list(init = init, probObs = probObs, - probTrans = probTrans)) + probTrans = probTrans))) # Calculate probability of x from the model m$calculate() MlProbX <- m$getLogProb("x") @@ -348,6 +363,7 @@ test_that("dDHMM and dDHMMo compatibility", { test_that("dDHMM errors where expected", { + message("2 error messages are expected.") len <- 5 x <- c(1, 1, 1, 2, 1) init <- c(0.4, 0.2, 0.4) @@ -423,6 +439,7 @@ test_that("dDHMM errors where expected", { test_that("dDHMMo errors where expected", { + message("6 more error messages are expected.") len <- 5 x <- c(1, 1, 1, 2, 1) init <- c(0.4, 0.2, 0.4) @@ -520,6 +537,7 @@ test_that("dDHMMo errors where expected", { test_that("rDHMM errors where expected", { + message("2 error messages are expected") len <- 5 x <- c(1, 1, 1, 2, 1) init <- c(0.4, 0.2, 0.4) @@ -590,6 +608,7 @@ test_that("rDHMM errors where expected", { test_that("rDHMMo errors where expected", { + message("6 more error messages are expected") len <- 5 x <- c(1, 1, 1, 2, 1) init <- c(0.4, 0.2, 0.4) diff --git 
a/tests/testthat/test-DynOcc.R b/tests/testthat/test-DynOcc.R index 04bc1db..6fcefb5 100644 --- a/tests/testthat/test-DynOcc.R +++ b/tests/testthat/test-DynOcc.R @@ -1,6 +1,4 @@ -context("Testing dDynOcc-related functions.") - test_that("dDynOcc_vvm works", { x <- matrix(c(0,0,NA,0, 1,1,1,0, @@ -53,7 +51,18 @@ test_that("dDynOcc_vvm works", { expect_equal(exp(lCorrectProbX), probX) expect_equal(lCorrectProbX, lProbX) - CdDynOcc_vvm <- compileNimble(dDynOcc_vvm) + call_dDynOcc_vvm <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(1), probColonize=double(1), + p = double(2), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_vvm(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_vvm <- compileNimble(call_dDynOcc_vvm) CprobX <- CdDynOcc_vvm(x, init, probPersist, probColonize, p, start, end, log = FALSE) expect_equal(CprobX, probX) @@ -77,13 +86,13 @@ test_that("dDynOcc_vvm works", { MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) - cm <- compileNimble(m, showCompilerOutput = TRUE) + cm <- compileNimble(m) cm$calculate() CMlProbX <- cm$getLogProb("x") expect_equal(CMlProbX, lProbX) set.seed(2468) - cm$simulate('x') + cm$simulate('x[1:4, 1:5]', includeData = TRUE) # Test simulation code set.seed(1) @@ -187,7 +196,18 @@ test_that("dDynOcc_vsm works", { expect_equal(exp(lCorrectProbX), probX) expect_equal(lCorrectProbX, lProbX) - CdDynOcc_vsm <- compileNimble(dDynOcc_vsm) + call_dDynOcc_vsm <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(1), probColonize=double(0), + p = double(2), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_vsm(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_vsm <- compileNimble(call_dDynOcc_vsm) CprobX <- CdDynOcc_vsm(x, init, probPersist, probColonize, p, start, end, log = FALSE) expect_equal(CprobX, 
probX) @@ -222,13 +242,13 @@ test_that("dDynOcc_vsm works", { MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) - cm <- compileNimble(m, showCompilerOutput = TRUE) + cm <- compileNimble(m) cm$calculate() CMlProbX <- cm$getLogProb("x") expect_equal(CMlProbX, lProbX) set.seed(2468) - cm$simulate('x') + cm$simulate('x[1:4, 1:5]', includeData = TRUE) # Test simulation code set.seed(1) @@ -328,7 +348,18 @@ test_that("dDynOcc_svm works", { expect_equal(exp(lCorrectProbX), probX) expect_equal(lCorrectProbX, lProbX) - CdDynOcc_svm <- compileNimble(dDynOcc_svm) + call_dDynOcc_svm <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(0), probColonize=double(1), + p = double(2), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_svm(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_svm <- compileNimble(call_dDynOcc_svm) CprobX <- CdDynOcc_svm(x, init, probPersist, probColonize, p, start, end, log = FALSE) expect_equal(CprobX, probX) @@ -366,13 +397,13 @@ test_that("dDynOcc_svm works", { MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) - cm <- compileNimble(m, showCompilerOutput = TRUE) + cm <- compileNimble(m) cm$calculate() CMlProbX <- cm$getLogProb("x") expect_equal(CMlProbX, lProbX) set.seed(2468) - cm$simulate('x') + cm$simulate('x[1:4, 1:5]', includeData = TRUE) # Test simulation code set.seed(1) @@ -473,7 +504,18 @@ test_that("dDynOcc_ssm works", { expect_equal(exp(lCorrectProbX), probX) expect_equal(lCorrectProbX, lProbX) - CdDynOcc_ssm <- compileNimble(dDynOcc_ssm) + call_dDynOcc_ssm <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(0), probColonize=double(0), + p = double(2), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_ssm(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_ssm <- compileNimble(call_dDynOcc_ssm) 
CprobX <- CdDynOcc_ssm(x, init, probPersist, probColonize, p, start, end, log = FALSE) expect_equal(CprobX, probX) @@ -506,13 +548,13 @@ test_that("dDynOcc_ssm works", { MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) - cm <- compileNimble(m, showCompilerOutput = TRUE) + cm <- compileNimble(m) cm$calculate() CMlProbX <- cm$getLogProb("x") expect_equal(CMlProbX, lProbX) set.seed(2468) - cm$simulate('x') + cm$simulate('x[1:4, 1:5]', includeData = TRUE) # Test simulation code set.seed(1) @@ -562,11 +604,52 @@ test_that("dDynOcc_ssm works", { test_that("Case errors in compiled dDynOcc_**m work", { - CdDynOcc_ssm <- compileNimble(dDynOcc_ssm) - CdDynOcc_svm <- compileNimble(dDynOcc_svm) - CdDynOcc_vsm <- compileNimble(dDynOcc_vsm) - CdDynOcc_vvm <- compileNimble(dDynOcc_vvm) + call_dDynOcc_ssm <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(), probColonize=double(), + p = double(2), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_ssm(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + call_dDynOcc_svm <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(), probColonize=double(1), + p = double(2), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_svm(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + call_dDynOcc_vsm <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(1), probColonize=double(), + p = double(2), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_vsm(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + call_dDynOcc_vvm <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(1), probColonize=double(1), + p = double(2), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + 
return(dDynOcc_vvm(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_ssm <- compileNimble(call_dDynOcc_ssm) + CdDynOcc_svm <- compileNimble(call_dDynOcc_svm) + CdDynOcc_vsm <- compileNimble(call_dDynOcc_vsm) + CdDynOcc_vvm <- compileNimble(call_dDynOcc_vvm) x <- matrix(c(0,0,NA,0, 1,1,1,0, @@ -604,9 +687,6 @@ test_that("Case errors in compiled dDynOcc_**m work", { }) - - - test_that("dDynOcc_vvv works", { x <- matrix(c(0,0,NA,0, 1,1,1,0, @@ -659,7 +739,18 @@ test_that("dDynOcc_vvv works", { expect_equal(exp(lCorrectProbX), probX) expect_equal(lCorrectProbX, lProbX) - CdDynOcc_vvv <- compileNimble(dDynOcc_vvv) + call_dDynOcc_vvv <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(1), probColonize=double(1), + p = double(1), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_vvv(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_vvv <- compileNimble(call_dDynOcc_vvv) CprobX <- CdDynOcc_vvv(x, init, probPersist, probColonize, p, start, end, log = FALSE) expect_equal(CprobX, probX) @@ -693,7 +784,7 @@ test_that("dDynOcc_vvv works", { MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) - cm <- compileNimble(m, showCompilerOutput = TRUE) + cm <- compileNimble(m) cm$calculate() CMlProbX <- cm$getLogProb("x") expect_equal(CMlProbX, lProbX) @@ -743,7 +834,6 @@ test_that("dDynOcc_vvv works", { # expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) }) - test_that("dDynOcc_vsv works", { x <- matrix(c(0,0,NA,0, 1,1,1,0, @@ -798,7 +888,18 @@ test_that("dDynOcc_vsv works", { expect_equal(exp(lCorrectProbX), probX) expect_equal(lCorrectProbX, lProbX) - CdDynOcc_vsv <- compileNimble(dDynOcc_vsv) + call_dDynOcc_vsv <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(1), probColonize=double(), + p = double(1), + start = double(1), + end = double(1), + log = 
integer(0,default=0)) { + return(dDynOcc_vsv(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_vsv <- compileNimble(call_dDynOcc_vsv) CprobX <- CdDynOcc_vsv(x, init, probPersist, probColonize, p, start, end, log = FALSE) expect_equal(CprobX, probX) @@ -831,13 +932,13 @@ test_that("dDynOcc_vsv works", { MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) - cm <- compileNimble(m, showCompilerOutput = TRUE) + cm <- compileNimble(m) cm$calculate() CMlProbX <- cm$getLogProb("x") expect_equal(CMlProbX, lProbX) set.seed(2468) - cm$simulate('x') + cm$simulate('x[1:4, 1:5]', includeData = TRUE) # Test simulation code set.seed(1) @@ -937,7 +1038,18 @@ test_that("dDynOcc_svv works", { expect_equal(exp(lCorrectProbX), probX) expect_equal(lCorrectProbX, lProbX) - CdDynOcc_svv <- compileNimble(dDynOcc_svv) + call_dDynOcc_svv <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(), probColonize=double(1), + p = double(1), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_svv(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_svv <- compileNimble(call_dDynOcc_svv) CprobX <- CdDynOcc_svv(x, init, probPersist, probColonize, p, start, end, log = FALSE) expect_equal(CprobX, probX) @@ -973,13 +1085,13 @@ test_that("dDynOcc_svv works", { MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) - cm <- compileNimble(m, showCompilerOutput = TRUE) + cm <- compileNimble(m) cm$calculate() CMlProbX <- cm$getLogProb("x") expect_equal(CMlProbX, lProbX) set.seed(2468) - cm$simulate('x') + cm$simulate('x[1:4, 1:5]', includeData = TRUE) # Test simulation code set.seed(1) @@ -1027,7 +1139,6 @@ test_that("dDynOcc_svv works", { # expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) }) - test_that("dDynOcc_ssv works", { x <- matrix(c(0,0,NA,0, 1,1,1,0, @@ -1080,7 +1191,18 @@ test_that("dDynOcc_ssv works", { 
expect_equal(exp(lCorrectProbX), probX) expect_equal(lCorrectProbX, lProbX) - CdDynOcc_ssv <- compileNimble(dDynOcc_ssv) + call_dDynOcc_ssv <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(), probColonize=double(), + p = double(1), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_ssv(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_ssv <- compileNimble(call_dDynOcc_ssv) CprobX <- CdDynOcc_ssv(x, init, probPersist, probColonize, p, start, end, log = FALSE) expect_equal(CprobX, probX) @@ -1111,13 +1233,13 @@ test_that("dDynOcc_ssv works", { MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) - cm <- compileNimble(m, showCompilerOutput = TRUE) + cm <- compileNimble(m) cm$calculate() CMlProbX <- cm$getLogProb("x") expect_equal(CMlProbX, lProbX) set.seed(2468) - cm$simulate('x') + cm$simulate('x[1:4, 1:5]', includeData = TRUE) # Test simulation code set.seed(1) @@ -1164,13 +1286,53 @@ test_that("dDynOcc_ssv works", { # expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) }) - test_that("Case errors in compiled dDynOcc_**v work", { - CdDynOcc_ssv <- compileNimble(dDynOcc_ssv) - CdDynOcc_svv <- compileNimble(dDynOcc_svv) - CdDynOcc_vsv <- compileNimble(dDynOcc_vsv) - CdDynOcc_vvv <- compileNimble(dDynOcc_vvv) - + call_dDynOcc_ssv <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(), probColonize=double(), + p = double(1), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_ssv(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + call_dDynOcc_svv <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(), probColonize=double(1), + p = double(1), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_svv(x,init,probPersist,probColonize,p,start,end,log)) + 
returnType(double()) + }) + call_dDynOcc_vsv <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(1), probColonize=double(), + p = double(1), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_vsv(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + call_dDynOcc_vvv <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(1), probColonize=double(1), + p = double(1), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_vvv(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + + CdDynOcc_ssv <- compileNimble(call_dDynOcc_ssv) + CdDynOcc_svv <- compileNimble(call_dDynOcc_svv) + CdDynOcc_vsv <- compileNimble(call_dDynOcc_vsv) + CdDynOcc_vvv <- compileNimble(call_dDynOcc_vvv) x <- matrix(c(0,0,NA,0, 1,1,1,0, @@ -1208,15 +1370,12 @@ test_that("Case errors in compiled dDynOcc_**v work", { }) - -context("Testing dDynOcc-related functions.") - test_that("dDynOcc_vvs works", { - x <- matrix(c(0,0,NA,0, + x <- matrix(c(0,0,-1,0, 1,1,1,0, 0,0,0,0, 0,0,1,0, - 0,0,0,NA), nrow = 4) + 0,0,0,-1), nrow = 4) start <- c(1,1,2,1) end <- c(5,5,5,4) init <- 0.7 @@ -1263,7 +1422,18 @@ test_that("dDynOcc_vvs works", { expect_equal(exp(lCorrectProbX), probX) expect_equal(lCorrectProbX, lProbX) - CdDynOcc_vvs <- compileNimble(dDynOcc_vvs) + call_dDynOcc_vvs <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(1), probColonize=double(1), + p = double(), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_vvs(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_vvs <- compileNimble(call_dDynOcc_vvs) CprobX <- CdDynOcc_vvs(x, init, probPersist, probColonize, p, start, end, log = FALSE) expect_equal(CprobX, probX) @@ -1295,13 +1465,13 @@ test_that("dDynOcc_vvs works", { MlProbX <- 
m$getLogProb("x") expect_equal(MlProbX, lProbX) - cm <- compileNimble(m, showCompilerOutput = TRUE) + cm <- compileNimble(m) cm$calculate() CMlProbX <- cm$getLogProb("x") expect_equal(CMlProbX, lProbX) set.seed(2468) - cm$simulate('x') + cm$simulate('x[1:4, 1:5]', includeData = TRUE) # Test simulation code set.seed(1) @@ -1348,7 +1518,6 @@ test_that("dDynOcc_vvs works", { # expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) }) - test_that("dDynOcc_vss works", { x <- matrix(c(0,0,NA,0, 1,1,1,0, @@ -1403,7 +1572,18 @@ test_that("dDynOcc_vss works", { expect_equal(exp(lCorrectProbX), probX) expect_equal(lCorrectProbX, lProbX) - CdDynOcc_vss <- compileNimble(dDynOcc_vss) + call_dDynOcc_vss <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(1), probColonize=double(), + p = double(), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_vss(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_vss <- compileNimble(call_dDynOcc_vss) CprobX <- CdDynOcc_vss(x, init, probPersist, probColonize, p, start, end, log = FALSE) expect_equal(CprobX, probX) @@ -1434,13 +1614,13 @@ test_that("dDynOcc_vss works", { MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) - cm <- compileNimble(m, showCompilerOutput = TRUE) + cm <- compileNimble(m) cm$calculate() CMlProbX <- cm$getLogProb("x") expect_equal(CMlProbX, lProbX) set.seed(2468) - cm$simulate('x') + cm$simulate('x[1:4, 1:5]', includeData = TRUE) # Test simulation code set.seed(1) @@ -1539,7 +1719,18 @@ test_that("dDynOcc_svs works", { expect_equal(exp(lCorrectProbX), probX) expect_equal(lCorrectProbX, lProbX) - CdDynOcc_svs <- compileNimble(dDynOcc_svs) + call_dDynOcc_svs <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(), probColonize=double(1), + p = double(), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + 
return(dDynOcc_svs(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_svs <- compileNimble(call_dDynOcc_svs) CprobX <- CdDynOcc_svs(x, init, probPersist, probColonize, p, start, end, log = FALSE) expect_equal(CprobX, probX) @@ -1573,13 +1764,13 @@ test_that("dDynOcc_svs works", { MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) - cm <- compileNimble(m, showCompilerOutput = TRUE) + cm <- compileNimble(m) cm$calculate() CMlProbX <- cm$getLogProb("x") expect_equal(CMlProbX, lProbX) set.seed(2468) - cm$simulate('x') + cm$simulate('x[1:4, 1:5]', includeData = TRUE) # Test simulation code set.seed(1) @@ -1626,7 +1817,6 @@ test_that("dDynOcc_svs works", { # expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) }) - test_that("dDynOcc_sss works", { x <- matrix(c(0,0,NA,0, 1,1,1,0, @@ -1679,7 +1869,18 @@ test_that("dDynOcc_sss works", { expect_equal(exp(lCorrectProbX), probX) expect_equal(lCorrectProbX, lProbX) - CdDynOcc_sss <- compileNimble(dDynOcc_sss) + call_dDynOcc_sss <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(), probColonize=double(), + p = double(), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_sss(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + + CdDynOcc_sss <- compileNimble(call_dDynOcc_sss) CprobX <- CdDynOcc_sss(x, init, probPersist, probColonize, p, start, end, log = FALSE) expect_equal(CprobX, probX) @@ -1708,13 +1909,13 @@ test_that("dDynOcc_sss works", { MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) - cm <- compileNimble(m, showCompilerOutput = TRUE) + cm <- compileNimble(m) cm$calculate() CMlProbX <- cm$getLogProb("x") expect_equal(CMlProbX, lProbX) set.seed(2468) - cm$simulate('x') + cm$simulate('x[1:4, 1:5]', includeData = TRUE) # Test simulation code set.seed(1) @@ -1761,12 +1962,51 @@ test_that("dDynOcc_sss works", { # 
expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) }) - test_that("Case errors in compiled dDynOcc_** work", { - CdDynOcc_sss <- compileNimble(dDynOcc_sss) - CdDynOcc_svs <- compileNimble(dDynOcc_svs) - CdDynOcc_vss <- compileNimble(dDynOcc_vss) - CdDynOcc_vvs <- compileNimble(dDynOcc_vvs) + call_dDynOcc_sss <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(), probColonize=double(), + p = double(), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_sss(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + call_dDynOcc_svs <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(), probColonize=double(1), + p = double(), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_svs(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + call_dDynOcc_vss <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(1), probColonize=double(), + p = double(), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_vss(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + call_dDynOcc_vvs <- nimbleFunction( + run = function(x=double(2), init = double(), + probPersist = double(1), probColonize=double(1), + p = double(), + start = double(1), + end = double(1), + log = integer(0,default=0)) { + return(dDynOcc_vvs(x,init,probPersist,probColonize,p,start,end,log)) + returnType(double()) + }) + CdDynOcc_sss <- compileNimble(call_dDynOcc_sss) + CdDynOcc_svs <- compileNimble(call_dDynOcc_svs) + CdDynOcc_vss <- compileNimble(call_dDynOcc_vss) + CdDynOcc_vvs <- compileNimble(call_dDynOcc_vvs) x <- matrix(c(0,0,NA,0, diff --git a/tests/testthat/test-HMM.R b/tests/testthat/test-HMM.R index 140137a..f6a09a9 100644 --- a/tests/testthat/test-HMM.R +++ b/tests/testthat/test-HMM.R @@ -2,10 +2,6 @@ 
# ----------------------------------------------------------------------------- # 0. Load - -# Set the context for testthat -context("Testing dHMM-related functions.") - # ----------------------------------------------------------------------------- # 1. Test dHMM, distribution for Hidden Markov Model test_that("dHMM works", { @@ -78,7 +74,15 @@ test_that("dHMM works", { expect_equal(lProbX2, log(correctProbX2)) # Repeat for the compiled function - CdHMM <- compileNimble(dHMM) + call_dHMM <- nimbleFunction( + run = function(x=double(1), init=double(1), probObs=double(2), + probTrans=double(2), len=integer(0,default=0), + checkRowSums = integer(0,default=1), + log=integer(0, default=0)) { + return(dHMM(x,init,probObs,probTrans,len,checkRowSums,log)) + returnType(double()) + }) + CdHMM <- compileNimble(call_dHMM) CprobX1 <- CdHMM(x = x1, init = init, probObs = probObs, probTrans = probTrans, len = len, log = FALSE) @@ -228,7 +232,16 @@ test_that("dHMMo works", { expect_equal(lProbX2, log(correctProbX2)) # Repeat for compiled nimbleFunction - CdHMMo <- compileNimble(dHMMo) + call_dHMMo <- nimbleFunction( + run = function(x=double(1), init=double(1), probObs=double(3), + probTrans=double(2), len=integer(0,default=0), + checkRowSums = integer(0,default=1), + log=integer(0, default=0)) { + return(dHMMo(x,init,probObs,probTrans,len,checkRowSums,log)) + returnType(double()) + }) + + CdHMMo <- compileNimble(call_dHMMo) CprobX1 <- CdHMMo(x = x1, init = init, probObs = probObs, probTrans = probTrans, len = len, log = FALSE) @@ -246,9 +259,9 @@ test_that("dHMMo works", { probTrans = probTrans[1:3, 1:3], len = 5, checkRowSums = 1) }) # Build a nimbleModel - m <- nimbleModel(nc, data = list(x = x1), + m <- suppressWarnings(nimbleModel(nc, data = list(x = x1), inits = list(init = init, probObs = probObs, - probTrans = probTrans)) + probTrans = probTrans))) # Use the nimbleModel to calculate probabilities and compare m$calculate() @@ -346,7 +359,7 @@ test_that("dHMM and dHMMo 
compatibility", { # ----------------------------------------------------------------------------- # 3. Test that dHMM errors when input assumptions are violated test_that("dHMM errors where expected", { - + message("6 error messages are expected.") # Start with good inputs and break it one by one len <- 5 x <- c(1, 1, 1, 2, 1) @@ -418,6 +431,7 @@ test_that("dHMM errors where expected", { # 4. Test that dHMMo errors when input assumptions are violated test_that("dHMMo errors where expected", { + message("6 more error messages are expected.") len <- 5 x <- c(1, 1, 1, 2, 1) init <- c(0.4, 0.2, 0.4) @@ -501,7 +515,7 @@ test_that("dHMMo errors where expected", { # ----------------------------------------------------------------------------- # 5. Test that rHMM errors when input assumptions are violated test_that("rHMM errors where expected", { - + message("8 error messages are expected.") # Start with good inputs and break it one by one len <- 5 x <- c(1, 1, 1, 2, 1) @@ -573,6 +587,7 @@ test_that("rHMM errors where expected", { # 6. 
Test that rHMMo errors when input assumptions are violated test_that("rHMMo errors where expected", { + message("7 more error messages are expected.") len <- 5 x <- c(1, 1, 1, 2, 1) init <- c(0.4, 0.2, 0.4) diff --git a/tests/testthat/test-Nmixture.R b/tests/testthat/test-Nmixture.R index 6352b14..32f1e81 100644 --- a/tests/testthat/test-Nmixture.R +++ b/tests/testthat/test-Nmixture.R @@ -24,10 +24,26 @@ test_that("dNmixture_v works", correctProbX <- correctProbX + dpois(N, lambda) * prod(dbinom(x, N, prob)) } expect_equal(probX, correctProbX) + # Manually calculate the correct answer with special-case Nmin and Nmax + Nmin <- 3 + for(Nmax in 3:5) { + probX <- dNmixture_v(x, lambda, prob, Nmin, Nmax, len) + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dpois(N, lambda) * prod(dbinom(x, N, prob)) + } + expect_equal(probX, correctProbX) + } + Nmin <- 0 + Nmax <- 250 # Uncompiled log probability lProbX <- dNmixture_v(x, lambda, prob, Nmin, Nmax, len, log = TRUE) - lCorrectProbX <- log(correctProbX) + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dpois(N, lambda) * prod(dbinom(x, N, prob)) + } + lCorrectProbX <- log(correctProbX) expect_equal(lProbX, lCorrectProbX) # Dynamic Nmin / Nmax @@ -42,13 +58,16 @@ test_that("dNmixture_v works", # Compilation and compiled calculations CdNmixture_v <- compileNimble(dNmixture_v) - CprobX <- CdNmixture_v(x, lambda, prob, Nmin, Nmax, len) - expect_equal(CprobX, probX) + CprobX <- CdNmixture_v(x, lambda, prob, Nmin, Nmax, len) + probX <- dNmixture_v(x, lambda, prob, Nmin, Nmax, len) + expect_equal(CprobX, probX) ClProbX <- CdNmixture_v(x, lambda, prob, Nmin, Nmax, len, log = TRUE) - expect_equal(ClProbX, lProbX) + lProbX <- dNmixture_v(x, lambda, prob, Nmin, Nmax, len, log=TRUE) + expect_equal(ClProbX, lProbX) CdynProbX <- CdNmixture_v(x, lambda, prob, Nmin = -1, Nmax = -1, len) + dynCorrectProbX <- dNmixture_v(x, lambda, prob, Nmin, Nmax, len) expect_equal(CdynProbX, 
dynCorrectProbX) # Use in Nimble model @@ -295,17 +314,37 @@ test_that("dNmixture_BNB_v works", } expect_equal(dynProbX, dynCorrectProbX) + # Some special-case Nmax values + r <- 1 / theta + pNB <- 1 / (1 + theta * lambda) + Nmin <- 3 + for(Nmax in 3:6) { + probX <- dNmixture_BNB_v(x, lambda, theta, prob, Nmin, Nmax, len) + # Manually calculate the correct answer + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dnbinom(N, size = r, prob = pNB) * + prod(dbinom(x, N, prob)) + } + expect_equal(probX, correctProbX) + } + Nmax <- 0 + Nmax <- 250 # Compilation and compiled calculations CdNmixture_BNB_v <- compileNimble(dNmixture_BNB_v) CprobX <- CdNmixture_BNB_v(x, lambda, theta, prob, Nmin, Nmax, len) + probX <- dNmixture_BNB_v(x, lambda, theta, prob, Nmin, Nmax, len) expect_equal(CprobX, probX) ClProbX <- CdNmixture_BNB_v(x, lambda, theta, prob, Nmin, Nmax, len, log = TRUE) + lProbX <- dNmixture_BNB_v(x, lambda, theta, prob, Nmin, Nmax, len, log = TRUE) expect_equal(ClProbX, lProbX) CdynProbX <- CdNmixture_BNB_v(x, lambda, theta, prob, Nmin = -1, Nmax = -1, len) + dynProbX <- dNmixture_BNB_v(x, lambda, theta, prob, Nmin = -1, + Nmax = -1, len) expect_equal(CdynProbX, dynCorrectProbX) # Use in Nimble model @@ -532,9 +571,8 @@ test_that("dNmixture_BNB_oneObs works", prob <- c(0.5) Nmin <- 0 Nmax <- 250 - len <- 5 - probX <- dNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin, Nmax, len) + probX <- dNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin, Nmax) # Manually calculate the correct answer r <- 1 / theta @@ -548,12 +586,12 @@ test_that("dNmixture_BNB_oneObs works", expect_equal(probX, correctProbX) # Uncompiled log probability - lProbX <- dNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin, Nmax, len, log = TRUE) + lProbX <- dNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin, Nmax, log = TRUE) lCorrectProbX <- log(correctProbX) expect_equal(lProbX, lCorrectProbX) # Dynamic Nmin/Nmax - dynProbX <- dNmixture_BNB_oneObs(x, lambda, theta, prob, 
Nmin = -1, Nmax = -1, len) + dynProbX <- dNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin = -1, Nmax = -1) dNmin <- 1; dNmax <- 17 dynCorrectProbX <- 0 for (N in dNmin:dNmax) { @@ -563,18 +601,18 @@ test_that("dNmixture_BNB_oneObs works", expect_equal(dynProbX, dynCorrectProbX) CdynProbX <- dNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin = -1, - Nmax = -1, len) + Nmax = -1) expect_equal(CdynProbX, dynCorrectProbX) # Compilation and compiled calculations CdNmixture_BNB_oneObs <- compileNimble(dNmixture_BNB_oneObs) - CprobX <- CdNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin, Nmax, len) + CprobX <- CdNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin, Nmax) expect_equal(CprobX, probX) - ClProbX <- CdNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin, Nmax, len, log = TRUE) + ClProbX <- CdNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin, Nmax, log = TRUE) expect_equal(ClProbX, lProbX) - CdynProbX <- CdNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin = -1, Nmax = -1, len) + CdynProbX <- CdNmixture_BNB_oneObs(x, lambda, theta, prob, Nmin = -1, Nmax = -1) expect_equal(CdynProbX, dynCorrectProbX) @@ -582,8 +620,7 @@ test_that("dNmixture_BNB_oneObs works", nc <- nimbleCode({ x ~ dNmixture_BNB_oneObs(lambda = lambda, prob = prob, theta = theta, - Nmin = Nmin, Nmax = Nmax, len = len) - + Nmin = Nmin, Nmax = Nmax) }) m <- nimbleModel(code = nc, @@ -591,8 +628,7 @@ test_that("dNmixture_BNB_oneObs works", inits = list(lambda = lambda, prob = prob, theta = theta), - constants = list(Nmin = Nmin, Nmax = Nmax, - len = len)) + constants = list(Nmin = Nmin, Nmax = Nmax)) m$calculate() MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) @@ -609,8 +645,7 @@ test_that("dNmixture_BNB_oneObs works", inits = list(lambda = lambda, theta = theta, prob = prob), - constants = list(Nmin = Nmin, Nmax = Nmax, - len = len)) + constants = list(Nmin = Nmin, Nmax = Nmax)) mNAConf <- configureMCMC(mNA) @@ -629,14 +664,14 @@ test_that("dNmixture_BNB_oneObs works", xSim <- numeric(nSim) 
set.seed(1) for (i in 1:nSim) { - xSim[i] <- rNmixture_BNB_oneObs(1, lambda, theta, prob, Nmin, Nmax, len) + xSim[i] <- rNmixture_BNB_oneObs(1, lambda, theta, prob, Nmin, Nmax) } CrNmixture_BNB_oneObs <- compileNimble(rNmixture_BNB_oneObs) CxSim <- numeric(nSim) set.seed(1) for (i in 1:nSim) { - CxSim[i] <- CrNmixture_BNB_oneObs(1, lambda, theta, prob, Nmin, Nmax, len) + CxSim[i] <- CrNmixture_BNB_oneObs(1, lambda, theta, prob, Nmin, Nmax) } expect_identical(xSim, CxSim) @@ -681,7 +716,7 @@ test_that("dNmixture_BBP_v works", correctProbX <- 0 for (N in Nmin:Nmax) { correctProbX <- correctProbX + dpois(N, lambda) * - prod(dBetaBinom(x, N, shape1 = alpha, shape2 = beta)) + prod(dBetaBinom_v(x, N, shape1 = alpha, shape2 = beta)) } expect_equal(probX, correctProbX) @@ -824,8 +859,8 @@ test_that("dNmixture_BBP_s works", correctProbX <- 0 for (N in Nmin:Nmax) { correctProbX <- correctProbX + dpois(N, lambda) * - prod(dBetaBinom(x, N, - shape1 = rep(alpha, len), shape2 = rep(beta, len))) + prod(dBetaBinom_s(x, N, + alpha, shape2 = beta)) } expect_equal(probX, correctProbX) @@ -959,7 +994,7 @@ test_that("dNmixture_BBP_oneObs works", Nmax <- 250 len <- 5 - probX <- dNmixture_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax, len) + probX <- dNmixture_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax) # Manually calculate the correct answer alpha <- prob * s @@ -967,49 +1002,49 @@ test_that("dNmixture_BBP_oneObs works", correctProbX <- 0 for (N in Nmin:Nmax) { correctProbX <- correctProbX + dpois(N, lambda) * - prod(dBetaBinom_One(x, N, alpha, beta)) + prod(dBetaBinom_s(x, N, alpha, beta)) } expect_equal(probX, correctProbX) # Uncompiled log probability - lProbX <- dNmixture_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax, len, log = TRUE) + lProbX <- dNmixture_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax, log = TRUE) lCorrectProbX <- log(correctProbX) expect_equal(lProbX, lCorrectProbX) # Compilation and compiled calculations CdNmixture_BBP_oneObs <- compileNimble(dNmixture_BBP_oneObs) - CprobX <- 
CdNmixture_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax, len) + CprobX <- CdNmixture_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax) expect_equal(CprobX, probX) - ClProbX <- CdNmixture_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax, len, log = TRUE) + ClProbX <- CdNmixture_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax, log = TRUE) expect_equal(ClProbX, lProbX) # Dynamic Nmin / Nmax isn't allowed expect_error({ - dNmixture_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax = -1, len) + dNmixture_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax = -1) }) expect_error({ - dNmixture_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax, len) + dNmixture_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax) }) expect_error({ - dNmixture_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax = -1, len) + dNmixture_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax = -1) }) expect_error({ - CdNmixture_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax = -1, len) + CdNmixture_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax = -1) }) expect_error({ - CdNmixture_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax, len) + CdNmixture_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax) }) expect_error({ - CdNmixture_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax = -1, len) + CdNmixture_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax = -1) }) # Use in Nimble model nc <- nimbleCode({ x ~ dNmixture_BBP_oneObs(lambda = lambda, prob = prob, s = s, - Nmin = Nmin, Nmax = Nmax, len = len) + Nmin = Nmin, Nmax = Nmax) }) @@ -1018,8 +1053,7 @@ test_that("dNmixture_BBP_oneObs works", inits = list(lambda = lambda, prob = prob, s = s), - constants = list(Nmin = Nmin, Nmax = Nmax, - len = len)) + constants = list(Nmin = Nmin, Nmax = Nmax)) m$calculate() MlProbX <- m$getLogProb("x") expect_equal(MlProbX, lProbX) @@ -1036,8 +1070,7 @@ test_that("dNmixture_BBP_oneObs works", inits = list(lambda = lambda, s = s, prob = prob), - constants = list(Nmin = Nmin, Nmax = Nmax, - len = len)) + constants = list(Nmin = Nmin, Nmax = Nmax)) mNAConf <- configureMCMC(mNA) @@ -1056,14 +1089,14 
@@ test_that("dNmixture_BBP_oneObs works", xSim <- numeric(nSim) set.seed(1) for (i in 1:nSim) { - xSim[i] <- rNmixture_BBP_oneObs(1, lambda, prob, s, Nmin, Nmax, len) + xSim[i] <- rNmixture_BBP_oneObs(1, lambda, prob, s, Nmin, Nmax) } CrNmixture_BBP_oneObs <- compileNimble(rNmixture_BBP_oneObs) CxSim <- numeric(nSim) set.seed(1) for (i in 1:nSim) { - CxSim[i] <- CrNmixture_BBP_oneObs(1, lambda, prob, s, Nmin, Nmax, len) + CxSim[i] <- CrNmixture_BBP_oneObs(1, lambda, prob, s, Nmin, Nmax) } expect_identical(xSim, CxSim) @@ -1113,7 +1146,7 @@ test_that("dNmixture_BBNB_v works", correctProbX <- 0 for (N in Nmin:Nmax) { correctProbX <- correctProbX + dnbinom(N, size = r, prob = pNB) * - prod(dBetaBinom(x, N, shape1 = alpha, shape2 = beta)) + prod(dBetaBinom_v(x, N, shape1 = alpha, shape2 = beta)) } expect_equal(probX, correctProbX) @@ -1256,7 +1289,7 @@ test_that("dNmixture_BBNB_s works", correctProbX <- 0 for (N in Nmin:Nmax) { correctProbX <- correctProbX + dnbinom(N, size = r, prob = pNB) * - prod(dBetaBinom(x, N, shape1 = rep(alpha, len), shape2 = rep(beta, len))) + prod(dBetaBinom_s(x, N, shape1 = alpha, shape2 = beta)) } expect_equal(probX, correctProbX) @@ -1388,7 +1421,7 @@ test_that("dNmixture_BBNB_oneObs works", Nmax <- 250 len <- 1 - probX <- dNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax, len) + probX <- dNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax) # Manually calculate the correct answer alpha <- prob * s @@ -1398,49 +1431,49 @@ test_that("dNmixture_BBNB_oneObs works", correctProbX <- 0 for (N in Nmin:Nmax) { correctProbX <- correctProbX + dnbinom(N, size = r, prob = pNB) * - prod(dBetaBinom_One(x, N, alpha, beta)) + prod(dBetaBinom_s(x, N, alpha, beta)) } expect_equal(probX, correctProbX) # Uncompiled log probability - lProbX <- dNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax, len, log = TRUE) + lProbX <- dNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax, log = TRUE) lCorrectProbX <- log(correctProbX) 
expect_equal(lProbX, lCorrectProbX) # Compilation and compiled calculations CdNmixture_BBNB_oneObs <- compileNimble(dNmixture_BBNB_oneObs) - CprobX <- CdNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax, len) + CprobX <- CdNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax) expect_equal(CprobX, probX) - ClProbX <- CdNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax, len, log = TRUE) + ClProbX <- CdNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax, log = TRUE) expect_equal(ClProbX, lProbX) # Dynamic Nmin / Nmax isn't allowed expect_error({ - dNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len) + dNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1) }) expect_error({ - dNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax = -1, len) + dNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax = -1) }) expect_error({ - dNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax, len) + dNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax) }) expect_error({ - CdNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len) + CdNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1) }) expect_error({ - CdNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax = -1, len) + CdNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax = -1) }) expect_error({ - CdNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax, len) + CdNmixture_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax) }) # Use in Nimble model nc <- nimbleCode({ x ~ dNmixture_BBNB_oneObs(lambda = lambda, prob = prob, s = s, theta = theta, - Nmin = Nmin, Nmax = Nmax, len = len) + Nmin = Nmin, Nmax = Nmax) }) @@ -1450,8 +1483,7 @@ test_that("dNmixture_BBNB_oneObs works", prob = prob, theta = theta, s = s), - constants = list(Nmin = Nmin, Nmax = Nmax, - len = len)) + constants = list(Nmin = Nmin, Nmax = Nmax)) m$calculate() MlProbX <- m$getLogProb("x") 
expect_equal(MlProbX, lProbX) @@ -1468,8 +1500,7 @@ test_that("dNmixture_BBNB_oneObs works", inits = list(lambda = lambda, s = s, theta = theta, prob = prob), - constants = list(Nmin = Nmin, Nmax = Nmax, - len = len)) + constants = list(Nmin = Nmin, Nmax = Nmax)) mNAConf <- configureMCMC(mNA) @@ -1488,14 +1519,14 @@ test_that("dNmixture_BBNB_oneObs works", xSim <- numeric(nSim) set.seed(1) for (i in 1:nSim) { - xSim[i] <- rNmixture_BBNB_oneObs(1, lambda, theta, prob, s, Nmin, Nmax, len) + xSim[i] <- rNmixture_BBNB_oneObs(1, lambda, theta, prob, s, Nmin, Nmax) } CrNmixture_BBNB_oneObs <- compileNimble(rNmixture_BBNB_oneObs) CxSim <- numeric(nSim) set.seed(1) for (i in 1:nSim) { - CxSim[i] <- CrNmixture_BBNB_oneObs(1, lambda, theta, prob, s, Nmin, Nmax, len) + CxSim[i] <- CrNmixture_BBNB_oneObs(1, lambda, theta, prob, s, Nmin, Nmax) } expect_identical(xSim, CxSim) diff --git a/tests/testthat/test-NmixtureADnoDerivs.R b/tests/testthat/test-NmixtureADnoDerivs.R new file mode 100644 index 0000000..67fe741 --- /dev/null +++ b/tests/testthat/test-NmixtureADnoDerivs.R @@ -0,0 +1,1766 @@ +# Test the N-mixture distribution nimbleFunction. +library(nimbleEcology) +# ----------------------------------------------------------------------------- +# 0. Load +# ----------------------------------------------------------------------------- +#### 1. 
Test dNmixtureAD_v #### +test_that("dNmixtureAD_v works uncompiled", +{ + # Uncompiled calculation + x <- c(1, 0, 1, 3, 0) + lambda <- 8 + prob <- c(0.5, 0.3, 0.5, 0.4, 0.1) + Nmin <- 0 + Nmax <- 250 + len <- 5 + + probX <- dNmixtureAD_v(x, lambda, prob, Nmin, Nmax, len) + # Manually calculate the correct answer + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dpois(N, lambda) * prod(dbinom(x, N, prob)) + } + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dNmixtureAD_v(x, lambda, prob, Nmin, Nmax, len, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Other Nmin / Nmax + Nmin <- 3 + for(Nmax in 3:6) { + dynProbX <- dNmixtureAD_v(x, lambda, prob, Nmin = Nmin, Nmax = Nmax, len) + dynCorrectProbX <- 0 + for (N in Nmin:Nmax) { + dynCorrectProbX <- dynCorrectProbX + dpois(N, lambda) * prod(dbinom(x, N, prob)) + } + expect_equal(dynProbX, dynCorrectProbX) + } + Nmin <- 0 + Nmax <- 250 + # Compilation and compiled calculations + call_dNmixtureAD_v <- nimbleFunction( + name = "t1", + run=function(x=double(1), + lambda=double(), + prob=double(1), + Nmin = double(0), + Nmax = double(0), + len=double(), + log = integer(0, default=0)) { + return(dNmixtureAD_v(x,lambda,prob,Nmin,Nmax,len,log)) + returnType(double()) + } + ) + CdNmixtureAD_v <- compileNimble(call_dNmixtureAD_v) + CprobX <- CdNmixtureAD_v(x, lambda, prob, Nmin, Nmax, len) + probX <- dNmixtureAD_v(x, lambda, prob, Nmin, Nmax, len) + expect_equal(CprobX, probX) + + ClProbX <- CdNmixtureAD_v(x, lambda, prob, Nmin, Nmax, len, log = TRUE) + lProbX <- dNmixtureAD_v(x, lambda, prob, Nmin, Nmax, len, log = TRUE) + expect_equal(ClProbX, lProbX) + + expect_error(CdynProbX <- CdNmixtureAD_v(x, lambda, prob, Nmin = -1, Nmax = -1, len)) + + # Use in Nimble model + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_v(lambda = lambda, prob = prob[1:5], + Nmin = Nmin, Nmax = Nmax, + len = len) + }) + + m <- nimbleModel(code = nc, + 
data = list(x = x), + inits = list(lambda = lambda, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- c(NA, NA, NA, NA, NA) + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(lambda = lambda, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + + + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(0) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? + expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[2]"]))) + + # Test simulation code + nSim <- 10 + xSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + xSim[i,] <- rNmixtureAD_v(1, lambda, prob, Nmin, Nmax, len) + } + + CrNmixtureAD_v <- compileNimble(rNmixtureAD_v) + CxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + CxSim[i,] <- CrNmixtureAD_v(1, lambda, prob, Nmin, Nmax, len) + } + expect_identical(xSim, CxSim) + + simNodes <- m$getDependencies(c('prob', 'lambda'), self = FALSE) + mxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i,] <- m$x + } + expect_identical(mxSim, xSim) + + CmxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i,] <- cm$x + } + expect_identical(CmxSim, mxSim) +}) + +# ----------------------------------------------------------------------------- +#### 2. 
Test dNmixtureAD_s #### +test_that("dNmixtureAD_s works", + { + # Uncompiled calculation + x <- c(1, 0, 1, 3, 2) + lambda <- 8 + prob <- 0.4 + Nmin <- 0 + Nmax <- 250 + len <- 5 + + probX <- dNmixtureAD_s(x, lambda, prob, Nmin, Nmax, len) + # Manually calculate the correct answer + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dpois(N, lambda) * prod(dbinom(x, N, prob)) + } + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dNmixtureAD_s(x, lambda, prob, Nmin, Nmax, len, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Dynamic Nmin / Nmax + expect_error(dynProbX <- + dNmixtureAD_s(x, lambda, prob, Nmin = -1, Nmax = -1, len)) + + # Other Nmin / Nmax + Nmin <- 3 + for(Nmax in 3:6) { + dynProbX <- dNmixtureAD_s(x, lambda, prob, Nmin = Nmin, Nmax = Nmax, len) + dynCorrectProbX <- 0 + for (N in Nmin:Nmax) { + dynCorrectProbX <- dynCorrectProbX + dpois(N, lambda) * prod(dbinom(x, N, prob)) + } + expect_equal(dynProbX, dynCorrectProbX) + } + Nmin <- 0 + Nmax <- 250 + # Compilation and compiled calculations + call_dNmixtureAD_s <- nimbleFunction( + name = "t2", + run=function(x=double(1), + lambda=double(), + prob=double(), + Nmin = double(0), + Nmax = double(0), + len=double(), + log = integer(0, default=0)) { + return(dNmixtureAD_s(x,lambda,prob,Nmin,Nmax,len,log)) + returnType(double()) + } + ) + + + # Compilation and compiled calculations + CdNmixtureAD_s <- compileNimble(call_dNmixtureAD_s) + CprobX <- CdNmixtureAD_s(x, lambda, prob, Nmin, Nmax, len) + expect_equal(CprobX, probX) + + ClProbX <- CdNmixtureAD_s(x, lambda, prob, Nmin, Nmax, len, log = TRUE) + expect_equal(ClProbX, lProbX) + + expect_error(CdynProbX <- CdNmixtureAD_s(x, lambda, prob, Nmin = -1, Nmax = -1, len)) + + # Use in Nimble model + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_s(lambda = lambda, prob = prob, + Nmin = Nmin, Nmax = Nmax, len = len) + }) + + m <- nimbleModel(code = nc, + data = list(x 
= x), + inits = list(lambda = lambda, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- c(NA, NA, NA, NA, NA) + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(lambda = lambda, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + + + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(0) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? + expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[2]"]))) + + # Test simulation code + nSim <- 10 + xSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + xSim[i,] <- rNmixtureAD_s(1, lambda, prob, Nmin, Nmax, len) + } + + CrNmixtureAD_s <- compileNimble(rNmixtureAD_s) + CxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + CxSim[i,] <- CrNmixtureAD_s(1, lambda, prob, Nmin, Nmax, len) + } + expect_identical(xSim, CxSim) + + simNodes <- m$getDependencies(c('prob', 'lambda'), self = FALSE) + mxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i,] <- m$x + } + expect_identical(mxSim, xSim) + + CmxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i,] <- cm$x + } + expect_identical(CmxSim, mxSim) + }) + + + +# ----------------------------------------------------------------------------- +#### 3. 
Test dNmixtureAD_BNB_v #### +test_that("dNmixtureAD_BNB_v works", + { + # Uncompiled calculation + x <- c(1, 0, 3, 3, 0) + lambda <- 8 + theta <- 2 + prob <- c(0.5, 0.3, 0.5, 0.4, 0.1) + Nmin <- 0 + Nmax <- 250 + len <- 5 + + probX <- dNmixtureAD_BNB_v(x, lambda, theta, prob, Nmin, Nmax, len) + + # Manually calculate the correct answer + r <- 1 / theta + pNB <- 1 / (1 + theta * lambda) + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dnbinom(N, size = r, prob = pNB) * + prod(dbinom(x, N, prob)) + } + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dNmixtureAD_BNB_v(x, lambda, theta, prob, Nmin, Nmax, len, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Dynamic Nmin/Nmax + expect_error(dynProbX <- dNmixtureAD_BNB_v(x, lambda, theta, prob, Nmin = -1, Nmax = -1, len)) + + # Other Nmin / Nmax + Nmin <- 3 + for(Nmax in 3:6) { + dynProbX <- dNmixtureAD_BNB_v(x, lambda, theta, prob, Nmin = Nmin, Nmax = Nmax, len) + dynCorrectProbX <- 0 + for (N in Nmin:Nmax) { + dynCorrectProbX <- dynCorrectProbX + dnbinom(N, size = r, prob = pNB) * + prod(dbinom(x, N, prob)) + } + expect_equal(dynProbX, dynCorrectProbX) + } + Nmin <- 0 + Nmax <- 250 + # Compilation and compiled calculations + call_dNmixtureAD_BNB_v <- nimbleFunction( + name = "t3", + run=function(x=double(1), + lambda=double(), + theta=double(), + prob=double(1), + Nmin = double(0), + Nmax = double(0), + len=double(), + log = integer(0, default=0)) { + return(dNmixtureAD_BNB_v(x,lambda,theta,prob,Nmin,Nmax,len,log)) + returnType(double()) + } + ) + + # Compilation and compiled calculations + CdNmixtureAD_BNB_v <- compileNimble(call_dNmixtureAD_BNB_v) + CprobX <- CdNmixtureAD_BNB_v(x, lambda, theta, prob, Nmin, Nmax, len) + probX <- dNmixtureAD_BNB_v(x, lambda, theta, prob, Nmin, Nmax, len) + expect_equal(CprobX, probX) + + ClProbX <- CdNmixtureAD_BNB_v(x, lambda, theta, prob, Nmin, Nmax, len, log = TRUE) + lprobX <- 
dNmixtureAD_BNB_v(x, lambda, theta, prob, Nmin, Nmax, len, log = TRUE) + expect_equal(ClProbX, lProbX) + + expect_error(CdynProbX <- CdNmixture_BNB_v(x, lambda, theta, prob, Nmin = -1, + Nmax = -1, len)) + + # Use in Nimble model + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_BNB_v(lambda = lambda, prob = prob[1:5], + theta = theta, + Nmin = Nmin, Nmax = Nmax, len = len) + + }) + + m <- nimbleModel(code = nc, + data = list(x = x), + inits = list(lambda = lambda, + prob = prob, + theta = theta), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- c(NA, NA, NA, NA, NA) + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(lambda = lambda, + theta = theta, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + + + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(0) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? 
+ expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[2]"]))) + + # Test simulation code + nSim <- 10 + xSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + xSim[i,] <- rNmixtureAD_BNB_v(1, lambda, theta, prob, Nmin, Nmax, len) + } + + CrNmixtureAD_BNB_v <- compileNimble(rNmixtureAD_BNB_v) + CxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + CxSim[i,] <- CrNmixtureAD_BNB_v(1, lambda, theta, prob, Nmin, Nmax, len) + } + expect_identical(xSim, CxSim) + + simNodes <- m$getDependencies(c('prob', 'lambda'), self = FALSE) + mxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i,] <- m$x + } + expect_identical(mxSim, xSim) + + CmxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i,] <- cm$x + } + expect_identical(CmxSim, mxSim) + }) + +# ----------------------------------------------------------------------------- +#### 4. 
Test dNmixtureAD_BNB_s #### +test_that("dNmixtureAD_BNB_s works", + { + # Uncompiled calculation + x <- c(1, 0, 3, 3, 0) + lambda <- 8 + theta <- 2 + prob <- 0.4 + Nmin <- 0 + Nmax <- 250 + len <- 5 + + probX <- dNmixtureAD_BNB_s(x, lambda, theta = theta, prob, Nmin, Nmax, len) + + # Manually calculate the correct answer + r <- 1 / theta + pNB <- 1 / (1 + theta * lambda) + + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dnbinom(N, size = r, prob = pNB) * + prod(dbinom(x, N, prob)) + } + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dNmixtureAD_BNB_s(x, lambda, theta, prob, Nmin, Nmax, len, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Dynamic Nmin/Nmax + expect_error(dynProbX <- dNmixtureAD_BNB_s(x, lambda, theta, prob, Nmin = -1, Nmax = -1, len)) + + # Other Nmin / Nmax + Nmin <- 3 + for(Nmax in 3:6) { + dynProbX <- dNmixtureAD_BNB_s(x, lambda, theta, prob, Nmin = Nmin, Nmax = Nmax, len) + dynCorrectProbX <- 0 + for (N in Nmin:Nmax) { + dynCorrectProbX <- dynCorrectProbX + dnbinom(N, size = r, prob = pNB) * + prod(dbinom(x, N, prob)) + } + expect_equal(dynProbX, dynCorrectProbX) + } + Nmin <- 0 + Nmax <- 250 + # Compilation and compiled calculations + call_dNmixtureAD_BNB_s <- nimbleFunction( + name = "t4", + run=function(x=double(1), + lambda=double(), + theta=double(), + prob=double(), + Nmin = double(0), + Nmax = double(0), + len=double(), + log = integer(0, default=0)) { + return(dNmixtureAD_BNB_s(x,lambda,theta,prob,Nmin,Nmax,len,log)) + returnType(double()) + } + ) + + # Compilation and compiled calculations + CdNmixtureAD_BNB_s <- compileNimble(call_dNmixtureAD_BNB_s) + CprobX <- CdNmixtureAD_BNB_s(x, lambda, theta, prob, Nmin, Nmax, len) + expect_equal(CprobX, probX) + + ClProbX <- CdNmixtureAD_BNB_s(x, lambda, theta, prob, Nmin, Nmax, len, log = TRUE) + expect_equal(ClProbX, lProbX) + + expect_error(CdynProbX <- CdNmixtureAD_BNB_s(x, lambda, 
theta, prob, Nmin = -1, + Nmax = -1, len)) + + # Use in Nimble model + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_BNB_s(lambda = lambda, prob = prob, + theta = theta, + Nmin = Nmin, Nmax = Nmax, len = len) + + }) + + m <- nimbleModel(code = nc, + data = list(x = x), + inits = list(lambda = lambda, + prob = prob, + theta = theta), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- c(NA, NA, NA, NA, NA) + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(lambda = lambda, + theta = theta, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(0) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? 
+ expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[2]"]))) + + # Test simulation code + nSim <- 10 + xSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + xSim[i,] <- rNmixtureAD_BNB_s(1, lambda, theta, prob, Nmin, Nmax, len) + } + + CrNmixtureAD_BNB_s <- compileNimble(rNmixtureAD_BNB_s) + CxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + CxSim[i,] <- CrNmixtureAD_BNB_s(1, lambda, theta, prob, Nmin, Nmax, len) + } + expect_identical(xSim, CxSim) + + simNodes <- m$getDependencies(c('prob', 'lambda'), self = FALSE) + mxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i,] <- m$x + } + expect_identical(mxSim, xSim) + + CmxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i,] <- cm$x + } + expect_identical(CmxSim, mxSim) + }) + +# ----------------------------------------------------------------------------- +#### 5. 
Test dNmixtureAD_BNB_oneObs #### +test_that("dNmixtureAD_BNB_oneObs works", + { + # Uncompiled calculation + x <- c(1) + lambda <- 8 + theta <- 2 + prob <- c(0.5) + Nmin <- 0 + Nmax <- 250 + len <- 5 + + probX <- dNmixtureAD_BNB_oneObs(x, lambda, theta, prob, Nmin, Nmax) + + # Manually calculate the correct answer + r <- 1 / theta + pNB <- 1 / (1 + theta * lambda) + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dnbinom(N, size = r, prob = pNB) * + prod(dbinom(x, N, prob)) + } + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dNmixtureAD_BNB_oneObs(x, lambda, theta, prob, Nmin, Nmax, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Dynamic Nmin/Nmax + expect_error(dynProbX <- dNmixtureAD_BNB_oneObs(x, lambda, theta, prob, Nmin = -1, Nmax = -1)) + + # Other Nmin / Nmax + Nmin <- 3 + for(Nmax in 3:6) { + dynProbX <- dNmixtureAD_BNB_oneObs(x, lambda, theta, prob, Nmin = Nmin, Nmax = Nmax) + dynCorrectProbX <- 0 + for (N in Nmin:Nmax) { + dynCorrectProbX <- dynCorrectProbX + dnbinom(N, size = r, prob = pNB) * + prod(dbinom(x, N, prob)) + } + expect_equal(dynProbX, dynCorrectProbX) + } + Nmin <- 0 + Nmax <- 250 + + call_dNmixtureAD_BNB_oneObs <- nimbleFunction( + name = "t5", + run=function(x=double(), + lambda=double(), + theta=double(), + prob=double(), + Nmin = double(0), + Nmax = double(0), + log = integer(0, default=0)) { + return(dNmixtureAD_BNB_oneObs(x,lambda,theta,prob,Nmin,Nmax,log)) + returnType(double()) + } + ) + + # Compilation and compiled calculations + CdNmixtureAD_BNB_oneObs <- compileNimble(call_dNmixtureAD_BNB_oneObs) + CprobX <- CdNmixtureAD_BNB_oneObs(x, lambda, theta, prob, Nmin, Nmax) + expect_equal(CprobX, probX) + + ClProbX <- CdNmixtureAD_BNB_oneObs(x, lambda, theta, prob, Nmin, Nmax, log = TRUE) + expect_equal(ClProbX, lProbX) + + expect_error(CdynProbX <- CdNmixtureAD_BNB_oneObs(x, lambda, theta, prob, Nmin = -1, Nmax = -1)) + + # Use in 
Nimble model + nc <- nimbleCode({ + x ~ dNmixtureAD_BNB_oneObs(lambda = lambda, prob = prob, + theta = theta, + Nmin = Nmin, Nmax = Nmax) + }) + + m <- nimbleModel(code = nc, + data = list(x = x), + inits = list(lambda = lambda, + prob = prob, + theta = theta), + constants = list(Nmin = Nmin, Nmax = Nmax)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- NA + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(lambda = lambda, + theta = theta, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax)) + + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(0) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? + expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x"]))) + + # Test simulation code + nSim <- 10 + xSim <- numeric(nSim) + set.seed(1) + for (i in 1:nSim) { + xSim[i] <- rNmixtureAD_BNB_oneObs(1, lambda, theta, prob, Nmin, Nmax) + } + + CrNmixtureAD_BNB_oneObs <- compileNimble(rNmixtureAD_BNB_oneObs) + CxSim <- numeric(nSim) + set.seed(1) + for (i in 1:nSim) { + CxSim[i] <- CrNmixtureAD_BNB_oneObs(1, lambda, theta, prob, Nmin, Nmax) + } + expect_identical(xSim, CxSim) + + simNodes <- m$getDependencies(c('prob', 'lambda'), self = FALSE) + mxSim <- numeric(nSim) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i] <- m$x + } + expect_identical(mxSim, xSim) + + CmxSim <- numeric(nSim) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i] <- cm$x + } + expect_identical(CmxSim, mxSim) + }) + + +# ----------------------------------------------------------------------------- +#### 6. 
Test dNmixtureAD_BBP_v #### +test_that("dNmixtureAD_BBP_v works", + { + # Uncompiled calculation + x <- c(1, 0, 3, 3, 0) + lambda <- 8 + s <- 2 + prob <- c(0.5, 0.3, 0.5, 0.4, 0.1) + Nmin <- max(x) + Nmax <- 250 + len <- 5 + + probX <- dNmixtureAD_BBP_v(x, lambda, prob, s, Nmin, Nmax, len) + + # Manually calculate the correct answer + alpha <- prob * s + beta <- s - prob * s + + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dpois(N, lambda) * + prod(dBetaBinom_v(x, N, shape1 = alpha, shape2 = beta)) + } + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dNmixtureAD_BBP_v(x, lambda, prob, s, Nmin, Nmax, len, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Other Nmin / Nmax + Nmin <- 3 + for(Nmax in 3:6) { + dynProbX <- dNmixtureAD_BBP_v(x, lambda, prob, s, Nmin = Nmin, Nmax = Nmax, len) + dynCorrectProbX <- 0 + for (N in Nmin:Nmax) { + dynCorrectProbX <- dynCorrectProbX + dpois(N, lambda) * + prod(dBetaBinom_v(x, N, shape1 = alpha, shape2 = beta)) + } + expect_equal(dynProbX, dynCorrectProbX) + } + Nmin <- 0 + Nmax <- 250 + # Compilation and compiled calculations + call_dNmixtureAD_BBP_v <- nimbleFunction( + name = "t6", + run=function(x=double(1), + lambda=double(), + prob=double(1), + s=double(), + Nmin = double(0), + Nmax = double(0), + len=double(), + log = integer(0, default=0)) { + return(dNmixtureAD_BBP_v(x,lambda,prob,s,Nmin,Nmax,len,log)) + returnType(double()) + } + ) + # Compilation and compiled calculations + CdNmixtureAD_BBP_v <- compileNimble(call_dNmixtureAD_BBP_v) + CprobX <- CdNmixtureAD_BBP_v(x, lambda, prob, s, Nmin, Nmax, len) + expect_equal(CprobX, probX) + + ClProbX <- CdNmixtureAD_BBP_v(x, lambda, prob, s, Nmin, Nmax, len, log = TRUE) + expect_equal(ClProbX, lProbX) + + # Dynamic Nmin / Nmax isn't allowed + expect_error({ + dNmixtureAD_BBP_v(x, lambda, prob, s, Nmin = -1, Nmax = -1, len) + }) + expect_error({ + dNmixtureAD_BBP_v(x, 
lambda, prob, s, Nmin = -1, Nmax, len) + }) + expect_error({ + dNmixtureAD_BBP_v(x, lambda, prob, s, Nmin, Nmax = -1, len) + }) + expect_error({ + CdNmixtureAD_BBP_v(x, lambda, prob, s, Nmin = -1, Nmax = -1, len) + }) + expect_error({ + CdNmixtureAD_BBP_v(x, lambda, prob, s, Nmin = -1, Nmax, len) + }) + expect_error({ + CdNmixtureAD_BBP_v(x, lambda, prob, s, Nmin, Nmax = -1, len) + }) + + + # Use in Nimble model + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_BBP_v(lambda = lambda, prob = prob[1:5], + s = s, + Nmin = Nmin, Nmax = Nmax, len = len) + }) + + m <- nimbleModel(code = nc, + data = list(x = x), + inits = list(lambda = lambda, + prob = prob, + s = s), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- c(NA, NA, NA, NA, NA) + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(lambda = lambda, + s = s, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(0) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? 
+ expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[2]"]))) + + # Test simulation code + nSim <- 10 + xSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + xSim[i,] <- rNmixtureAD_BBP_v(1, lambda, prob, s, Nmin, Nmax, len) + } + + CrNmixtureAD_BBP_v <- compileNimble(rNmixtureAD_BBP_v) + CxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + CxSim[i,] <- CrNmixtureAD_BBP_v(1, lambda, prob, s, Nmin, Nmax, len) + } + expect_equal(xSim, CxSim) + + simNodes <- m$getDependencies(c('prob', 'lambda'), self = FALSE) + mxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i,] <- m$x + } + expect_equal(mxSim, xSim) + + CmxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i,] <- cm$x + } + expect_identical(CmxSim, mxSim) + }) + +# ----------------------------------------------------------------------------- +#### 7. 
Test dNmixtureAD_BBP_s #### +test_that("dNmixtureAD_BBP_s works", + { + # Uncompiled calculation + x <- c(1, 0, 3, 3, 0) + lambda <- 8 + s <- 2 + prob <- 0.4 + Nmin <- max(x) + Nmax <- 250 + len <- 5 + + probX <- dNmixtureAD_BBP_s(x, lambda, prob, s, Nmin, Nmax, len) + + # Manually calculate the correct answer + alpha <- prob * s + beta <- s - prob * s + + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dpois(N, lambda) * + prod(dBetaBinom_s(x, N, + shape1 = alpha, shape2 = beta)) + } + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dNmixtureAD_BBP_s(x, lambda, prob, s, Nmin, Nmax, len, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Other Nmin / Nmax + Nmin <- 3 + for(Nmax in 3:6) { + dynProbX <- dNmixtureAD_BBP_s(x, lambda, prob, s, Nmin = Nmin, Nmax = Nmax, len) + dynCorrectProbX <- 0 + for (N in Nmin:Nmax) { + dynCorrectProbX <- dynCorrectProbX + dpois(N, lambda) * + prod(dBetaBinom_s(x, N, shape1 = alpha, shape2 = beta)) + } + expect_equal(dynProbX, dynCorrectProbX) + } + Nmin <- 0 + Nmax <- 250 + + # Compilation and compiled calculations + call_dNmixtureAD_BBP_s <- nimbleFunction( + name = "t7", + run=function(x=double(1), + lambda=double(), + prob=double(), + s=double(), + Nmin = double(0), + Nmax = double(0), + len=double(), + log = integer(0, default=0)) { + return(dNmixtureAD_BBP_s(x,lambda,prob,s,Nmin,Nmax,len,log)) + returnType(double()) + } + ) + # Compilation and compiled calculations + CdNmixtureAD_BBP_s <- compileNimble(call_dNmixtureAD_BBP_s) + CprobX <- CdNmixtureAD_BBP_s(x, lambda, prob, s, Nmin, Nmax, len) + expect_equal(CprobX, probX) + + ClProbX <- CdNmixtureAD_BBP_s(x, lambda, prob, s, Nmin, Nmax, len, log = TRUE) + expect_equal(ClProbX, lProbX) + + + # Dynamic Nmin / Nmax isn't allowed + expect_error({ + dNmixtureAD_BBP_s(x, lambda, prob, s, Nmin = -1, Nmax = -1, len) + }) + expect_error({ + dNmixtureAD_BBP_s(x, lambda, prob, s, Nmin = 
-1, Nmax, len) + }) + expect_error({ + dNmixtureAD_BBP_s(x, lambda, prob, s, Nmin, Nmax = -1, len) + }) + expect_error({ + CdNmixtureAD_BBP_s(x, lambda, prob, s, Nmin = -1, Nmax = -1, len) + }) + expect_error({ + CdNmixtureAD_BBP_s(x, lambda, prob, s, Nmin = -1, Nmax, len) + }) + expect_error({ + CdNmixtureAD_BBP_s(x, lambda, prob, s, Nmin, Nmax = -1, len) + }) + + + # Use in Nimble model + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_BBP_s(lambda = lambda, prob = prob, + s = s, + Nmin = Nmin, Nmax = Nmax, len = len) + + }) + + m <- nimbleModel(code = nc, + data = list(x = x), + inits = list(lambda = lambda, + prob = prob, + s = s), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- c(NA, NA, NA, NA, NA) + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(lambda = lambda, + s = s, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + + + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(0) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? 
+ expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[2]"]))) + + # Test simulation code + nSim <- 10 + xSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + xSim[i,] <- rNmixtureAD_BBP_s(1, lambda, prob, s, Nmin, Nmax, len) + } + + CrNmixtureAD_BBP_s <- compileNimble(rNmixtureAD_BBP_s) + CxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + CxSim[i,] <- CrNmixtureAD_BBP_s(1, lambda, prob, s, Nmin, Nmax, len) + } + expect_equal(xSim, CxSim) + + simNodes <- m$getDependencies(c('prob', 'lambda'), self = FALSE) + mxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i,] <- m$x + } + expect_equal(mxSim, xSim) + + CmxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i,] <- cm$x + } + expect_identical(CmxSim, mxSim) + }) + + +# ----------------------------------------------------------------------------- +#### 8. 
Test dNmixtureAD_BBP_oneObs #### +test_that("dNmixtureAD_BBP_oneObs works", + { + # Uncompiled calculation + x <- c(1) + lambda <- 8 + s <- 2 + prob <- c(0.5) + Nmin <- max(x) + Nmax <- 250 + len <- 5 + + probX <- dNmixtureAD_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax) + + # Manually calculate the correct answer + alpha <- prob * s + beta <- s - prob * s + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dpois(N, lambda) * + prod(dBetaBinom_s(x, N, alpha, beta)) + } + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dNmixtureAD_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Other Nmin / Nmax + Nmin <- 3 + for(Nmax in 3:6) { + dynProbX <- dNmixtureAD_BBP_oneObs(x, lambda, prob, s, Nmin = Nmin, Nmax = Nmax) + dynCorrectProbX <- 0 + for (N in Nmin:Nmax) { + dynCorrectProbX <- dynCorrectProbX + dpois(N, lambda) * + prod(dBetaBinom_s(x, N, shape1 = alpha, shape2 = beta)) + } + expect_equal(dynProbX, dynCorrectProbX) + } + Nmin <- 0 + Nmax <- 250 + + # Compilation and compiled calculations + call_dNmixtureAD_BBP_oneObs <- nimbleFunction( + name = "t8", + run=function(x=double(), + lambda=double(), + prob=double(), + s=double(), + Nmin = double(0), + Nmax = double(0), + log = integer(0, default=0)) { + return(dNmixtureAD_BBP_oneObs(x,lambda,prob,s,Nmin,Nmax,log)) + returnType(double()) + } + ) + + # Compilation and compiled calculations + CdNmixtureAD_BBP_oneObs <- compileNimble(call_dNmixtureAD_BBP_oneObs) + CprobX <- CdNmixtureAD_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax) + expect_equal(CprobX, probX) + + ClProbX <- CdNmixtureAD_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax, log = TRUE) + expect_equal(ClProbX, lProbX) + + # Dynamic Nmin / Nmax isn't allowed + expect_error({ + dNmixtureAD_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax = -1) + }) + expect_error({ + dNmixtureAD_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax) + }) + 
expect_error({ + dNmixtureAD_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax = -1) + }) + expect_error({ + CdNmixtureAD_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax = -1) + }) + expect_error({ + CdNmixtureAD_BBP_oneObs(x, lambda, prob, s, Nmin = -1, Nmax) + }) + expect_error({ + CdNmixtureAD_BBP_oneObs(x, lambda, prob, s, Nmin, Nmax = -1) + }) + + # Use in Nimble model + nc <- nimbleCode({ + x ~ dNmixtureAD_BBP_oneObs(lambda = lambda, prob = prob, + s = s, + Nmin = Nmin, Nmax = Nmax) + + }) + + m <- nimbleModel(code = nc, + data = list(x = x), + inits = list(lambda = lambda, + prob = prob, + s = s), + constants = list(Nmin = Nmin, Nmax = Nmax)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- NA + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(lambda = lambda, + s = s, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax)) + + + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(0) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? 
+ expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x"]))) + + # Test simulation code + nSim <- 10 + xSim <- numeric(nSim) + set.seed(1) + for (i in 1:nSim) { + xSim[i] <- rNmixtureAD_BBP_oneObs(1, lambda, prob, s, Nmin, Nmax) + } + + CrNmixtureAD_BBP_oneObs <- compileNimble(rNmixtureAD_BBP_oneObs) + CxSim <- numeric(nSim) + set.seed(1) + for (i in 1:nSim) { + CxSim[i] <- CrNmixtureAD_BBP_oneObs(1, lambda, prob, s, Nmin, Nmax) + } + expect_identical(xSim, CxSim) + + simNodes <- m$getDependencies(c('prob', 'lambda'), self = FALSE) + mxSim <- numeric(nSim) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i] <- m$x + } + expect_identical(mxSim, xSim) + + CmxSim <- numeric(nSim) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i] <- cm$x + } + expect_identical(CmxSim, mxSim) + }) + + + + +# ----------------------------------------------------------------------------- +#### 9. Test dNmixtureAD_BBNB_v #### +test_that("dNmixtureAD_BBNB_v works", + { + # Uncompiled calculation + x <- c(1, 0, 3, 3, 0) + lambda <- 8 + s <- 2 + theta <- 1.5 + prob <- c(0.5, 0.3, 0.5, 0.4, 0.1) + Nmin <- max(x) + Nmax <- 250 + len <- 5 + + probX <- dNmixtureAD_BBNB_v(x, lambda, theta, prob, s, Nmin, Nmax, len) + + # Manually calculate the correct answer + alpha <- prob * s + beta <- s - prob * s + r <- 1 / theta + pNB <- 1 / (1 + theta * lambda) + + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dnbinom(N, size = r, prob = pNB) * + prod(dBetaBinom_v(x, N, shape1 = alpha, shape2 = beta)) + } + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dNmixtureAD_BBNB_v(x, lambda, theta, prob, s, Nmin, Nmax, len, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Other Nmin / Nmax + Nmin <- 3 + for(Nmax in 3:6) { + dynProbX <- dNmixtureAD_BBNB_v(x, lambda, theta, prob, s, Nmin = Nmin, Nmax = Nmax, len) + 
dynCorrectProbX <- 0 + for (N in Nmin:Nmax) { + dynCorrectProbX <- dynCorrectProbX + dnbinom(N, size = r, prob = pNB) * + prod(dBetaBinom_v(x, N, shape1 = alpha, shape2 = beta)) + } + expect_equal(dynProbX, dynCorrectProbX) + } + Nmin <- 0 + Nmax <- 250 + + # Compilation and compiled calculations + call_dNmixtureAD_BBNB_v <- nimbleFunction( + name = "t9", + run=function(x=double(1), + lambda=double(), + theta=double(), + prob=double(1), + s=double(), + Nmin = double(0), + Nmax = double(0), + len=double(), + log = integer(0, default=0)) { + return(dNmixtureAD_BBNB_v(x,lambda,theta,prob,s,Nmin,Nmax,len,log)) + returnType(double()) + } + ) + # Compilation and compiled calculations + CdNmixtureAD_BBNB_v <- compileNimble(call_dNmixtureAD_BBNB_v) + CprobX <- CdNmixtureAD_BBNB_v(x, lambda, theta, prob, s, Nmin, Nmax, len) + expect_equal(CprobX, probX) + + ClProbX <- CdNmixtureAD_BBNB_v(x, lambda, theta, prob, s, Nmin, Nmax, len, log = TRUE) + expect_equal(ClProbX, lProbX) + + # Dynamic Nmin / Nmax isn't allowed + expect_error({ + dNmixtureAD_BBNB_v(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len) + }) + expect_error({ + dNmixtureAD_BBNB_v(x, lambda, theta, prob, s, Nmin, Nmax = -1, len) + }) + expect_error({ + dNmixtureAD_BBNB_v(x, lambda, theta, prob, s, Nmin = -1, Nmax, len) + }) + expect_error({ + CdNmixtureAD_BBNB_v(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len) + }) + expect_error({ + CdNmixtureAD_BBNB_v(x, lambda, theta, prob, s, Nmin, Nmax = -1, len) + }) + expect_error({ + CdNmixtureAD_BBNB_v(x, lambda, theta, prob, s, Nmin = -1, Nmax, len) + }) + + # Use in Nimble model + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_BBNB_v(lambda = lambda, prob = prob[1:5], + theta = theta, s = s, + Nmin = Nmin, Nmax = Nmax, len = len) + + }) + + m <- nimbleModel(code = nc, + data = list(x = x), + inits = list(lambda = lambda, + prob = prob, + s = s, theta = theta), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + m$calculate() + MlProbX <- m$getLogProb("x") 
+ expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- c(NA, NA, NA, NA, NA) + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(lambda = lambda, + s = s, theta = theta, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(0) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? + expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[2]"]))) + + # Test simulation code + nSim <- 10 + xSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + xSim[i,] <- rNmixtureAD_BBNB_v(1, lambda, theta, prob, s, Nmin, Nmax, len) + } + + CrNmixtureAD_BBNB_v <- compileNimble(rNmixtureAD_BBNB_v) + CxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + CxSim[i,] <- CrNmixtureAD_BBNB_v(1, lambda, theta, prob, s, Nmin, Nmax, len) + } + expect_equal(xSim, CxSim) + + simNodes <- m$getDependencies(c('prob', 'lambda'), self = FALSE) + mxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i,] <- m$x + } + expect_equal(mxSim, xSim) + + CmxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i,] <- cm$x + } + expect_identical(CmxSim, mxSim) + }) + +# ----------------------------------------------------------------------------- +#### 10. 
Test dNmixtureAD_BBNB_s #### +test_that("dNmixtureAD_BBNB_s works", + { + # Uncompiled calculation + x <- c(1, 0, 3, 3, 0) + lambda <- 8 + s <- 2 + theta <- 1.5 + prob <- 0.4 + Nmin <- max(x) + Nmax <- 250 + len <- 5 + + probX <- dNmixtureAD_BBNB_s(x, lambda, theta, prob, s, Nmin, Nmax, len) + + # Manually calculate the correct answer + alpha <- prob * s + beta <- s - prob * s + r <- 1 / theta + pNB <- 1 / (1 + theta * lambda) + + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dnbinom(N, size = r, prob = pNB) * + prod(dBetaBinom_s(x, N, shape1 = alpha, shape2 = beta)) + } + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dNmixtureAD_BBNB_s(x, lambda, theta, prob, s, Nmin, Nmax, len, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + # Other Nmin / Nmax + Nmin <- 3 + for(Nmax in 3:6) { + dynProbX <- dNmixtureAD_BBNB_s(x, lambda, theta, prob, s, Nmin = Nmin, Nmax = Nmax, len) + dynCorrectProbX <- 0 + for (N in Nmin:Nmax) { + dynCorrectProbX <- dynCorrectProbX + dnbinom(N, size = r, prob = pNB) * + prod(dBetaBinom_s(x, N, shape1 = alpha, shape2 = beta)) + } + expect_equal(dynProbX, dynCorrectProbX) + } + Nmin <- 0 + Nmax <- 250 + + # Compilation and compiled calculations + call_dNmixtureAD_BBNB_s <- nimbleFunction( + name = "t10", + run=function(x=double(1), + lambda=double(), + theta=double(), + prob=double(), + s=double(), + Nmin = double(0), + Nmax = double(0), + len=double(), + log = integer(0, default=0)) { + return(dNmixtureAD_BBNB_s(x,lambda,theta,prob,s,Nmin,Nmax,len,log)) + returnType(double()) + } + ) + # Compilation and compiled calculations + CdNmixtureAD_BBNB_s <- compileNimble(call_dNmixtureAD_BBNB_s) + CprobX <- CdNmixtureAD_BBNB_s(x, lambda, theta, prob, s, Nmin, Nmax, len) + expect_equal(CprobX, probX) + + ClProbX <- CdNmixtureAD_BBNB_s(x, lambda, theta, prob, s, Nmin, Nmax, len, log = TRUE) + expect_equal(ClProbX, lProbX) + + # Dynamic Nmin / Nmax 
isn't allowed + expect_error({ + dNmixtureAD_BBNB_s(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len) + }) + expect_error({ + dNmixtureAD_BBNB_s(x, lambda, theta, prob, s, Nmin, Nmax = -1, len) + }) + expect_error({ + dNmixtureAD_BBNB_s(x, lambda, theta, prob, s, Nmin = -1, Nmax, len) + }) + expect_error({ + CdNmixtureAD_BBNB_s(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1, len) + }) + expect_error({ + CdNmixtureAD_BBNB_s(x, lambda, theta, prob, s, Nmin, Nmax = -1, len) + }) + expect_error({ + CdNmixtureAD_BBNB_s(x, lambda, theta, prob, s, Nmin = -1, Nmax, len) + }) + # Use in Nimble model + nc <- nimbleCode({ + x[1:5] ~ dNmixtureAD_BBNB_s(lambda = lambda, prob = prob, + theta = theta, s = s, + Nmin = Nmin, Nmax = Nmax, len = len) + }) + + m <- nimbleModel(code = nc, + data = list(x = x), + inits = list(lambda = lambda, + prob = prob, + s = s, theta = theta), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- c(NA, NA, NA, NA, NA) + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(lambda = lambda, + s = s, theta = theta, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax, + len = len)) + + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(0) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? 
+ expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[2]"]))) + + # Test simulation code + nSim <- 10 + xSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + xSim[i,] <- rNmixtureAD_BBNB_s(1, lambda, theta, prob, s, Nmin, Nmax, len) + } + + CrNmixtureAD_BBNB_s <- compileNimble(rNmixtureAD_BBNB_s) + CxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for (i in 1:nSim) { + CxSim[i,] <- CrNmixtureAD_BBNB_s(1, lambda, theta, prob, s, Nmin, Nmax, len) + } + expect_equal(xSim, CxSim) + + simNodes <- m$getDependencies(c('prob', 'lambda'), self = FALSE) + mxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i,] <- m$x + } + expect_equal(mxSim, xSim) + + CmxSim <- array(NA, dim = c(nSim, len)) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i,] <- cm$x + } + expect_identical(CmxSim, mxSim) + }) + + +# ----------------------------------------------------------------------------- +#### 11. 
Test dNmixtureAD_BBNB_oneObs #### +test_that("dNmixtureAD_BBNB_oneObs works", + { + # Uncompiled calculation + x <- c(1) + lambda <- 8 + theta <- 1.5 + s <- 2 + prob <- c(0.5) + Nmin <- max(x) + Nmax <- 250 + len <- 1 + + probX <- dNmixtureAD_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax) + + # Manually calculate the correct answer + alpha <- prob * s + beta <- s - prob * s + r <- 1 / theta + pNB <- 1 / (1 + theta * lambda) + correctProbX <- 0 + for (N in Nmin:Nmax) { + correctProbX <- correctProbX + dnbinom(N, size = r, prob = pNB) * + prod(dBetaBinom_s(x, N, alpha, beta)) + } + + expect_equal(probX, correctProbX) + + # Uncompiled log probability + lProbX <- dNmixtureAD_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax, log = TRUE) + lCorrectProbX <- log(correctProbX) + expect_equal(lProbX, lCorrectProbX) + + # Other Nmin / Nmax + Nmin <- 3 + for(Nmax in 3:6) { + dynProbX <- dNmixtureAD_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = Nmin, Nmax = Nmax) + dynCorrectProbX <- 0 + for (N in Nmin:Nmax) { + dynCorrectProbX <- dynCorrectProbX + dnbinom(N, size = r, prob = pNB) * + prod(dBetaBinom_s(x, N, alpha, beta)) + } + expect_equal(dynProbX, dynCorrectProbX) + } + Nmin <- 0 + Nmax <- 250 + + # Compilation and compiled calculations + call_dNmixtureAD_BBNB_oneObs <- nimbleFunction( + name = "t11", + run=function(x=double(), + lambda=double(), + theta=double(), + prob=double(), + s=double(), + Nmin = double(0), + Nmax = double(0), + log = integer(0, default=0)) { + return(dNmixtureAD_BBNB_oneObs(x,lambda,theta,prob,s,Nmin,Nmax,log)) + returnType(double()) + } + )# Compilation and compiled calculations + CdNmixtureAD_BBNB_oneObs <- compileNimble(call_dNmixtureAD_BBNB_oneObs) + CprobX <- CdNmixtureAD_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax) + expect_equal(CprobX, probX) + + ClProbX <- CdNmixtureAD_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax, log = TRUE) + expect_equal(ClProbX, lProbX) + + # Dynamic Nmin / Nmax isn't allowed + expect_error({ + 
dNmixtureAD_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1) + }) + expect_error({ + dNmixtureAD_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax = -1) + }) + expect_error({ + dNmixtureAD_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax) + }) + expect_error({ + CdNmixtureAD_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax = -1) + }) + expect_error({ + CdNmixtureAD_BBNB_oneObs(x, lambda, theta, prob, s, Nmin, Nmax = -1) + }) + expect_error({ + CdNmixtureAD_BBNB_oneObs(x, lambda, theta, prob, s, Nmin = -1, Nmax) + }) + + # Use in Nimble model + nc <- nimbleCode({ + x ~ dNmixtureAD_BBNB_oneObs(lambda = lambda, prob = prob, + s = s, theta = theta, + Nmin = Nmin, Nmax = Nmax) + }) + + m <- nimbleModel(code = nc, + data = list(x = x), + inits = list(lambda = lambda, + prob = prob, + theta = theta, + s = s), + constants = list(Nmin = Nmin, Nmax = Nmax)) + m$calculate() + MlProbX <- m$getLogProb("x") + expect_equal(MlProbX, lProbX) + + # Compiled model + cm <- compileNimble(m) + cm$calculate() + CMlProbX <- cm$getLogProb("x") + expect_equal(CMlProbX, lProbX) + + # Test imputing value for all NAs + xNA <- NA + mNA <- nimbleModel(nc, data = list(x = xNA), + inits = list(lambda = lambda, + s = s, theta = theta, + prob = prob), + constants = list(Nmin = Nmin, Nmax = Nmax)) + + + mNAConf <- configureMCMC(mNA) + mNAConf$addMonitors('x') + mNA_MCMC <- buildMCMC(mNAConf) + cmNA <- compileNimble(mNA, mNA_MCMC) + + set.seed(0) + cmNA$mNA_MCMC$run(10) + + # Did the imputed values come back? 
+ expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x"]))) + + # Test simulation code + nSim <- 10 + xSim <- numeric(nSim) + set.seed(1) + for (i in 1:nSim) { + xSim[i] <- rNmixtureAD_BBNB_oneObs(1, lambda, theta, prob, s, Nmin, Nmax) + } + + CrNmixtureAD_BBNB_oneObs <- compileNimble(rNmixtureAD_BBNB_oneObs) + CxSim <- numeric(nSim) + set.seed(1) + for (i in 1:nSim) { + CxSim[i] <- CrNmixtureAD_BBNB_oneObs(1, lambda, theta, prob, s, Nmin, Nmax) + } + expect_identical(xSim, CxSim) + + simNodes <- m$getDependencies(c('prob', 'lambda'), self = FALSE) + mxSim <- numeric(nSim) + set.seed(1) + for(i in 1:nSim) { + m$simulate(simNodes, includeData = TRUE) + mxSim[i] <- m$x + } + expect_identical(mxSim, xSim) + + CmxSim <- numeric(nSim) + set.seed(1) + for(i in 1:nSim) { + cm$simulate(simNodes, includeData = TRUE) + CmxSim[i] <- cm$x + } + expect_identical(CmxSim, mxSim) + }) diff --git a/tests/testthat/test-Occ.R b/tests/testthat/test-Occ.R index fdb34bf..d570241 100644 --- a/tests/testthat/test-Occ.R +++ b/tests/testthat/test-Occ.R @@ -2,8 +2,6 @@ # ----------------------------------------------------------------------------- # 0. 
Load -context("Testing dOcc-related functions.") - # Test scalar-scalar version test_that("dOcc_s and rOcc_s work", { x <- c(1,0,1,1,0) @@ -25,7 +23,17 @@ test_that("dOcc_s and rOcc_s work", { lCorrectProbX <- log(correctProbX) expect_equal(lProbX, lCorrectProbX) - CdOcc_s <- compileNimble(dOcc_s) + # we must wrap the call to avoid the error that + # dOcc_s has no setup code but buildDerivs=TRUE so can't be compiled alone + call_dOcc_s <- nimbleFunction( + run = function(x = double(1), probOcc=double(), + probDetect=double(), + log = integer(0, default = 0)) { + return(dOcc_s(x, probOcc, probDetect, 0, log)) + returnType(double()) + } + ) + CdOcc_s <- compileNimble(call_dOcc_s) CprobX <- CdOcc_s(x, probOcc, probDetect) expect_equal(CprobX, probX) @@ -91,7 +99,6 @@ test_that("dOcc_s and rOcc_s work", { cmNA$mNA_MCMC$run(10) # Did the imputed values come back? expect_true(all(!is.na(as.matrix(cmNA$mNA_MCMC$mvSamples)[,"x[1]"]))) - }) @@ -118,7 +125,18 @@ test_that("dOcc_v works", { lCorrectProbX <- log(correctProbX) expect_equal(lProbX, lCorrectProbX) - CdOcc_v <- compileNimble(dOcc_v) + # we must wrap the call to avoid the error that + # dOcc_v has no setup code but buildDerivs=TRUE so can't be compiled alone + call_dOcc_v <- nimbleFunction( + run = function(x = double(1), probOcc=double(0), + probDetect=double(1), + log = integer(0, default = 0)) { + return(dOcc_v(x, probOcc, probDetect, 0, log)) + returnType(double()) + } + ) + + CdOcc_v <- compileNimble(call_dOcc_v) CprobX <- CdOcc_v(x, probOcc, probDetect) expect_equal(CprobX, probX) @@ -205,9 +223,29 @@ test_that("Checking errors", { ) -### Compiled errors - CdOcc_s <- compileNimble(dOcc_s) - CdOcc_v <- compileNimble(dOcc_v) + ### Compiled errors + # we must wrap the call to avoid the error that + # dOcc_[s,v] has no setup code but buildDerivs=TRUE so can't be compiled alone + call_dOcc_s <- nimbleFunction( + run = function(x = double(1), probOcc=double(), + probDetect=double(), + len = integer(0), + log = 
integer(0, default = 0)) { + return(dOcc_s(x, probOcc, probDetect, len, log)) + returnType(double()) + } + ) + call_dOcc_v <- nimbleFunction( + run = function(x = double(1), probOcc=double(0), + probDetect=double(1), + len = integer(0), + log = integer(0, default = 0)) { + return(dOcc_v(x, probOcc, probDetect, len, log)) + returnType(double()) + } + ) + CdOcc_s <- compileNimble(call_dOcc_s) + CdOcc_v <- compileNimble(call_dOcc_v) expect_error( CdOcc_s(x = c(0,1,0,0), probOcc = 0.4, probDetect = 0.5, len = 3) diff --git a/vignettes/Introduction_to_nimbleEcology.Rmd b/vignettes/Introduction_to_nimbleEcology.Rmd index e08ce44..848e949 100644 --- a/vignettes/Introduction_to_nimbleEcology.Rmd +++ b/vignettes/Introduction_to_nimbleEcology.Rmd @@ -49,14 +49,14 @@ The best way to seek user support is the nimble-users list. Information on how # The concept of using new distributions for ecological model components. -The distributions provided in `nimbleEcology` let you simplify model code and the algorithms that use it, such as MCMC. For the ecological models in `nimbleEcology`, the simplification comes from removing some discrete latent states from the model and instead doing the corresponding probability (or likelihood) calculations in a specialized distribution. +The distributions provided in `nimbleEcology` let you simplify model code and the algorithms that use it, such as MCMC. For the ecological models in `nimbleEcology`, the simplification comes from removing some discrete latent states from the model and instead doing the corresponding probability (or likelihood) calculations in a specialized distribution that marginalizes over the latent states. 
-For each of the ecological model components provided by `nimbleEcology`, here are the discrete latent states that are replaced by use of a specialized distribution: +For each of the ecological model components provided by `nimbleEcology`, here are the discrete latent states that are replaced by use of a marginalized distribution: - CJS (basic capture-recapture): Latent individual alive-or-dead state at each time. - HMM and DHMM: Latent individual state, such as location or breeding status, as well as alive-or-dead, at each time. - Occupancy: Latent occupancy status of a site. -- Dynamic occupancy: latent occupancy status of a site at each time. +- Dynamic occupancy: Latent occupancy status of a site at each time. - N-mixture: Latent number of individuals at a site. Before going further, let's illustrate how `nimbleEcology` can be used for a basic occupancy model. @@ -120,7 +120,7 @@ We can run an MCMC for this model in the following steps: 5. Run the MCMC. 6. Extract the samples. -The function `nimbleMCMC` does all of these steps for you. The function `runMCMC` does steps 5-6 for you, with convenient management of options such as discarding burn-in samples. The full set of steps allows great control over how you use a model and configure and use an MCMC. We will go through the steps 1-4 and then use `runMCMC` for steps 5-6. +The function `nimbleMCMC` does all of these steps for you. The function `runMCMC` does steps 5-6 for you, with convenient management of options such as discarding burn-in samples. The full set of steps allows greater control over how you use a model and configure and use an MCMC. We will go through the steps 1-4 and then use `runMCMC` for steps 5-6. In this example, we also need simulated data. We can use the same model to create simulated data, rather than writing separate R code for that purpose. 
@@ -154,7 +154,7 @@ MCMC <- buildMCMC(occupancy_model) ```{r} ## These can be done in one step, but many people -## find it convenient to do it in two steps. +## find it convenient to do them in two steps. Coccupancy_model <- compileNimble(occupancy_model) CMCMC <- compileNimble(MCMC, project = occupancy_model) ``` @@ -217,6 +217,16 @@ COccLogLik <- compileNimble(OccLogLik, project = occupancy_model_new) optim(c(0.5, 0.5), COccLogLik$run, control = list(fnscale = -1))$par ``` +## Support for automatic differentiation with `nimbleEcology`. + +As of `nimble` version 1.0.0, there is a system for automatic (or algorithmic) differentiation, known as AD. This is used by algorithms such as Hamiltonian Monte Carlo (see package [`nimbleHMC`](https://cran.r-project.org/web/packages/nimbleHMC/index.html)) and Laplace approximation (`buildLaplace` in `nimble`). + +The distributions provided in `nimbleEcology` support AD as much as possible. There are three main points to keep in mind: + +1. It is not possible to take derivatives with respect to discrete values, and the "data" for the `nimbleEcology` distributions are all discrete values. It *is* possible to take derivatives with respect to continuous parameters of the distributions. If the "data" are marked as `data` in the model (and hence will not be sampled by MCMC, for example), there is no problem. +2. Some values will be "baked in" to the AD calculations, meaning that the values first present will be used permanently in later AD calculations. In all cases of `nimbleEcology` distributions, the values "baked in" include sizes of variables. In some cases (such as dHMM and dDHMM) they also include the data values. See the help page for each distribution for more details (e.g. `help(dOcc)`). If the data are scientific data that do not need to be changed after creating the model and algorithm, there is no problem. +3. For the N-mixture distributions only, one needs to use different distribution names. 
Every `dNmixture` portion of a distribution name below should be replaced with `dNmixtureAD`. + # Distributions provided in `nimbleEcology` In this section, we introduce each of the `nimbleEcology` distributions in more detail. We will summarize the calculations using mathematical notation and then describe how to use the distributions in a `nimble` model. @@ -270,7 +280,7 @@ The usage for each is similar. An example for `dCJS_vs` is: Note the following points: -- `y[i, 1:T]` is a vector of capture history. It is written as if `i` indexes individual, but it could be any vector in any variable in the model. +- `y[i, 1:T]` is a vector of the capture history. It is written as if `i` indexes individual, but it could be any vector in any variable in the model. - Arguments to `dCJS_sv` are named. As in R, this is optional but helpful. Without names, the order matters. - `probSurvive` is provided as a scalar value, assuming there is a variable called `phi`. - In variants where `probSurvive` is a vector (`dOcc_vs` and `dOcc_vv`), the $t^{\mbox{th}}$ element of `probSurvive` is $\phi_t$ above, namely the probability of survival from occasion $t$ to $t+1$. @@ -449,10 +459,10 @@ where $P(N | \lambda)$ is a Poisson probability and $P(y_t | N)$ is a binomial p In practice, the summation over $N$ can start at a value greater than 0 and must be truncated at some value less than infinity. Two options are provided for the range of summation: -1. Start the summation at the largest value of $y_t$ (there must be at least this many individuals) and truncate it at a value of $maxN$ provided by the user. -2. The following heuristic can be used. +1. The user can provide values $Nmin$ and $Nmax$ to start and end the summation, respectively. A typical choice for $Nmin$ would be the largest value of $y_t$ (there must be at least this many individuals). +2. 
The following heuristic can be used: -If we consider a single $y_t$, then $N - y_t | y_t \sim \mbox{Poisson}(\lambda (1-p_t))$ (*See opening example of Royle and Dorazio*). Thus, a natural upper end for the summation range of $N$ would be $y_t$ plus a very high quantile of The $\mbox{Poisson}(\lambda (1-p_t))$ distribution. For a set of observations, a natural choice would be the maximum of such values across the observation times. We use the 0.99999 quantile to be conservative. +If we consider a single $y_t$, then $N - y_t | y_t \sim \mbox{Poisson}(\lambda (1-p_t))$ (*See opening example of [Royle and Dorazio, 2008](https://www.mbr-pwrc.usgs.gov/pubanalysis/roylebook/roylebook.html)*). Thus, a natural upper end for the summation range of $N$ would be $y_t$ plus a very high quantile of the $\mbox{Poisson}(\lambda (1-p_t))$ distribution. For a set of observations, a natural choice would be the maximum of such values across the observation times. We use the 0.99999 quantile to be conservative. Correspondingly, the summation can begin at smallest of the 0.00001 quantiles of $N | y_t$. If $p_t$ is small, this can be considerably larger than the maximum value of $y_t$, allowing more efficient computation. @@ -462,15 +472,15 @@ Standard (binomial-Poisson) N-mixture models are available in two distributions An example is: -`y[i, 1:T] ~ dNmixture_v(lambda = lambda, p = p[1:T], minN = minN, maxN = maxN, len = T)` +`y[i, 1:T] ~ dNmixture_v(lambda = lambda, p = p[1:T], Nmin = Nmin, Nmax = Nmax, len = T)` - As in the examples above, this is written as if `i` indexes the individual site, but the variables could be arranged in other ways. - `lambda` is $\lambda$ above. - `p[1:T]` is $\mathbf{p}$ above. If $p$ were constant across visits, we would use `dNmixture_s` and a scalar value of `p`. - `len` is $T$. -- `minN` and `maxN` provide the lower and upper bounds for the sum over Ns (option 1 above). 
If both are set to `-1`, bounds are chosen dynamically using quantiles of the Poisson distribution (option 2 above). +- `Nmin` and `Nmax` provide the lower and upper bounds for the sum over Ns (option 1 above). If both are set to `-1`, bounds are chosen dynamically using quantiles of the Poisson distribution (option 2 above). -Three variations of the N-mixture model are also available, in which the Poisson distribution is substituted for a negative binomial, the binomial is substituted for a beta binomial, or both are substituted. These are called `dNmixture_BNB_*`, `dNmixture_BBP_*`, and `dNmixture_BBNB_*`. Each has three suffixes: `_v` and `_s` correspond to the cases provided above, and `_oneObs` distributions are provided for the case where the data are scalar (i.e., only one observation at the site). No `_oneObs` observation is provided for the default `dNmixture` because `dNmixture(x[1:1], lambda, prob[1:1])` is equivalent to `dpois(x[1:1], lambda * prob[1:1])`. +Three variations of the N-mixture model are also available, in which the Poisson distribution is replaced by negative binomial, the binomial is replaced by beta binomial, or both. These are called `dNmixture_BNB_*`, `dNmixture_BBP_*`, and `dNmixture_BBNB_*`, respectively. Each has three suffixes: `_v` and `_s` correspond to the cases provided above, and `_oneObs` distributions are provided for the case where the data are scalar (i.e., only one observation at the site). No `_oneObs` observation is provided for the default `dNmixture` because `dNmixture(x[1:1], lambda, prob[1:1])` is equivalent to `dpois(x[1:1], lambda * prob[1:1])`. These combinations lead to the following set of 11 N-mixture distributions: @@ -486,3 +496,6 @@ These combinations lead to the following set of 11 N-mixture distributions: - `dNmixture_BBNB_s` - `dNmixture_BBNB_oneObs` +If an N-mixture distribution needs to be used with AD (e.g. for HMC or Laplace approximation), replace `dNmixture` with `dNmixtureAD`. 
In that case, one must provide `Nmin` and `Nmax` values manually; the second (heuristic) option described above is not available. + +Further details on all the distributions in `nimbleEcology` can be found on the help pages within R, e.g. `help(dNmixture)`.