General functions

##======================================================================
## Transformations
lambda2mu <- function(lambda, nu) {
    phi <- log(nu)
    mu <- lambda^(1/nu) - (nu - 1)/(2 * nu)
    list("mu" = mu, "phi" = phi)
}

mu2lambda <- function(mu, phi) {
    nu <- exp(phi)
    lambda <- (mu + (nu-1)/(2*nu))^nu
    list("lambda" = lambda, "nu" = nu)
}
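
## Quick sanity check (illustrative values): the two transformations
## are inverses of each other, so a round trip recovers the inputs.
local({
    pars <- lambda2mu(lambda = 5, nu = 2)
    mu2lambda(pars$mu, pars$phi)          # expected: lambda = 5, nu = 2
})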

##======================================================================
## Compute moments (mean and variance)
calc_moments <- Vectorize(FUN = function(mu, phi) {
    pars <- mu2lambda(mu, phi)
    mu <- cmpreg::calc_mean_cmp(log(pars[["lambda"]]), phi)
    va <- cmpreg::calc_var_cmp(log(pars[["lambda"]]), phi)
    ## mu <- do.call(compoisson::com.mean, pars)
    ## va <- do.call(compoisson::com.var, pars)
    c("mean" = mu, "var" = va)
}, vectorize.args = c("mu", "phi"))
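
## Illustrative check (assumes the cmpreg package is installed, as
## calc_moments itself already requires): with phi = 0 the COM-Poisson
## is the Poisson, so mean and variance should both be close to mu.
calc_moments(mu = 5, phi = 0)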

##======================================================================
## Negative log-likelihood functions

## COM-Poisson Model (Original parameterization)
llcmp <- function (params, y, X, offset = NULL, sumto = NULL) {
    if (!is.null(offset)) {
        stop("This model doesn't support offset yet.")
    }
    phi <- params[1]
    nu <- exp(phi)
    Xb <- X %*% params[-1]
    i <- 0:sumto
    zs <- sapply(Xb, function(loglambda) {
        sum(exp(i * loglambda - nu * lfactorial(i)))
    })
    Z <- sum(log(zs))
    ll <- sum(y * Xb - nu * lfactorial(y)) - Z
    return(-ll)
}
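
## Quick sanity check (illustrative values): for a single linear
## predictor the probabilities exp(-llcmp(...)) over y = 0, 1, 2, ...
## sum to (approximately) one.
local({
    pars <- c(log(1.5), log(5))           # phi = log(nu), log(lambda)
    sum(sapply(0:50, function(y)
        exp(-llcmp(pars, y = y, X = 1, sumto = 50))))
})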

## COM-Poisson Model (mean-parameterization)
llcmp2 <- function (params, y, X, offset = NULL, sumto = NULL) {
    if (!is.null(offset)) {
        stop("This model doesn't support offset yet.")
    }
    phi2 <- params[1]
    nu <- exp(phi2)
    ##-------------------------------------------
    mu <- exp(X %*% params[-1])
    Xb <- nu * log(mu + (nu - 1)/(2 * nu))
    ##-------------------------------------------
    i <- 0:sumto
    zs <- sapply(Xb, function(loglambda) {
        sum(exp(i * loglambda - nu * lfactorial(i)))
    })
    Z <- sum(log(zs))
    ll <- sum(y * Xb - nu * lfactorial(y)) - Z
    return(-ll)
}
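
## Quick sanity check (illustrative values, base R only): with phi = 0,
## i.e. nu = 1, the mean-parameterized COM-Poisson reduces to the
## Poisson, so the negative log-likelihood should agree with dpois()
## up to the truncation of the normalizing constant.
local({
    y <- c(2, 5, 7, 3)
    X <- matrix(1, nrow = length(y))      # intercept-only design
    c(cmp = llcmp2(c(0, log(4)), y = y, X = X, sumto = 100),
      poisson = -sum(dpois(y, lambda = 4, log = TRUE)))
})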

##======================================================================
## Framework to fit count models (using bbmle::mle2)
fitcm <- function(formula, data, start = NULL,
                  model = c("CP", "CP2"), ...) {
    dots <- list(...)
    model <- match.arg(model)
    ##-------------------------------------------
    frame <- model.frame(formula, data)
    terms <- attr(frame, "terms")
    y <- model.response(frame)
    X <- model.matrix(terms, frame)
    off <- model.offset(frame)
    if (is.null(start)) {
        m0 <- glm.fit(x = X, y = y, offset = off, family = poisson())
        start <- c(0, m0$coefficients)

    }
    ##-------------------------------------------
    if (model == "CP") {
        names(start)[1] <- "phi"
        bbmle::parnames(llcmp) <- names(start)
        sumto <- dots$sumto
        dots$sumto <- NULL
        args <- append(
            list(minuslogl = llcmp, start = start,
                 data = list(y = y, X = X, offset = off,
                             sumto = sumto, model = model),
                 vecpar = TRUE), dots)
        fit <- suppressWarnings(
            do.call(what = bbmle::mle2, args = args))
    }
    if (model == "CP2") {
        names(start)[1] <- "phi2"
        bbmle::parnames(llcmp2) <- names(start)
        sumto <- dots$sumto
        dots$sumto <- NULL
        args <- append(
            list(minuslogl = llcmp2, start = start,
                 data = list(y = y, X = X, offset = off,
                             sumto = sumto, model = model),
                 vecpar = TRUE), dots)
        fit <- suppressWarnings(
            do.call(what = bbmle::mle2, args = args))
    }
    slot(fit, "call.orig") <- match.call()
    return(fit)
}
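
## A minimal usage sketch with simulated Poisson counts (hypothetical
## data, not part of the analyses below): with Poisson data the
## dispersion estimate phi2 should stay close to 0. Assumes the bbmle
## package is installed (as fitcm itself requires).
local({
    set.seed(2017)
    sim <- data.frame(x = runif(50))
    sim$y <- rpois(50, lambda = exp(1 + 0.5 * sim$x))
    m_sim <- fitcm(y ~ x, data = sim, model = "CP2", sumto = 50)
    m_sim@coef                            # (phi2, intercept, slope)
})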

##======================================================================
## Analysis of deviance table for glm and mle2 objects
getAnova <- function(..., print = TRUE) {
    model.list <- list(...)
    test <- "Chisq"
    cls <- sapply(model.list, function(x) class(x)[1])
    if (all(cls == "glm")) {
        fam <- sapply(model.list, function(x) x$family$family)
        if (all(fam == "quasipoisson")) {
            test <- "F"
        }
    }
    nps <- sapply(model.list, function(x) attr(logLik(x), "df"))
    aic <- sapply(model.list, function(x) AIC(x))
    if (test == "Chisq") {
        lls <- sapply(model.list, function(x) logLik(x))
        cst <- 2 * diff(lls)
        pvs <- pchisq(cst, df = diff(nps), lower.tail = FALSE)
        tab <- cbind(
            "np" = nps,
            "ll" = lls,
            "AIC" = aic,
            "2*dif" = c(NA, cst),
            "dif np" = c(NA, diff(nps)),
            "Pr(>Chisq)" = c(NA, pvs))
    }
    if (test == "F") {
        ## see p. 187, expression (7.10), in Modern Applied Statistics
        ## with S (Venables & Ripley)
        sigma <- sapply(model.list, function(x) summary(x)$dispersion)
        df.sigma <- sapply(model.list, function(x) x$df.residual)
        dev <- sapply(model.list, function(x) deviance(x))
        dfs <- c(NA, -diff(dev))
        dnp <- c(NA, diff(nps))
        F <- dfs/(dnp * sigma)
        pvs <- pf(F, df1 = dnp, df2 = df.sigma, lower.tail = FALSE)
        tab <- cbind(
            "np" = nps,
            "dev" = dev,
            "AIC" = aic,
            "F" = F,
            "dif np" = dnp,
            "Pr(>F)" = pvs)
    }
    rownames(tab) <- 1:length(model.list)
    if (print) printCoefmat(tab, na.print = "", cs.ind = 1)
    invisible(tab)
}

## Get the estimated coefficients of the parameters
getCoefs <- function(model, digits = 3, rownames = NULL) {
    ##-------------------------------------------
    glue <- function(est, d = digits) {
        est
        ## apply(est, 1, FUN = function(x)
        ##     paste0(format(x[1], digits = d, nsmall = d), " (",
        ##            format(x[2], digits = d, nsmall = d), ")"))
    }
    ##-------------------------------------------
    cls <- class(model)[1]
    est <- switch(
        cls,
        "glm" = {
            fam <- model$family$family
            if (!fam %in% c("poisson", "quasipoisson")) {
                stop("Distribution family not supported")
            }
            switch(
                fam,
                "poisson" = {
                    glue(rbind(c(NA, NA),
                               summary(model)$coef[, c(1, 3)]))
                },
                "quasipoisson" = {
                    glue(rbind(c(summary(model)$dispersion, NA),
                          summary(model)$coef[, c(1, 3)]))
                }
            )
        },
        "mle2" = {
            glue(bbmle::summary(model)@coef[, c(1, 3)])
        }
    )
    if (!is.null(rownames)) {
        rownames(est) <- rownames
    }
    colnames(est) <- c("Est", "Est/EP")
    return(est)
}

##======================================================================
## Predict Values

## Get interval estimates of the linear predictor (delta method via
## Cholesky decomposition)
cholV_eta <- function(V, X, b, qn) {
    eta <- c(X %*% b)
    if (length(qn) > 1) {
        U <- chol(V)
        stderr <- sqrt(
            apply(X %*% t(U), MARGIN = 1, FUN = function(x) sum(x^2))
        )
        eta <- sweep(x = outer(stderr, qn, FUN = "*"),
                     MARGIN = 1,
                     STATS = eta,
                     FUN = "+")
    }
    return(eta)
}
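
## Quick check of the Cholesky shortcut above (toy numbers): the row
## norms of X %*% t(chol(V)) equal sqrt(diag(X %*% V %*% t(X))), the
## standard errors of the linear predictor X %*% b.
local({
    V <- matrix(c(0.5, 0.2, 0.2, 0.3), 2) # toy covariance matrix
    X <- cbind(1, c(-1, 0, 1))            # toy design matrix
    se_chol <- sqrt(apply(X %*% t(chol(V)), 1, function(x) sum(x^2)))
    all.equal(se_chol, sqrt(diag(X %*% V %*% t(X))))
})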

## Get the means (inverse link function)
calc_mean <- function(eta, dispersion, model = c("CP", "CP2"),
                      tol = 1e-5, offset = NULL, ...) {
    if (is.null(offset)) {
        offset <- 1L
    }
    model <- match.arg(model)
    names(eta) <- NULL
    names(dispersion) <- NULL
    pars <- data.frame(eta = eta, dispersion = dispersion)
    ##-------------------------------------------
    if (model == "CP") {
        ##-------------------------------------------
        pars <- transform(pars, lambda = exp(eta),
                          nu = exp(dispersion))
        sumto <- list(...)$sumto
        ##-------------------------------------------
        approxmu <- with(pars, lambda^(1/nu) - (nu - 1)/(2 * nu))
        sigma <- with(pars, (1/nu) * approxmu)
        sigma <- ifelse(sigma < 0, 0, sigma)
        ymax <- ceiling(max(approxmu + 5 * sqrt(sigma)))
        etamax <- max(pars$eta)
        dispersionmin <- min(pars$dispersion)
        pmax <- exp(
            -llcmp(params = c(dispersionmin, etamax), y = ymax, X = 1,
                   sumto = sumto))
        while (pmax > tol) {
            ymax <- ymax + 1
            pmax <- exp(
                -llcmp(params = c(dispersionmin, etamax),
                        y = ymax, X = 1, sumto = sumto))
        }
        y <- 1:ymax
        estmean <- mapply(
            eta = pars$eta,
            nu = pars$nu,
            MoreArgs = list(y = y),
            FUN = function(eta, nu, y) {
                i <- 0:sumto
                zs <- sapply(eta, function(eta) {
                    sum(exp(i * eta - nu * lfactorial(i)))
                })
                py <- exp(y * eta - nu * lfactorial(y) - sum(log(zs)))
                sum(y * py)
            },
            SIMPLIFY = TRUE)
    }
    if (model == "CP2") {
        estmean <- exp(eta)
    }
    names(estmean) <- NULL
    return(c(estmean))
}

## Compute confidence intervals for predicted values
predictcm <- function(object, newdata, offset = NULL, level = 0.95) {
    ##-------------------------------------------
    if (missing(newdata)) {
        X <- object@data$X
    } else {
        X <- newdata
    }
    ##-------------------------------------------
    qn <- -qnorm((1 - level[1])/2)
    qn <- qn * c(fit = 0, lwr = -1, upr = 1)
    #--------------------------------------------
    V <- vcov(object)
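    ## covariance of the regression coefficients with the dispersion
    ## held fixed at its estimate (Schur complement of the phi block)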
    Vc <- V[-1, -1] - V[-1, 1] %*% solve(V[1, 1]) %*% V[1, -1]
    ## Vc <- V[-1, -1]
    eta <- cholV_eta(Vc, X, b = coef(object)[-1], qn = qn)
    ##-------------------------------------------
    args <- list(dispersion = coef(object)[1],
                 model = object@data$model,
                 offset = offset)
    if (object@data$model == "CP") {
        args$sumto <- object@data$sumto
    }
    pred <- apply(as.matrix(eta), MARGIN = 2,
                  FUN = function(x) {
                      do.call(what = calc_mean,
                              args = append(list(eta = x), args))
                  })
    colnames(pred) <- names(qn)
    return(pred)
}

##======================================================================
## Trellis panel functions for lattice graphics (requires wzRfun)
library(wzRfun)

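## Patch panel.cbH so the lower and upper band lines honor the `lty`
## passed through `...`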
body(panel.cbH)[[14]][[3]][[3]] <-
    quote(panel.lines(xo, lyo, col = col.line, lty = list(...)$lty))
body(panel.cbH)[[14]][[3]][[4]] <-
    quote(panel.lines(xo, uyo, col = col.line, lty = list(...)$lty))

## To profile log-likelihood
myprofile <- function(model, which) {
    sel <- c("param", "z", "focal")
    prof <- profile(model, which = which)
    as.data.frame(prof)[, sel]
}

## To plot profile in lattice
xyprofile <- function(prof, conf = c(0.9, 0.95, 0.99),
                      namestrip = NULL, subset = 4,
                      scales.x = "free", ...) {
    ##-------------------------------------------
    conf <- conf[order(conf, decreasing = TRUE)]
    da <- subset(prof, abs(z) <= subset)
    ##-------------------------------------------
    fl <- levels(da$param)
    if (!is.null(namestrip)) {
        fl <- namestrip
    }
    xyplot(abs(z) ~ focal | param,
           data = da,
           layout = c(NA, 1),
           ## ylab = expression(abs(z)~~(sqrt(~Delta~"deviance"))),
           ylab = expression(sqrt(2~(L(hat(phi))-L(phi)))),
           scales = list(x = scales.x),
           type = c("l", "g"),
           strip = strip.custom(
               factor.levels = fl),
           panel = function(x, y, subscripts, ...) {
               ## conf levels come from the xyprofile() argument
               hl <- sqrt(qchisq(conf, 1))
               ##-------------------------------------------
               mle <- x[y == 0]
               xl <- x[x < mle]; yl <- y[x < mle]
               xr <- x[x > mle]; yr <- y[x > mle]
               ##-------------------------------------------
               funleft <- approxfun(x = yl, y = xl)
               funright <- approxfun(x = yr, y = xr)
               vxl <- funleft(hl)
               vxr <- funright(hl)
               vz <- c(hl, hl)
               ##-------------------------------------------
               panel.xyplot(x, y, ...)
               panel.arrows(c(vxl, vxr), 0, c(vxl, vxr), vz,
                            code = 1, length = 0.1, lty = 2,
                            col = "gray40")
               panel.segments(vxl, vz, vxr, vz, lty = 2,
                              col = "gray40")
               panel.abline(h = 0, v = mle, lty = 3)
               panel.text(x = rep(mle, 2), y = vz + 0.1,
                          labels = paste(conf*100, "%"),
                          col = "gray20")
           }, ...)
}

Graphics configuration

## Report settings
library(knitr)
opts_chunk$set(
    warning = FALSE,
    message = FALSE,
    fig.width = 9,
    fig.height = 4,
    dev.args = list(family = "Palatino"))
## Load the lattice packages and customize graphics output
library(lattice)
library(latticeExtra)
library(grid)

## http://www.magesblog.com/2013/04/how-to-change-alpha-value-of-colours-in.html
add.alpha <- function(col, alpha = 1){
    apply(sapply(col, col2rgb)/255, 2,
          function(x) rgb(x[1], x[2], x[3], alpha = alpha))
}
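
## e.g., add.alpha("black", 0.4) returns "#00000066" (40% opaque black)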

## Define Colors
mycol <- c(1, "#377EB8", "#E41A1C", "#4DAF4A",
           "#ff00ff", "#FF7F00", "#984EA3", "#FFFF33", "#808080")
myreg <- colorRampPalette(c(mycol[3],  "gray90", mycol[2]))(100)

## Trellis graphical style.
ps <- list(
    superpose.symbol = list(
        col = mycol, pch = 1,
        fill = add.alpha(mycol, alpha = 0.4)),
    box.rectangle = list(col = 1, fill = c("gray70")),
    box.umbrella = list(col = 1, lty = 1),
    box.dot = list(pch = "|"),
    dot.symbol = list(col = 1, pch = 19),
    dot.line = list(col = "gray50", lty = 3),
    plot.symbol = list(col = 1),
    plot.line = list(col = 1),
    plot.polygon = list(col = "gray95"),
    superpose.line = list(col = mycol, lty = 1),
    superpose.polygon = list(col = mycol),
    strip.background = list(col = c("gray90", "gray70")),
    regions = list(col = myreg),
    par.sub.text = list(
        font = 1, just = "left", cex = 0.9,
        x = grid::unit(10, "mm"))
    )

trellis.par.set(ps)

## Alternative settings
ac <- list(pad1 = 0.5, pad2 = 0.5, tck = 0.5)
ps2 <- list(
    layout.widths = list(
        left.padding = 0.25,
        right.padding = -1,
        ylab.axis.padding = 0),
    layout.heights = list(
        bottom.padding = 0.25,
        top.padding = 0,
        axis.xlab.padding = 0,
        xlab.top = 0),
    axis.components = list(
        bottom = ac, top = ac,
        left = ac, right = ac)
)

Moments Approximation

## =====================================================================
## Analysis of Approximation moments
##                                                        Eduardo Junior
##                                                    edujrrib@gmail.com
##                                                            2017-03-16
## =====================================================================
## Load package and functions

source("config.R")
source("functions.R")

## Colors for legends
cols <- trellis.par.get("superpose.line")$col
## Convergence of Z(lambda, nu) constant
computeZ <- function(lambda, nu, maxit = 1e4, tol = 1e-5) {
    z <- vector("numeric", maxit)
    j <- 1
    z[j] <- exp(j * log(lambda) - nu * lfactorial(j))
    while (abs(z[j]) > tol && j < maxit) {
        j <- j + 1
        z[j] <- exp(j * log(lambda) - nu * lfactorial(j))
    }
    ## divergent series (the terms do not decay)
    if (lambda == 1 && nu == 0) z <- Inf
    ## the j = 0 term of the series equals 1, hence the "+ 1"
    return(sum(z, na.rm = TRUE) + 1)
}

grid <- expand.grid(
    lambda = c(0.5, 1, 5, 10, 30, 50),
    nu = seq(0, 1, length.out = 11))
grid$z <- apply(grid, 1, function(par) {
    computeZ(lambda = par[1], nu = par[2], maxit = 1e4)
})

xt <- xtabs(z ~ nu + lambda, data = grid)
xt
##      lambda
## nu              0.5             1             5            10
##   0    1.999992e+00           Inf           Inf           Inf
##   0.1  1.916269e+00  7.642745e+00           Inf           Inf
##   0.2  1.856385e+00  5.253523e+00 3.167527e+273           Inf
##   0.3  1.810601e+00  4.319695e+00  1.602324e+29 2.541984e+282
##   0.4  1.774097e+00  3.803204e+00  4.710590e+10  1.326456e+56
##   0.5  1.744133e+00  3.469505e+00  1.339958e+06  3.668680e+22
##   0.6  1.719005e+00  3.233784e+00  2.050181e+04  4.993080e+12
##   0.7  1.697585e+00  3.057328e+00  2.372940e+03  3.685683e+08
##   0.8  1.679084e+00  2.919745e+00  6.488009e+02  2.698812e+06
##   0.9  1.662937e+00  2.809192e+00  2.740935e+02  1.466388e+05
##   1    1.648721e+00  2.718282e+00  1.484132e+02  2.202647e+04
##      lambda
## nu               30            50
##   0             Inf           Inf
##   0.1           Inf           Inf
##   0.2           Inf           Inf
##   0.3           Inf           Inf
##   0.4           Inf           Inf
##   0.5 3.319764e+196           Inf
##   0.6  1.730117e+76 4.627475e+177
##   0.7  4.929488e+39  6.933351e+81
##   0.8  5.086211e+24  3.425421e+46
##   0.9  1.801582e+17  2.191499e+30
##   1    1.068647e+13  5.184706e+21
## Study the approximation
## Mean and variance relationship
aux <- expand.grid(
    mu = seq(2, 30, length.out = 50),
    phi = seq(log(0.3), log(2.5), length.out = 50))

moments <- mapply(FUN = calc_moments,
                  mu = aux$mu,
                  phi = aux$phi,
                  SIMPLIFY = FALSE)
grid <- cbind(aux, t(do.call(cbind, moments)))
grid <- transform(grid, va = mu / exp(phi))

library(RColorBrewer)
col <- brewer.pal(n = 8, name = "RdBu")
myreg <- colorRampPalette(colors = col)
xy1 <- xyplot(var ~ mean,
              groups = phi,
              data = grid,
              type = "l",
              lwd = 2,
              axis = axis.grid,
              xlab = expression(E(X)),
              ylab = expression(V(X)),
              sub = "(a)",
              legend = list(
                  top = list(
                      fun = draw.colorkey,
                      args = list(
                          key = list(
                              space = "top",
                              col = myreg(length(unique(grid$phi))),
                              at = unique(grid$phi),
                              draw = FALSE)))),
              par.settings = modifyList(
                  ps2, list(superpose.line = list(
                                col = myreg(length(unique(grid$phi))))
                            )
              ),
              panel = function(x, y, ...) {
                  panel.xyplot(x, y, ...)
                  panel.curve(1*x, min(x), max(x), lty = 2)
              },
              page = function(...) {
                  grid.text(expression(log(nu)),
                            just = "bottom",
                            x = unit(0.10, "npc"),
                            y = unit(0.83, "npc"))
              })
## Errors in approximations for E[Y] and V[Y]
grid <- transform(grid,
                  emu = (mu - mean)^2,
                  eva = (va - var)^2)

myreg <- colorRampPalette(c("gray90",  "gray20"))(100)
xy2 <- levelplot(emu ~ phi + mu, data = grid,
                 aspect = "fill",
                 col.regions = myreg,
                 xlab = expression(phi),
                 ylab = expression(mu),
                 sub = "(b)",
                 colorkey = list(space = "top"),
                 par.settings = ps2,
                 panel = function(x, y, z, ...) {
                     panel.levelplot(x, y, z, ...)
                     panel.curve(10 - ( exp(x) - 1)/(2 * exp(x)),
                                 lty = 2)
                     panel.abline(v = 0, lty = 2)
                 })

xy3 <- levelplot(eva ~ phi + mu, data = grid,
                 aspect = "fill",
                 col.regions = myreg,
                 xlab = expression(phi),
                 ylab = expression(mu),
                 sub = "(c)",
                 colorkey = list(space = "top"),
                 par.settings = ps2,
                 panel = function(x, y, z, ...) {
                     panel.levelplot(x, y, z, ...)
                     panel.curve((10 - ( exp(x) - 1)/
                                  (2 * exp(x)))/exp(x), lty = 2)
                     panel.abline(v = 0, lty = 2)
                 })
print(xy1, split = c(1, 1, 3, 1), more = TRUE)
print(xy2, split = c(2, 1, 3, 1), more = TRUE)
print(xy3, split = c(3, 1, 3, 1), more = FALSE)

## COM-Poisson probability mass function (mean parametrization)
dcmp <- Vectorize(FUN = function(y, mu, phi, sumto = 100)
    exp(-llcmp2(c(phi, log(mu)), y = y, X = 1, sumto = sumto)),
    vectorize.args = c("y", "mu", "phi"))

grid <- expand.grid(mu = c(2, 8, 15), phi = log(c(0.5, 1, 2.5)))
y <- 0:30
py <- mapply(FUN = dcmp,
             mu = grid$mu,
             phi = grid$phi,
             MoreArgs = list(y = y, sumto = 100),
             SIMPLIFY = FALSE)
grid <- cbind(grid[rep(1:nrow(grid), each = length(y)), ],
              y = y,
              py = unlist(py))
## COM-Poisson p.m.f. for different combinations of phi and mu
leg_phi <- parse(
    text = paste("phi == \"",
                 formatC(unique(grid$phi), 1, format = "f"),
                 "\""))
barchart(py ~ y | factor(mu),
         groups = factor(phi),
         data = grid,
         horizontal = FALSE,
         layout = c(NA, 1),
         as.table = TRUE,
         axis = axis.grid,
         origin = 0,
         xlim = extendrange(y, f = 0.01),
         border = "transparent",
         scales = list(x = list(at = pretty(y))),
         ylab = expression(P(Y==y)),
         xlab = expression(y),
         par.settings = list(
             superpose.polygon = list(col = c(mycol[2:4])),
             superpose.line = list(col = c(mycol[2:4]))
         ),
         auto.key = list(
             columns = 3,
             rectangles = FALSE,
             lines = TRUE,
             text = leg_phi
         ),
         strip = strip.custom(
             strip.names = TRUE,
             var.name = expression(mu == ""),
             sep = ""))

Defoliation experiment

## =====================================================================
## Analysis of Artificial Defoliation in Cotton Plants
##                                                        Eduardo Junior
##                                                    edujrrib@gmail.com
##                                                            2017-03-16
## =====================================================================
## Load packages and functions

library(bbmle)
library(multcomp)
library(plyr)

source("config.R")
source("functions.R")

## Colors for legends
cols <- trellis.par.get("superpose.line")$col
## Load data
## data(cottonBolls, package = "cmpreg")
cottonBolls <- read.table("./data/cottonBolls.txt",
                          header = TRUE, sep = "\t")
str(cottonBolls)
## 'data.frame':    125 obs. of  4 variables:
##  $ est : Factor w/ 5 levels "botão floral",..: 5 5 5 5 5 5 5 5 5 5 ...
##  $ des : num  0 0 0 0 0 0.25 0.25 0.25 0.25 0.25 ...
##  $ rept: int  1 2 3 4 5 1 2 3 4 5 ...
##  $ ncap: int  10 9 8 8 10 11 9 10 10 10 ...
## Exploratory analysis

## Scatter plot with a beeswarm flavor
xy1 <- xyplot(ncap ~ des | est,
              data = cottonBolls,
              layout = c(2, 3),
              as.table = TRUE,
              type = c("p", "smooth", "g"),
              xlab = "Artificial defoliation level",
              ylab = "Number of bolls produced",
              spread = 0.05,
              panel = panel.beeswarm)

## Sample variance vs sample mean (evidence of underdispersion)
mv <- aggregate(ncap ~ est + des, data = cottonBolls,
                FUN = function(x) c(mean = mean(x), var = var(x)))
xlim <- ylim <- extendrange(c(mv$ncap), f = 0.05)

xy2 <- xyplot(ncap[, "var"] ~ ncap[, "mean"],
              data = mv,
              type = c("p", "r", "g"),
              xlim = xlim,
              ylim = ylim,
              xlab = expression("Sample"~"mean"~(bar(y))),
              ylab = expression("Sample"~"variance"~(s^2)),
              panel = function(...) {
                  panel.xyplot(...)
                  panel.abline(a = 0, b = 1, lty = 2)
              })
print(xy1, split = c(1, 1, 2, 1), more = TRUE)
print(xy2, split = c(2, 1, 2, 1), more = FALSE)

## Fit models

mnames <- c("PO", "C1", "C2", "QP")

## Predictor, following Zeviani et al. (2014)
form1 <- ncap ~ est:(des + I(des^2))

m1PO <- glm(form1, data = cottonBolls, family = poisson)
system.time(
    m1C1 <- fitcm(form1, data = cottonBolls, model = "CP", sumto = 50)
)
##    user  system elapsed 
##   4.656   0.000   4.655
system.time(
    m1C2 <- fitcm(form1, data = cottonBolls, model = "CP2", sumto = 50)
)
##    user  system elapsed 
##   3.516   0.000   3.518
m1QP <- glm(form1, data = cottonBolls, family = quasipoisson)

models.ncap <- list(m1PO, m1C1, m1C2, m1QP)
names(models.ncap) <- mnames

## Number of calls to the log-likelihood and the numerical gradient
models.ncap$C1@details$counts
## function gradient 
##       86       39
models.ncap$C2@details$counts
## function gradient 
##       63       19
## LRT and profile of the extra parameter

## LRT between Poisson and COM-Poisson (test: phi == 0)
getAnova(m1PO, m1C2)
##   np     ll    AIC  2*dif dif np Pr(>Chisq)    
## 1 11 -255.8 533.61                             
## 2 12 -208.4 440.80 94.811      1  < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
profs.ncap <- lapply(list(c(m1C1, "phi"), c(m1C2, "phi2")),
                     function(x) myprofile(x[[1]], x[[2]]))
profs.ncap <- do.call("rbind", profs.ncap)

## Plot the profile of the extra parameter
snames <- parse(text = c("'COM-Poisson'~(phi)",
                         "'COM-Poisson'[mu]~(phi)"))
xyprofile(profs.ncap, namestrip = snames,
          ylim = extendrange(c(0, 3)),
          xlab = "Dispersion/Precision parameter")