#-----------------------------------------------------------------------
# Packages.
library(readxl)
library(broom)
library(gtsummary)
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.4 ✔ readr 2.1.5
## ✔ forcats 1.0.0 ✔ stringr 1.5.1
## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1
## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1
## ✔ purrr 1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ purrr::%||%() masks base::%||%()
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
# https://github.com/aphalo/ggpp
library(ggpp)
## Registered S3 methods overwritten by 'ggpp':
## method from
## heightDetails.titleGrob ggplot2
## widthDetails.titleGrob ggplot2
##
## Attaching package: 'ggpp'
##
## The following object is masked from 'package:ggplot2':
##
## annotate
library(nls2)
## Loading required package: proto
library(gslnls)
#-----------------------------------------------------------------------
# Import and preparation.
tb <- read_excel("exvivo.xlsx", sheet = 1)
str(tb)
## tibble [4,480 × 7] (S3: tbl_df/tbl/data.frame)
## $ exp : num [1:4480] 1 1 1 1 1 1 1 1 1 1 ...
## $ iso : chr [1:4480] "CrAaPR-01" "CrAaPR-01" "CrAaPR-01" "CrAaPR-01" ...
## $ rept: num [1:4480] 1 2 3 4 5 1 2 3 4 5 ...
## $ tem : num [1:4480] 15 15 15 15 15 15 15 15 15 15 ...
## $ mol : num [1:4480] 6 6 6 6 6 6 6 6 6 6 ...
## $ sev : num [1:4480] 0 0 0 0 0 0 0 0 0 0 ...
## $ time: num [1:4480] 12 12 12 12 12 24 24 24 24 24 ...
# DataExplorer::plot_scatterplot(tb, by = "sev", ncol = 2, nrow = 5)
# Numerical summary of the variables.
# gtsummary::tbl_summary(tb)
gtsummary::tbl_summary(tb) |>
gtsummary::as_kable()
|Characteristic |N = 4,480 |
|:---|:---|
|exp |  |
|1 | 2,240 (50%) |
|2 | 2,240 (50%) |
|iso |  |
|CrAaPR-01 | 1,120 (25%) |
|CrAaPR-27 | 1,120 (25%) |
|CrAaPR-40 | 1,120 (25%) |
|CrAlPR-73 | 1,120 (25%) |
|rept |  |
|1 | 896 (20%) |
|2 | 896 (20%) |
|3 | 896 (20%) |
|4 | 896 (20%) |
|5 | 896 (20%) |
|tem |  |
|15 | 1,120 (25%) |
|21 | 1,120 (25%) |
|27 | 1,120 (25%) |
|33 | 1,120 (25%) |
|mol |  |
|6 | 1,120 (25%) |
|12 | 1,120 (25%) |
|24 | 1,120 (25%) |
|48 | 1,120 (25%) |
|sev | 0.00 (0.00, 0.05) |
|time |  |
|12 | 640 (14%) |
|24 | 640 (14%) |
|48 | 640 (14%) |
|62 | 640 (14%) |
|84 | 640 (14%) |
|100 | 640 (14%) |
|124 | 640 (14%) |
# Number of replicates per experimental condition/assessment.
tb |>
count(exp, iso, tem, mol, time)
## # A tibble: 896 × 6
## exp iso tem mol time n
## <dbl> <chr> <dbl> <dbl> <dbl> <int>
## 1 1 CrAaPR-01 15 6 12 5
## 2 1 CrAaPR-01 15 6 24 5
## 3 1 CrAaPR-01 15 6 48 5
## 4 1 CrAaPR-01 15 6 62 5
## 5 1 CrAaPR-01 15 6 84 5
## 6 1 CrAaPR-01 15 6 100 5
## 7 1 CrAaPR-01 15 6 124 5
## 8 1 CrAaPR-01 15 12 12 5
## 9 1 CrAaPR-01 15 12 24 5
## 10 1 CrAaPR-01 15 12 48 5
## # ℹ 886 more rows
# Observations per time point.
tb |>
count(time)
## # A tibble: 7 × 2
## time n
## <dbl> <int>
## 1 12 640
## 2 24 640
## 3 48 640
## 4 62 640
## 5 84 640
## 6 100 640
## 7 124 640
# Convert qualitative variables to factors.
tb <-
tb |>
mutate_at(c("exp", "iso"), factor)
ggplot(tb, aes(x = time, y = sev, color = iso,
shape = exp, linetype = exp)) +
facet_grid(tem ~ mol) +
stat_summary(geom = "line", fun = "mean") +
geom_point()

# Check the mean in each factor combination.
ggplot(tb, aes(x = time, y = sev)) +
facet_grid(exp + tem ~ iso + mol) +
stat_summary(geom = "line", fun = "mean") +
geom_point()

# Highlight each experimental unit.
ggplot(tb, aes(x = time, y = sev)) +
facet_grid(exp + tem ~ iso + mol) +
geom_line(aes(group = rept)) +
geom_point()

# ggsave("sev_time.png", width = 16, height = 9)
# NOTE: Many curves have severity equal to zero over the entire period.
# This prevents fitting any regression model, since there is no
# variation in the data.
# QUESTION: Why estimate a curve for severity as a function of time?
# Identify the experimental unit on which severity is assessed over
# time.
tb <- tb |>
mutate(ue = interaction(exp, iso, tem, mol, rept, drop = TRUE))
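# As noted above, many severity curves are flat at zero. A quick count of
# how many experimental units never leave zero (a sketch).
tb |>
  group_by(ue) |>
  summarise(all_zero = all(sev == 0), .groups = "drop") |>
  count(all_zero)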
#-----------------------------------------------------------------------
# Select one experimental condition.
tb |>
filter(tem == 27, mol == 24) |>
ggplot(aes(x = time, y = sev)) +
facet_grid(exp ~ iso) +
geom_line(aes(group = rept), alpha = 0.15) +
geom_point() +
stat_summary(geom = "line", fun = "mean", color = "red") +
labs(title = "Temperatura 27°C e Molhamento 24h")

# IMPORTANT: There is a serious problem for inference from these data.
# The variance is zero in many assessments. This clearly violates the
# assumption of homoscedastic errors and will affect uncertainty
# measures in estimation, such as confidence intervals.
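# A quick look at how widespread the problem is: the proportion of
# condition-by-time cells whose severity variance is exactly zero
# (a sketch).
tb |>
  group_by(exp, iso, tem, mol, time) |>
  summarise(var_sev = var(sev), .groups = "drop") |>
  summarise(prop_zero_var = mean(var_sev == 0))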
#-----------------------------------------------------------------------
# Prototyping the model fit to the data.
levels(tb$iso)
## [1] "CrAaPR-01" "CrAaPR-27" "CrAaPR-40" "CrAlPR-73"
tbi <- tb |>
filter(tem == 27, mol == 24, exp == "2", iso == levels(iso)[4])
plot(tbi$time, tbi$sev)
with(list(r = 0.01, x0 = 50), {
curve((1 - exp(-r * (x - x0))) * (x > x0),
from = 0, to = max(tbi$time),
ylim = c(0, 1),
col = "red",
add = TRUE,
lwd = 2)
abline(h = 0, v = x0, col = "gray")
})

# To map (-Inf, Inf) onto (0, 1) for the asymptote.
inv_logit <- function(x) 1 / (1 + exp(-x))
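# Quick sanity check (a sketch): large negative, zero, and large positive
# values map to roughly 0, 0.5, and 1.
inv_logit(c(-10, 0, 10))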
n0 <- nls(sev ~ inv_logit(A) * (1 - exp(-r * (time - x0))) * (time > x0),
data = tbi,
start = list(A = 0.8, r = 0.01, x0 = 50))
summary(n0)
##
## Formula: sev ~ inv_logit(A) * (1 - exp(-r * (time - x0))) * (time > x0)
##
## Parameters:
## Estimate Std. Error t value Pr(>|t|)
## A 1.65022 3.50005 0.471 0.640
## r 0.01841 0.01962 0.938 0.355
## x0 46.58073 6.54753 7.114 4.51e-08 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.2173 on 32 degrees of freedom
##
## Number of iterations to convergence: 7
## Achieved convergence tolerance: 9.86e-07
plot(tbi$time, tbi$sev)
with(as.list(coef(n0)), {
curve(inv_logit(A) * (1 - exp(-r * (x - x0))) * (x > x0),
from = 0, to = max(tbi$time),
ylim = c(0, 1),
col = "red",
add = TRUE,
lwd = 2)
abline(h = 0, v = x0, col = "gray")
})

broom::tidy(n0)
## # A tibble: 3 × 5
## term estimate std.error statistic p.value
## <chr> <dbl> <dbl> <dbl> <dbl>
## 1 A 1.65 3.50 0.471 0.640
## 2 r 0.0184 0.0196 0.938 0.355
## 3 x0 46.6 6.55 7.11 0.0000000451
broom::glance(n0)
## # A tibble: 1 × 9
## sigma isConv finTol logLik AIC BIC deviance df.residual nobs
## <dbl> <lgl> <dbl> <dbl> <dbl> <dbl> <dbl> <int> <int>
## 1 0.217 TRUE 0.000000986 5.33 -2.66 3.56 1.51 32 35
# confint(n0, level = 0.95)
confint.default(n0, level = 0.95)
## 2.5 % 97.5 %
## A -5.20975260 8.51020076
## r -0.02005055 0.05687076
## x0 33.74780966 59.41365958
# Computing R² as the squared correlation between observed and fitted values.
cor(fitted(n0), fitted(n0) + residuals(n0))^2
## [1] 0.5838758
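# An alternative pseudo-R², 1 - SSE/SStot, shown for comparison (a
# sketch; the two definitions need not agree for nonlinear models).
1 - deviance(n0) / sum((tbi$sev - mean(tbi$sev))^2)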
#-----------------------------------------------------------------------
# Fit using the `gslnls` package, which provides the delta method.
# GSL (Levenberg-Marquardt algorithm)
# library(gslnls) ## v1.1.1
n0 <-
gslnls::gsl_nls(
fn = sev ~ inv_logit(A) * (1 - exp(-r * (time - x0))) * (time > x0),
data = tbi,
start = list(A = 0.8, r = 0.01, x0 = 50),
algorithm = "lm",
control = list(scale = "levenberg")
)
summary(n0)
##
## Formula: sev ~ inv_logit(A) * (1 - exp(-r * (time - x0))) * (time > x0)
##
## Parameters:
## Estimate Std. Error t value Pr(>|t|)
## A 1.65021 3.49997 0.471 0.640
## r 0.01841 0.01962 0.938 0.355
## x0 46.58075 6.54751 7.114 4.51e-08 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.2173 on 32 degrees of freedom
##
## Number of iterations to convergence: 15
## Achieved convergence tolerance: 0
broom::tidy(n0)
## # A tibble: 3 × 5
## term estimate std.error statistic p.value
## <chr> <dbl> <dbl> <dbl> <dbl>
## 1 A 1.65 3.50 0.471 0.640
## 2 r 0.0184 0.0196 0.938 0.355
## 3 x0 46.6 6.55 7.11 0.0000000451
broom::glance(n0)
## # A tibble: 1 × 9
## sigma isConv finTol logLik AIC BIC deviance df.residual nobs
## <dbl> <lgl> <dbl> <dbl> <dbl> <dbl> <dbl> <int> <int>
## 1 0.217 TRUE 0 5.33 -2.66 3.56 1.51 32 35
confint(n0, level = 0.95)
## 2.5 % 97.5 %
## A -5.47900752 8.77941998
## r -0.02156086 0.05838128
## x0 33.24391914 59.91758938
# confint(n0, method = "profile", level = 0.95)
confint(n0, method = "asymptotic", level = 0.95)
## 2.5 % 97.5 %
## A -5.47900752 8.77941998
## r -0.02156086 0.05838128
## x0 33.24391914 59.91758938
confint.default(n0, level = 0.95)
## 2.5 % 97.5 %
## A -5.20961676 8.5100292
## r -0.02005048 0.0568709
## x0 33.74787655 59.4136320
# Confidence and prediction intervals.
predict(n0, interval = "confidence", level = 0.95)
## fit lwr upr
## [1,] 0.0000000 0.0000000 0.0000000
## [2,] 0.0000000 0.0000000 0.0000000
## [3,] 0.0000000 0.0000000 0.0000000
## [4,] 0.0000000 0.0000000 0.0000000
## [5,] 0.0000000 0.0000000 0.0000000
## [6,] 0.0000000 0.0000000 0.0000000
## [7,] 0.0000000 0.0000000 0.0000000
## [8,] 0.0000000 0.0000000 0.0000000
## [9,] 0.0000000 0.0000000 0.0000000
## [10,] 0.0000000 0.0000000 0.0000000
## [11,] 0.0216359 -0.1647946 0.2080664
## [12,] 0.0216359 -0.1647946 0.2080664
## [13,] 0.0216359 -0.1647946 0.2080664
## [14,] 0.0216359 -0.1647946 0.2080664
## [15,] 0.0216359 -0.1647946 0.2080664
## [16,] 0.2073276 0.0792786 0.3353766
## [17,] 0.2073276 0.0792786 0.3353766
## [18,] 0.2073276 0.0792786 0.3353766
## [19,] 0.2073276 0.0792786 0.3353766
## [20,] 0.2073276 0.0792786 0.3353766
## [21,] 0.4176725 0.2798348 0.5555103
## [22,] 0.4176725 0.2798348 0.5555103
## [23,] 0.4176725 0.2798348 0.5555103
## [24,] 0.4176725 0.2798348 0.5555103
## [25,] 0.4176725 0.2798348 0.5555103
## [26,] 0.5251507 0.4088590 0.6414424
## [27,] 0.5251507 0.4088590 0.6414424
## [28,] 0.5251507 0.4088590 0.6414424
## [29,] 0.5251507 0.4088590 0.6414424
## [30,] 0.5251507 0.4088590 0.6414424
## [31,] 0.6372133 0.4531054 0.8213211
## [32,] 0.6372133 0.4531054 0.8213211
## [33,] 0.6372133 0.4531054 0.8213211
## [34,] 0.6372133 0.4531054 0.8213211
## [35,] 0.6372133 0.4531054 0.8213211
predict(n0, interval = "prediction", level = 0.95)
## fit lwr upr
## [1,] 0.0000000 -0.44266542 0.4426654
## [2,] 0.0000000 -0.44266542 0.4426654
## [3,] 0.0000000 -0.44266542 0.4426654
## [4,] 0.0000000 -0.44266542 0.4426654
## [5,] 0.0000000 -0.44266542 0.4426654
## [6,] 0.0000000 -0.44266542 0.4426654
## [7,] 0.0000000 -0.44266542 0.4426654
## [8,] 0.0000000 -0.44266542 0.4426654
## [9,] 0.0000000 -0.44266542 0.4426654
## [10,] 0.0000000 -0.44266542 0.4426654
## [11,] 0.0216359 -0.45868587 0.5019577
## [12,] 0.0216359 -0.45868587 0.5019577
## [13,] 0.0216359 -0.45868587 0.5019577
## [14,] 0.0216359 -0.45868587 0.5019577
## [15,] 0.0216359 -0.45868587 0.5019577
## [16,] 0.2073276 -0.25348604 0.6681413
## [17,] 0.2073276 -0.25348604 0.6681413
## [18,] 0.2073276 -0.25348604 0.6681413
## [19,] 0.2073276 -0.25348604 0.6681413
## [20,] 0.2073276 -0.25348604 0.6681413
## [21,] 0.4176725 -0.04595655 0.8813016
## [22,] 0.4176725 -0.04595655 0.8813016
## [23,] 0.4176725 -0.04595655 0.8813016
## [24,] 0.4176725 -0.04595655 0.8813016
## [25,] 0.4176725 -0.04595655 0.8813016
## [26,] 0.5251507 0.06746473 0.9828367
## [27,] 0.5251507 0.06746473 0.9828367
## [28,] 0.5251507 0.06746473 0.9828367
## [29,] 0.5251507 0.06746473 0.9828367
## [30,] 0.5251507 0.06746473 0.9828367
## [31,] 0.6372133 0.15778819 1.1166383
## [32,] 0.6372133 0.15778819 1.1166383
## [33,] 0.6372133 0.15778819 1.1166383
## [34,] 0.6372133 0.15778819 1.1166383
## [35,] 0.6372133 0.15778819 1.1166383
# CI for functions of the parameters via the delta method. Determine the
# value of the asymptote.
gslnls::confintd(n0, expr = c(Ass = "1/(1 + exp(-A))"), level = 0.95)
## fit lwr upr
## 1/(1 + exp(-A)) 0.8389189 -0.12448 1.802318
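# A manual delta-method check (a sketch): for Ass = inv_logit(A) only the
# derivative with respect to A is nonzero. This assumes vcov() is
# available for the fit (it inherits from class "nls") and uses a t
# quantile; confintd() may differ slightly in its quantile choice.
est <- coef(n0)
V <- vcov(n0)
g <- setNames(numeric(length(est)), names(est))
g["A"] <- exp(-est["A"]) / (1 + exp(-est["A"]))^2
se_Ass <- sqrt(drop(t(g) %*% V %*% g))
inv_logit(est["A"]) + c(lwr = -1, upr = 1) * qt(0.975, df.residual(n0)) * se_Ass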
# Building the prediction with bands.
tb_pred <- data.frame(time = seq(0, max(tbi$time), length.out = 151))
tb_pred <-
predict(n0, newdata = tb_pred, interval = "confidence", level = 0.95) |>
as.data.frame() |>
cbind(tb_pred)
# Plot with confidence band.
plot(sev ~ time,
data = tbi,
ylim = range(tbi$sev, tb_pred$upr, tb_pred$lwr))
with(tb_pred, matlines(x = time, y = cbind(fit, lwr, upr)))

#-----------------------------------------------------------------------
# Use `nls2` to search for starting parameter values.
# Definition of the interval for A.
inv_logit(c(-1, 5))
## [1] 0.2689414 0.9933071
# Create a grid of starting values. The appropriate parameter estimates
# are expected to fall within the region defined below. This region is
# specific to these data.
tb_start <- expand.grid(A = seq(0.1, 5, length.out = 12),
x0 = seq(20, 80, length.out = 12),
r = seq(0.01, 0.2, length.out = 12),
KEEP.OUT.ATTRS = FALSE)
nrow(tb_start)
## [1] 1728
names(tb_start)
## [1] "A" "x0" "r"
# Fit with the `grid-search` (i.e. `brute-force`) option.
n0 <- nls2::nls2(sev ~ inv_logit(A) * (1 - exp(-r * (time - x0))) * (time > x0),
data = tbi,
algorithm = "grid-search",
start = tb_start)
summary(n0)
##
## Formula: sev ~ inv_logit(A) * (1 - exp(-r * (time - x0))) * (time > x0)
##
## Parameters:
## Estimate Std. Error t value Pr(>|t|)
## A 0.99091 1.76276 0.562 0.578
## x0 52.72727 11.13825 4.734 4.3e-05 ***
## r 0.02727 0.03394 0.803 0.428
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.2187 on 32 degrees of freedom
##
## Number of iterations to convergence: 1728
## Achieved convergence tolerance: NA
# Result.
plot(tbi$time, tbi$sev)
with(as.list(coef(n0)), {
curve(inv_logit(A) * (1 - exp(-r * (x - x0))) * (x > x0),
from = 0, to = max(tbi$time),
ylim = c(0, 1),
col = "magenta",
add = TRUE,
lwd = 2)
abline(h = 0, v = x0, col = "gray")
})

# IMPORTANT: Use the `grid-search` to supply starting values inside a
# self-start function.
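# Before wrapping this into a self-start function, a quick check (a
# sketch) that the grid estimate refines cleanly when passed as the
# starting point of a regular nls() fit.
n1 <- nls(sev ~ inv_logit(A) * (1 - exp(-r * (time - x0))) * (time > x0),
          data = tbi,
          start = coef(n0))
coef(n1)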
#-----------------------------------------------------------------------
# Implementing a self-start function.
# Self-start function.
model_self_start <- function(mCall, data, LHS, ...){
xy <- data.frame(sortedXyData(mCall[["time"]], LHS, data))
n0 <- nls2::nls2(
formula = y ~ inv_logit(A) * (1 - exp(-r * (x - x0))) * (x > x0),
data = xy,
algorithm = "brute-force",
start = tb_start
)
value <- coef(n0)
return(value)
}
# Partial derivatives to return the gradient.
model <- expression((1/(1 + exp(-A))) * (1 - (exp(-r * (time - x0)))))
pars <- c("A", "x0", "r")
sapply(pars, D, expr = model)
## $A
## exp(-A)/(1 + exp(-A))^2 * (1 - (exp(-r * (time - x0))))
##
## $x0
## -((1/(1 + exp(-A))) * (exp(-r * (time - x0)) * r))
##
## $r
## (1/(1 + exp(-A))) * (exp(-r * (time - x0)) * (time - x0))
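# stats::deriv() can build an equivalent gradient-carrying function
# automatically (a sketch; the (time > x0) indicator is not
# differentiable and would still have to be applied by hand afterwards).
model_deriv <- deriv(
  ~ (1/(1 + exp(-A))) * (1 - exp(-r * (time - x0))),
  namevec = c("A", "x0", "r"),
  function.arg = c("time", "A", "x0", "r")
)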
model_function <- function(time, A, x0, r){
# Value of the function f(theta, x).
value <- (1/(1 + exp(-A))) * (1 - (exp(-r * (time - x0)))) * (time > x0)
# Value of the derivative f'(theta, x).
attr(value, "gradient") <-
cbind(
A = exp(-A)/(1 + exp(-A))^2 *
(1 - (exp(-r * (time - x0)))) * (time > x0),
x0 = -((1/(1 + exp(-A))) *
(exp(-r * (time - x0)) * r)) * (time > x0),
r = (1/(1 + exp(-A))) *
(exp(-r * (time - x0)) * (time - x0)) * (time > x0)
)
return(value)
}
# str(model_function)
# Test the gradient.
with(unique(tbi[, c("time")]), {
cbind(time = time,
attr(model_function(time, A = 1, x0 = 50, r = 0.01),
which = "gradient"))
})
## time A x0 r
## [1,] 12 0.00000000 0.000000000 0.00000
## [2,] 24 0.00000000 0.000000000 0.00000
## [3,] 48 0.00000000 0.000000000 0.00000
## [4,] 62 0.02223279 -0.006483908 7.78069
## [5,] 84 0.05666939 -0.005203458 17.69176
## [6,] 100 0.07736077 -0.004434094 22.17047
## [7,] 124 0.10280564 -0.003487982 25.81107
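# Numerical check of the analytical gradient by central finite
# differences (a sketch, at the same parameter values used above).
num_grad <- function(time, A, x0, r, eps = 1e-6) {
  f <- function(A, x0, r) {
    (1/(1 + exp(-A))) * (1 - exp(-r * (time - x0))) * (time > x0)
  }
  cbind(A  = (f(A + eps, x0, r) - f(A - eps, x0, r)) / (2 * eps),
        x0 = (f(A, x0 + eps, r) - f(A, x0 - eps, r)) / (2 * eps),
        r  = (f(A, x0, r + eps) - f(A, x0, r - eps)) / (2 * eps))
}
num_grad(sort(unique(tbi$time)), A = 1, x0 = 50, r = 0.01)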
# The gradient attribute is returned along with the function value.
model <- selfStart(
model = model_function,
parameters = c("A", "x0", "r"),
initial = model_self_start,
template = function(time, A, x0, r) NULL
)
model
## function(time, A, x0, r){
## # Value of the function f(theta, x).
## value <- (1/(1 + exp(-A))) * (1 - (exp(-r * (time - x0)))) * (time > x0)
## # Value of the derivative f'(theta, x).
## attr(value, "gradient") <-
## cbind(
## A = exp(-A)/(1 + exp(-A))^2 *
## (1 - (exp(-r * (time - x0)))) * (time > x0),
## x0 = -((1/(1 + exp(-A))) *
## (exp(-r * (time - x0)) * r)) * (time > x0),
## r = (1/(1 + exp(-A))) *
## (exp(-r * (time - x0)) * (time - x0)) * (time > x0)
## )
## return(value)
## }
## <bytecode: 0x556021faa260>
## attr(,"initial")
## function(mCall, data, LHS, ...){
## xy <- data.frame(sortedXyData(mCall[["time"]], LHS, data))
## n0 <- nls2::nls2(
## formula = y ~ inv_logit(A) * (1 - exp(-r * (x - x0))) * (x > x0),
## data = xy,
## algorithm = "brute-force",
## start = tb_start
## )
## value <- coef(n0)
## return(value)
## }
## attr(,"pnames")
## [1] "A" "x0" "r"
## attr(,"class")
## [1] "selfStart"
# SSmodel
# The evaluated pre-estimates.
getInitial(sev ~ model(time, A, x0, r),
data = tbi)
## A x0 r
## 0.99090909 52.72727273 0.02727273
#-----------------------------------------------------------------------
# Test obtaining all the starting values.
tbi <-
tb |>
filter(tem == 27, mol == 24) |>
droplevels() |>
mutate(cond = interaction(exp, iso, tem, mol, drop = TRUE))
str(tbi)
## tibble [280 × 9] (S3: tbl_df/tbl/data.frame)
## $ exp : Factor w/ 2 levels "1","2": 1 1 1 1 1 1 1 1 1 1 ...
## $ iso : Factor w/ 4 levels "CrAaPR-01","CrAaPR-27",..: 1 1 1 1 1 1 1 1 1 1 ...
## $ rept: num [1:280] 1 2 3 4 5 1 2 3 4 5 ...
## $ tem : num [1:280] 27 27 27 27 27 27 27 27 27 27 ...
## $ mol : num [1:280] 24 24 24 24 24 24 24 24 24 24 ...
## $ sev : num [1:280] 0 0 0 0 0 0 0 0 0 0 ...
## $ time: num [1:280] 12 12 12 12 12 24 24 24 24 24 ...
## $ ue : Factor w/ 40 levels "1.CrAaPR-01.27.24.1",..: 1 9 17 25 33 1 9 17 25 33 ...
## $ cond: Factor w/ 8 levels "1.CrAaPR-01.27.24",..: 1 1 1 1 1 1 1 1 1 1 ...
tbi |>
group_by(cond) |>
do({
getInitial(sev ~ model(time, A, x0, r),
data = .) |>
as.list() |>
as.data.frame()
}) |>
mutate(Ass = inv_logit(A))
## # A tibble: 8 × 5
## # Groups: cond [8]
## cond A x0 r Ass
## <fct> <dbl> <dbl> <dbl> <dbl>
## 1 1.CrAaPR-01.27.24 5 58.2 0.0445 0.993
## 2 2.CrAaPR-01.27.24 1.44 52.7 0.131 0.808
## 3 1.CrAaPR-27.27.24 0.545 80 0.0273 0.633
## 4 2.CrAaPR-27.27.24 1.44 58.2 0.0791 0.808
## 5 1.CrAaPR-40.27.24 4.11 52.7 0.165 0.984
## 6 2.CrAaPR-40.27.24 4.55 41.8 0.0445 0.990
## 7 1.CrAlPR-73.27.24 3.66 47.3 0.0618 0.975
## 8 2.CrAlPR-73.27.24 0.991 52.7 0.0273 0.729
#-----------------------------------------------------------------------
# Fit for all experimental conditions.
tb_fits <-
tbi |>
group_by(cond) |>
summarise(fit = list({
tb_local <- pick(everything())
# Starting values.
start <- getInitial(sev ~ model(time, A, x0, r),
data = tb_local)
# Fit with the Levenberg-Marquardt algorithm.
# n0 <-
# minpack.lm::nlsLM(
# formula = sev ~ model(time, A, x0, r),
# # data = cur_data(),
# data = tb_local,
# start = start,
# control = nls.control(maxiter = 1000,
# minFactor = 1e-8)
# )
n0 <- gslnls::gsl_nls(
# fn = sev ~ (1/(1 + exp(-A))) * (1 - exp(-r * (time - x0))) * (time > x0),
# To use the analytical gradient.
fn = sev ~ model(time, A, x0, r),
data = tb_local,
start = start,
algorithm = "lm",
control = list(scale = "levenberg", maxiter = 1000)
)
n0
}))
# Inspect the fitted objects.
str(tb_fits, max.level = 2)
## tibble [8 × 2] (S3: tbl_df/tbl/data.frame)
## $ cond: Factor w/ 8 levels "1.CrAaPR-01.27.24",..: 1 2 3 4 5 6 7 8
## $ fit :List of 8
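# Quick convergence check across the eight fits (a sketch; `isConv` is
# the same field reported by broom::glance()).
map_lgl(tb_fits$fit, ~ broom::glance(.x)$isConv)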
# Parameter estimates.
map(tb_fits$fit, broom::tidy)
## [[1]]
## # A tibble: 3 × 5
## term estimate std.error statistic p.value
## <chr> <dbl> <dbl> <dbl> <dbl>
## 1 A 20.3 1.03e+8 0.000000197 1.00e+ 0
## 2 x0 57.7 3.44e+0 16.8 2.10e-17
## 3 r 0.0461 2.53e-2 1.82 7.84e- 2
##
## [[2]]
## # A tibble: 3 × 5
## term estimate std.error statistic p.value
## <chr> <dbl> <dbl> <dbl> <dbl>
## 1 A 1.58 0.618 2.56 1.55e- 2
## 2 x0 47.5 1.93 24.7 2.09e-22
## 3 r 0.0834 0.0436 1.91 6.46e- 2
##
## [[3]]
## # A tibble: 3 × 5
## term estimate std.error statistic p.value
## <chr> <dbl> <dbl> <dbl> <dbl>
## 1 A -0.486 0.567 -0.857 3.98e- 1
## 2 x0 83.2 3.20 26.0 4.30e-23
## 3 r 0.108 0.169 0.635 5.30e- 1
##
## [[4]]
## # A tibble: 3 × 5
## term estimate std.error statistic p.value
## <chr> <dbl> <dbl> <dbl> <dbl>
## 1 A 1.07 0.270 3.98 3.73e- 4
## 2 x0 60.5 5.18 11.7 4.45e-13
## 3 r 0.208 0.721 0.289 7.75e- 1
##
## [[5]]
## # A tibble: 3 × 5
## term estimate std.error statistic p.value
## <chr> <dbl> <dbl> <dbl> <dbl>
## 1 A 3.89 1.50 2.60 0.0140
## 2 x0 59.5 47207. 0.00126 0.999
## 3 r 0.629 11741. 0.0000535 1.00
##
## [[6]]
## # A tibble: 3 × 5
## term estimate std.error statistic p.value
## <chr> <dbl> <dbl> <dbl> <dbl>
## 1 A 2.87 2.18 1.32 1.97e- 1
## 2 x0 44.7 3.56 12.5 6.74e-14
## 3 r 0.0610 0.0347 1.76 8.84e- 2
##
## [[7]]
## # A tibble: 3 × 5
## term estimate std.error statistic p.value
## <chr> <dbl> <dbl> <dbl> <dbl>
## 1 A 3.75 3.79 0.989 3.30e- 1
## 2 x0 47.1 1.85 25.5 8.08e-23
## 3 r 0.0611 0.0225 2.71 1.07e- 2
##
## [[8]]
## # A tibble: 3 × 5
## term estimate std.error statistic p.value
## <chr> <dbl> <dbl> <dbl> <dbl>
## 1 A 1.65 3.50 0.471 0.640
## 2 x0 46.6 6.55 7.11 0.0000000451
## 3 r 0.0184 0.0196 0.938 0.355
map(tb_fits$fit, ~as.data.frame(confint.default(.)))
## [[1]]
## 2.5 % 97.5 %
## A -2.021824e+08 2.021825e+08
## x0 5.096303e+01 6.445873e+01
## r -3.582357e-03 9.570369e-02
##
## [[2]]
## 2.5 % 97.5 %
## A 0.369598162 2.7912380
## x0 43.774379278 51.3216076
## r -0.002004125 0.1688818
##
## [[3]]
## 2.5 % 97.5 %
## A -1.5982782 0.6255286
## x0 76.9639839 89.5099731
## r -0.2244084 0.4395261
##
## [[4]]
## 2.5 % 97.5 %
## A 0.5445334 1.602374
## x0 50.3444438 70.645870
## r -1.2055758 1.622333
##
## [[5]]
## 2.5 % 97.5 %
## A 9.569205e-01 6.826727
## x0 -9.246432e+04 92583.261813
## r -2.301217e+04 23013.431266
##
## [[6]]
## 2.5 % 97.5 %
## A -1.395830170 7.1347844
## x0 37.720428935 51.6893831
## r -0.007035714 0.1290324
##
## [[7]]
## 2.5 % 97.5 %
## A -3.67589864 11.1678898
## x0 43.44894312 50.6914533
## r 0.01694395 0.1052514
##
## [[8]]
## 2.5 % 97.5 %
## A -5.20961637 8.5100285
## x0 33.74787677 59.4136317
## r -0.02005048 0.0568709
# Summary of the fits.
tb_gof <-
tb_fits |>
mutate(a = map(fit, ~broom::glance(.)),
R2 = map_dbl(fit, function(n0) {
cor(fitted(n0), fitted(n0) + residuals(n0))^2
})) |>
unnest(a) |>
select(-any_of(c("fit", "isConv", "finTol")))
tb_gof
## # A tibble: 8 × 9
## cond sigma logLik AIC BIC deviance df.residual nobs R2
## <fct> <dbl> <dbl> <dbl> <dbl> <dbl> <int> <int> <dbl>
## 1 1.CrAaPR-01.27.24 0.212 6.13 -4.26 1.96 1.44 32 35 0.807
## 2 2.CrAaPR-01.27.24 0.277 -3.16 14.3 20.5 2.45 32 35 0.663
## 3 1.CrAaPR-27.27.24 0.244 1.25 5.51 11.7 1.91 32 35 0.306
## 4 2.CrAaPR-27.27.24 0.157 16.7 -25.4 -19.2 0.788 32 35 0.844
## 5 1.CrAaPR-40.27.24 0.0928 35.1 -62.2 -56.0 0.276 32 35 0.965
## 6 2.CrAaPR-40.27.24 0.283 -3.91 15.8 22.0 2.56 32 35 0.653
## 7 1.CrAlPR-73.27.24 0.219 5.10 -2.20 4.02 1.53 32 35 0.804
## 8 2.CrAlPR-73.27.24 0.217 5.33 -2.66 3.56 1.51 32 35 0.584
# Parameter estimates.
tb_param <-
tb_fits |>
mutate(theta = map(fit, ~as.data.frame(as.list(coef(.))))) |>
unnest(theta) |>
select(-any_of(c("fit"))) |>
mutate(Ass = inv_logit(A))
tb_param
## # A tibble: 8 × 5
## cond A x0 r Ass
## <fct> <dbl> <dbl> <dbl> <dbl>
## 1 1.CrAaPR-01.27.24 20.3 57.7 0.0461 1.00
## 2 2.CrAaPR-01.27.24 1.58 47.5 0.0834 0.829
## 3 1.CrAaPR-27.27.24 -0.486 83.2 0.108 0.381
## 4 2.CrAaPR-27.27.24 1.07 60.5 0.208 0.745
## 5 1.CrAaPR-40.27.24 3.89 59.5 0.629 0.980
## 6 2.CrAaPR-40.27.24 2.87 44.7 0.0610 0.946
## 7 1.CrAlPR-73.27.24 3.75 47.1 0.0611 0.977
## 8 2.CrAlPR-73.27.24 1.65 46.6 0.0184 0.839
# IMPORTANT: The rates `r` are relative. They are not directly
# comparable because the asymptotes may differ. The growth rate of the
# function f(x) at the point x = x0 (from the right) is given by the
# derivative below.
# Expression for the rate at the point x = x0.
# f(time) = (1/(1 + exp(-A))) * (1 - (exp(-r * (time - x0)))) * (time > x0)
D(expression((1/(1 + exp(-A))) * (1 - (exp(-r * (time - x0))))),
name = "time")
## (1/(1 + exp(-A))) * (exp(-r * (time - x0)) * r)
# Compute the rates at the point x = x0.
tb_param |>
mutate(rate_at_x0 = {
time <- x0
(1/(1 + exp(-A))) * (exp(-r * (time - x0)) * r)
})
## # A tibble: 8 × 6
## cond A x0 r Ass rate_at_x0
## <fct> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 1.CrAaPR-01.27.24 20.3 57.7 0.0461 1.00 0.0461
## 2 2.CrAaPR-01.27.24 1.58 47.5 0.0834 0.829 0.0692
## 3 1.CrAaPR-27.27.24 -0.486 83.2 0.108 0.381 0.0410
## 4 2.CrAaPR-27.27.24 1.07 60.5 0.208 0.745 0.155
## 5 1.CrAaPR-40.27.24 3.89 59.5 0.629 0.980 0.616
## 6 2.CrAaPR-40.27.24 2.87 44.7 0.0610 0.946 0.0577
## 7 1.CrAlPR-73.27.24 3.75 47.1 0.0611 0.977 0.0597
## 8 2.CrAlPR-73.27.24 1.65 46.6 0.0184 0.839 0.0154
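# At time = x0 the exponential term equals 1, so the rate from the right
# reduces to inv_logit(A) * r. Delta-method intervals for this rate in
# each condition (a sketch, reusing gslnls::confintd() as done above for
# the asymptote).
map(tb_fits$fit,
    ~ gslnls::confintd(.x, expr = "(1/(1 + exp(-A))) * r", level = 0.95))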
#-----------------------------------------------------------------------
# Prediction.
# Experimental conditions for the prediction.
x_seq <- c(seq(0, max(tbi$time), length.out = 151)) |>
append(values = tb_param$x0) |>
sort()
# Making the predictions.
tb_pred <-
tb_fits |>
group_by(cond) |>
reframe(time = x_seq,
sev = {
# print(class(.$fit[[1]]))
predict(fit[[1]],
newdata = data.frame(time = x_seq))
})
str(tb_pred)
## tibble [1,272 × 3] (S3: tbl_df/tbl/data.frame)
## $ cond: Factor w/ 8 levels "1.CrAaPR-01.27.24",..: 1 1 1 1 1 1 1 1 1 1 ...
## $ time: num [1:1272] 0 0.827 1.653 2.48 3.307 ...
## $ sev : num [1:1272] 0 0 0 0 0 0 0 0 0 0 ...
# Bring in the other identifying variables.
tb_pred <- left_join(tb_pred,
distinct(tbi, exp, iso, tem, mol, cond))
## Joining with `by = join_by(cond)`
str(tb_pred)
## tibble [1,272 × 7] (S3: tbl_df/tbl/data.frame)
## $ cond: Factor w/ 8 levels "1.CrAaPR-01.27.24",..: 1 1 1 1 1 1 1 1 1 1 ...
## $ time: num [1:1272] 0 0.827 1.653 2.48 3.307 ...
## $ sev : num [1:1272] 0 0 0 0 0 0 0 0 0 0 ...
## $ exp : Factor w/ 2 levels "1","2": 1 1 1 1 1 1 1 1 1 1 ...
## $ iso : Factor w/ 4 levels "CrAaPR-01","CrAaPR-27",..: 1 1 1 1 1 1 1 1 1 1 ...
## $ tem : num [1:1272] 27 27 27 27 27 27 27 27 27 27 ...
## $ mol : num [1:1272] 24 24 24 24 24 24 24 24 24 24 ...
# Graphical result.
tbi |>
ggplot(aes(x = time, y = sev)) +
facet_grid(exp ~ iso) +
geom_line(aes(group = rept), alpha = 0.10) +
stat_summary(geom = "line", fun = "mean",
color = "darkgreen",
alpha = 0.40) +
geom_point() +
labs(title = "Temperatura 27°C e Molhamento 24h") +
geom_line(data = tb_pred, aes(y = sev, group = cond),
color = "red",
linewidth = 1)

# Annotations with the parameters.
tb_ann <-
tb_param |>
mutate(
x0 = sprintf("%0.1f", x0),
r = sprintf("%0.4f", r),
Ass = sprintf("%0.4f", Ass)
) |>
nest(params = c("x0", "r", "Ass")) |>
left_join(distinct(tbi, exp, iso, tem, mol, cond)) |>
mutate(x = 0, y = 0.95)
## Joining with `by = join_by(cond)`
tb_ann
## # A tibble: 8 × 9
## cond A params exp iso tem mol x y
## <fct> <dbl> <list> <fct> <fct> <dbl> <dbl> <dbl> <dbl>
## 1 1.CrAaPR-01.27.24 20.3 <tibble [1 × 3]> 1 CrAaP… 27 24 0 0.95
## 2 2.CrAaPR-01.27.24 1.58 <tibble [1 × 3]> 2 CrAaP… 27 24 0 0.95
## 3 1.CrAaPR-27.27.24 -0.486 <tibble [1 × 3]> 1 CrAaP… 27 24 0 0.95
## 4 2.CrAaPR-27.27.24 1.07 <tibble [1 × 3]> 2 CrAaP… 27 24 0 0.95
## 5 1.CrAaPR-40.27.24 3.89 <tibble [1 × 3]> 1 CrAaP… 27 24 0 0.95
## 6 2.CrAaPR-40.27.24 2.87 <tibble [1 × 3]> 2 CrAaP… 27 24 0 0.95
## 7 1.CrAlPR-73.27.24 3.75 <tibble [1 × 3]> 1 CrAlP… 27 24 0 0.95
## 8 2.CrAlPR-73.27.24 1.65 <tibble [1 × 3]> 2 CrAlP… 27 24 0 0.95
tbi |>
ggplot(aes(x = time, y = sev)) +
facet_grid(exp ~ iso) +
geom_line(aes(group = rept), alpha = 0.10) +
stat_summary(geom = "line", fun = "mean",
color = "darkgreen",
alpha = 0.40) +
ggpp::geom_table(
data = tb_ann,
mapping = aes(x = x, y = y, label = params),
hjust = 0, vjust = 1
) +
geom_point() +
geom_line(data = tb_pred, aes(y = sev, group = cond),
color = "red",
linewidth = 1) +
labs(title = "Temperatura 27°C e Molhamento 24h",
x = "PerÃodo (horas)",
y = "Severidade")

#-----------------------------------------------------------------------