R version 3.2.3 (2015-12-10) -- "Wooden Christmas-Tree"
Copyright (C) 2015 The R Foundation for Statistical Computing
Platform: x86_64-pc-linux-gnu (64-bit)

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

  Natural language support but running in an English locale

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.

> 
> # The purpose of this example is to make using nlm a bit easier.
> # (1) We do not have to provide analytic derivatives.  It will
> #     approximate them by finite differences if we don't.  This
> #     won't work quite as well, but it will (usually) work.
> # (2) We do not have to provide the data as an argument to mlogl.
> #     We can just look them up in the R global environment.  Many
> #     programmers scorn global variables, and this should not be
> #     done if you are writing code to be used by others.  But it
> #     is OK for one-off projects.  (A sketch of the alternative,
> #     passing the data through nlm, appears at the end of this
> #     transcript.)
> 
> # these are the data that were simulated in likelihood.R
> nA <- 71
> nB <- 23
> nO <- 100
> nAB <- 6
> 
> # this is the minimal mlogl that works
> # Welllllll.  Taking out the stopifnot's would also work, but they
> # are there for your protection.  If you leave them out you will
> # probably use the function wrong and get errors that these checks
> # would catch
> 
> mlogl <- function(theta) {
+     stopifnot(is.numeric(theta))
+     stopifnot(is.finite(theta))
+     stopifnot(length(theta) == 2)
+     alpha <- theta[1]
+     beta <- theta[2]
+     gamma <- 1 - alpha - beta
+     if (alpha <= 0 | beta <= 0 | gamma <= 0) return(Inf)
+     return(- (nA * log(alpha^2 + 2 * alpha * gamma) +
+         nB * log(beta^2 + 2 * beta * gamma) +
+         nO * log(gamma^2) +
+         nAB * log(2 * alpha * beta)))
+ 
+ }
> 
> # everything from here on is just the same as in likelihood.R
> 
> nout <- nlm(mlogl, c(1/3, 1/3), hessian = TRUE)
Warning messages:
1: In nlm(mlogl, c(1/3, 1/3), hessian = TRUE) :
  NA/Inf replaced by maximum positive value
2: In nlm(mlogl, c(1/3, 1/3), hessian = TRUE) :
  NA/Inf replaced by maximum positive value
3: In nlm(mlogl, c(1/3, 1/3), hessian = TRUE) :
  NA/Inf replaced by maximum positive value
4: In nlm(mlogl, c(1/3, 1/3), hessian = TRUE) :
  NA/Inf replaced by maximum positive value
> nout <- nlm(mlogl, nout$estimate, hessian = TRUE)
> nout$estimate
[1] 0.21597208 0.07540479
> 
> # was (0.21597208 0.07540475) in likelihood.Rout
> 
> alpha.hat <- nout$estimate[1]
> beta.hat <- nout$estimate[2]
> gamma.hat <- 1 - alpha.hat - beta.hat
> 
> # Wald type standard errors from inverse observed Fisher information
> inv.fisher <- solve(nout$hessian)
> se.inv.fisher <- sqrt(diag(inv.fisher))
> se.inv.fisher
[1] 0.02194870 0.01348812
> 
> # to get standard error for gamma.hat we need to know
> #     var(a + b X) = b^2 var(X)    for constants a and b
> #     var(X + Y) = var(X) + var(Y) + 2 cov(X, Y)
> #     var(X - Y) = var(X) + var(Y) - 2 cov(X, Y)
> # so var(1 - alpha.hat - beta.hat) = var(alpha.hat + beta.hat)
> #     = var(alpha.hat) + var(beta.hat) + 2 cov(alpha.hat, beta.hat)
> se.gamma.hat <- sqrt(inv.fisher[1,1] + inv.fisher[2,2] + 2 * inv.fisher[1,2])
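
# Editorial sketch (not run in this transcript): the same standard error can
# be written in the general delta-method form sqrt(t(grad) %*% inv.fisher %*% grad),
# where grad = c(-1, -1) is the gradient of the map (alpha, beta) -> 1 - alpha - beta.
# The names "grad" and "se.gamma.hat.delta" are introduced only for
# illustration; the result should agree with se.gamma.hat computed above.
grad <- c(-1, -1)
se.gamma.hat.delta <- sqrt(drop(t(grad) %*% inv.fisher %*% grad))
all.equal(se.gamma.hat.delta, se.gamma.hat)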
err.") > round(foo, 3) estimate std. err. alpha 0.216 0.022 beta 0.075 0.013 gamma 0.709 0.024 > > # and, of course, point estimate +/- 1.96 standard errors is an asymptotic > # 95% confidence interval > bar <- cbind(foo[,1] - qnorm(0.975) * foo[,2], + foo[,1] + qnorm(0.975) * foo[,2]) > rownames(bar) <- rownames(foo) > colnames(bar) <- c("lower", "upper") > round(bar, 3) lower upper alpha 0.173 0.259 beta 0.049 0.102 gamma 0.661 0.756 > > > > proc.time() user system elapsed 0.121 0.017 0.243