I am trying to estimate the MLE of the gamma_estimate parameter in R using the optim() function, with a lower bound of 1 and an upper bound of 1.9.

However, with the "Brent" and "L-BFGS-B" methods I consistently get the lower-bound value back, regardless of my initial values and bounds. Using the "Nelder-Mead" method without bounds, I get an estimate of 0.07711332, which is also not reliable. I also get an error when using the "SANN" method.

Can anyone suggest how I can properly estimate the MLE for gamma_estimate in my code?

gamma_mle <- function(gamma_estimates) {

  # Initialization: current parameter values are read from the last row of `test`
  Mu <- test[nrow(test), 2:4] %>% as.double()
  var <- test[nrow(test), 5:7] %>% as.double()
  pi <- test[nrow(test), 8:10] %>% as.double()
  alpha <- test[nrow(test), 11:13] %>% as.double()
  beta <- test[nrow(test), 14:16] %>% as.double()
  loglike <- c(0, test[nrow(test), 17]) %>% as.double()
  read_cover <- 100  # fixed read coverage
  n <- 2
  k <- 3  # number of mixture components

  
  ## E step: responsibilities tau[[i]] = pi_i * f_i(x) / sum_j pi_j * f_j(x)
  tau <- vector("list", k)
  total <- numeric(length(data))  # mixture density at each observation

  for (i in 1:k) {
    tau[[i]] <- pi[i] * dbeta(data, alpha[i], beta[i])
  }

  for (i in 1:k) {
    total <- total + tau[[i]]
  }

  for (i in 1:k) {
    tau[[i]] <- tau[[i]] / total
  }
  
  # Update Rule
  # Update mixing coefficients
  for (i in 1:k) {
    pi[[i]] <- sum(tau[[i]]) / length(data)
  }
  
  # Update Mu
  for (i in 1:k) {
    Mu[[i]] <- sum(tau[[i]] * data)/sum(tau[[i]])
  }
  
  # Update variance
  for (i in 1:k) {
    var[[i]] <- ((Mu[[i]] * (1 - Mu[[i]])) * gamma_estimates) / read_cover
  }
  
  # Convert Mu, var to updated alpha, beta
  updated <- vector("list", k)
  for (i in 1:k) {
    updated[[i]] <- estBetaParams(Mu[[i]], var[[i]]) %>% unlist()
    alpha[i] <- updated[[i]][1]
    beta[i] <- updated[[i]][2]
  }
  
  ## M step
  # Mixture log-likelihood: log L = sum over x of log( sum_i pi_i * dbeta(x, alpha_i, beta_i) )
  mixture <- numeric(length(data))
  for (i in 1:k) {
    mixture <- mixture + pi[i] * dbeta(data, alpha[i], beta[i])
  }
  loglike <- sum(log(mixture))
  return(loglike)
}
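
gamma_mle() also calls an estBetaParams() helper that is not shown above. For reproducibility, here is a minimal sketch of the standard method-of-moments conversion from a (mean, variance) pair to Beta shape parameters, which is what that helper is assumed to do:

estBetaParams <- function(mu, var) {
  # Method of moments for Beta(alpha, beta); requires var < mu * (1 - mu)
  alpha <- ((1 - mu) / var - 1 / mu) * mu^2
  beta <- alpha * (1 / mu - 1)
  list(alpha = alpha, beta = beta)
}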

And here is the code I used for the optim() calls.

gamma_estimates <- optim(fn = gamma_mle,
                         par = c(1.50), # starting value for gamma_estimates
                         lower = c(1.000), # lower bound on the parameter
                         upper = c(1.990), # upper bound on the parameter
                         hessian = FALSE, # TRUE would return the Hessian for SEs
                         method = "Brent",
                         control = list(maxit = 10000, parscale = 0.00001)
)

gamma_estimates <- optim(fn = gamma_mle,
                         par = c(1.30), # starting value for gamma_estimates
                         lower = c(1.00), # lower bound on the parameter
                         upper = c(1.6), # upper bound on the parameter
                         hessian = FALSE,
                         method = "L-BFGS-B"
)

gamma_estimates <- optim(fn = gamma_mle,
                         par = c(1.5), # starting value for gamma_estimates
                         hessian = FALSE,
                         method = "Nelder-Mead",
                         control = list(maxit = 10000, parscale = 0.00001)
)

gamma_estimates <- optim(fn = gamma_mle,
                         par = c(1.01), # starting value for gamma_estimates
                         #lower = c(-Inf), # lower bound on the parameter
                         #upper = c(Inf), # upper bound on the parameter
                         hessian = TRUE, # return the Hessian for SEs
                         method = "SANN"
)
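
As a sanity check, a self-contained toy run of a bounded one-parameter fit (with an invented quadratic objective, not my real likelihood) does converge to the interior maximum. Note that optim() minimizes by default, so a log-likelihood has to be maximized via control = list(fnscale = -1) or by returning its negative:

# Toy objective invented for illustration only (not the real likelihood).
# optim() minimizes by default; fnscale = -1 turns this into maximization.
toy_loglike <- function(g) -(g - 1.4)^2  # peaks at g = 1.4
fit <- optim(fn = toy_loglike,
             par = c(1.5),
             lower = c(1.0),
             upper = c(1.9),
             method = "Brent",
             control = list(fnscale = -1))
fit$par  # returns approximately 1.4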
  • Are the E/M steps part of the expectation-maximization algorithm? If so, that algorithm is iterative and you should not use `optim`. – det Jan 27 '23 at 13:46
  • I am implementing Expectation-Maximization (EM) and optim() separately for different parameters. With optim(), I am only estimating the gamma parameter based on the log-likelihood. – hollyjolly Jan 28 '23 at 10:41

0 Answers