# file MASS/add.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Generic for single-term additions to a fitted model; dispatches on the
# class of `object`.
addterm <- function(object, ...)
{
    UseMethod("addterm")
}

# Default addterm method: for each term in `scope`, refit the model with
# that term added (via update()) and tabulate the extra df and the AIC
# reported by extractAIC() with penalty `k`.  With test = "Chisq" a
# likelihood-ratio test of each addition is appended.
#
# object: a fitted model supporting update(), formula() and extractAIC().
# scope:  character vector of terms, or a formula converted via add.scope().
# scale:  passed on to extractAIC (e.g. for Mallows' Cp).
# sorted: if TRUE, order the result by AIC.
# trace:  if TRUE, report each candidate term as it is tried.
addterm.default <-
  function(object, scope, scale = 0, test = c("none", "Chisq"),
           k = 2, sorted = FALSE, trace = FALSE, ...)
{
  if(missing(scope) || is.null(scope)) stop("no terms in scope")
  if(!is.character(scope))
    scope <- add.scope(object, update.formula(object, scope))
  if(!length(scope))
    stop("no terms in scope for adding to object")
  ns <- length(scope)
  # row 1 is the unmodified model, then one row per candidate term
  ans <- matrix(nrow = ns + 1, ncol = 2,
                dimnames = list(c("<none>", scope), c("df", "AIC")))
  ans[1,  ] <- extractAIC(object, scale, k = k, ...)
  for(i in seq(ns)) {
    tt <- scope[i]
    if(trace) cat("trying +", tt, "\n")
    nfit <- update(object, as.formula(paste("~ . +", tt)))
    ans[i+1,  ] <- extractAIC(nfit, scale, k = k, ...)
  }
  # extra parameters used by each added term
  dfs <- ans[,1] - ans[1,1]
  dfs[1] <- NA
  aod <- data.frame(Df = dfs, AIC = ans[,2])
  if(sorted) aod <- aod[order(aod$AIC), ]
  test <- match.arg(test)
  if(test == "Chisq") {
    # recover the deviance from AIC = deviance + k*df, then form LRT stats
    dev <- ans[,2] - k*ans[, 1]
    dev <- dev[1] - dev; dev[1] <- NA
    nas <- !is.na(dev)
    P <- dev
    P[nas] <- 1 - pchisq(dev[nas], dfs[nas])
    aod[, c("LRT", "Pr(Chi)")] <- list(dev, P)
  }
  head <- c("Single term additions", "\nModel:",
            deparse(as.vector(formula(object))))
  if(scale > 0)
    head <- c(head, paste("\nscale: ", format(scale), "\n"))
  class(aod) <- c("anova", "data.frame")
  attr(aod, "heading") <- head
  aod
}

# addterm method for lm fits: uses add1.lm() for the sums of squares, then
# computes AIC (or Mallows' Cp when scale > 0) directly from the RSS.
# test = "Chisq" treats Sum of Sq/scale as chi-squared; test = "F" gives the
# usual F tests against the residual mean square of the enlarged model.
addterm.lm <-
  function(object, scope, scale = 0, test = c("none", "Chisq", "F"),
           k = 2, sorted = FALSE, ...)
{
  # F statistics for adding each term: reduction in RSS (dev) compared with
  # the residual mean square of the model after adding the term.
  Fstat <- function(table, RSS, rdf) {
    dev <- table$"Sum of Sq"
    df <- table$Df
    rms <- (RSS - dev)/(rdf - df)
    Fs <- (dev/df)/rms
    Fs[df < 1e-4] <- NA               # guard terms contributing no df
    P <- Fs
    nnas <- !is.na(Fs)
    P[nnas] <- 1 - pf(Fs[nnas], df[nnas], rdf - df[nnas])
    list(Fs=Fs, P=P)
  }

  if(missing(scope) || is.null(scope)) stop("no terms in scope")
  # drop column 4 (the AIC-like column of add1.lm); it is recomputed below
  aod <- add1.lm(object, scope=scope, scale=scale)[ , -4]
  dfs <- c(0, aod$Df[-1]) + object$rank; RSS <- aod$RSS
  n <- length(object$residuals)
  # AIC up to an additive constant; with a known scale this is Cp
  if(scale > 0) aic <- RSS/scale - n + k*dfs
  else aic <- n * log(RSS/n) + k*dfs
  aod$AIC <- aic
  if(sorted) aod <- aod[order(aod$AIC), ]
  if(scale > 0) names(aod) <- c("Df", "Sum of Sq", "RSS", "Cp")
  test <- match.arg(test)
  if(test == "Chisq") {
    dev <- aod$"Sum of Sq"
    nas <- !is.na(dev)
    dev[nas] <- 1 - pchisq(dev[nas]/scale, aod$Df[nas])
    aod[, "Pr(Chi)"] <- dev
  } else if(test == "F") {
    rdf <- object$df.resid            # partial match of df.residual
    aod[, c("F Value", "Pr(F)")] <- Fstat(aod, aod$RSS[1], rdf)
  }
  head <- c("Single term additions", "\nModel:",
            deparse(as.vector(formula(object))))
  if(scale > 0)
    head <- c(head, paste("\nscale: ", format(scale), "\n"))
  class(aod) <- c("anova", "data.frame")
  attr(aod, "heading") <- head
  aod
}

# addterm for negative-binomial and survreg fits: delegate to the default
# method, which refits with update() and compares extractAIC() values.
addterm.negbin <- addterm.survreg <-
  function(object, ...)  addterm.default(object, ...)

# addterm method for glm fits.  Instead of calling update() for each term,
# it builds one model matrix for the full (all-terms-added) formula and
# refits with glm.fit() on the relevant column subsets, which avoids
# repeated model-frame construction.
# test = "Chisq" appends scaled-deviance LRTs using the model's dispersion.
addterm.glm <-
  function(object, scope, scale = 0, test = c("none", "Chisq"),
           k = 2, sorted = FALSE, trace = FALSE, ...)
{
  if(missing(scope) || is.null(scope)) stop("no terms in scope")
  if(!is.character(scope))
    scope <- add.scope(object, update.formula(object, scope))
  if(!length(scope))
    stop("no terms in scope for adding to object")
  oTerms <- attr(object$terms, "term.labels")
  int <- attr(object$terms, "intercept")
  ns <- length(scope)
  dfs <- dev <- numeric(ns+1)
  names(dfs) <- names(dev) <- c("<none>", scope)
  dfs[1] <- object$rank
  dev[1] <- object$deviance
  # formula with every candidate term added at once
  add.rhs <- paste(scope, collapse = "+")
  add.rhs <- eval(parse(text = paste("~ . +", add.rhs)))
  new.form <- update.formula(object, add.rhs)
  oc <- object$call
  Terms <- terms(new.form)
  oc$formula <- Terms
  # minimal stand-in object so model.frame() dispatches on the glm method
  fob <- list(call = oc)
  class(fob) <- class(object)
  x <- model.matrix(Terms, model.frame(fob, xlev = object$xlevels),
                    contrasts = object$contrasts)
  n <- nrow(x)
  y <- object$y
  m <- model.frame(object)
  if(is.null(y)) y <- model.response(m, "numeric")
  wt <- object$prior.weights
  if(is.null(wt)) wt <- rep(1, n)
  Terms <- attr(Terms, "term.labels")
  asgn <- attr(x, "assign")
  # columns belonging to terms already in the model (plus the intercept)
  ousex <- match(asgn, match(oTerms, Terms), 0) > 0
  if(int) ousex[1] <- TRUE
  for(tt in scope) {
    if(trace) cat("trying +", tt, "\n")
    usex <- match(asgn, match(tt, Terms), 0) > 0
    X <- x[, usex|ousex, drop = FALSE]
    z <-  glm.fit(X, y, wt, offset=object$offset,
                  family=object$family, control=object$control)
    dfs[tt] <- z$rank
    dev[tt] <- z$deviance
  }
  if (is.null(scale) || scale == 0)
    dispersion <- summary(object, dispersion = NULL)$dispersion
  else dispersion <- scale
  # -2 log-likelihood up to a constant (special-cased for gaussian fits)
  if(object$family$family == "gaussian") {
    if(scale > 0) loglik <- dev/scale - n
    else loglik <- n * log(dev/n)
  } else loglik <- dev/dispersion
  aic <- loglik + k * dfs
  dfs <- dfs - dfs[1]
  dfs[1] <- NA
  aod <- data.frame(Df = dfs, Deviance = dev, AIC = aic,
                    row.names = names(dfs))
  if(sorted) aod <- aod[order(aod$AIC), ]
  test <- match.arg(test)
  if(test == "Chisq") {
    dev <- loglik[1] - loglik
    dev[1] <- NA
    aod[, "LRT"] <- dev
    nas <- !is.na(dev)
    dev[nas] <- 1 - pchisq(dev[nas]/dispersion, aod$Df[nas])
    aod[, "Pr(Chi)"] <- dev
  }
  head <- c("Single term additions", "\nModel:",
            deparse(as.vector(formula(object))))
  if(scale > 0)
    head <- c(head, paste("\nscale: ", format(scale), "\n"))
  class(aod) <- c("anova", "data.frame")
  attr(aod, "heading") <- head
  aod
}

# addterm is not supported for multivariate linear models.
addterm.mlm <- function(...)
{
    stop("no addterm method implemented for mlm models")
}

# Generic for single-term deletions from a fitted model.
dropterm <- function(object, ...)
{
    UseMethod("dropterm")
}

# Default dropterm method: for each term in `scope`, refit the model with
# that term removed (via update()) and tabulate the df freed and the AIC
# from extractAIC() with penalty `k`.  test = "Chisq" appends LRT statistics.
# `scope` defaults to drop.scope(object) and must be a subset of the
# model's term labels.
dropterm.default <-
  function(object, scope, scale = 0, test = c("none", "Chisq"),
           k = 2, sorted = FALSE, trace = FALSE, ...)
{
  tl <- attr(object$terms, "term.labels")
  if(missing(scope)) scope <- drop.scope(object)
  else {
    if(!is.character(scope))
      scope <- attr(terms(update.formula(object, scope)), "term.labels")
    if(!all(match(scope, tl, FALSE)))
      stop("scope is not a subset of term labels")
  }
  ns <- length(scope)
  # row 1 is the unmodified model, then one row per dropped term
  ans <- matrix(nrow = ns + 1, ncol = 2,
                dimnames =  list(c("<none>", scope), c("df", "AIC")))
  ans[1,  ] <- extractAIC(object, scale, k = k, ...)
  for(i in seq(ns)) {
    tt <- scope[i]
    if(trace) cat("trying -", tt, "\n")
    nfit <- update(object, as.formula(paste("~ . -", tt)))
    ans[i+1,  ] <- extractAIC(nfit, scale, k = k, ...)
  }
  # parameters freed by dropping each term
  dfs <- ans[1,1] - ans[,1]
  dfs[1] <- NA
  aod <- data.frame(Df = dfs, AIC = ans[,2])
  if(sorted) aod <- aod[order(aod$AIC), ]
  test <- match.arg(test)
  if(test == "Chisq") {
    # recover the deviance from AIC = deviance + k*df, then form LRT stats
    dev <- ans[, 2] - k*ans[, 1]
    dev <- dev - dev[1] ; dev[1] <- NA
    nas <- !is.na(dev)
    P <- dev
    P[nas] <- 1 - pchisq(dev[nas], dfs[nas])
    aod[, c("LRT", "Pr(Chi)")] <- list(dev, P)
  }
  head <- c("Single term deletions", "\nModel:",
            deparse(as.vector(formula(object))))
  if(scale > 0)
    head <- c(head, paste("\nscale: ", format(scale), "\n"))
  class(aod) <- c("anova", "data.frame")
  attr(aod, "heading") <- head
  aod
}

# dropterm method for lm fits: uses drop1.lm() for the sums of squares, then
# computes AIC (or Mallows' Cp when scale > 0) from the RSS.
# test = "Chisq" treats Sum of Sq/scale as chi-squared; test = "F" performs
# F tests against the residual mean square of the full model.
dropterm.lm <-
  function(object, scope = drop.scope(object), scale = 0,
           test = c("none", "Chisq", "F"),
           k = 2, sorted = FALSE, ...)
{
  # drop column 4 of the drop1.lm table; the AIC column is recomputed below
  aod <- drop1.lm(object, scope=scope, scale=scale)[, -4]
  dfs <-  object$rank - c(0, aod$Df[-1]); RSS <- aod$RSS
  n <- length(object$residuals)
  # AIC up to an additive constant; with a known scale this is Cp
  aod$AIC <- if(scale > 0)RSS/scale - n + k*dfs
  else n * log(RSS/n) + k*dfs
  if(sorted) aod <- aod[order(aod$AIC), ]
  if(scale > 0) names(aod) <- c("Df", "Sum of Sq", "RSS", "Cp")
  test <- match.arg(test)
  if(test == "Chisq") {
    dev <- aod$"Sum of Sq"
    nas <- !is.na(dev)
    dev[nas] <- 1 - pchisq(dev[nas]/scale, aod$Df[nas])
    aod[, "Pr(Chi)"] <- dev
  } else if(test == "F") {
    rdf <- object$df.resid            # partial match of df.residual
    dev <- aod$"Sum of Sq"
    dfs <- aod$Df
    rms <- aod$RSS[1]/rdf             # residual mean square of full model
    Fs <- (dev/dfs)/rms
    Fs[dfs < 1e-4] <- NA              # guard terms contributing no df
    P <- Fs
    nas <- !is.na(Fs)
    P[nas] <- 1 - pf(Fs[nas], dfs[nas], rdf)
    aod[, c("F Value", "Pr(F)")] <- list(Fs, P)
  }
  head <- c("Single term deletions", "\nModel:",
            deparse(as.vector(formula(object))))
  if(scale > 0)
    head <- c(head, paste("\nscale: ", format(scale), "\n"))
  class(aod) <- c("anova", "data.frame")
  attr(aod, "heading") <- head
  aod
}

# dropterm is not supported for multivariate linear models.
dropterm.mlm <- function(object, ...)
{
    stop("dropterm not implemented for mlm models")
}

# dropterm method for glm fits.  Reuses the fitted model matrix and refits
# with glm.fit() on the column subset excluding each candidate term, which
# avoids repeated calls to update()/model.frame().
# test = "Chisq" appends scaled-deviance LRTs using the model's dispersion.
dropterm.glm <-
  function(object, scope, scale = 0, test = c("none", "Chisq"),
           k = 2, sorted = FALSE, trace = FALSE, ...)
{
  # local set-difference that preserves x when either argument is empty
  setdiff <- function(x, y)
    if(length(x) == 0 || length(y) == 0) x else x[match(x, y, 0) == 0]

  x <- model.matrix(object)
  iswt <- !is.null(wt <- object$weights)
  n <- nrow(x)
  asgn <- attr(x, "assign")
  tl <- attr(object$terms, "term.labels")
  if(missing(scope)) scope <- drop.scope(object)
  else {
    if(!is.character(scope))
      scope <- attr(terms(update.formula(object, scope)), "term.labels")
    if(!all(match(scope, tl, FALSE)))
      stop("scope is not a subset of term labels")
  }
  ns <- length(scope)
  ndrop <- match(scope, tl)
  rdf <- object$df.resid              # partial match of df.residual
  chisq <- object$deviance
  dfs <- numeric(ns)
  dev <- numeric(ns)
  y <- object$y
  if(is.null(y)) y <- model.response(model.frame(object), "numeric")
  na.coef <- (1:length(object$coefficients))[!is.na(object$coefficients)]
  wt <- model.weights(model.frame(object))
  if(is.null(wt)) wt <- rep(1, n)
  rank <- object$rank
  for(i in 1:ns) {
    if(trace) cat("trying -", scope[i], "\n")
    # model-matrix columns belonging to the term being dropped
    ii <- seq(along=asgn)[asgn == ndrop[i]]
    jj <- setdiff(seq(ncol(x)), ii)
    z <-  glm.fit(x[, jj, drop = FALSE], y, wt, offset=object$offset,
                  family=object$family, control=object$control)
    dfs[i] <- z$rank
    dev[i] <- z$deviance
  }
  scope <- c("<none>", scope)
  dfs <- c(object$rank, dfs)
  dev <- c(chisq, dev)
  if (is.null(scale) || scale == 0)
    dispersion <- summary(object, dispersion = NULL)$dispersion
  else dispersion <- scale
  # -2 log-likelihood up to a constant (special-cased for gaussian fits)
  if(object$family$family == "gaussian") {
    if(scale > 0) loglik <- dev/scale - n
    else loglik <- n * log(dev/n)
  } else loglik <- dev/dispersion
  aic <- loglik + k * dfs
  dfs <- dfs[1] - dfs
  dfs[1] <- NA
  aod <- data.frame(Df = dfs, Deviance = dev, AIC = aic, row.names = scope)
  if(sorted) aod <- aod[order(aod$AIC), ]
  test <- match.arg(test)
  if(test == "Chisq") {
    dev <- loglik - loglik[1]
    dev[1] <- NA
    nas <- !is.na(dev)
    aod[, "LRT"] <- dev
    dev[nas] <- 1 - pchisq(dev[nas]/dispersion, aod$Df[nas])
    aod[, "Pr(Chi)"] <- dev
  }
  head <- c("Single term deletions", "\nModel:",
            deparse(as.vector(formula(object))))
  if(scale > 0)
    head <- c(head, paste("\nscale: ", format(scale), "\n"))
  class(aod) <- c("anova", "data.frame")
  attr(aod, "heading") <- head
  aod
}

# dropterm for negative-binomial and survreg fits: delegate to the default
# method, which refits with update() and compares extractAIC() values.
dropterm.negbin <- dropterm.survreg <-
  function(object, ...) dropterm.default(object, ...)
# file MASS/area.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Adaptive numerical integration of f over [a, b].
# Compares the trapezoidal and Simpson estimates on each interval; when
# they agree to within eps the Simpson value is accepted, otherwise the
# interval is bisected and each half is integrated recursively, up to
# `limit` levels of subdivision.  fa/fb may be supplied to avoid
# re-evaluating f at the endpoints.
area <- function(f, a, b, ..., fa = f(a, ...), fb = f(b, ...), limit = 10,
                 eps = 1e-5)
{
    width <- b - a
    mid <- (a + b)/2
    fmid <- f(mid, ...)
    trap <- ((fa + fb) * width)/2          # trapezoidal rule
    simp <- ((fa + 4 * fmid + fb) * width)/6  # Simpson's rule
    if(abs(trap - simp) < eps)
        return(simp)
    if(limit == 0) {
        warning(paste("iteration limit reached near x = ",
                      mid))
        return(simp)
    }
    Recall(f, a, mid, ..., fa = fa, fb = fmid, limit = limit - 1, eps = eps) +
        Recall(f, mid, b, ..., fa = fmid, fb = fb, limit = limit - 1,
               eps = eps)
}
# Unnormalised beta density, x^(alpha-1) * (1-x)^(beta-1); a convenient
# test integrand for area().
fbeta <- function(x, alpha, beta) x^(alpha - 1) * (1 - x)^(beta - 1)
# Print method for "abbrev" objects: flatten a list to a named vector so
# the abbreviations print compactly, then fall through to the next print
# method.
"print.abbrev"<-
function(x, ...)
{
    if(is.list(x))
        x <- unlist(x)
    NextMethod("print")
}
# file MASS/boxcox.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Generic for Box-Cox profile log-likelihood computation and plotting.
boxcox <- function(object, ...)
{
    UseMethod("boxcox")
}

# boxcox method for a model formula: fit the lm (keeping y and the QR
# decomposition, which boxcox.default requires) and fall through to the
# default method.  The result is returned invisibly when it was plotted.
"boxcox.formula"<-
function(object, lambda = seq(-2, 2, 1/10), plotit = length(
	dev.list()) > 0, interp = (plotit && (m < 100)), eps = 1/
	50, xlab = "lambda", ylab = "log-Likelihood", ...)
{
    object <- lm(object, y = TRUE, qr = TRUE, ...)
    result <- NextMethod()
    if(plotit) invisible(result)
    else result
}

# boxcox method for fitted lm objects: refit if the y and qr components
# (needed by boxcox.default) are absent, then fall through to the default
# method.  The result is returned invisibly when it was plotted.
"boxcox.lm"<-
function(object, lambda = seq(-2, 2, 1/10), plotit = length(
	dev.list()) > 0, interp = (plotit && (m < 100)), eps = 1/
	50, xlab = "lambda", ylab = "log-Likelihood", ...)
{
    if(is.null(object$y) || is.null(object$qr))
        object <- update(object, y = TRUE, qr = TRUE, ...)
    result <- NextMethod()
    if(plotit) invisible(result)
    else result
}

# Profile log-likelihood for the Box-Cox power transformation of the
# response in a linear model.  For each lambda the response is transformed
# (with the Jacobian absorbed via the geometric mean ydot), the residual
# sum of squares is obtained from the fitted model's QR decomposition, and
# the profile log-likelihood -n/2 log(RSS) is recorded.  Optionally the
# curve is interpolated by a spline and plotted with a 95% confidence
# interval based on the chi-squared(1) likelihood-ratio cutoff.
# Returns list(x = lambda grid, y = log-likelihood).
"boxcox.default"<-
function(object, lambda = seq(-2, 2, 1/10), plotit = length(
	dev.list()) > 0, interp = (plotit && (m < 100)), eps = 1/
	50, xlab = "lambda", ylab = "log-Likelihood", ...)
{
    if(is.null(object$y) || is.null(object$qr))
        stop(paste(deparse(substitute(object)),
                   "does not have both `qr' and `y' components"
                   ))
    y <- object$y
    n <- length(y)
    if(any(y <= 0))
        stop("Response variable must be positive")
    xqr <- object$qr
    logy <- log(y)
    # geometric mean of y, used to normalise the transformation
    ydot <- exp(mean(logy))
    xl <- loglik <- as.vector(lambda)
    m <- length(xl)
    for(i in 1:m) {
        # Box-Cox transform; a series expansion is used for |lambda| < eps
        # to avoid cancellation near lambda = 0 (the log case)
        if(abs(la <- xl[i]) > eps)
            yt <- (y^la - 1)/la
        else yt <- logy * (1 + (la * logy)/2 *
                           (1 + (la * logy)/3 * (1 + (la * logy)/4)))
        loglik[i] <-  - n/2 * log(sum(qr.resid(xqr, yt/ydot^(la - 1))^2))
    }
    if(interp) {
        # smooth the profile on a finer grid for plotting
        sp <- spline(xl, loglik, n = 100)
        xl <- sp$x
        loglik <- sp$y
        m <- length(xl)
    }
    if(plotit) {
        mx <- (1:m)[loglik == max(loglik)][1]
        Lmax <- loglik[mx]
        # 95% CI cutoff from the LR test: Lmax - chisq(0.95, 1)/2
        lim <- Lmax - qchisq(19/20, 1)/2
        plot(xl, loglik, xlab = xlab, ylab = ylab, type
             = "l", ylim = range(loglik, lim))
        plims <- par("usr")
        abline(h = lim, lty = 3)
        y0 <- plims[3]
        scal <- (1/10 * (plims[4] - y0))/par("pin")[2]
        scx <- (1/10 * (plims[2] - plims[1]))/par("pin")[1]
        text(xl[1] + scx, lim + scal, " 95%")
        la <- xl[mx]
        if(mx > 1 && mx < m)
            segments(la, y0, la, Lmax, lty = 3)
        # drop vertical markers where the profile crosses the cutoff,
        # located by linear interpolation between grid points
        ind <- range((1:m)[loglik > lim])
        if(loglik[1] < lim) {
            i <- ind[1]
            x <- xl[i - 1] + ((lim - loglik[i - 1]) *
                              (xl[i] - xl[i - 1]))/(loglik[i] - loglik[i - 1])
            segments(x, y0, x, lim, lty = 3)
        }
        if(loglik[m] < lim) {
            i <- ind[2] + 1
            x <- xl[i - 1] + ((lim - loglik[i - 1]) *
                              (xl[i] - xl[i - 1]))/(loglik[i] - loglik[i - 1])
            segments(x, y0, x, lim, lty = 3)
        }
    }
    list(x = xl, y = loglik)
}

# file MASS/confint.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Generic for confidence intervals on fitted-model parameters.
confint <- function(object, parm, level = 0.95, ...)
{
    UseMethod("confint")
}

# Profile-likelihood confidence intervals for glm coefficients: profile the
# selected parameters (alpha is set beyond the target level so the profile
# extends past the required quantiles), then delegate to
# confint.profile.glm.
confint.glm <- function(object, parm, level = 0.95, trace = FALSE, ...)
{
    pnames <- names(coef(object))
    if(missing(parm)) parm <- seq(along=pnames)
    else if(is.character(parm))  parm <- match(parm, pnames, nomatch = 0)
    cat("Waiting for profiling to be done...\n")
    object <- profile(object, which = parm, alpha = (1. - level)/4.,
                      trace = trace)
    confint(object, parm=parm, level=level, trace=trace, ...)
}

# Confidence intervals from an existing glm profile object: for each
# requested parameter, spline-interpolate the signed-root profile z against
# the parameter values and read off the values where z equals the normal
# quantiles for the requested level.
confint.profile.glm <-
  function(object, parm = seq(along=pnames), level = 0.95, ...)
{
    of <- attr(object, "original.fit")
    pnames <- names(coef(of))
    if(is.character(parm))  parm <- match(parm, pnames, nomatch = 0)
    a <- (1-level)/2
    a <- c(a, 1-a)
    pct <- paste(round(100*a, 1), "%")
    ci <- array(NA, dim = c(length(parm), 2),
                dimnames = list(pnames[parm], pct))
    # normal quantiles for the signed profile root z
    cutoff <- qnorm(a)
    for(pm in parm) {
        pro <- object[[pm]]
        sp <- spline(x = pro[, "par.vals"][, pm], y = pro$z)
        # invert the interpolated profile: parameter value at each cutoff
        ci[pnames[pm], ] <- approx(sp$y, sp$x, xout = cutoff)$y
    }
    drop(ci)
}

# Profile-likelihood confidence intervals for nls parameters: profile the
# selected parameters (alphamax set beyond the target level so the profile
# extends past the required quantiles), then delegate to
# confint.profile.nls.
confint.nls <-
  function(object, parm = seq(along=pnames), level = 0.95, ...)
{
  pnames <- names(coef(object))
  if(is.character(parm))  parm <- match(parm, pnames, nomatch = 0)
  cat("Waiting for profiling to be done...\n")
  object <- profile(object, which = parm, alphamax = (1. - level)/4.)
  confint(object, parm=parm, level=level, ...)
}

# Confidence intervals from an existing nls profile object: for each
# requested parameter, spline-interpolate the profile-t statistic tau
# against the parameter values and read off the values at the t quantiles
# with n - p residual degrees of freedom.
confint.profile.nls <-
  function(object, parm = seq(along=pnames), level = 0.95, ...)
{
  of <- attr(object, "original.fit")
  pnames <- names(coef(of))
  if(is.character(parm))  parm <- match(parm, pnames, nomatch = 0)
  # residual degrees of freedom: observations minus fitted parameters
  n <- length(fitted(of)) - length(of$m$getPars())
  a <- (1-level)/2
  a <- c(a, 1-a)
  pct <- paste(round(100*a, 1), "%", sep = "")
  ci <- array(NA, dim = c(length(parm), 2),
              dimnames = list(pnames[parm], pct))
  cutoff <- qt(a, n)
  for(pm in parm) {
    pro <- object[[pm]]
    sp <- spline(x = pro[, "par.vals"][, pm], y = pro$tau)
    # invert the interpolated profile: parameter value at each cutoff
    ci[pnames[pm], ] <- approx(sp$y, sp$x, xout = cutoff)$y
  }
  drop(ci)
}




# file MASS/contr.sdif.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Successive-difference contrasts for a factor with n levels (n may also be
# given as a vector of level names).  Column j contrasts level j+1 with
# level j, so the fitted coefficients estimate differences between
# adjacent level means.  With contrasts = FALSE an identity coding is
# returned instead.
contr.sdif <- function(n, contrasts = TRUE)
{
    if(is.numeric(n) && length(n) == 1) {
        if(n %% 1 || n < 2)
            stop("invalid degree")
        lab <- as.character(seq(n))
    } else {
        lab <- as.character(n)
        n <- length(n)
        if(n < 2)
            stop("invalid number of levels")
    }
    if(!contrasts)
        return(structure(diag(n), dimnames = list(lab, lab)))
    cmat <- col(matrix(nrow = n, ncol = n - 1))
    # entries on or above the diagonal get the complementary weight -(n-j)/n
    on.or.above <- !lower.tri(cmat)
    cmat[on.or.above] <- cmat[on.or.above] - n
    structure(cmat/n,
              dimnames = list(lab, paste(lab[-1], lab[-n], sep = "-")))
}
# file MASS/corresp.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Generic for simple correspondence analysis.  Objects without an explicit
# class attribute are tagged with their data.class so that dispatch works
# for plain matrices, factors, etc.
corresp <- function(x, ...)
{
  if(is.null(class(x)))
    class(x) <- data.class(x)
  UseMethod("corresp")
}

# corresp method for crosstabs objects: only two-way tables are supported;
# higher-dimensional tables are rejected with an informative message.
corresp.crosstabs <- function(x, ...)
{
  m <- length(dim(x))
  if(m > 2)
    stop(paste("Frequency table is", m, "dimensional"))
  corresp.matrix(x)
}

# corresp method for data frames: coerce to a matrix and delegate.
corresp.data.frame <- function(x, ...)
{
  corresp.matrix(as.matrix(x), ...)
}

# Fallback method: anything without a specific method is not a valid table.
corresp.default <- function(x, ...)
{
  stop("invalid table specification")
}

# corresp method for a pair of factors: cross-tabulate them, then delegate.
corresp.factor <- function(x, y, ...)
{
  corresp.matrix(table(x, y), ...)
}

# corresp method for a two-sided formula such as ~ f1 + f2: build the
# two-way frequency table of the two variables (evaluated in `data`) and
# delegate to corresp.matrix.  Higher-way tables are rejected.
"corresp.formula" <- function(formula, data = sys.frame(sys.parent()), ...)
{
  # right-hand side of the formula; rhs[[2]]/rhs[[3]] are the two variables
  rhs <- formula[[length(formula)]]
  if(length(rhs[[2]]) > 1 || length(rhs[[3]]) > 1)
    stop("higher way table requested.  Only 2 way allowed")
  tab <- table(eval(rhs[[2]], data), eval(rhs[[3]], data))
  names(dimnames(tab)) <- as.character(c(rhs[[2]], rhs[[3]]))
  corresp.matrix(tab, ...)
}

# Simple correspondence analysis of a two-way frequency table: the first
# `nf` canonical correlations and the corresponding row and column scores,
# computed from the SVD of the standardised residual matrix
# Dr^(1/2) (P - r c') Dc^(1/2), where P is the table of proportions and
# r, c the row/column margins.
"corresp.matrix" <- function(x, nf = 1, ...)
{
  if(any(x < 0 | x %% 1 > 10 * sqrt(.Machine$double.eps)))
    warning("negative or non-integer entries in table")
  if((N <- sum(x)) == 0) stop("all frequencies are zero")
  # row and column marginal proportions
  Dr <- drop(x %*% (rep(1/N, ncol(x))))
  Dc <- drop((rep(1/N, nrow(x))) %*% x)
  if(any(Dr == 0) || any(Dc == 0)) stop("empty row or column in table")
  # residuals from independence, in proportion scale
  x1 <- x/N - outer(Dr, Dc)
  # from here on Dr/Dc hold the inverse square roots of the margins
  Dr <- 1/sqrt(Dr)
  Dc <- 1/sqrt(Dc)
  if(is.null(dimnames(x)))
    dimnames(x) <- list(Row = paste("R", 1:nrow(x)),
                        Col = paste("C", 1:ncol(x)))
  if(is.null(names(dimnames(x))))
    names(dimnames(x)) <- c("Row", "Column")
  X.svd <- svd(t(t(x1 * Dr) * Dc))
  dimnames(X.svd$u) <- list(rownames(x), NULL)
  dimnames(X.svd$v) <- list(colnames(x), NULL)
  res <- list(cor = X.svd$d[1:nf], rscore = X.svd$u[, 1:nf] * Dr,
              cscore = X.svd$v[, 1:nf] * Dc, Freq = x)
  class(res) <- "correspondence"
  res
}

# Plot method for one-dimensional correspondence analyses: rows and columns
# are placed at their scores, with circle areas proportional to the table
# frequencies; `scale` adjusts the circle sizes.  With nf >= 2 a biplot is
# drawn instead.
"plot.correspondence" <- function(x, scale=1, ...)
{
  if(length(x$cor) > 1) return(invisible(biplot(x, ...)))
  Fr <- x$Freq
  rs <- x$rscore
  cs <- x$cscore
  # pad both axis ranges by 20% for the marginal labels
  xs <- range(cs)
  xs <- xs + diff(xs) * c(-1/5, 1/5)
  ys <- range(rs)
  ys <- ys + diff(ys) * c(-1/5, 1/5)
  # each cell of the table is plotted at (column score, row score)
  x <- cs[col(Fr)]
  y <- rs[row(Fr)]
  rcn <- names(dimnames(Fr))
  plot(x, y, xlim = xs, ylim = ys, xlab = rcn[2], ylab = rcn[1], pch = 3)
  size <- min(par("pin"))/20 * scale
  symbols(x, y, circles = as.vector(sqrt(Fr)), inches = size, add = TRUE)
  # label positions just inside the lower/left plot margins
  x0 <- (min(cs) + min(xs))/2
  y0 <- (min(rs) + min(ys))/2
  text(cs, y0, names(cs))
  text(x0, rs, names(rs), adj = 1)
  invisible()
}

# Print method for "correspondence" objects: the canonical correlations
# followed by the row and column scores, returning the object invisibly.
"print.correspondence" <- function(x, ...)
{
  cat("First canonical correlation(s):", format(x$cor, ...), "\n")
  rcn <- names(dimnames(x$Freq))
  cat("\n", rcn[1], "scores:\n")
  print(x$rscore)
  cat("\n", rcn[2], "scores:\n")
  print(x$cscore)
  invisible(x)
}

# Biplot for a correspondence analysis with nf >= 2.
# type = "symmetric" scales both row and column scores by the canonical
# correlations; "rows" ("columns") keeps the row (column) scores in
# principal coordinates and overlays the other set on secondary axes.
biplot.correspondence <-
  function(obj, type=c("symmetric", "rows", "columns"), ...)
{
  if(length(obj$cor) < 2) stop("biplot is only possible if nf >= 2")
  type <- match.arg(type)
  X <- obj$rscore[, 1:2]
  if(type != "columns") X <- X %*% diag(obj$cor[1:2])
  colnames(X) <- rep("", 2)
  Y <- obj$cscore[, 1:2]
  if(type != "rows")  Y <- Y %*% diag(obj$cor[1:2])
  # use TRUE/FALSE, not the reassignable shorthand F
  switch(type, "symmetric" = biplot.default(X, Y, var.axes = FALSE, ...),
         "rows" = biplot.bdr(X, Y, ...),
         "columns" = biplot.bdr(Y, X, ...))
  invisible()
}

# Asymmetric biplot helper used by biplot.correspondence: plots the
# observation scores `obs` on the primary axes and the variable scores
# `bivars` on secondary axes (sides 3 and 4), using equal scales for both
# sets of coordinates.
#
# obs, bivars: two-column score matrices.
# col, cex:    length-2 colour / character-size settings (obs, vars).
# olab, vlab:  optional labels for observations / variables.
# xlim, ylim:  optional common axis limits; by default a range covering
#              both score sets and including zero.
biplot.bdr <-
function(obs, bivars, col, cex = rep(par("cex"), 2),
	olab = NULL, vlab = NULL, xlim = NULL, ylim = NULL, ...)
{
  # reflect an endpoint so the interval always contains zero
  expand.range <- function(x)
  {
    if(x[1] > 0) x[1] <-  - x[1]
    else if(x[2] < 0) x[2] <-  - x[2]
    x
  }
  n <- dim(obs)[1]
  p <- dim(bivars)[1]
  # resolve variable labels: logical subset, full replacement, or row names
  vlab.real <- rownames(bivars)
  if(is.logical(vlab)) vlab <- vlab.real[vlab]
  else if(length(vlab) != p) vlab <- vlab.real
  else vlab <- as.character(vlab)
  if(!length(vlab)) {
    vlab.real <- vlab <- paste("Var", 1:p)
    dimnames(bivars) <- list(vlab, colnames(bivars))
  }
  # resolve observation labels: supplied, row names, or 1:n
  if(length(olab)) olab <- rep(as.character(olab), length = n)
  else {
    olab <- rownames(obs)
    if(length(olab) != n) olab <- as.character(1:n)
  }
  if(length(cex) != 2) cex <- rep(cex, length = 2)
  if(missing(col)) {
    col <- par("col")
    if (!is.numeric(col)) col <- match(col, palette())
    col <- c(col, col + 1)
  }
  else if(length(col) != 2) col <- rep(col, length = 2)
  ro1 <- expand.range(range(obs[, 1]))
  ro2 <- expand.range(range(obs[, 2]))
  rv1 <- expand.range(range(bivars[, 1]))
  rv2 <- expand.range(range(bivars[, 2]))
  if(!(length(xlim) || length(ylim)))
    xlim <- ylim <- range(ro1, ro2, rv1, rv2)
  else if(length(ylim)) xlim <- range(ro1, rv1)
  else ylim <- range(ro2, rv2)
  on.exit(par(oldpar))
  oldpar <- par(pty = "s")
  plot(obs, type = "n", xlim = xlim, ylim = ylim, col = col[1], ...)
  text(obs, labels=olab, cex = cex[1], col = col[1], ...)
  par(new = TRUE)
  # overlay the variable scores on secondary axes (use FALSE, not F)
  plot(bivars, axes = FALSE, type = "n", xlim = xlim, ylim =
       ylim, xlab = "", ylab = "", col = col[1], ...)
  axis(3, col = col[2])
  axis(4, col = col[2])
  box(col = col[1])
  text(bivars, labels=vlab, cex = cex[2], col = col[2], ...)
  invisible()
}

# file MASS/cov.trob.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Robust estimate of multivariate location and scatter assuming a
# multivariate t distribution with `nu` degrees of freedom, fitted by an
# EM-type iteratively reweighted algorithm.  Observations far from the
# current centre receive weight (nu + p)/(nu + Q), downweighting outliers.
# Returns a list with cov, center, n.obs (plus wt, cor when requested),
# the call, and the iteration count.
cov.trob <- function(x, wt = rep(1, n), cor = FALSE, center = TRUE, nu = 5,
                     maxit = 25, tol = 0.01)
{
    # stop with an informative message if x / wt contain NA or Inf
    test.values <- function(x)
    {
        if(any(is.na(x)) || any(is.infinite(x)))
            stop(paste("error in cov.trob: missing or infinite values in",
                       deparse(substitute(x))))
    }
    # column-wise centring without forming an explicit matrix of centres
    scale.simp <- function(x, center, n, p) x - rep(center, rep(n, p))

    x <- as.matrix(x)
    n <- nrow(x)
    p <- ncol(x)
    dn <- colnames(x)
    test.values(x)
    if(!(miss.wt <- missing(wt))) {
        test.values(wt)
        if(length(wt) != n)
            stop("length of wt must equal number of observations")
        if(any(wt < 0)) stop("negative weights not allowed")
        if(!sum(wt)) stop("no positive weights")
    }
    # initial location: weighted column means (possibly overridden below)
    loc <- apply(wt * x, 2, sum)/sum(wt)
    if(is.numeric(center)) {
        if(length(center) != p) stop("center is not the right length")
        loc <- center
    } else if(is.logical(center) && !center) loc <- rep(0, p)
    use.loc <- is.logical(center) && center
    w <- wt * (1 + p/nu)
    endit <- 0
    for(iter in 1:maxit) {
        w0 <- w
        X <- scale.simp(x, loc, n, p)
        # SVD of the weighted, centred data gives the current scatter shape
        sX <- svd(sqrt(w/n) * X, nu = 0)
        wX <- X %*% sX$v %*% diag(1/sX$d,  , p)
        # Q: squared Mahalanobis-type distances under the current scatter
        Q <- drop(wX^2 %*% rep(1, p))
        w <- (wt * (nu + p))/(nu + Q)
        #    print(summary(w))
        # NOTE(review): divides by n rather than sum(w); this relies on
        # mean(w) being close to mean(wt) at convergence (enforced by the
        # warning below) -- confirm this is the intended update
        if(use.loc) loc <- apply(w * x, 2, sum)/n
        if(all(abs(w - w0) < tol)) break
        endit <- iter
    }
    if(endit == maxit || abs(mean(w) - mean(wt)) > tol ||
       abs(mean(w * Q)/p - 1) > tol)
        warning("Probable convergence failure")
    cov <- crossprod(sqrt(w) * X)/sum(wt)
    if(length(dn)) {
        dimnames(cov) <- list(dn, dn)
        names(loc) <- dn
    }
    if(miss.wt)
        ans <- list(cov = cov, center = loc, n.obs = n)
    else ans <- list(cov = cov, center = loc, wt = wt, n.obs = n)
    if(cor) {
        # convert the covariance to a correlation matrix
        sd <- sqrt(diag(cov))
        cor <- (cov/sd)/rep(sd, rep.int(p, p))
        if(length(dn)) dimnames(cor) <- list(dn, dn)
        ans <- c(ans, list(cor = cor))
    }
    ans$call <- match.call()
    ans$iter <- endit
    ans
}
# file MASS/cpgram.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Cumulative periodogram plot of a time series (Bartlett's test display):
# the normalised cumulative periodogram is plotted with Kolmogorov-Smirnov
# style 95% confidence lines for the white-noise hypothesis.
cpgram <- function(ts, taper=0.1,
   main=paste("Series: ", deparse(substitute(ts))) )
{
  # force the default title now, before ts is transformed below
  eval(main)
  x <- as.vector(ts)
  x <- x[!is.na(x)]
  # mean-correct and taper before the FFT
  x <- spec.taper(scale(x, TRUE, FALSE), p=taper)
  y <- Mod(fft(x))^2/length(x)
  y[1] <- 0                       # drop the zero-frequency (mean) term
  n <- length(x)
  x <- (0:(n/2))*frequency(ts)/n
  if(length(x)%%2==0) {
    n <- length(x)-1
    y <- y[1:n]
    x <- x[1:n]
  } else y <- y[seq(along=x)]
  xm <- frequency(ts)/2
  mp <- length(x)-1
  # 95% KS-type critical value for the cumulative periodogram
  crit <- 1.358/(sqrt(mp)+0.12+0.11/sqrt(mp))
  oldpty <- par()$pty
  par(pty="s")
  plot(x, cumsum(y)/sum(y), type="s", xlim=c(0, xm),
       ylim=c(0, 1), xaxs="i", yaxs="i", xlab="frequency",
       ylab="", pty="s")
  # confidence band: lines at +/- crit about the diagonal
  lines(c(0, xm*(1-crit)), c(crit, 1))
  lines(c(xm*crit, xm), c(0, 1-crit))
  title(main = main)
  par(pty=oldpty)
  invisible()
}
# file MASS/digamma.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Digamma function psi(z) = d/dz log(Gamma(z)), vectorised, for arguments
# with positive real part (NA is returned elsewhere).  Small |z| (< 5) is
# handled by the recurrence psi(z) = psi(z+5) - sum_{j=0..4} 1/(z+j);
# large |z| by the asymptotic series in 1/z^2.
# NOTE(review): this definition masks base R's digamma().
digamma <- function(z)
{
    # negative/zero real part: not supported, return NA there
    if(any(omit <- Re(z) <= 0)) {
        ps <- z
        ps[omit] <- NA
        if(any(!omit))
            ps[!omit] <- Recall(z[!omit])
        return(ps)
    }
    # shift small arguments up by 5 before using the asymptotic series
    if(any(small <- Mod(z) < 5)) {
        ps <- z
        x <- z[small]
        ps[small] <- Recall(x + 5) - 1/x - 1/(x + 1) - 1/
            (x + 2) - 1/(x + 3) - 1/(x + 4)
        if(any(!small))
            ps[!small] <- Recall(z[!small])
        return(ps)
    }
    # asymptotic expansion: log(z) - 1/(2z) - sum B_2k / (2k z^(2k))
    x <- 1/z^2
    tail <- ((x * (-1/12 + ((x * (1/120 + ((x * (-1/252 + ((x * (1/240 + ((x * (-1/132 + ((x * (691/32760 + ((x * (-1/12 + (3617 * x)/8160)))))))))))))))))))
              ))
    log(z) - 1/(2 * z) + tail
}

# Trigamma function psi'(z) = d^2/dz^2 log(Gamma(z)), vectorised, for
# arguments with positive real part (NA is returned elsewhere).  Small |z|
# (< 5) uses the recurrence psi'(z) = psi'(z+5) + sum_{j=0..4} 1/(z+j)^2;
# large |z| the asymptotic series in 1/z^2.
# NOTE(review): this definition masks base R's trigamma().
trigamma <- function(z)
{
    # negative/zero real part: not supported, return NA there
    if(any(omit <- Re(z) <= 0)) {
        ps <- z
        ps[omit] <- NA
        if(any(!omit))
            ps[!omit] <- Recall(z[!omit])
        return(ps)
    }
    # shift small arguments up by 5 before using the asymptotic series
    if(any(small <- Mod(z) < 5)) {
        ps <- z
        x <- z[small]
        ps[small] <- Recall(x + 5) + 1/x^2 + 1/(x + 1)^2 +
            1/(x + 2)^2 + 1/(x + 3)^2 + 1/(x + 4)^2
        if(any(!small))
            ps[!small] <- Recall(z[!small])
        return(ps)
    }
    # asymptotic expansion in powers of 1/z^2
    x <- 1/z^2
    tail <- 1 + (x * (1/6 + (x * (-1/30 + (x * (1/42 + (x * (-1/30 + (x * (5/66 + (x * (-691/2370 + (x * (7/6 - (3617 * x)/510))))))))))))))
    1/(2 * z^2) + tail/z
}
# file MASS/dose.p.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Effective dose estimation: for a fitted binary-response model with
# intercept/slope coefficients selected by `cf`, the dose giving response
# probability p is x.p = (linkfun(p) - b1)/b2.  Its standard error is
# obtained by the delta method from vcov(obj).
dose.p <- function(obj, cf = 1:2, p = 0.5)
{
  beta <- coef(obj)[cf]
  eta <- family(obj)$linkfun(p)
  xp <- (eta - beta[1])/beta[2]
  names(xp) <- paste("p = ", format(p), ":", sep = "")
  # gradient of x.p with respect to (b1, b2), for the delta method
  grad <- -cbind(1, xp)/beta[2]
  SE <- sqrt(((grad %*% vcov(obj)[cf, cf]) * grad) %*% c(1, 1))
  res <- structure(xp, SE = SE, p = p)
  class(res) <- "glm.dose"
  res
}

# Print method for "glm.dose": tabulate the dose estimates alongside their
# standard errors and print via the default method.
print.glm.dose <- function(x, ...)
{
  tab <- cbind(x, attr(x, "SE"))
  dimnames(tab) <- list(names(x), c("Dose", "SE"))
  x <- tab
  NextMethod("print")
}
# file MASS/enlist.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Convert a (possibly named) vector into a list, preserving its names.
enlist <- function(vec)
{
    out <- as.list(vec)
    names(out) <- names(vec)
    out
}
# file MASS/eqscplot.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Scatter plot with geometrically equal scales: one y unit occupies
# `ratio` times the physical length of one x unit.  x may be a two-column
# matrix, a list with x/y components, or a plain vector paired with y.
# `tol` pads the data ranges; `uin` optionally fixes the units per inch.
eqscplot <- function(x, y, ratio = 1, tol = 0.04, xlim = range(x, na.rm=TRUE),
                     ylim = range(y, na.rm=TRUE),
		     xlab, ylab,
		     uin, ...)
{
  if(is.matrix(x)) {
    # capture the column names BEFORE x is overwritten with its first
    # column (the original read colnames(x) afterwards, so automatic axis
    # labels from a matrix were always lost)
    dn <- colnames(x)
    y <- x[, 2]
    x <- x[, 1]
    if(!is.null(dn)) {
      xlab0 <- dn[1]
      ylab0 <- dn[2]
    } else {
      xlab0 <- ""
      ylab0 <- ""
    }
  } else if(is.list(x)) {
    y <- x$y
    x <- x$x
    xlab0 <- "x"; ylab0 <- "y"
  } else {
    xlab0 <- deparse(substitute(x))
    ylab0 <- deparse(substitute(y))
  }
  if(missing(xlab)) xlab <- xlab0
  if(missing(ylab)) ylab <- ylab0
  # pad both ranges symmetrically by the tolerance
  midx <- 0.5 * (xlim[2] + xlim[1])
  xlim <- midx + (1 + tol) * 0.5 * c(-1, 1) * (xlim[2] - xlim[1])
  midy <- 0.5 * (ylim[2] + ylim[1])
  ylim <- midy + (1 + tol) * 0.5 * c(-1, 1) * (ylim[2] - ylim[1])
  # current units-per-inch in each direction, from the plot region size
  oldpin <- par("pin")
  xuin <- oxuin <- oldpin[1]/diff(xlim)
  yuin <- oyuin <- oldpin[2]/diff(ylim)
  if(missing(uin)) {
    # shrink the denser axis so that yuin = ratio * xuin
    if(yuin > xuin*ratio) yuin <- xuin*ratio
    else xuin <- yuin/ratio
  } else {
    if(length(uin) == 1) uin <- uin * c(1, ratio)
    if(any(c(xuin, yuin) < uin)) stop("uin is too large to fit plot in")
    xuin <- uin[1]; yuin <- uin[2]
  }
  # widen the limits to realise the chosen units-per-inch
  xlim <- midx + oxuin/xuin * c(-1, 1) * diff(xlim) * 0.5
  ylim <- midy + oyuin/yuin * c(-1, 1) * diff(ylim) * 0.5
  plot(x, y, xlim = xlim, ylim = ylim, xaxs = "i", yaxs = "i",
       xlab = xlab, ylab = ylab, ...)
}
# file MASS/fractions.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Vectorised continued-fraction expansion of x: finds small-denominator
# rational approximations p/q (denominator bounded by max.denominator,
# at most `cycles` partial quotients).  Returns list(rat = cbind(p, q),
# x = the original values).
.rat <- function(x, cycles = 10, max.denominator = 2000)
{
  a0 <- rep(0, length(x))
  A <- matrix(b0 <- rep(1, length(x)))
  B <- matrix(floor(x))
  r <- as.vector(x) - drop(B)      # fractional parts still to expand
  len <- 0
  # accumulate partial quotients for every element whose remainder is
  # still larger than the resolution implied by max.denominator
  while(any(which <- (r > 1/max.denominator)) &&
	(len <- len + 1) <= cycles) {
    a <- a0
    b <- b0
    a[which] <- 1
    r[which] <- 1/r[which]
    b[which] <- floor(r[which])
    r[which] <- r[which] - b[which]
    A <- cbind(A, a)
    B <- cbind(B, b)
  }
  # evaluate the convergents p/q by the standard three-term recurrence
  pq1 <- cbind(b0, a0)
  pq <- cbind(B[, 1], b0)
  len <- 1
  while((len <- len + 1) <= ncol(B)) {
    pq0 <- pq1
    pq1 <- pq
    pq <- B[, len] * pq1 + A[, len] * pq0
  }
  list(rat = pq, x = x)
}

# Replace x by nearby "nice" rational values (via .rat), keeping every
# attribute of x (dim, dimnames, names, ...).
rational <- function(x, ...) {
  rat <- .rat(x, ...)$rat
  do.call("structure", c(list(rat[, 1]/rat[, 2]), attributes(x)))
}

# Represent x as rational numbers: the numeric values are kept, with a
# "fracs" attribute holding the "p/q" display strings (exact integers are
# shown without a denominator).
fractions <- function(x, ...) {
  ans <- .rat(x, ...)
  ndc <- paste(ans$rat[, 1], ans$rat[, 2], sep = "/")
  int <- ans$rat[, 2] == 1
  ndc[int] <- as.character(ans$rat[int, 1])
  vals <- ans$x
  ans <- structure(vals, fracs = ndc)
  # BUG FIX: the original used class(ans$x) here, but `ans` is by now an
  # atomic vector, and `$` on an atomic vector is an error in R; use the
  # class of the underlying numeric data instead.
  class(ans) <- c("fractions", class(vals))
  ans
}

# Transpose method for "fractions" matrices: transpose the numeric values
# via the next method and rearrange the "fracs" display strings to match.
"t.fractions"<- function(x)
{
  xt <- NextMethod()
  class(xt) <- class(x)
  attr(xt, "fracs") <- t(array(attr(x, "fracs"), dim(x)))
  xt
}

# Math group generic (abs, sqrt, ...): strip the class, apply the operation
# to the plain numbers, and re-coerce the result to fractions.
"Math.fractions"<- function(x, ...)
{
  x <- unclass(x)
  fractions(NextMethod())
}

# Ops group generic (+, -, *, ==, ...): strip the class from both operands,
# apply the operation to the plain numbers, and re-coerce the result to
# fractions.
"Ops.fractions"<- function(e1, e2)
{
  e1 <- unclass(e1)
  if(!missing(e2))
    e2 <- unclass(e2)
  fractions(NextMethod(.Generic))
}

# Summary group generic (sum, max, range, ...): strip the class, apply the
# operation to the plain numbers, and re-coerce the result to fractions.
"Summary.fractions"<- function(x, ...)
{
  x <- unclass(x)
  fractions(NextMethod())
}

# Subscript method: subset the plain numbers and re-coerce the result to
# fractions (the "fracs" strings are recomputed by fractions()).
"[.fractions"<- function(x, ...)
{
  x <- unclass(x)
  fractions(NextMethod())
}

# Subscript-replacement method: assign into the plain numbers and
# re-coerce the result to fractions (the "fracs" strings are recomputed).
"[<-.fractions"<- function(x, ..., value)
{
  x <- unclass(x)
  fractions(NextMethod())
}

# Character representation of a fractions object: the stored "p/q" display
# strings, shaped and labelled like x.
as.character.fractions <- function(x)
{
  structure(attr(x, "fracs"), dim = dim(x), dimnames = dimnames(x))
}

# Coerce to fractions: a no-op for objects that already are fractions.
"as.fractions"<- function(x)
if(is.fractions(x)) x else fractions(x)

# Test whether f is a "fractions" object.
is.fractions <- function(f)
{
  inherits(f, "fractions")
}

# Print method for fractions: show the "p/q" strings right-aligned in a
# common field width, preserving the object's dim/dimnames, and print
# unquoted via the default method.
"print.fractions"<- function(x, ...)
{
  y <- attr(x, "fracs")
  mc <- max(ncy <- nchar(y))
  # pad shorter entries on the left so all strings line up
  if(any(small <- ncy < mc)) {
    blanks <- "    "
    while(nchar(blanks) < mc) blanks <- paste(blanks, blanks)
    blanks <- rep(blanks, sum(small))
    blanks <- substring(blanks, 1, mc - ncy)
    y[small] <- paste(blanks[small], y[small], sep = "")
  }
  # carry over all attributes except the strings themselves and the class
  att <- attributes(x)
  att$fracs <- att$class <- NULL
  x <- do.call("structure", c(list(y), att))
  NextMethod("print", quote = F)
}

# file MASS/gamma.shape.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
gamma.shape <- function(object, ...) UseMethod("gamma.shape")

gamma.shape.glm <- function(object, it.lim = 10,
                            eps.max = .Machine$double.eps^0.25,
			    verbose = FALSE)
{
    ## ML estimate of the Gamma shape parameter alpha for a fitted
    ## Gamma-family GLM, by Newton iteration on the profile score.
    ## it.lim: iteration limit; eps.max: tolerance on the Newton step;
    ## verbose: print progress.  Returns list(alpha, SE), class
    ## "gamma.shape".
    if(is.null(object$y)) object <- update(object, y = TRUE)  # refit to recover y
    y <- object$y
    A <- object$prior.weights
    if(is.null(A)) A <- rep(1, length(y))
    u <- object$fitted
    ## moment-style starting value based on the mean deviance
    Dbar <- object$deviance/object$df.residual
    alpha <- (6 + 2*Dbar)/(Dbar*(6 + Dbar))
    if(verbose) cat("Initial estimate:", format(alpha), "\n")
    ## alpha-free part of the score; log(y + (y == 0)) guards log(0)
    fixed <-  -y/u - log(u) + log(A) + 1 + log(y + (y == 0))
    eps <- 1
    itr <- 0
    while(abs(eps) > eps.max && (itr <- itr + 1) <= it.lim) {
        sc <- sum(A * (fixed + log(alpha) - digamma(A * alpha)))  # score
        inf <- sum(A * (A * trigamma(A * alpha) - 1/alpha))       # information
        alpha <- alpha + (eps <- sc/inf)                          # Newton step
        if(verbose) cat("Iter. ", itr, " Alpha:", alpha, "\n")
    }
    if(itr > it.lim) warning("Iteration limit reached")
    ## SE from the information at the final iterate
    res <- list(alpha = alpha, SE = sqrt(1/inf))
    class(res) <- "gamma.shape"
    res
}

gamma.dispersion <- function(object, ...)
{
    ## Dispersion is the reciprocal of the estimated shape parameter.
    alpha <- gamma.shape(object, ...)[[1]]
    1/alpha
}

print.gamma.shape <- function(x, ...)
{
    ## Show the estimate and its SE as a labelled two-row column, then
    ## return the original object invisibly.
    saved <- x
    x <- array(unlist(x), dim = 2:1,
               dimnames = list(c("Alpha:", "SE:"), ""))
    NextMethod("print")
    invisible(saved)
}
# file MASS/hist.scott.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
nclass.scott <- function(x)
{
    ## Number of histogram classes by Scott's rule:
    ## bin width 3.5 * sd(x) * n^(-1/3).
    n <- length(x)
    width <- 3.5 * sqrt(var(x)) * n^(-1/3)
    ceiling(diff(range(x))/width)
}
nclass.FD <- function(x)
{
    ## Number of histogram classes by the Freedman-Diaconis rule:
    ## bin width 2 * IQR(x) * n^(-1/3).
    q <- as.vector(quantile(x, c(0.25, 0.75)))
    width <- 2 * (q[2] - q[1]) * length(x)^(-1/3)
    ceiling(diff(range(x))/width)
}

hist.scott <- function(x, prob = TRUE, xlab = deparse(substitute(x)), ...)
{
    ## Histogram with the class count chosen by Scott's rule.
    invisible(hist(x, nclass.scott(x), prob = prob, xlab = xlab, ...))
}
hist.FD <- function(x, prob = TRUE, xlab = deparse(substitute(x)), ...)
{
    ## Histogram with the class count chosen by the Freedman-Diaconis rule.
    invisible(hist(x, nclass.FD(x), prob = prob, xlab = xlab, ...))
}

frequency.polygon <- function(x, nclass = nclass.freq(x),
    xlab="", ylab="", ...)
{
    ## Draw a frequency polygon: bin counts joined by line segments at
    ## the bin midpoints.
    hst <- hist(x, nclass, probability = TRUE, plot = FALSE, ...)
    brk <- hst$breaks
    mids <- (brk[-length(brk)] + brk[-1]) * 0.5
    plot(mids, hst$counts, type = "l", xlab = xlab, ylab = ylab)
}

nclass.freq <- function(x)
{
    ## Class count for frequency polygons: bin width 2.15 * sd * n^(-1/5).
    width <- 2.15 * sqrt(var(x)) * length(x)^(-1/5)
    ceiling(diff(range(x))/width)
}

bandwidth.nrd <- function(x)
{
    ## Normal-reference bandwidth: 1.06 times the smaller of sd and
    ## IQR/1.34, times n^(-1/5); the factor 4 puts it on S's
    ## quarter-width bandwidth scale.
    q <- quantile(x, c(0.25, 0.75))
    spread <- (q[2] - q[1])/1.34
    4 * 1.06 * min(sqrt(var(x)), spread) * length(x)^(-1/5)
}
# file MASS/histplot.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
"histplot" <-
function(formula, data = sys.parent(1),
	 prepanel = prepanel.histplot, panel = panel.histplot,
	 nbins = 5, h , x0 = -h/1000, breaks, prob = TRUE,
	 aspect = "fill",
	 xlab = deparse(do.formula.trellis(formula)$expr[[1]]),
	 ylab = if(prob) "Density" else "Counts",
	 groups = NULL, ..., subset = TRUE)
{
  ## Trellis histogram plot (S-PLUS interface).  Not ported to R: the
  ## first statement aborts, and the remainder relies on S-PLUS trellis
  ## helpers (do.formula.trellis, setup.2d.trellis) not available here.
  .NotYetImplemented()
  if(mode(formula) != "call")
    formula <- formula.default(paste("~", deparse(substitute(formula))))
  Z <- do.formula.trellis(formula)
  if(!Z$no.response || is.null(Z$expr))
    stop("formula should be in the form of x or ~ x | g1*g2*...")
  subset <- eval(substitute(subset), data)
  x <- eval(Z$expr[[1]], data)[subset]
  x <- x[!is.na(x)]
  if(missing(breaks)) {
    ## default breaks: equal bins of width h anchored just left of x0
    if(missing(h)) h <- diff(pretty(x, nbins))[1]
    first <- floor((min(x) - x0)/h)
    last <- ceiling((max(x) - x0)/h)
    breaks <- x0 + h * c(first:last)
  }
  if(any(diff(breaks) <= 0)) stop("breaks must be strictly increasing")
  if(min(x) < min(breaks) || max(x) > max(breaks))
     stop("breaks do not cover the data")
  db <- diff(breaks)
  if(!prob && sqrt(var(db)) > mean(db)/1000)
    warning("Uneven breaks with prob = F will give a misleading plot")
  setup.2d.trellis(formula, data = data, prepanel = prepanel,
		   prepanel.arg = list(breaks = breaks, prob = prob),
		   breaks = breaks, prob = prob, panel = panel,
		   aspect = aspect, xlab = xlab, ylab = ylab,
		   groups = eval(substitute(groups), data), ...,
		   subset = subset)
}
"prepanel.histplot" <-
function(x, y, breaks, prob=TRUE, ...)
{
  ## Panel-limit calculation for histplot: bin the data into the span
  ## of `breaks` that covers it and report axis ranges plus the slope
  ## information trellis uses for aspect control.  (`y` is unused; it
  ## is part of the prepanel calling convention.)
  dat <- x[!is.na(x)]
  grid <- breaks
  lo <- max(seq(along = grid)[grid < min(dat)])
  hi <- min(seq(along = grid)[grid > max(dat)])
  grid <- grid[lo:hi]
  bins <- cut(dat, grid, include.lowest = TRUE)
  height <- tabulate(bins, length(levels(bins)))
  if (prob) height <- height/(diff(grid) * length(dat))
  list(xlim = range(grid), ylim = c(0, max(height)),
       dx = diff(grid)[-1], dy = diff(height))
}
"panel.histplot" <-
function(x, y, breaks, prob = TRUE, col = trellis.par.get("bar.fill")$col, ...)
{
  ## Panel function for histplot: draw the histogram bars as one
  ## NA-separated polygon.  (`y` is unused; panel calling convention.)
  ## FIX: default was `prob = T` -- T is reassignable, use TRUE.
  data <- x[!is.na(x)]
  x <- breaks
  xl <- max(seq(along=x)[x < min(data)])
  xm <- min(seq(along=x)[x > max(data)])
  x <- x[xl:xm]
  bin <- cut(data, x, include.lowest = TRUE)
  est <- tabulate(bin, length(levels(bin)))
  if (prob) est <- est/(diff(x) * length(data))
  nx <- length(x)
  ## four corners per bar, NA-separated so polygon() draws each bar
  X <- as.vector(rbind(x[-1], x[ - nx], x[ - nx], x[-1], NA))
  Y <- as.vector(rbind(0, 0, est, est, NA))
  polygon(X, Y, col = col, border = 1, ...)
}



# file MASS/huber.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
huber <- function(y, k=1.5, tol = 1.0e-6)
{
    ## Huber M-estimate of location with MAD scale.
    ## y: data (NAs dropped); k: winsorizing constant in units of s;
    ## tol: convergence tolerance relative to s.  Returns list(mu, s).
    y <- y[!is.na(y)]
    n <- length(y)
    mu <- median(y)
    s <- mad(y)
    ## BUG FIX: when s == 0 the stopping rule abs(mu-mu1) < tol*s can
    ## never be satisfied and the original loop ran forever.
    if(s == 0) stop("cannot estimate scale: MAD is zero for this sample")
    repeat{
        ## winsorize at mu +/- k*s, then re-estimate the mean
        yy <- pmin(pmax(mu-k*s,y),mu+k*s)
        mu1 <- sum(yy)/n
        if(abs(mu-mu1) < tol*s) break
        mu <- mu1
    }
    list(mu=mu,s=s)
}
# file MASS/hubers.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
hubers <- function(y, k = 1.5, mu, s, initmu = median(y), tol = 1.0e-6)
{
    ## Huber "proposal 2" simultaneous estimation of location mu and
    ## scale s.  Supplying mu or s holds that component fixed.
    y <- y[!is.na(y)]
    n <- length(y)
    if(missing(mu)) {
        mu0 <- initmu
        n1 <- n - 1    # one df lost when location is estimated
    } else {
        mu0 <- mu
        mu1 <- mu
        n1 <- n
    }
    if(missing(s)) s0 <- mad(y) else {s0 <- s;s1 <- s}
    ## consistency constant: E[psi^2] at the normal for winsorizing at k
    th <- 2*pnorm(k)-1
    beta <- th + k^2*(1-th) - 2*k*dnorm(k)
    repeat{
        ## winsorize at mu0 +/- k*s0, then update whichever of mu, s
        ## is being estimated
        yy <- pmin(pmax(mu0-k*s0,y), mu0+k*s0)
        if(missing(mu)) mu1 <- sum(yy)/n
        if(missing(s)) {
            ss <- sum((yy-mu1)^2)/n1
            s1 <- sqrt(ss/beta)    # rescale for consistency at the normal
        }
        if((abs(mu0-mu1) < tol*s0) && abs(s0-s1) < tol*s0) break
        mu0 <- mu1; s0 <- s1
    }
    list(mu=mu0, s=s0)
}
# file MASS/isoMDS.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
isoMDS <- function(d, y=cmdscale(d, 2), maxit=50, trace=TRUE)
{
  ## Kruskal's non-metric multidimensional scaling.
  ## d: dissimilarities (a dist object or square symmetric matrix);
  ## y: initial configuration; maxit: optimiser iteration limit.
  ## Returns list(points, stress).
  if(any(!is.finite(as.vector(d)))) stop("NAs/Infs not allowed in d")
  if(is.null(n <- attr(d, "Size"))) {
    x <- as.matrix(d)
    if((n <- nrow(x)) != ncol(x))
      stop("Distances must be result of dist or a square matrix")
  } else {
    ## expand the lower triangle of a dist object into a full matrix
    x <- matrix(0, n, n)
    x[row(x) > col(x)] <- d
    x <- x + t(x)
  }
  ## rank-based fitting cannot cope with non-positive dissimilarities;
  ## report the first offending pair
  if (any(ab <- x[row(x) < col(x)] <= 0)) {
    aa <- cbind(as.vector(row(x)), as.vector(col(x)))[row(x) < col(x),]
    aa <- aa[ab, , drop=FALSE]
    stop(paste("zero or negative distance between objects",aa[1,1],
	       "and", aa[1,2]))
  }
  dis <- x[row(x) > col(x)]
  ord <- order(dis)
  nd <- length(ord)
  n <- nrow(y)
  k <- ncol(y)
  ## the compiled code keeps the data in static state, so unload on exit
  on.exit(.C("VR_mds_unload"))
  .C("VR_mds_init_data",
     as.integer(nd),
     as.integer(k),
     as.integer(n),
     as.integer(ord - 1),          # 0-based indices for C
     as.integer(order(ord) - 1),
     as.double(y)
     )
  tmp <- .C("VR_mds_dovm",
	    val = double(1),
	    as.integer(maxit),
	    as.integer(trace),
	    y = as.double(y)
	    )
  list(points = matrix(tmp$y,,k), stress = tmp$val)
}

Shepard <- function(d, x)
{
#
# Given a dissimilarity d and configuration x, compute Shepard plot
#
  ## Returns list(x = sorted dissimilarities, y = configuration
  ## distances in that order, yf = their monotone (isotonic) fit),
  ## computed by the compiled VR_mds_fn routine.
  n <- nrow(x)
  k <- ncol(x)
  y <- dist(x)
  ord <- order(d)
  y <- y[ord]
  nd <- length(ord)
  Z <- .C("VR_mds_fn",
	  as.double(y),
	  yf=as.double(y),
	  as.integer(nd),
	  ssq = double(1),
	  as.integer(order(ord)-1),  # 0-based rank permutation for C
	  as.double(x),
	  as.integer(n),
	  as.integer(k),
	  g=double(n*k),
	  as.integer(1)
	  )
  list(x = d[ord], y = y, yf = Z$yf)
}

# file MASS/kde2d.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
kde2d <- function(x, y, h, n = 25, lims=c(range(x), range(y)) )
{
    ## Two-dimensional Gaussian-kernel density estimate of (x, y) on an
    ## n x n grid spanning `lims`; h gives the bandwidths (defaulting
    ## to bandwidth.nrd per axis).  Returns list(x, y, z).
    nx <- length(x)
    if(length(y) != nx)
        stop("Data vectors must be the same length")
    gx <- seq(lims[1], lims[2], length = n)
    gy <- seq(lims[3], lims[4], length = n)
    if (missing(h))
        h <- c(bandwidth.nrd(x), bandwidth.nrd(y))
    h <- h/4                            # for S's bandwidth scale
    kx <- matrix(dnorm(outer(gx, x, "-")/h[1]), n, nx)
    ky <- matrix(dnorm(outer(gy, y, "-")/h[2]), n, nx)
    dens <- tcrossprod(kx, ky)/(nx * h[1] * h[2])
    return(list(x = gx, y = gy, z = dens))
}
# file MASS/lda.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
lda <- function(x, ...)
{
   ## Generic for linear discriminant analysis.
   ## FIX: the S-PLUS original set class(x) from data.class(x) and
   ## called UseMethod("lda", x, ...); in R, UseMethod() accepts no
   ## extra arguments and every object already has an (implicit) class
   ## used for dispatch, so a plain UseMethod() is the correct form.
   UseMethod("lda")
}

lda.formula <- function(formula, data = NULL, ...,
			subset, na.action = na.fail)
{
    ## Formula interface: build the model frame, extract the grouping
    ## (response) and predictor matrix, then delegate to lda.default.
    m <- match.call(expand.dots = FALSE)
    if(is.matrix(eval(m$data, sys.frame(sys.parent()))))
        m$data <- as.data.frame(data)
    m$... <- NULL
    m[[1]] <- as.name("model.frame")
    m <- eval(m, sys.frame(sys.parent()))
    Terms <- attr(m, "terms")
    grouping <- model.extract(m, "response")
    x <- model.matrix(Terms, m)
    ## record factor levels of the predictors for use in predict()
    xvars <- as.character(attr(Terms, "variables"))[-1]
   if ((yvar <- attr(Terms, "response")) > 0) xvars <- xvars[-yvar]
    xlev <- if (length(xvars) > 0) {
        xlev <- lapply(m[xvars], levels)
        xlev[!sapply(xlev, is.null)]
    }
    ## drop the intercept column: lda works on centred predictors
    xint <- match("(Intercept)", colnames(x), nomatch=0)
    if(xint > 0) x <- x[, -xint, drop=FALSE]
    res <- lda.default(x, grouping, ...)
    res$terms <- Terms
    res$call <- match.call()
    res$contrasts <- attr(x, "contrasts")
    res$xlevels <- xlev
    attr(res, "na.message") <- attr(m, "na.message")
    if(!is.null(attr(m, "na.action"))) res$na.action <- attr(m, "na.action")
    res
}

lda.data.frame <- function(x, ...)
{
    ## Coerce the data frame to a numeric matrix and delegate to the
    ## matrix method, recording this call in the result.
    mat <- structure(data.matrix(x), class = "matrix")
    fit <- lda.matrix(mat, ...)
    fit$call <- match.call()
    fit
}

lda.Matrix <- function(x, ...)
{
    ## Method for Matrix-class objects: convert to a base matrix and
    ## delegate, recording this call in the result.
    fit <- lda.matrix(as.matrix(x), ...)
    fit$call <- match.call()
    fit
}

lda.matrix <- function(x, grouping, ...,
		       subset, na.action = na.fail)
{
    ## Matrix method: apply subset and NA handling to x and grouping in
    ## step, then fall through to lda.default via NextMethod.
    if(!missing(subset)) {
        x <- x[subset, , drop = FALSE]
        grouping <- grouping[subset]
    }
    if(!missing(na.action)) {
        ## bundle g and x so the na.action drops whole rows consistently
        dfr <- na.action(structure(list(g = grouping, x = x),
                                   class = "data.frame"))
        grouping <- dfr$g
        x <- dfr$x
    }
    res <- NextMethod("lda")
    res$call <- match.call()
    res
}

lda.default <-
  function(x, grouping, prior = proportions, tol = 1.0e-4,
           method = c("moment", "mle", "mve", "t"), CV=F,
           nu = 5, ...)
{
    ## Workhorse for linear discriminant analysis.
    ## x: predictor matrix; grouping: class factor; prior: class prior
    ## probabilities (default: observed proportions); tol: singularity
    ## tolerance; method: within-class covariance estimator ("moment",
    ## "mle", robust "mve", or multivariate-t "t" with nu df);
    ## CV = TRUE returns leave-one-out predictions instead of a fit.
    if(is.null(dim(x))) stop("x is not a matrix")
    x <- as.matrix(x)
    n <- nrow(x)
    p <- ncol(x)
    if(n != length(grouping))
        stop("nrow(x) and length(grouping) are different")
    g <- as.factor(grouping)
    lev <- lev1 <- levels(g)
    counts <- as.vector(table(g))
    if(any(counts == 0)) {
        ## drop empty levels so downstream group statistics are defined
        warning(paste("group(s)", paste(lev[counts == 0], collapse=" "),
                      "are empty"))
        lev1 <- lev[counts > 0]
        g <- factor(g, levels=lev1)
        counts <- as.vector(table(g))
    }
    proportions <- counts/n
    ng <- length(proportions)
    if(any(prior < 0) || round(sum(prior), 5) != 1) stop("invalid prior")
    if(length(prior) != ng) stop("prior is of incorrect length")
    names(prior) <- names(counts) <- lev1
    method <- match.arg(method)
    if(CV && !(method == "moment" || method == "mle"))
        stop(paste("Cannot use leave-one-out CV with method", method))
    ## group means, one row per group
    group.means <- tapply(x, list(rep(g, p), col(x)), mean)
    ## within-group standard deviations; near-zero means a constant column
    f1 <- sqrt(diag(var(x - group.means[g,  ])))
    if(any(f1 < tol))
        stop(paste("variable(s)",
                   paste(format((1:p)[f1 < tol]), collapse = " "),
                   "appear to be constant within groups"))
    # scale columns to unit variance before checking for collinearity
    scaling <- diag(1/f1,,p)
    if(method == "mve") {
        # adjust to "unbiased" scaling of covariance matrix
        cov <- n/(n-ng) * cov.rob((x - group.means[g,  ]) %*% scaling)$cov
        sX <- svd(cov, nu = 0)
        rank <- sum(sX$d > tol^2)
        if(rank < p) warning("variables are collinear")
        scaling <- scaling %*% sX$v[, 1:rank] %*%
            diag(sqrt(1/sX$d[1:rank]),,rank)
    } else if(method == "t") {
        ## iteratively reweighted estimation under a multivariate t
        if(nu <= 2) stop("nu must exceed 2")
        w <- rep(1, n)
        repeat {
            w0 <- w
            X <- x - group.means[g, ]
            sX <- svd(sqrt((1 + p/nu)*w/n) * X, nu=0)
            X <- X %*% sX$v %*% diag(1/sX$d,, p)
            w <- 1/(1 + drop(X^2 %*% rep(1, p))/nu)
            print(summary(w))
            group.means <- tapply(w*x, list(rep(g, p), col(x)), sum)/
                rep(tapply(w, g, sum), p)
            if(all(abs(w - w0) < 1e-2)) break
        }
        X <-  sqrt(nu/(nu-2)*(1 + p/nu)/n * w) * (x - group.means[g,  ]) %*% scaling
        X.s <- svd(X, nu = 0)
        rank <- sum(X.s$d > tol)
        if(rank < p) warning("variables are collinear")
        scaling <- scaling %*% X.s$v[, 1:rank] %*% diag(1/X.s$d[1:rank],,rank)
    } else {
        ## "moment" (unbiased) or "mle" scaling of the within covariance
        if(method == "moment") fac <- 1/(n-ng) else fac <- 1/n
        X <- sqrt(fac) * (x - group.means[g,  ]) %*% scaling
        X.s <- svd(X, nu = 0)
        rank <- sum(X.s$d > tol)
        if(rank < p) warning("variables are collinear")
        scaling <- scaling %*% X.s$v[, 1:rank] %*% diag(1/X.s$d[1:rank],,rank)
    }
    # now have variables scaled so that W is the identity
    if(CV) {
        ## leave-one-out predictions via the rank-one updating formulae
        x <- x %*% scaling
        dm <- group.means %*% scaling
        K <- if(method == "moment") ng else 0
        dist <- matrix(0, n, ng)
        for(i in 1:ng) {
            dev <- x - matrix(dm[i,  ], n, p, byrow = TRUE)
            dist[, i] <- apply(dev^2, 1, sum)
        }
        ind <- cbind(1:n, g)
        nc <- counts[g]
        cc <- nc/((nc-1)*(n-K))
        dist2 <- dist
        for(i in 1:ng) {
            dev <- x - matrix(dm[i,  ], n, p, byrow = TRUE)
            dev2 <- x - dm[g, ]
            tmp <- apply(dev*dev2, 1, sum)
            dist[, i] <- (n-1-K)/(n-K) * (dist2[, i] +  cc*tmp^2/(1 - cc*dist2[ind]))
        }
        dist[ind] <- dist2[ind] * (n-1-K)/(n-K) * (nc/(nc-1))^2 /
            (1 - cc*dist2[ind])
        dist <- 0.5 * dist - matrix(log(prior), n, ng, byrow=TRUE)
        dist <- exp(-(dist - min(dist, na.rm=TRUE)))
        cl <- factor(max.col(dist), levels=seq(along=lev), labels=lev)
        #  convert to posterior probabilities
        posterior <- dist/drop(dist %*% rep(1, length(prior)))
        dimnames(posterior) <- list(rownames(x), lev1)
        return(list(class = cl, posterior = posterior))
    }
    ## between-groups step: discriminants from the SVD of the scaled
    ## group means about their prior-weighted centre
    xbar <- apply(prior %*% group.means, 2, sum)
    if(method == "mle") fac <-  1/ng else fac <- 1/(ng - 1)
    X <- sqrt((n * prior)*fac) * scale(group.means, xbar, FALSE) %*% scaling
    X.s <- svd(X, nu = 0)
    rank <- sum(X.s$d > tol * X.s$d[1])
    scaling <- scaling %*% X.s$v[, 1:rank]
    if(is.null(dimnames(x)))
        dimnames(scaling) <- list(NULL, paste("LD", 1:rank, sep = ""))
    else {
        dimnames(scaling) <- list(colnames(x), paste("LD", 1:rank, sep = ""))
        dimnames(group.means)[[2]] <- colnames(x)
    }
    structure(list(prior = prior, counts = counts, means = group.means,
                   scaling = scaling, lev = lev, svd = X.s$d[1:rank],
                   N = n, call = match.call()),
              class = "lda")
}

predict.lda <- function(object, newdata, prior = object$prior, dimen,
			method = c("plug-in", "predictive", "debiased"), ...)
{
    ## Classify observations with a fitted "lda" object.
    ## newdata defaults to the training data; prior may override the
    ## fitted priors; dimen restricts the number of discriminants used;
    ## method selects plug-in, debiased, or predictive posteriors.
    ## Returns list(class, posterior, x = discriminant scores).
    if(!inherits(object, "lda")) stop("object not of class lda")
    if(!is.null(Terms <- object$terms)) { #
    # formula fit
        if(missing(newdata)) newdata <- model.frame(object)
        else {
            newdata <- model.frame(as.formula(delete.response(Terms)),
                                   newdata, na.action=function(x) x,
                                   xlev = object$xlevels)
        }
        x <- model.matrix(delete.response(Terms), newdata,
                          contrasts = object$contrasts)
        xint <- match("(Intercept)", colnames(x), nomatch=0)
        if(xint > 0) x <- x[, -xint, drop=FALSE]
    } else { #
    # matrix or data-frame fit
        if(missing(newdata)) {
            ## recover the training data from the original call,
            ## re-applying any subset and na.action
            if(!is.null(sub <- object$call$subset))
                newdata <- eval(parse(text=paste(deparse(object$call$x),"[",
                                      deparse(sub),",]")), sys.frame(sys.parent()))
            else newdata <- eval(object$call$x, sys.frame(sys.parent()))
            if(!is.null(nas <- object$call$na.action))
                newdata <- eval(call(nas, newdata))
        }
        if(is.null(dim(newdata)))
            dim(newdata) <- c(1, length(newdata))  # a row vector
        x <- as.matrix(newdata)		# to cope with dataframes
    }

    if(ncol(x) != ncol(object$means)) stop("wrong number of variables")
    if(length(colnames(x)) > 0 &&
      any(colnames(x) != dimnames(object$means)[[2]]))
         warning("Variable names in newdata do not match those in object")
    ng <- length(prior)
#   remove overall means to keep distances small
    means <- apply(prior*object$means, 2, sum)
    scaling <- object$scaling
    x <- scale(x, means, FALSE) %*% scaling
    dm <- scale(object$means, means, FALSE) %*% scaling
    method <- match.arg(method)
    if(missing(dimen)) dimen <- length(object$svd)
    else dimen <- min(dimen, length(object$svd))
    N <- object$N
    if(method == "plug-in") {
        ## Mahalanobis-type distances in discriminant space, shifted
        ## by min before exp() for numerical stability
        dm <- dm[, 1:dimen, drop=FALSE]
        dist <- matrix(0.5 * apply(dm^2, 1, sum) - log(prior), nrow(x),
                       length(prior), byrow = TRUE) - x[, 1:dimen, drop=FALSE] %*% t(dm)
        dist <- exp( -(dist - min(dist, na.rm=TRUE)))
    } else if (method == "debiased") {
        dm <- dm[, 1:dimen, drop=FALSE]
        dist <- matrix(0.5 * apply(dm^2, 1, sum), nrow(x), ng, byrow = TRUE) -
            x[, 1:dimen, drop=FALSE] %*% t(dm)
        dist <- (N - ng - dimen - 1)/(N - ng) * dist -
            matrix(log(prior) - dimen/object$counts , nrow(x), ng, byrow=TRUE)
        dist <- exp( -(dist - min(dist, na.rm=TRUE)))
    } else {                            # predictive
        dist <- matrix(0, nrow = nrow(x), ncol = ng)
        p <- ncol(object$means)
        # adjust to ML estimates of covariances
        X <- x * sqrt(N/(N-ng))
        for(i in 1:ng) {
            nk <- object$counts[i]
            dev <- scale(X, dm[i, ], FALSE)
            dev <- 1 + apply(dev^2, 1, sum) * nk/(N*(nk+1))
            dist[, i] <- prior[i] * (nk/(nk+1))^(p/2) * dev^(-(N - ng + 1)/2)
        }
    }
    ## normalise rows to posterior probabilities and pick the mode
    posterior <- dist / drop(dist %*% rep(1, ng))
    cl <- factor(max.col(posterior), levels=seq(along=object$lev),
                 labels=object$lev)
    dimnames(posterior) <- list(rownames(x), names(prior))
    list(class = cl, posterior = posterior, x = x[, 1:dimen, drop=FALSE])
}

print.lda <- function(x, ...)
{
    ## Print method for "lda" fits: show the call, priors, group means,
    ## discriminant coefficients and, when there is more than one
    ## discriminant, the proportion of between-group variance each
    ## explains.  Returns the object invisibly.
    cl <- x$call
    if(!is.null(cl)) {
        names(cl)[2] <- ""
        cat("Call:\n")
        dput(cl)
    }
    cat("\nPrior probabilities of groups:\n")
    print(x$prior, ...)
    cat("\nGroup means:\n")
    print(x$means, ...)
    cat("\nCoefficients of linear discriminants:\n")
    print(x$scaling, ...)
    svd <- x$svd
    names(svd) <- dimnames(x$scaling)[[2]]
    if(length(svd) > 1) {
        cat("\nProportion of trace:\n")
        print(round(svd^2/sum(svd^2), 4), ...)
    }
    invisible(x)
}

plot.lda <- function(x, panel = panel.lda, ..., cex=0.7,
                     dimen, abbrev = F, xlab = "LD1", ylab = "LD2")
{
    ## Plot the discriminant scores of the training data: pairs plot
    ## for >2 discriminants, scatterplot for 2, histograms for 1.
    ## Labels are the group levels (optionally abbreviated).
    panel.lda <- function(x, y, ...) {
        text(x, y, as.character(g.lda), cex=tcex)
    }
    if(!is.null(Terms <- x$terms)) { #
    # formula fit
        data <- model.frame(x)
        X <- model.matrix(delete.response(Terms), data)
        g <- model.extract(data, "response")
        xint <- match("(Intercept)", colnames(X), nomatch=0)
        if(xint > 0) X <- X[, -xint, drop=FALSE]
    } else { #
    # matrix or data-frame fit
        ## reconstruct x and grouping from the original call,
        ## re-applying any subset and na.action
        xname <- x$call$x
        gname <- x$call[[3]]
        if(!is.null(sub <- x$call$subset)) {
            X <- eval(parse(text=paste(deparse(xname),"[", deparse(sub),",]")),
                      sys.frame(sys.parent()))
            g <- eval(parse(text=paste(deparse(gname),"[", deparse(sub),"]")),
                      sys.frame(sys.parent()))
        } else {
            X <- eval(xname, sys.frame(sys.parent()))
            g <- eval(gname, sys.frame(sys.parent()))
        }
        if(!is.null(nas <- x$call$na.action)) {
            df <- data.frame(g = g, X = X)
            df <- eval(call(nas, df))
            g <- df$g
            X <- df$X
        }
    }
    if(abbrev) levels(g) <- abbreviate(levels(g), abbrev)
    ## NOTE(review): the labels/cex are handed to the panel function by
    ## assigning g.lda and tcex -- an S-era scoping workaround.
    assign("g.lda", g)
    assign("tcex", cex)
    means <- apply(x$means, 2, mean)
    X <- scale(X, means, FALSE) %*% x$scaling
    if(!missing(dimen) && dimen < ncol(X)) X <- X[, 1:dimen, drop=FALSE]
    if(ncol(X) > 2) {
        pairs(X, panel=panel, ...)
    } else if(ncol(X) == 2)  {
        eqscplot(X[, 1:2], xlab=xlab, ylab=ylab, type="n", ...)
        panel(X[, 1], X[, 2], ...)
    } else ldahist(X[,1], g, xlab=xlab, ...)
    invisible(NULL)
}

ldahist <-
function(data, g, nbins = 25, h, x0 = -h/1000, breaks,
	 xlim = range(breaks), ymax = 0, width,
         type = c("histogram", "density", "both"), sep = (type != "density"),
         col = 5,
	 xlab = deparse(substitute(data)), bty = "n", ...)
{
    ## Histograms and/or density estimates of `data` split by group `g`,
    ## on a common x-scale; one panel per group when sep = TRUE.
    xlab   # force the default label before `data` is modified
    type <- match.arg(type)
    ## BUG FIX: the original did data <- data[!is.na(data)] and THEN
    ## g <- g[!is.na(data)], so the mask was recomputed on the already-
    ## filtered vector and g was never subset, leaving data and g out
    ## of step whenever data contained NAs.  Use one mask for both.
    ok <- !is.na(data)
    data <- data[ok]
    g <- g[ok]
    counts <- table(g)
    groups <- names(counts)[counts > 0]
    if(missing(breaks)) {
        ## default breaks: equal bins of width h anchored just left of x0
        if(missing(h)) h <- diff(pretty(data, nbins))[1]
        first <- floor((min(data) - x0)/h)
        last <- ceiling((max(data) - x0)/h)
        breaks <- x0 + h * c(first:last)
    }
    if(type=="histogram" || type=="both") {
        if(any(diff(breaks) <= 0)) stop("breaks must be strictly increasing")
        if(min(data) < min(breaks) || max(data) > max(breaks))
            stop("breaks do not cover the data")
        ## per-group density-scaled bin heights, tracking a common ymax
        est <- vector("list", length(groups))
        for (grp in groups){
            bin <- cut(data[g==grp], breaks, include.lowest = TRUE)
            est1 <- tabulate(bin, length(levels(bin)))
            est1 <- est1/(diff(breaks) * length(data[g==grp]))
            ymax <- max(ymax, est1)
            est[[grp]] <- est1
        }
    }
    if(type=="density" || type == "both"){
        xd <- vector("list", length(groups))
        for (grp in groups){
            if(missing(width)) width <- width.SJ(data[g==grp])
            xd1 <- density(data[g==grp], n=200, width=width,
                           from=xlim[1], to=xlim[2])
            ymax <- max(ymax, xd1$y)
            xd[[grp]] <- xd1
        }
    }
    if(!sep) plot(xlim, c(0, ymax), type = "n", xlab = xlab, ylab = "",
                  bty = bty)
    else {
        ## one panel per group, restoring the layout on exit
        oldpar <- par(mfrow=c(length(groups), 1))
        on.exit(par(oldpar))
    }
    for (grp in groups) {
        if(sep) plot(xlim, c(0, ymax), type = "n",
                     xlab = paste("group", grp), ylab = "", bty = bty)
        if(type=="histogram" || type=="both") {
            n <- length(breaks)
            rect(breaks[-n], 0, breaks[-1], est[[grp]], col = col, ...)
        }
        if(type=="density" || type == "both") lines(xd[[grp]])
    }
    invisible()
}

pairs.lda <- function(x, labels = colnames(x), panel = panel.lda,
                      dimen, abbrev = FALSE, ..., cex = 0.7,
                      type = c("std", "trellis"))
{
    ## Pairs plot of the discriminant scores of the training data,
    ## labelled by group.  type = "trellis" is not available in R and
    ## falls back to the standard pairs plot with a warning.
    panel.lda <- function(x,y, ...) {
        text(x, y, as.character(g.lda), cex=tcex, ...)
    }
    type <- match.arg(type)
    if(type == "trellis") {
        ## BUG FIX: the original called warn(), which does not exist in
        ## R; warning() is the correct function.
        warning("type = trellis is not supported in R")
        type <- "std"
    }
    if(!is.null(Terms <- x$terms)) { #
    # formula fit
        data <- model.frame(x)
        X <- model.matrix(delete.response(Terms), data)
        g <- model.extract(data, "response")
        xint <- match("(Intercept)", colnames(X), nomatch=0)
        if(xint > 0) X <- X[, -xint, drop=FALSE]
    } else { #
    # matrix or data-frame fit
        ## reconstruct x and grouping from the original call,
        ## re-applying any subset and na.action
        xname <- x$call$x
        gname <- x$call[[3]]
        if(!is.null(sub <- x$call$subset)) {
            X <- eval(parse(text=paste(deparse(xname),"[", deparse(sub),",]")),
                      sys.frame(sys.parent()))
            g <- eval(parse(text=paste(deparse(gname),"[", deparse(sub),"]")),
                      sys.frame(sys.parent()))
        } else {
            X <- eval(xname, sys.frame(sys.parent()))
            g <- eval(gname, sys.frame(sys.parent()))
        }
        if(!is.null(nas <- x$call$na.action)) {
            df <- data.frame(g = g, X = X)
            df <- eval(call(nas, df))
            g <- df$g
            X <- df$X
        }
    }
    g <- as.factor(g)
    if(abbrev) levels(g) <- abbreviate(levels(g), abbrev)
    ## S-era scoping workaround: hand labels/cex to the panel function
    assign("g.lda", g)
    assign("tcex", cex)
    means <- apply(x$means, 2, mean)
    X <- scale(X, means, FALSE) %*% x$scaling
    if(!missing(dimen) && dimen < ncol(X)) X <- X[, 1:dimen]
    if(type=="std") pairs.default(X, panel=panel, ...)
    else {
        print(splom(~X, groups = g, panel=panel.superpose,
                    key = list(
                    text=list(levels(g)),
                    points = Rows(trellis.par.get("superpose.symbol"),
                    seq(along=levels(g))),
                    columns = min(5, length(levels(g)))
                    )
                    ))
    }
    invisible(NULL)
}

model.frame.lda <-
function(formula, data = NULL, na.action = NULL, ...)
{
    ## Rebuild the model frame of a formula-fitted "lda" object by
    ## re-evaluating its call as model.frame(), after stripping the
    ## lda-specific arguments.
    oc <- formula$call
    names(oc)[2:3] <- c("formula", "data")
    oc$prior <- oc$tol <- oc$method <- oc$CV <- oc$nu <- NULL
    oc[[1]] <- as.name("model.frame")
    if(length(data)) {
        ## use the data supplied here rather than the original
        oc$data <- substitute(data)
        eval(oc, sys.frame(sys.parent()))
    }
    else eval(oc, list())
}


# file MASS/lm.gls.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
lm.gls <- function(formula, data, W, subset, na.action,
        inverse = FALSE, method = "qr",
        model = FALSE, x = FALSE, y = FALSE, contrasts = NULL, ...)
{
    ## Fit a linear model by generalised least squares with weight
    ## matrix W; with inverse = TRUE, W is the covariance matrix of the
    ## errors.  (FIX: default was the reassignable shorthand F.)
    call <- match.call()
    m <- match.call(expand = FALSE)
    m$W <- m$inverse <- m$method <- m$model <- m$x <-
        m$y <- m$contrasts <- m$... <- NULL
    m[[1]] <- as.name("model.frame")
    m <- eval(m, sys.frame(sys.parent()))
    if(method == "model.frame") return(m)
    Terms <- attr(m, "terms")
    Y <- model.extract(m, response)
    X <- model.matrix(Terms, m, contrasts)
    ## record predictor factor levels for later prediction
    xvars <- as.character(attr(Terms, "variables"))[-1]
    if ((yvar <- attr(Terms, "response")) > 0) xvars <- xvars[-yvar]
    xlev <- if (length(xvars) > 0) {
        xlev <- lapply(m[xvars], levels)
        xlev[!sapply(xlev, is.null)]
    }
    n <- nrow(X)
    if(any(dim(W) != c(n, n))) stop("dim(W) is not correct")
    ## whiten the problem: A = W^(1/2) (or W^(-1/2) when W is a
    ## covariance matrix) from the eigendecomposition of W
    eW <- eigen(W, TRUE)
    d <- eW$values
    if(any(d <= 0)) stop("W is not positive definite")
    A <- diag(d^ifelse(inverse, -0.5, 0.5)) %*% t(eW$vector)
    ## BUG FIX: `method` was passed positionally, but the third
    ## positional argument of R's lm.fit() is `offset`; pass by name.
    fit <- lm.fit(A %*% X, A %*% Y, method = method, ...)
    fit$terms <- Terms
    fit$call <- call
    if(model) fit$model <- m
    if(x) fit$x <- X
    if(y) fit$y <- Y
    attr(fit, "na.message") <- attr(m, "na.message")
    if(!is.null(attr(m, "na.action"))) fit$na.action <- attr(m, "na.action")
    class(fit) <- c("lm.gls", class(fit))
    fit$xlevels <- xlev
    fit$contrasts <- attr(X, "contrasts")
    fit
}
# file MASS/lm.ridge.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
lm.ridge <- function(formula, data, subset, na.action,
    lambda = 0, model = FALSE, x = FALSE, y = FALSE, contrasts = NULL, ...)
{
    ## Ridge regression for a vector of ridge constants `lambda`,
    ## computed via the SVD of the centred, scaled design matrix.
    ## Returns coefficients on the scaled scale, GCV values and the
    ## HKB and L-W choices of lambda, as class "ridgelm".
    call <- match.call()
    m <- match.call(expand = FALSE)
    m$model <- m$x <- m$y <- m$contrasts <- m$... <- m$lambda <- NULL
    m[[1]] <- as.name("model.frame")
    m <- eval(m, sys.frame(sys.parent()))
    Terms <- attr(m, "terms")
    Y <- model.extract(m, response)
    X <- model.matrix(Terms, m, contrasts)
    n <- nrow(X); p <- ncol(X)
    ## centre X and Y when there is an intercept (it is not penalised)
    if(Inter <- attr(Terms, "intercept"))
    {
        Xm <- apply(X[, -Inter], 2, mean)
        Ym <- mean(Y)
        p <- p - 1
        X <- X[, -Inter] - rep(Xm, rep(n, p))
        Y <- Y - Ym
    } else Ym <- Xm <- NA
    ## scale columns to root-mean-square one
    Xscale <- drop(rep(1/n, n) %*% X^2)^0.5
    X <- X/rep(Xscale, rep(n, p))
    Xs <- svd(X)
    rhs <- t(Xs$u) %*% Y
    d <- Xs$d
    ## least-squares fit, used for the HKB and L-W estimates of lambda
    lscoef <-  Xs$v %*% (rhs/d)
    lsfit <- X %*% lscoef
    resid <- Y - lsfit
    s2 <- sum(resid^2)/(n - p - Inter)
    HKB <- (p-2)*s2/sum(lscoef^2)
    LW <- (p-2)*s2*n/sum(lsfit^2)
    ## ridge solutions for every lambda at once: shrink along the
    ## singular directions by d/(d^2 + lambda)
    k <- length(lambda)
    div <- d^2 + rep(lambda, rep(p,k))
    a <- drop(d*rhs)/div
    dim(a) <- c(p, k)
    coef <- Xs$v %*% a
    dimnames(coef) <- list(names(Xscale), format(lambda))
    ## generalised cross-validation score for each lambda
    GCV <- apply((Y - X %*% coef)^2, 2, sum)/
        (n-apply(matrix(d^2/div,p), 2, sum))^2
    res <- list(coef = drop(coef), scales = Xscale,
                Inter = Inter, lambda = lambda, ym = Ym, xm = Xm,
                GCV = GCV, kHKB = HKB, kLW = LW)
    class(res) <- "ridgelm"
    res
}

print.ridgelm <- function(x, ...)
{
    ## Print ridge coefficients back on the original (unscaled) scale,
    ## reconstructing the intercept when one was removed from the fit.
    coefs <- t(as.matrix(x$coef / x$scales))
    if(x$Inter) {
        intercept <- x$ym - coefs %*% x$xm
        coefs <- cbind(Intercept = intercept, coefs)
    }
    print(drop(coefs), ...)
}

select <- function(obj) UseMethod("select")

select.ridgelm <- function(obj)
{
    ## Report the HKB and L-W ridge constants and, when GCV values are
    ## available, the lambda value(s) minimising GCV.
    cat("modified HKB estimator is", format(obj$kHKB), "\n")
    cat("modified L-W estimator is", format(obj$kLW), "\n")
    gcv <- obj$GCV
    if(length(gcv) > 0) {
        best <- seq(along = gcv)[gcv == min(gcv)]
        cat("smallest value of GCV  at",
            format(obj$lambda[best]), "\n")
    }
}

plot.ridgelm <- function(x, ...)
{
    ## Trace plot of the ridge coefficients against lambda.
    ## FIX: the signature lacked "...", so it was inconsistent with the
    ## plot() generic and graphical parameters could not be passed
    ## through; adding it is backward-compatible.
    matplot(x$lambda, t(x$coef), type = "l", ...)
}
# file MASS/loglm.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
denumerate <- function(x) UseMethod("denumerate")

renumerate <- function(x) UseMethod("renumerate")

denumerate.formula <- function(x)
{
    ## Recursively replace numbers (and names beginning with a digit)
    ## used as variables in a formula by the syntactic names .v1, .v2,
    ## ... so that terms() can process it.
    if(length(x) == 1) {
        if(mode(x) == "numeric" ||
           (mode(x) == "name" &&
            any(substring(as.character(x), 1, 1) == as.character(1:9))))
            x <- as.name(paste(".v", x, sep = ""))
    } else {
        x[[2]] <- Recall(x[[2]])
        ## skip the right operand of ^: it is a power, not a variable
        if(length(x) == 3 && x[[1]] != as.name("^"))
            x[[3]] <- Recall(x[[3]])
    }
    x
}

renumerate.formula <- function(x)
{
    ## Inverse of denumerate.formula: strip the ".v" prefix from names
    ## it created, recursing through the formula in the same pattern.
    if(length(x) == 1) {
        if(mode(x) == "name"
           && nchar(xx <- as.character(x)) > 2
           && substring(xx, 1, 2) == ".v")
            x <- as.name(substring(xx, 3))
    } else {
        x[[2]] <- Recall(x[[2]])
        ## skip the right operand of ^, as denumerate did
        if(length(x) == 3 && x[[1]] != as.name("^"))
           x[[3]] <- Recall(x[[3]])
    }
    x
}

loglm <-
  function(formula, data = sys.frame(sys.parent()), subset, na.action, ...)
{
    ## Front end for log-linear models: build a model frame for
    ## data-frame input, or prepare a frequency table (whose dimensions
    ## the formula may reference by number), then delegate to loglm1().
    ## NOTE(review): stores .call and .formula in the global
    ## environment -- a side effect, presumably for use by
    ## update/anova methods; verify before changing.
    assign(".call", match.call(), envir=.GlobalEnv)
    if(missing(data) || inherits(data, "data.frame")) {
        m <- match.call(expand = FALSE)
        m$... <- NULL
        m[[1]] <- as.name("model.frame")
        data <- eval(m, sys.frame(sys.parent()))
        assign(".formula", as.formula(attr(data, "terms")), envir=.GlobalEnv)
    } else {
        ## table input: convert numeric dimension references to
        ## syntactic names so terms() can handle the formula
        trms <- attr(data, "terms") <- terms(formula <- denumerate(formula))
        assign(".formula", renumerate(as.formula(trms)), envir=.GlobalEnv)
    }
    loglm1(formula, data, ...)
}

loglm1 <- function(formula, data, ...) UseMethod("loglm1", data)

loglm1.crosstabs <-
function(formula, data, ...)
{
    ## Method for "crosstabs" objects: strip the crosstabs-specific
    ## attributes and class so the default (array) method takes over.
    attr(data, "marginals") <- attr(data, "call") <- class(data) <- NULL
    NextMethod("loglm1")
}

loglm1.data.frame <-
function(formula, data, ...)
{
    ## Method for data-frame input: convert the frequency column plus
    ## classifying factors into a contingency-table array, then call
    ## the default method.  Cells absent from the data are fitted as
    ## structural zeros via the `start` argument.
    trms <- attr(data, "terms")
    if(is.null(trms)) stop("data has no trms attribute")
    if(attr(trms, "response") == 0) stop("Formula specifies no response")
    resp <- match(as.character(attr(trms, "variables"))[1+attr(trms, "response")],
                  names(data))
    fac <- data.frame(lapply(data[,  - resp], as.factor))
    rsp <- data[, resp]
    tab <- table(fac)
    if(max(tab) > 1) {
#
# and extra factor needed for repeated frequencies
#
        i <- do.call("order", rev(fac))
        fac <- fac[i,  ]
        rsp <- rsp[i]
        fac$.Within. <-
            factor(unlist(sapply(tab,
                                 function(x) if(x > 0) seq(x) else NULL)))
    }
    dn <- lapply(fac, levels)
    dm <- sapply(dn, length)
    ## fill with -1 so unobserved cells can be identified below
    data <- structure(array(-1, dm, dn), terms = trms)
    data[do.call("cbind", lapply(fac, as.numeric))] <- rsp
    ## start is 0 for unobserved cells, 1 elsewhere (structural zeros)
    st <- array(as.numeric(data >= 0), dm, dn)
    data[data < 0] <- 0
    loglm1.default(formula, data, ..., start = st)
}

## Default method: `data` is a complete contingency table carrying a
## "terms" attribute.  Translates the model terms into the set of maximal
## margins to fit and calls stats::loglin() (iterative proportional
## fitting).
##
## start:  IPF starting values; zeros mark structural zeros.
## fitted: return fitted values?  keep.frequencies: keep observed table?
## param:  return log-linear parameter estimates?
## eps, iter, print are passed through to loglin().
loglm1.default <-
function(formula, data, start = rep(1, length(data)), fitted = FALSE,
	keep.frequencies = fitted, param = TRUE, eps =
	1/10, iter = 40, print = FALSE, ...)
{
    trms <- attr(data, "terms")
    ## Bug fix: message used to say "trms" (the local variable); the
    ## attribute is called "terms".
    if(is.null(trms)) stop("data has no terms attribute")
    factors <- attr(trms, "factors") > 0
    if((r <- attr(trms, "response"))) factors <- factors[ - r,  , drop = FALSE]
    nt <- ncol(factors)
    ## Order terms by size and drop any margin wholly contained in a later
    ## (higher-order) one: only maximal margins need fitting.
    fo <- order(apply(factors, 2, sum))
    factors <- factors[, fo, drop = FALSE]
    ff <- crossprod(factors)
    keep <- rep(TRUE, nt)
    j <- 0
    while((j <- j + 1) < nt) keep[j] <- ff[j, j] > max(ff[j, (j + 1):nt])
    factors <- factors[, keep, drop = FALSE]
    ## Map variable names (".vN" aliases or dimnames names) onto the
    ## dimension numbers of the table, as loglin() expects.
    ldim <- length(dim(data))
    nnames <- paste(".v", 1:ldim, sep = "")
    which <- structure(1:ldim, names = nnames)
    if(!is.null(anames <- names(dimnames(data))))
        which <- c(which, structure(which, names = anames))
    margins <- apply(factors, 2, function(x, which, nam)
                     as.vector(which[nam[x]]), which, rownames(factors))
    if(is.matrix(margins))
        margins <- as.list(data.frame(margins))
    else margins <- structure(as.list(margins), names = names(margins))
    Fit <- loglin(data, margins, start = start, fit = fitted,
                  param = param, eps = eps, iter = iter, print = print)
    ## loglm() (the usual entry point) leaves the original call and formula
    ## in the globals .call/.formula; pick them up when present.
    if(exists(".formula")) {
        Fit$call <- .call
        Fit$formula <- .formula
    }
    class(Fit) <- "loglm"
    if(keep.frequencies) Fit$frequencies <- structure(data, terms = NULL)
    if(fitted) {
        names(Fit)[match("fit", names(Fit))] <- "fitted"
        attr(Fit$fitted, "terms") <- NULL
    }
    Fit$deviance <- Fit$lrt
    Fit$nobs <- length(data)
    ## Each structural zero removes one cell, hence one residual df.
    Fit$df <- Fit$df - sum(start == 0)
    Fit
}


## Likelihood-ratio comparison of nested "loglm" fits.  With no models in
## `...` the single object is returned unchanged; otherwise the models are
## sorted by decreasing residual df and adjacent pairs are compared, ending
## with the saturated model (deviance 0, df 0).
anova.loglm <- function(object, ..., test = c("Chisq", "chisq", "LR"))
{
    test <- match.arg(test)  # all choices give the LR chi-squared test
    margs <- function(...) nargs()
    if(!(k <- margs(...))) return(object)
    objs <- list(object, ...)
    dfs <- sapply(objs, "[[", "df")
    o <- order( - dfs)       # most parsimonious model first
    objs <- objs[o]
    dfs <- c(dfs[o], 0)
    forms <- lapply(objs, formula)
    dev <- c(sapply(objs, "[[", "lrt"), 0)
    ## Bug fix: the last column label was missing its closing parenthesis
    ## ("P(> Delta(Dev)").
    M <- array(0, c(k + 2, 5),
               list(c(paste("Model", 1:(k + 1)), "Saturated"),
                    c("Deviance", "df", "Delta(Dev)", "Delta(df)", "P(> Delta(Dev))")))
    M[, 1] <- dev
    M[, 2] <- dfs
    M[-1, 3] <- dev[1:(k + 1)] - dev[2:(k + 2)]
    M[-1, 4] <- dfs[1:(k + 1)] - dfs[2:(k + 2)]
    M[-1, 5] <- 1 - pchisq(M[-1, 3], M[-1, 4])
    res <- structure(M, formulae = forms)
    class(res) <- "anova.loglm"
    res
}

## Print method for the table produced by anova.loglm(): lists the compared
## model formulae, then the deviance table with right-justified columns.
print.anova.loglm <- function(x, ...)
{
    ## Pad strings on the left so each lines up with the widest entry.
    rjustify <- function(str) {
        m <- max(n <- nchar(str))
        blanks <- format(c("", str[n == m][1]))[1]
        paste(substring(blanks, 0, m - n), str, sep = "")
    }
    y <- x
    y[, 5] <- round(y[, 5], 5)
    R <- array("", dim(x), dimnames(x))
    ## Format column by column, justifying header and body together.
    for(j in 1:5) {
        colj <- rjustify(c(colnames(x)[j], format(y[, j])))
        R[, j] <- colj[-1]
        colnames(R)[j] <- colj[1]
    }
    ## The first model has nothing to be compared with: blank its deltas.
    R[1, 3:5] <- ""
    ## NOTE(review): reorders the components of a two-sided formula so that
    ## deparse() displays "lhs ~ rhs" -- relies on call subsetting; confirm.
    pform <- function(form)
        if(length(form) == 2) form else form[c(2, 1, 3)]
    forms <- attr(x, "formulae")
    cat("LR tests for hierarchical log-linear models\n\n")
    for(i in seq(along=forms))
        cat(paste("Model ", i, ":\n", sep = ""),
            deparse(pform(forms[[i]])), "\n")
    cat("\n")
    print(R, quote = FALSE)
    invisible(x)
}

## Print method for "loglm" objects: the call followed by a small table of
## the likelihood-ratio and Pearson chi-squared goodness-of-fit statistics.
print.loglm <- function(x, ...)
{
    cat("Call:\n")
    print(x$call)
    ## p-value helper; a saturated fit (df == 0) is reported as p = 1.
    pval <- function(stat)
        if(x$df > 0) 1 - pchisq(stat, x$df) else 1
    tbl <- rbind(c(x$lrt, x$df, pval(x$lrt)),
                 c(x$pearson, x$df, pval(x$pearson)))
    dimnames(tbl) <- list(c("Likelihood Ratio", "Pearson"),
                          c("X^2", "df", "P(> X^2)"))
    cat("\nStatistics:\n")
    print(tbl)
    invisible(x)
}

## Summary of a "loglm" fit: the goodness-of-fit statistics and, when
## fitted = TRUE, a display of observed (expected) counts.
summary.loglm <- function(object, fitted = FALSE, ...)
{
    ## Saturated fit (df == 0) gets p = 1.
    pval <- function(stat)
        if(object$df > 0) 1 - pchisq(stat, object$df) else 1
    ts.array <- rbind(c(object$lrt, object$df, pval(object$lrt)),
                      c(object$pearson, object$df, pval(object$pearson)))
    dimnames(ts.array) <- list(c("Likelihood Ratio", "Pearson"),
                               c("X^2", "df", "P(> X^2)"))
    if(fitted) {
        ## Bug fix: the test used to read object$freqencies (typo), which
        ## never matched "frequencies", so a re-fit happened even when the
        ## frequencies were already stored.  Full component names are used
        ## here instead of partial $ matching.
        if(is.null(object$fitted) || is.null(object$frequencies)) {
            cat("Re-fitting to find fitted values\n")
            object <- update(object, fitted = TRUE, keep.frequencies = TRUE)
        }
        fit <- format(round(object$fitted, 1))
        OE <- array(paste(format(object$frequencies), " (", fit, ")", sep = ""),
                    dim(fit), dimnames(object$frequencies))
    }  else OE <- NULL
    structure(list(formula = formula(object), tests = ts.array, oe = OE),
              class = "summary.loglm")
}

## Print method for summary.loglm objects: formula, test statistics and,
## when present, the observed (expected) table.
print.summary.loglm <- function(x, ...)
{
    cat("Formula:\n")
    print(formula(x))
    cat("\nStatistics:\n")
    print(x$tests)
    oe <- x$oe
    if(!is.null(oe)) {
        cat("\nObserved (Expected):\n")
        print(oe, quote = FALSE)
    }
    invisible(x)
}

## Update method for "loglm" fits: rebuilds the original call with a
## possibly revised formula and/or replacement arguments, then re-evaluates
## it in the caller's frame.
update.loglm <- function (object, formula., ...)
{
    ## Local setdiff predating base::setdiff -- NOTE(review): it is not
    ## actually used below; apparently kept for S compatibility.
    setdiff <- function(x, y)
        if(length(x) == 0 || length(y) == 0) x else x[match(x, y, 0) == 0]

    if (is.null(call <- object$call))
        stop("object has no call component.  Updating not possible")
    if (fix <- !missing(formula.)) {
        ## Work on the ".vN"-encoded scale so numeric margin names survive
        ## update.formula(); decode again after the re-fit.
        object$formula <- denumerate(object$formula)
        formula. <- denumerate(formula.)
        call$formula <- update.formula(formula(object), formula.)
    }
    extras <- match.call(expand.dots = FALSE)$...
    if (length(extras) > 0) {
        existing <- !is.na(match(names(extras), names(call)))
        ## do these individually to allow NULL to remove entries.
        for (a in names(extras)[existing]) call[[a]] <- extras[[a]]
        if (any(!existing)) {
            call <- c(as.list(call), extras[!existing])
            call <- as.call(call)
        }
    }
    result <- eval(call, sys.frame(sys.parent()))
    if (fix) {
        ## Restore the human-readable (numeric-margin) formula on both the
        ## result and its recorded call.
        form <- renumerate(result$formula)
        result$call$formula <- unclass(result$formula <- form)
    }
    result
}

## Fitted (expected) frequencies of a "loglm" fit, re-fitting on demand
## when they were not stored with the object.
fitted.loglm <- function(object, ...)
{
    values <- object$fit
    if(is.null(values)) {
        cat("Re-fitting to get fitted values\n")
        values <- update(object, fitted = TRUE, keep.frequencies = FALSE)$fitted
    }
    unclass(values)
}

## Residuals for a "loglm" fit: deviance (default), Pearson, or response.
## Cells with zero expected count keep the raw observed - expected value.
residuals.loglm <-
function(object, type = c("deviance", "pearson", "response"))
{
    type <- match.arg(type)
    if(is.null(object$fit) || is.null(object$freq)) {
        cat("Re-fitting to get frequencies and fitted values\n")
        object <- update(object, fitted = TRUE, keep.frequencies = TRUE)
    }
    obs <- object$freq
    fitv <- object$fit
    out <- obs - fitv              # response scale everywhere by default
    pos <- fitv > 0
    o <- obs[pos]
    e <- fitv[pos]
    ## (o == 0) guards log(0) in the deviance formula.
    out[pos] <-
        switch(type,
               deviance = sign(o - e) *
                 sqrt(2 * abs(o * log((o + (o == 0))/e) - (o - e))),
               pearson = (o - e)/sqrt(e),
               response = o - e)
    out
}

## Estimated log-linear parameters, re-fitting with param = TRUE when they
## were not kept on the original fit.
coef.loglm <- function(object, ...)
{
    cf <- object$param
    if(is.null(cf)) {
        cat("Re-fitting to calculate missing coefficients\n")
        cf <- update(object, param = TRUE)$param
    }
    cf
}
# file MASS/logtrans.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
## Generic for the log(y + alpha) profile-likelihood transform (cf. boxcox).
logtrans <- function(object, ...) UseMethod("logtrans")

## Profile log-likelihood for the transformation log(y + alpha), analogous
## to boxcox(): evaluates (and optionally plots, with an asymptotic 95%
## interval) the likelihood over a grid of alpha values.
## NOTE(review): the default for `alpha` uses `y`, which is only defined
## later in the body -- this relies on lazy evaluation of arguments.
logtrans.default<-
function(object, ..., alpha = seq(0.5, 6, by = 0.25) - min(y),
	plotit = length(dev.list()) > 0, interp = (plotit && (m <
	100)), xlab = "alpha", ylab = "log Likelihood")
{
    if(is.null(object$y) || is.null(object$qr))
        stop(paste(deparse(substitute(object)),
                   "does not have both `qr' and `y' components"))
    y <- object$y
    n <- length(y)
    if(any(y + min(alpha) <= 0))
        stop("Response variable must be positive after additions")
    xqr <- object$qr
    xl <- loglik <- as.vector(alpha)
    m <- length(xl)
    ## Profile log-likelihood at each alpha: RSS from the fixed QR
    ## decomposition plus the log-Jacobian of the transform.
    for(i in 1:m) {
        rs <- qr.resid(xqr, yt <- log(y + alpha[i]))
        loglik[i] <-  - n/2 * log(sum(rs^2)) - sum(yt)
    }
    ## Smooth the profile for display when the grid is coarse.
    if(interp) {
        sp <- spline(alpha, loglik, n = 100)
        xl <- sp$x
        loglik <- sp$y
        m <- length(xl)
    }
    if(plotit) {
        ## Maximum and the chi-squared(1) 95% likelihood-ratio cutoff.
        mx <- (1:m)[loglik == max(loglik)][1]
        Lmax <- loglik[mx]
        lim <- Lmax - qchisq(19/20, 1)/2
        plot(xl, loglik, xlab = xlab, ylab = ylab, type
             = "l", ylim = range(loglik, lim))
        plims <- par("usr")
        abline(h = lim, lty = 3)
        y0 <- plims[3]
        ## Offsets in user units, scaled by the plot's physical size.
        scal <- (1/10 * (plims[4] - y0))/par("pin")[2]
        scx <- (1/10 * (plims[2] - plims[1]))/par("pin")[1]
        text(xl[1] + scx, lim + scal, " 95%")
        la <- xl[mx]
        if(mx > 1 && mx < m)
            segments(la, y0, la, Lmax, lty = 3)
        ## Mark where the profile crosses the cutoff (linear interpolation).
        ind <- range((1:m)[loglik > lim])
        if(loglik[1] < lim) {
            i <- ind[1]
            x <- xl[i - 1] + ((lim - loglik[i - 1]) *
                              (xl[i] - xl[i - 1]))/(loglik[i] - loglik[i - 1])
            segments(x, y0, x, lim, lty = 3)
        }
        if(loglik[m] < lim) {
            i <- ind[2] + 1
            x <- xl[i - 1] + ((lim - loglik[i - 1]) *
                              (xl[i] - xl[i - 1]))/(loglik[i] - loglik[i - 1])
            segments(x, y0, x, lim, lty = 3)
        }
    }
    invisible(list(x = xl, y = loglik))
}
## Formula interface: fit the aov keeping the y and qr components the
## default method needs, then dispatch onward via NextMethod.
"logtrans.formula"<-
function(object, data = NULL, ...)
{
  object <- aov(object, data = data, y = TRUE, qr = TRUE)
  invisible(NextMethod("logtrans"))
}

## lm method: ensure the fit carries y and qr components (re-fitting if
## necessary), then fall through to the default method.
logtrans.lm <- function(object, ...)
{
    if(is.null(object$y) || is.null(object$qr))
        object <- update(object, y = TRUE, qr = TRUE)
    invisible(NextMethod("logtrans"))
}
# file MASS/max.col.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
## Index of the maximum entry in each row of a matrix, computed in C.
## NOTE(review): masks base::max.col and uses the pre-namespace
## is.loaded/symbol.C interface -- legacy code; confirm the VR_max_col
## symbol is registered before relying on this.
max.col <- function(m)
{
    if(!is.loaded(symbol.C("VR_max_col")))
        stop("Compiled code has not been dynamically loaded")
    m <- as.matrix(m)
    n <- nrow(m)
    .C("VR_max_col",
       as.double(m),
       as.integer(n),
       as.integer(ncol(m)),
       rmax = integer(n),
       NAOK=TRUE
       )$rmax
}

# file MASS/mca.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
## Multiple correspondence analysis of a data frame of factors.  Returns
## row scores (rs), category scores (cs), a second set of row coordinates
## (fs) and the associated singular values (d).
mca <- function(df, nf = 2, abbrev = FALSE)
{
  ## 0/1 indicator (dummy) matrix for one factor.
  class.ind <- function(cl)
  {
    n <- length(cl); cl <- as.factor(cl)
    x <- matrix(0, n, length(levels(cl)))
    x[(1:n) + n * (unclass(cl) - 1)] <- 1
    dimnames(x) <- list(names(cl), levels(cl))
    x
  }
  if(!all(unlist(lapply(df, is.factor))))
    stop("All variables must be factors")
  n <- nrow(df); p <- length(df)
  ## Full indicator matrix, one block of columns per factor.
  G <- as.matrix(do.call("data.frame", c(lapply(df, class.ind),
                                         check.names=FALSE)))
  ## Category totals, then the scaled matrix whose SVD gives the solution.
  Dc <- drop((rep(1, n)) %*% G)
  X <- t(t(G)/(sqrt(p*Dc)))
  X.svd <- svd(X)
  ## Skip the trivial first dimension; keep the next nf.
  sec <- 1 + (1:nf)
  rs <- X %*% X.svd$v[, sec]/p
  cs <- diag(1/(sqrt(p*Dc))) %*% X.svd$v[, sec]
  ## NOTE(review): rep(..., c(n, n)) replicates only two blocks, which
  ## assumes nf == 2 -- confirm before using other values of nf.
  fs <- X.svd$u[, sec]/rep(p*X.svd$d[sec], c(n,n))
  dimnames(rs) <- list(row.names(df), as.character(1:nf))
  dimnames(fs) <- dimnames(rs)
  ## Optionally label categories by bare level names rather than
  ## "variable.level".
  varnames <- if(abbrev) unlist(lapply(df, levels))
              else colnames(G)
  dimnames(cs) <- list(varnames, as.character(1:nf))
  structure(list(rs=rs, cs=cs, fs=fs, d=X.svd$d[sec], p=p,
                 call=match.call()), class="mca")
}

## Print method for "mca" objects: the call, the problem size, and the
## singular values with the cumulative percentage of inertia explained.
print.mca <- function(x, ...)
{
  cl <- x$call
  if(!is.null(cl)) {
    cat("Call:\n")
    dput(cl)
  }
  cat("\nMultiple correspondence analysis of",
            nrow(x$rs), "cases of", x$p,
            "factors\n")
  cat("\nCorrelations", format(round(x$d, 3), ...))
  pct <- 100 * cumsum(x$d)/(x$p - 1)
  cat("  cumulative % explained", format(round(pct, 2), ...), "\n")
  invisible(x)
}

## Plot an "mca" solution: category labels always, row (case) points
## optionally.
## rows: also plot the row scores?  col/cex: length-2 values for rows and
## categories respectively (recycled when a single value is given).
plot.mca <- function(x, rows = TRUE, col, cex = par("cex"), ...)
{
  ## Bug fix: default was `rows = T`; T is reassignable, TRUE is not.
  if(length(cex) == 1) cex <- rep(cex, 2)
  ## Set up an equal-scale frame sized from the category scores.
  eqscplot(x$cs, type = "n", xlab = "", ...)
  if(missing(col)) {
    ## Default: current colour for rows, the next palette colour for
    ## category labels.
    col <- par("col")
    if (!is.numeric(col)) col <- match(col, palette())
    col <- c(col, col + 1)
  } else if(length(col) != 2) col <- rep(col, length = 2)
  if(rows) text(x$rs, cex = cex[1], col = col[1])
  text(x$cs, labels = dimnames(x$cs)[[1]], cex = cex[2], col = col[2])
  invisible(x)
}

## Predict from an "mca" fit: either place new rows (cases) in the existing
## category-score space, or place new factors using the stored row
## coordinates fs.
predict.mca <- function(object, newdata, type=c("row", "factor"), ...)
{
  ## 0/1 indicator (dummy) matrix for one factor.
  class.ind <- function(cl)
  {
    cl <- as.factor(cl)
    nobs <- length(cl)
    ind <- matrix(0, nobs, length(levels(cl)))
    ind[cbind(seq_len(nobs), unclass(cl))] <- 1
    dimnames(ind) <- list(names(cl), levels(cl))
    ind
  }

  type <- match.arg(type)
  abbrev <- object$call$abbrev
  if(is.null(abbrev)) abbrev <- FALSE
  if(!all(unlist(lapply(newdata, is.factor))))
    stop("All variables must be factors")
  G <- as.matrix(do.call("data.frame", c(lapply(newdata, class.ind),
                                         check.names=FALSE)))
  if(abbrev) colnames(G) <- unlist(lapply(newdata, levels))
  if(type == "row") {
    ## New rows must use exactly the categories seen at fit time.
    if(!all(colnames(G) == dimnames(object$cs)[[1]]))
       stop("factors in newdata do not match those for fit")
    return(G %*% object$cs/object$p)
  }
  ## type == "factor": category totals, then project onto the row scores.
  nr <- nrow(G)
  counts <- drop(rep(1, nr) %*% G)
  if(nr != nrow(object$fs))
    stop("newdata is not of the right length")
  (t(G)/counts) %*% object$fs
}
# file MASS/misc.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#

## Convert a matrix whose dimnames are labelled "<name>.<value>" into a
## three-column (x, y, z) data frame suitable for trellis-style plotting.
mat2tr <- function(z)
{
    dims <- names(dimnames(z))
    ## Strip the "<name>." prefixes to recover the numeric grid values.
    xv <- as.numeric(substring(rownames(z), nchar(dims[1]) + 2))
    yv <- as.numeric(substring(colnames(z), nchar(dims[2]) + 2))
    cbind(expand.grid(x = xv, y = yv), z = as.vector(z))
}

## Flatten a contour-style list (x, y, z matrix) into an (x, y, z) data
## frame with one row per grid point.
con2tr <- function(obj)
{
    grid <- expand.grid(x = obj$x, y = obj$y)
    data.frame(grid, z = as.vector(obj$z))
}

## Orthonormal basis for the orthogonal complement of the column space of
## M (an empty matrix when M has full column rank), via the QR
## decomposition.
Null <- function(M)
{
    dec <- qr(M)
    keep <- if(dec$rank == 0) 1:ncol(M) else -(1:dec$rank)
    qr.Q(dec, complete = TRUE)[, keep, drop = FALSE]
}

## Moore-Penrose generalized inverse of X, computed from the SVD with
## singular values below tol * d[1] treated as zero.
## (Based on suggestions of R. M. Heiberger, T. M. Hesterburg and WNV.)
ginv <- function(X, tol = sqrt(.Machine$double.eps))
{
    if(length(dim(X)) > 2 || !(is.numeric(X) || is.complex(X)))
        stop("X must be a numeric or complex matrix")
    if(!is.matrix(X)) X <- as.matrix(X)
    dec <- svd(X)
    u <- if(is.complex(X)) Conj(dec$u) else dec$u
    pos <- dec$d > max(tol * dec$d[1], 0)
    if(!any(pos))
        array(0, dim(X)[2:1])
    else
        dec$v[, pos] %*% ((1/dec$d[pos]) * t(u[, pos]))
}
# file MASS/mvrnorm.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
## Simulate from the multivariate normal N(mu, Sigma).
## Returns a vector when n == 1, otherwise an n x length(mu) matrix.
## tol: tolerance for declaring Sigma not positive semi-definite.
mvrnorm <- function(n = 1, mu, Sigma, tol = 1e-6)
{
    p <- length(mu)
    if(!all(dim(Sigma) == c(p, p))) stop("incompatible arguments")
    ## Eigendecomposition (rather than Cholesky) so positive
    ## semi-definite Sigma is accepted.
    ## Bug fix: was eigen(Sigma, sym = TRUE) -- partial argument matching.
    eS <- eigen(Sigma, symmetric = TRUE)
    ev <- eS$values
    if(!all(ev >= -tol * abs(ev[1]))) stop("Sigma is not positive definite")
    ## Negative eigenvalues within tolerance are clipped to zero.
    X <- mu + eS$vectors %*% diag(sqrt(pmax(ev, 0))) %*% matrix(rnorm(p*n), p)
    nm <- names(mu)
    if(is.null(nm) && !is.null(dn <- dimnames(Sigma))) nm <- dn[[1]]
    dimnames(X) <- list(nm, NULL)
    if(n == 1) drop(X) else t(X)
}
## Compatibility stubs (these pre-date the versions now in the stats
## package): pad predictions/residuals according to an na.action.  The
## default and "omit" methods simply return the object unchanged.
napredict <- function(omit, object) UseMethod("napredict")
napredict.omit <- napredict.default <- function(omit, object) object

naresid <- function(omit, object) UseMethod("naresid")
naresid.omit <- naresid.default <- function(omit, object) object
# file MASS/neg.bin.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
## Negative binomial family with *fixed* theta and log link, for use with
## glm().  (See negative.binomial() for a version with selectable links.)
neg.bin <- function(theta = stop("theta must be given"))
{
  .Theta <- theta              # captured by the closures below
  stats <- make.link("log")
  ## NB variance function: V(mu) = mu + mu^2/theta.
  variance <- function(mu)
    mu + mu^2/.Theta
  validmu <- function(mu)
    all(mu > 0)
  ## Deviance residuals; pmax(1, y) guards log(0) when y == 0.
  dev.resids <- function(y, mu, wt)
    2 * wt * (y * log(pmax(1, y)/mu) - (y + .Theta) *
              log((y + .Theta)/ (mu + .Theta)))
  aic <- function(y, n, mu, wt, dev) {
    term <- (y + .Theta) * log((y + .Theta)/ (mu + .Theta)) - y * log(mu) +
      lgamma(y + 1) - .Theta * log(.Theta) + lgamma(.Theta) - lgamma(.Theta+y)
    2 * sum(term * wt)
  }
  initialize <- expression({
    ## Bug fix: the message used to say "the Poisson family".
    if (any(y < 0)) stop(paste("Negative values not allowed for",
                               "the negative binomial family"))
    n <- rep(1, nobs)
    mustart <- y + (y == 0)/6
  })
  structure(list(family = "Negative Binomial", link = "log", linkfun = stats$linkfun,
                 linkinv = stats$linkinv, variance = variance, dev.resids = dev.resids,
                 aic = aic, mu.eta = stats$mu.eta, initialize = initialize,
                 validmu = validmu, valideta = stats$valideta), class = "family")
}
# file MASS/negbin.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
## Likelihood ratio tests for "negbin" fits.  With a single model this
## delegates to anova.glm at fixed theta; with several models they are
## compared via their 2 x log-likelihoods.  Theta is NOT re-estimated under
## each hypothesis, hence the warning.
"anova.negbin"<-
function(object, ..., test = "Chisq")
{
  dots <- list(...)
  if(length(dots) == 0) {
    warning("tests made without re-estimating theta")
    ## Recast the call/family so that anova.glm sees an ordinary glm fit
    ## with a fixed-theta negative.binomial family.
    object$call[[1]] <- as.name("glm")
    if(is.null(object$link))
      object$link <- as.name("log")
    object$call$family <- call("negative.binomial", theta = object$
                               theta, link = object$link)
    anova.glm(object, test = test)
  } else {
    if(test != "Chisq")
      warning("only Chi-squared LR tests are implemented")
    mlist <- list(object, ...)
    nt <- length(mlist)
    ## Order models by decreasing residual df (increasing complexity).
    dflis <- sapply(mlist, function(x) x$df.resid)
    s <- order(-dflis)
    mlist <- mlist[s]
    if(any(!sapply(mlist, inherits, "negbin")))
      stop("not all objects are of class `negbin'")
    rsp <- unique(sapply(mlist, function(x) paste(formula(x)[2])))
    mds <- sapply(mlist, function(x) paste(formula(x)[3]))
    ths <- sapply(mlist, function(x) x$theta)
    dfs <- dflis[s]
    lls <- sapply(mlist, function(x) x$twologlik)
    ## Pairwise comparisons of adjacent models in the ordering.
    tss <- c("", paste(1:(nt - 1), 2:nt, sep = " vs "))
    df <- c(NA,  - diff(dfs))
    x2 <- c(NA, diff(lls))
    pr <- c(NA, 1 - pchisq(x2[-1], df[-1]))
    out <- data.frame(Model = mds, theta = ths, Resid.df = dfs,
                      "2 x log-lik." = lls, Test = tss, df = df, LRtest = x2,
                      Prob = pr)
    names(out) <- c("Model", "theta", "Resid. df",
                    "   2 x log-lik.", "Test", "   df", "LR stat.", "Pr(Chi)")
    class(out) <- c("Anova", "data.frame")
    attr(out, "heading") <-
      c("Likelihood ratio tests of Negative Binomial Models\n",
        paste("Response:", rsp))
    out
  }
}

## Print an "Anova" table: emit the stored heading lines (if any), then the
## body as a plain data frame with the heading attribute removed.
print.Anova <- function(x, ...)
{
    hdr <- attr(x, "heading")
    attr(x, "heading") <- NULL
    if(!is.null(hdr)) cat(hdr, sep = "\n")
    print.data.frame(x)
}

## Extract the family object stored on a fitted negative binomial model.
family.negbin <- function(object)
{
    object$family
}

## Convert a glm.nb() fit into an ordinary "glm" object with a fixed-theta
## negative.binomial family, so standard glm machinery applies to it.
glm.convert <- function(object)
{
    cl <- object$call
    cl[[1]] <- as.name("glm")
    if(is.null(object$link))
        object$link <- as.name("log")
    ## Record the family (with the converged theta) in the call, and drop
    ## the glm.nb-specific arguments.
    cl$family <- call("negative.binomial", theta = object$theta,
                      link = object$link)
    cl$init.theta <- cl$link <- NULL
    object$call <- cl
    class(object) <- c("glm", "lm")
    object
}

## Fit a negative binomial GLM with unknown theta: alternates glm fitting
## of the regression coefficients (at fixed theta) with ML estimation of
## theta via theta.ml() (at fixed means), until the log-likelihood and
## theta both stabilise.
glm.nb <- function(formula = formula(data), data = sys.frame(sys.parent()), weights,
		   subset, na.action, start = eta,
		   control = glm.control(...), method = "glm.fit",
		   model = FALSE, x = FALSE, y = TRUE, contrasts = NULL,
		   init.theta, link = log, ...)
{
    ## Negative binomial log-likelihood (constants in y omitted).
    loglik <- function(n, th, mu, y)
    {
        sum(lgamma(th + y) - lgamma(th) + th * log(th) +
            y * log(mu + (y == 0)) - (th + y) * log(th + mu))
    }
    link <- substitute(link)
    th <- as.name("th")
    ## Initial family: Poisson unless a starting theta was supplied.
    if(missing(init.theta)) {
        fam0 <- do.call("poisson", list(link = link))
    } else {
        fam0 <- do.call("negative.binomial", list(theta = init.theta, link = link))
    }
    Call <- match.call()
    ## Build and evaluate the model frame in the caller's environment.
    m <- match.call(expand = FALSE)
    m$method <- m$model <- m$x <- m$y <- m$control <- m$contrasts <-
        m$init.theta <- m$link <- m$... <- NULL
    m[[1]] <- as.name("model.frame")
    m <- eval(m, sys.frame(sys.parent()))
    Terms <- attr(m, "terms")
    if(method == "model.frame") return(m)
    ## Record factor levels for safe prediction later.
    xvars <- as.character(attr(Terms, "variables"))[-1]
    if(length(xvars) > 0) {
        xlev <- lapply(m[xvars], levels)
        xlev <- xlev[!sapply(xlev, is.null)]
    } else xlev <- NULL
    a <- attributes(m)
    Y <- model.extract(m, response)
    X <- model.matrix(Terms, m, contrasts)
    w <- model.extract(m, weights)
    if(!length(w)) w <- rep(1, nrow(m))
    else if(any(w < 0)) stop("negative weights not allowed")
    start <- model.extract(m, start)
    offset <- model.extract(m, offset)
    n <- length(Y)
    if(!is.null(method)) {
            if(!exists(method, mode = "function"))
                stop(paste("unimplemented method:", method))
    }
    else method <- "glm.fit"
    glm.fitter <- get(method)
    if(control$trace > 1) cat("Initial fit:\n")
    fit <- glm.fitter(x = X, y = Y, w = w, etastart = start,
                      offset = offset, family = fam0,
                      control = list(maxit=control$maxit,
                      epsilon = control$epsilon,
                      trace = control$trace > 1))
    class(fit) <- c("glm", "lm")
    mu <- fit$fitted
    ## First theta estimate, from the initial fitted means.
    th <- as.vector(theta.ml(Y, mu, n, limit=control$maxit, trace =
                             control$trace> 2))
    if(control$trace > 1)
        cat("Initial value for theta:", signif(th), "\n")
    fam <- do.call("negative.binomial", list(theta = th, link = link))
    iter <- 0
    ## d1, d2 scale the two convergence criteria (log-lik change and theta
    ## change) so they can be summed against a single epsilon.
    d1 <- sqrt(2 * max(1, fit$df.residual))
    d2 <- del <- 1
    g <- fam$linkfun
    Lm <- loglik(n, th, mu, Y)
    Lm0 <- Lm + 2 * d1
    ## Alternate: refit the GLM at the current theta, then update theta at
    ## the current means.
    while((iter <- iter + 1) <= control$maxit &&
          (abs(Lm0 - Lm)/d1 + abs(del)/d2) > control$epsilon) {
        eta <- g(mu)
        fit <- glm.fitter(x = X, y = Y, w = w, etastart =
                          eta, offset = offset, family = fam,
                          control = list(maxit=control$maxit,
                          epsilon = control$epsilon,
                          trace = control$trace > 1))
        t0 <- th
        th <- theta.ml(Y, mu, n, limit=control$maxit, trace = control$trace > 2)
        fam <- do.call("negative.binomial", list(theta = th, link = link))
        class(fit) <- c("negbin", "glm", "lm")
        mu <- fit$fitted
        del <- t0 - th
        Lm0 <- Lm
        Lm <- loglik(n, th, mu, Y)
        if(control$trace) {
            Ls <- loglik(n, th, Y, Y)
            Dev <- 2 * (Ls - Lm)
            cat("Theta(", iter, ") =", signif(th),
                ", 2(Ls - Lm) =", signif(Dev), "\n")
        }
    }
    ## Propagate any warning set by theta.ml (truncation/iteration limit).
    if(!is.null(attr(th, "warn"))) fit$th.warn <- attr(th, "warn")
    if(iter > control$maxit) {
        warning("alternation limit reached")
        fit$th.warn <- "alternation limit reached"
    }

  # If an offset and intercept are present, iterations are needed to
  # compute the Null deviance; these are done here, unless the model
  # is NULL, in which case the computations have been done already
  #
    if(any(offset) && attr(Terms, "intercept")) {
        null.deviance <-
            if(length(Terms))
                glm.fitter(X[, "(Intercept)", drop = FALSE], Y, w,
                           offset = offset, family = fam,
                           control = list(maxit=control$maxit,
                           epsilon = control$epsilon, trace = control$trace > 1)
                           )$deviance
           else fit$deviance
        fit$null.deviance <- null.deviance
    }
    class(fit) <- c("negbin", "glm", "lm")
    fit$terms <- Terms
    fit$formula <- as.vector(attr(Terms, "formula"))
    ## Record the converged theta in the call so update() restarts there.
    Call$init.theta <- as.vector(th)
    Call$link <- link
    fit$call <- Call
    if(model) fit$model <- m
    if(x) fit$x <- X
    if(!y) fit$y <- NULL
    fit$theta <- as.vector(th)
    fit$SE.theta <- attr(th, "SE")
    fit$twologlik <- as.vector(2 * Lm)
    fit$contrasts <- if (length(clv <- unlist(lapply(m, class))))
        options("contrasts")[[1]] else FALSE
    fit$xlevels <- xlev
    fit
}

## Negative binomial family with fixed theta, for use with glm().
## link: one of "log", "identity", "sqrt" (given as a name, a character
## string, or a variable holding a string).
"negative.binomial" <-
function(theta = stop("theta must be specified"), link = "log")
{
    ## Resolve `link` whether passed as a name, string, or variable.
    linktemp <- substitute(link)
    if (!is.character(linktemp)) {
        linktemp <- deparse(linktemp)
        if (linktemp == "link")
            linktemp <- eval(link)
    }
    if (any(linktemp == c("log", "identity", "sqrt")))
        stats <- make.link(linktemp)
    else stop(paste(linktemp, "link not available for negative binomial",
                    "family; available links are", "\"identity\", \"log\" and \"sqrt\""))
    ## Bug fix: an unconditional `stats <- make.link("log")' here used to
    ## clobber the user's choice, silently forcing the log link even when
    ## "identity" or "sqrt" had been validated above.
    .Theta <- theta              # captured by the closures below
    ## NB variance function: V(mu) = mu + mu^2/theta.
    variance <- function(mu)
        mu + mu^2/.Theta
    validmu <- function(mu)
        all(mu > 0)
    ## Deviance residuals; pmax(1, y) guards log(0) at y == 0.
    dev.resids <- function(y, mu, wt)
        2 * wt * (y * log(pmax(1, y)/mu) - (y + .Theta) *
                  log((y + .Theta)/ (mu + .Theta)))
    aic <- function(y, n, mu, wt, dev) {
        term <- (y + .Theta) * log((y + .Theta)/ (mu + .Theta)) - y * log(mu) +
            lgamma(y + 1) - .Theta * log(.Theta) + lgamma(.Theta) - lgamma(.Theta+y)
        2 * sum(term * wt)
    }
    initialize <- expression({
        ## Bug fix: the message used to say "the Poisson family".
        if (any(y < 0)) stop(paste("Negative values not allowed for",
                                   "the negative binomial family"))
        n <- rep(1, nobs)
        mustart <- y + (y == 0)/6
    })
    famname <- paste("Negative Binomial(", format(round(theta, 4)), ")",
                     sep = "")
    structure(list(family = famname, link = linktemp, linkfun = stats$linkfun,
                   linkinv = stats$linkinv, variance = variance, dev.resids = dev.resids,
                   aic = aic, mu.eta = stats$mu.eta, initialize = initialize,
                   validmu = validmu, valideta = stats$valideta), class = "family")
}

## Random negative binomial deviates via the gamma-Poisson mixture:
## lambda ~ Gamma(theta) scaled to mean mu, then Y ~ Poisson(lambda).
## `n` may be a count or (as for rnorm etc.) a template vector.
rnegbin <-
function(n, mu = n, theta = stop("theta must be given"))
{
    size <- if(length(n) > 1) length(n) else n
    lambda <- (mu * rgamma(size, theta))/theta
    rpois(size, lambda)
}

## Summary for glm.nb() fits: the standard glm summary augmented with
## theta, its standard error, 2 x log-likelihood and any theta-estimation
## warning carried on the fit.
summary.negbin <- function(object, dispersion = 1, correlation = TRUE, ...)
{
    if(is.null(dispersion)) dispersion <- 1
    base <- summary.glm(object, dispersion = dispersion,
                        correlation = correlation)
    extra <- object[c("theta", "SE", "twologlik", "th.warn")]
    out <- c(base, extra)
    class(out) <- c("summary.negbin", "summary.glm")
    out
}

## Print method: the standard glm summary (via NextMethod), followed by
## theta, its standard error, and 2 x log-likelihood.
"print.summary.negbin" <- function(x, ...)
{
    NextMethod()
    ## Rounding digits chosen so the SE shows about 3 significant figures.
    dp <- 2 - floor(log10(x$SE))
    cat("\n              Theta: ", format(round(x$theta, dp), sci=F, nsmall=dp),
        "\n          Std. Err.: ", format(round(x$SE, dp), sci=F, nsmall=dp),
        "\n")
    if(!is.null(x$th.warn))
        cat("Warning while fitting theta:", x$th.warn,"\n")
    cat("\n 2 x log-likelihood: ", format(round(x$twologlik, 3), sci=F, nsmall=3), "\n")
    invisible(x)
}

## Estimate the negative binomial theta by equating the deviance to its
## degrees of freedom, solving by Newton iteration.
## y, u: observed counts and fitted means (or a fitted model as `y`);
## dfr: residual degrees of freedom; limit/eps: iteration controls.
"theta.md" <-
    function(y, u, dfr, limit = 20, eps = .Machine$double.eps^0.25)
{
    ## Convenience: accept a fitted lm/glm in place of (y, u, dfr).
    if(inherits(y, "lm")) {
        u <- y$fitted
        dfr <- y$df.residual
        y <- if(is.null(y$y)) u + residuals(y) else y$y
    }
    n <- length(y)
    ## Moment-type starting value.
    t0 <- n/sum((y/u - 1)^2)
    a <- 2 * sum(y * log(pmax(1, y)/u)) - dfr
    it <- 0
    del <- 1
    while((it <- it + 1) < limit && abs(del) > eps) {
        t0 <- abs(t0)
        ## Newton step for: a = 2 * sum((y + t0) * log((y + t0)/(u + t0))).
        top <- a - 2 * sum((y + t0) * log((y + t0)/(u + t0)))
        bot <- 2 * sum((y - u)/(u + t0) - log((y + t0)/(u + t0)))
        del <- top/bot
        t0 <- t0 - del
    }
    if(t0 < 0) {
        t0 <- 0
        warning("estimator truncated at zero")
        attr(t0, "warn") <- "estimate truncated at zero"
    }
    t0
}

## Maximum likelihood estimate of the negative binomial theta for given
## means mu, by Newton-Raphson on the profile score.  Returns theta with an
## "SE" attribute, plus a "warn" attribute on truncation/non-convergence.
"theta.ml" <-
    function(y, mu, n = length(y), limit = 10, eps = .Machine$double.eps^0.25,
             trace=FALSE)
{
    ## Score and observed-information contributions for theta.
    score <- function(n, th, mu, y)
        sum(digamma(th + y) - digamma(th) + log(th) +
            1 - log(th + mu) - (y + th)/(mu + th))
    info <- function(n, th, mu, y)
        sum( - trigamma(th + y) + trigamma(th) - 1/th +
            2/(mu + th) - (y + th)/(mu + th)^2)
    ## Convenience: accept a fitted model in place of (y, mu).
    if(inherits(y, "lm")) {
        mu <- y$fitted
        y <- if(is.null(y$y)) mu + residuals(y) else y$y
    }
    ## Moment-type starting value.
    t0 <- n/sum((y/mu - 1)^2)
    it <- 0
    del <- 1
    if(trace) cat("theta.ml: initial theta =", signif(t0), "\n")
    while((it <- it + 1) < limit && abs(del) > eps) {
        t0 <- abs(t0)
        del <- score(n, t0, mu, y)/(i <- info(n, t0, mu, y))
        t0 <- t0 + del
        if(trace) cat("theta.ml: iter", it," theta =", signif(t0), "\n")
    }
    if(t0 < 0) {
        t0 <- 0
        warning("estimator truncated at zero")
        attr(t0, "warn") <- "estimate truncated at zero"
    }
    if(it == limit) {
        warning("iteration limit reached")
        attr(t0, "warn") <- "iteration limit reached"
    }
    ## NOTE(review): `i' holds the information from the final Newton step;
    ## if the while loop body never runs, `i' is undefined here.
    attr(t0, "SE") <- sqrt(1/i)
    t0
}

## Moment estimator of the negative binomial theta: solves
## sum((y - u)^2 / (u + u^2/theta)) = dfr for theta by Newton iteration.
theta.mm <- function(y, u, dfr, limit = 10, eps = .Machine$double.eps^0.25)
{
  ## Convenience: accept a fitted model in place of (y, u, dfr).
  if(inherits(y, "lm")) {
    u <- y$fitted
    dfr <- y$df.residual
    y <- if(is.null(y$y)) u + residuals(y) else y$y
  }
  nobs <- length(y)
  th <- nobs/sum((y/u - 1)^2)    # crude starting value
  step <- 1
  iter <- 0
  while((iter <- iter + 1) < limit && abs(step) > eps) {
    th <- abs(th)
    step <- (sum((y - u)^2/(u + u^2/th)) - dfr)/sum((y - u)^2/(u + th)^2)
    th <- th - step
  }
  if(th < 0) {
    th <- 0
    warning("estimator truncated at zero")
    attr(th, "warn") <- "estimate truncated at zero"
  }
  th
}
# file MASS/negexp.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
## Initial values for the model y ~ b0 + b1 * exp(-x/th), for the old nls
## initialisation mechanism.
## NOTE(review): reads the response from the global variable
## `.nls.initial.response' -- pre-namespace S-PLUS protocol; confirm before
## reusing this function.
negexp.ival <- function(x, b0, b1, th)
{
    pnames <- as.character(sys.call()[3:5])
    y <- get(".nls.initial.response")
    if(length(unique(x)) < 3)
        stop("at least 3 distinct x values are needed")
    ## Local quadratic fit gives slope and curvature of the decay.
    mx <- mean(x)
    b <- as.vector(lsfit(cbind(x - mx, -(x - mx)^2/2), y)$coef)
    rx <- range(x)
    ## Keep the quadratic's stationary point inside the x-range.
    xh <- mx + b[2]/b[3]
    if(prod(xh - rx) < 0)
        if(xh - rx[1] > rx[2] - xh) rx[2] <- xh  else rx[1] <- xh
    x0 <- c(rx[1], sum(rx)/2, rx[2])
    dy <- diff(b[1] + b[2]*(x0 - mx) - (b[3]*(x0 - mx)^2)/2)
    ## Decay rate from the ratio of successive increments, then a linear
    ## fit for the remaining two parameters.
    th <- (x0[2] - x0[1])/log(dy[1]/dy[2])
    b <- as.vector(lsfit(exp( - x/th), y)$coef)
    pars <- list(b[1], b[2], th)
    names(pars) <- pnames
    print(unlist(pars))
    pars
}

## selfStart-style initial values for y ~ b0 + b1 * exp(-x/th): fit a local
## quadratic for the decay shape, derive a starting th from the ratio of
## successive increments, then a linear fit for b0 and b1.
negexp.SSival <- function(mCall, data, LHS)
{
    x <- eval(mCall[["x"]], data)
    if(length(x) < 3)
        stop("at least 3 distinct x values are needed")
    y <- eval(LHS, data)
    mx <- mean(x)
    b <- as.vector(lsfit(cbind(x - mx,  - (x - mx)^2/2), y)$coef)
    rx <- range(x)
    ## Keep the quadratic's stationary point inside the x-range.
    xh <- mx + b[2]/b[3]
    if(prod(xh - rx) < 0)
        if(xh - rx[1] > rx[2] - xh)
            rx[2] <- xh
        else rx[1] <- xh
    x0 <- c(rx[1], sum(rx)/2, rx[2])
    dy <- diff(b[1] + b[2] * (x0 - mx) - (b[3] * (x0 - mx)^2)/2)
    ## Exponential rate from the slope ratio, then solve for b0 and b1.
    th <- (x0[2] - x0[1])/log(dy[1]/dy[2])
    b <- as.vector(lsfit(exp( - x/th), y)$coef)
    pars <- list(b[1], b[2], th)
    names(pars) <- mCall[c("b0", "b1", "th")]
    print(unlist(pars))
    pars
}
# file MASS/orlm.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
## Robust (Huber-type) linear regression: standard model-frame front end,
## then delegates the actual fitting to hsreg().
## NOTE(review): despite the x/y arguments, the design matrix and response
## are always stored on the fit -- confirm whether that is intended.
orlm <-
    function(formula, data, weights, subset, na.action, method = "qr",
             model = FALSE, x = FALSE, y = FALSE, ...)
{
    call <- match.call()
    ## Reduce the call to pure model-frame arguments and evaluate it in the
    ## caller's frame.
    m <- match.call(expand = FALSE)
    m$method <- m$model <- m$x <- m$y <- m$... <- NULL
    m[[1]] <- as.name("model.frame")
    m <- eval(m, sys.frame(sys.parent()))
    if(method == "model.frame") return(m)
    Terms <- attr(m, "terms")
    weights <- model.extract(m, weights)
    Y <- model.extract(m, response)
    X <- model.matrix(Terms, m)
    if(length(weights)==0) weights <- rep(1, nrow(X))
    fit <- hsreg(X, Y, wx = weights, ...)
    fit$terms <- Terms
    fit$call <- call
    if(model) fit$model <- m
    fit$x <- X
    fit$y <- Y
    attr(fit, "na.message") <- attr(m, "na.message")
    if(!is.null(attr(m, "na.action"))) fit$na.action <- attr(m, "na.action")
    fit
}

# Huber M-estimation of a linear model by iteratively reweighted least
# squares.  x, y: design matrix and response; w: initial IRLS weights;
# k: Huber tuning constant; wx: fixed case weights; maxit/acc: iteration
# control; sw: iteration at which the scale estimate switches from the
# MAD to a Huber proposal-2 style estimate; test.vec: the quantity
# ("resid", "coef" or "w") monitored for convergence.
hsreg <-
    function(x, y, w = rep(1, nrow(x)), k=1.345, wx, maxit = 20, sw=1000,
             acc = .Machine$double.eps^0.25, test.vec = "resid", ...)
{
    # relative change in the monitored vector between iterations
    irls.delta <- function(old, new)
        sqrt(sum((old - new)^2)/max(1e-20, sum(old^2)))
    # fallback criterion: largest weighted correlation of residuals with x
    irls.rrxwr <- function(x, w, r)
    {
        w <- sqrt(w)
        max(abs((matrix(r * w, 1, length(r)) %*% x)/
                sqrt(matrix(w, 1, length(r)) %*% (x^2))))/sqrt(sum(w * r^2))
    }
    if(!(any(test.vec == c("resid", "coef", "w", "NULL")) || is.null(test.vec)))
        stop("invalid testvec")
    if(!missing(wx)) {
        if(length(wx) != nrow(x))
            stop("Length of wx must equal number of observations")
        if(any(wx < 0))
            stop("Negative wx value")
        w <- w * wx
    }
    # initial weighted least-squares fit
    temp <- lm.wfit(x, y, w, method="qr", ...)
    coef <- temp$coef
    resid <- temp$resid
    # consistency constant for the Huber proposal-2 scale estimate
    th <- 2*pnorm(k)-1
    gamma <- th + k^2*(1-th) -2*k*dnorm(k)

    done <- FALSE
    conv <- NULL
    n1 <- nrow(x) - ncol(x)
    scale <- median(abs(resid))/0.6745   # MAD-about-zero scale estimate
    for(iiter in 1:maxit) {
        # snapshot of the monitored quantity before this iteration's refit
        if(!is.null(test.vec)) testpv <- get(test.vec)
        ks <- k*scale
        # MAD scale for early iterations, proposal-2 style after iteration sw
        if(iiter < sw) scale <- median(abs(resid))/0.6745
        else scale <- sqrt(sum(pmin(resid^2,ks^2))/(n1*gamma))
        if(scale == 0) {        # exact fit: nothing left to iterate on
            done <- TRUE
            break
        }
        w <- as.vector(wt.huber(resid/scale, k))
        if(!missing(wx)) w <- w * wx    # adjust for wx weights
        temp <- lm.wfit(x, y, w, method="qr")
        coef <- temp$coef
        resid <- temp$residuals
        if(!is.null(test.vec))
            convi <- irls.delta(testpv, get(test.vec))
        else convi <- irls.rrxwr(x, w, resid)
        conv <- c(conv, convi)
        done <- convi <= acc
        if(!done) next
        # NOTE(review): "method.done" is never defined in this file, so the
        # !exists() branch always breaks here -- presumably a vestigial
        # S-PLUS hook; confirm before relying on the second condition.
        if(!exists("method.done") || method.done) break
    }
    if(!done)
        warning(paste("hsreg failed to converge in", maxit, "steps"))
    if(!missing(wx)) {
        # report IRLS weights net of the fixed wx case weights
        tmp <- (wx != 0)
        w[tmp] <- w[tmp]/wx[tmp]
    }
    names(scale) <- NULL                # since median assigns a name
    fit <- list(coefficients = coef, residuals = resid,
                fitted.values = temp$fitted.values, rank = temp$rank,
                assign =temp$assign,  w = w, k = k, s = scale,
                conv = conv, converged = done)
    class(fit) <- c("orlm", "lm")
    fit
}

# Print method for "orlm" fits: shows the call, the convergence status,
# the coefficients, the degrees of freedom and the scale estimate.
# Returns its argument invisibly, as print methods should.
print.orlm <-
function(x, ...)
{
    the.call <- x$call
    if(!is.null(the.call)) {
        cat("Call:\n")
        dput(the.call)
    }
    n.iter <- length(x$conv)
    if(x$converged)
        cat("Converged in", n.iter, "iterations\n")
    else cat("Ran", n.iter, "iterations without convergence\n")
    cf <- x$coef
    cat("\nCoefficients:\n")
    print(cf, ...)
    n.obs <- length(x$resid)
    cat("\nDegrees of freedom:", n.obs, "total;", n.obs - length(cf),
        "residual\n")
    cat("Scale estimate:", format(signif(x$s, 3)), "\n")
    invisible(x)
}

# Summary method for "orlm" fits: builds a coefficient table with robust
# (Huber-style) standard errors, plus the unscaled covariance and the
# correlation matrix of the coefficients.  The result is classed
# "summary.lm" so the standard print method can display it.
summary.orlm <- function(object, ...)
{
    k <- object$k
    s <- object$s
    ks <- k*s                          # winsorization threshold
    coef <- object$coef
    cnames <- names(coef)
    ptotal <- length(coef)
    resid <- object$resid
    n <- length(resid)
    if(any(na <- is.na(coef))) coef <- coef[!na]
    p <- length(coef)
    rdf <- n - p
    rinv <- diag(p)
    dimnames(rinv) <- list(cnames, cnames)
    # variance of the residuals winsorized at ks
    S <- sum(pmin(abs(resid), ks)^2)/rdf
    m <- sum(abs(resid) < ks)/n        # fraction of untruncated residuals
    kappa <- 1 + p*(1-m)/(n*m)         # finite-sample correction factor
    stddev <- sqrt(S)*(kappa/m)
    R <- qr(object$x)$qr
    R <- R[1:p, 1:p, drop = FALSE]
    # for(i in 2:p)for(j in 1:(i-1))R[i,j] <- 0
    R[lower.tri(R)] <- 0               # keep only the R factor of the QR
    rinv <- solve(R, rinv)             # R^{-1}
    rowlen <- (rinv^2 %*% rep(1, p))^0.5
    names(rowlen) <- cnames
    correl <- rinv * array(1/rowlen, c(p, p))
    correl <- correl %*% t(correl)     # coefficient correlation matrix
    coef <- array(coef, c(p, 3))
    dimnames(coef) <- list(cnames, c("Value", "Std. Error", "t value"))
    coef[, 2] <- rowlen %o% stddev
    coef[, 3] <- coef[, 1]/coef[, 2]
    # keep only the call, then attach the summary components
    object <- object["call"]
    object$residuals <- resid
    object$coefficients <- coef
    object$sigma <- s
    object$stddev <- stddev
    object$df <- c(p, rdf, ptotal)
    object$r.squared <- NA
    object$cov.unscaled <- rinv %*% t(rinv)
    object$correlation <- correl
    object$terms <- NA
    class(object) <- "summary.lm"
    object
}
# file MASS/polynom.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
"polynom"<- function(x, b)
{
	if(!is.loaded(symbol.C("poly"))) dyn.load("horner.o")
	m <- as.integer(length(x))
	n <- as.integer(length(b) - 1)
	storage.mode(x) <- "double"
	p <- x
	storage.mode(b) <- "double"
	.C("poly",
		m,
		val = p,
		x,
		n,
		b)$val
}
# File MASS/profiles.q copyright (C) 1996 D. M. Bates and W. N. Venables.
#
# port to R by B. D. Ripley copyright (C) 1998
#
# Profile the deviance of a fitted glm over each coefficient in `which`.
# For each parameter, the parameter is stepped away from its estimate in
# units of del * std.err (up to maxsteps steps or until the signed root
# deviance difference exceeds zmax), refitting the remaining parameters
# each time.  Returns a classed list of data frames (one per parameter)
# holding the signed root deviance ("z" or "tau") and the parameter values
# at each step, suitable for plot.profile / pairs.profile below.
profile.glm <- function(fitted, which = 1:p, alpha = 0.01,
			maxsteps = 10, del = zmax/5, trace = FALSE)
{
  Pnames <- names(B0 <- coefficients(fitted))
  pv0 <- t(as.matrix(B0))
  p <- length(Pnames)
  if(is.character(which)) which <- match(which, Pnames)
  summ <- summary(fitted)
  std.err <- summ$coefficients[, "Std. Error"]
  mf <- update(fitted, method = "model.frame")
  n <- length(Y <- model.response(mf))
  O <- model.offset(mf)
  if(!length(O)) O <- rep(0, n)
  W <- model.extract(mf, weights)
  if(length(W) == 0) W <- rep(1, n)
  OriginalDeviance <- deviance(fitted)
  DispersionParameter <- summ$dispersion
  X <- model.matrix(fitted)
  fam <- family(fitted)
  # choose the reference cutoff zmax and the profile statistic's name by
  # family: chi-squared based "z" where the dispersion is known, F based
  # "tau" otherwise
  switch(fam$family,
	 binomial = {
	   if(!is.null(dim(Y))) {
	     # two-column binomial response: convert to proportions/weights
	     n <- n/2
	     O <- O[1:n]
	     Y <- Y[, 1]/(W <- drop(Y %*% c(1, 1)))
	   }
	   zmax <- sqrt(qchisq(1 - alpha/2, p))
	   profName <- "z"
	 },
	 poisson = ,
	 "Negative Binomial" = {
	   zmax <- sqrt(qchisq(1 - alpha/2, p))
	   profName <- "z"
	 }
	 ,
	 gaussian = ,
	 quasi = ,
	 "inverse.gaussian" = ,
	 {
	   zmax <- sqrt(p * qf(1 - alpha/2, p, n - p))
	   profName <- "tau"
	 }
	 )
  prof <- vector("list", length=length(which))
  names(prof) <- Pnames[which]
  for(i in which) {
    zi <- 0
    pvi <- pv0
    Xi <- X[,  - i, drop = FALSE]     # design matrix without column i
    Pi <- Pnames[ - i]
    pi <- Pnames[i]
    # walk the parameter down (sgn = -1) then up (sgn = +1) from B0[i]
    for(sgn in c(-1, 1)) {
      if(trace) cat("\nParameter:", pi, c("up", "down")[(sgn + 1)/2 + 1], "\n")
      step <- 0
      z <- 0
## LP is the linear predictor including offset. What $linear.predictor
## gives depends on the version of S-PLUS.
##
      LP <- X %*% fitted$coef + O
      while((step <- step + 1) < maxsteps && abs(z) < zmax) {
	bi <- B0[i] + sgn * step * del * std.err[i]
        o <- O + X[, i] * bi          # fixed contribution of parameter i
## R 0.63.3 or later: earlier needs etastart = LP - o
	fm <- glm.fit(x = Xi, y = Y, weights = W, etastart = LP,
		      offset = o, family = fam, control = fitted$control)
	LP <- Xi %*% fm$coef + o
	ri <- pv0
	ri[, names(coef(fm))] <- coef(fm)
	ri[, pi] <- bi
	pvi <- rbind(pvi, ri)
	# signed root of the (scaled) deviance difference
	z <- sgn * sqrt((fm$deviance - OriginalDeviance)/DispersionParameter)
	zi <- c(zi, z)
      }
    }
    si <- order(zi)
    prof[[pi]] <- structure(data.frame(zi[si]), names = profName)
    prof[[pi]]$par.vals <- pvi[si, ]
  }
  val <- structure(prof, original.fit = fitted, summary = summ)
  class(val) <- c("profile.glm", "profile")
  val
}

# Plot each profile trace on a roughly square grid of base-graphics
# panels: the profile statistic against the profiled parameter value,
# with a spline-smoothed curve and a cross at the estimate (tau == 0).
# NOTE(review): the `nseg` argument is accepted but never used here --
# presumably kept for call compatibility; confirm before removing.
plot.profile <-
  ## R version: non-Trellis-based replacement for plot.profile
  function(x, nseg, ...)
{
  nulls <- sapply(x, is.null)
  if (all(nulls)) return(NULL)
  x <- x[!nulls]
  nm <- names(x)
  nr <- ceiling(sqrt(length(nm)))     # panels arranged in an nr x nr grid
  oldpar <- par(mfrow = c(nr, nr))
  on.exit(par(oldpar))                # restore graphics state on exit
  for(nm in names(x)) {
    tau <- x[[nm]][[1]]               # profile statistic (z or tau)
    parval <- x[[nm]][[2]][, nm]      # profiled parameter values
    plot(parval, tau, xlab = nm, ylab = "tau", type="n")
    points(parval[tau == 0], 0, pch = 3)   # mark the original estimate
    splineVals <- spline(parval, tau)
    lines(splineVals$x, splineVals$y)
  }
}
# Trellis graphics are never available in this R port; pairs.profile
# consults this when choosing its default colours.
is.trellis <- function() FALSE

# Pairwise profile-trace plot for "profile" objects.  Each off-diagonal
# panel overlays the two profile traces for a pair of parameters (with
# short cross-bars along the profiled trace); diagonal panels show the
# parameter name; the original coefficient estimates are marked with a
# cross.  Recommended only for diagnostic purposes.
# colours: line colours for the two traces (column trace, row trace).
# Returns x invisibly.
pairs.profile <-
  ## Another plot method for profile objects showing pairwise traces.
  ## Recommended only for diagnostic purposes.
function(x,
	 colours = if(is.trellis())
	 trellis.par.get("superpose.line")$col[2:3]
	 else 2:3)
{
  parvals <- lapply(x, "[[", "par.vals")
  # overall plotting range of every parameter, across all traces
  # (TRUE, not the reassignable shorthand T)
  rng <- apply(do.call("rbind", parvals), 2, range, na.rm = TRUE)
  Pnames <- colnames(rng)
  npar <- length(Pnames)
  coefs <- coef(attr(x, "original.fit"))
  form <- paste(as.character(attr(x, "original.fit")$formula)[c(2, 1, 3)],
                collapse = "")
  oldpar <- par(mar = c(0, 0, 0, 0), mfrow = c(1, 1),
                oma = c(3, 3, 6, 3), las = 1)
  on.exit(par(oldpar))                 # restore graphics state on exit
  ##
  ## The following dodge ensures that the plot region is square
  ##
  fin <- par("fin")
  dif <- (fin[2] - fin[1])/2
  if(dif > 0) adj <- c(dif, 0, dif, 0)
  else adj <- c(0, -dif, 0, -dif)
  par(omi = par("omi") + adj)
  ##
  ##
  cex <- 1 + 1/npar
  frame()
  mtext(form, side = 3, line = 3, cex = 1.5, outer = TRUE)
  del <- 1/npar
  for(i in 1:npar) {
    ci <- npar - i
    pi <- Pnames[i]
    for(j in 1:npar) {
      pj <- Pnames[j]
      par(fig = del * c(j - 1, j, ci, ci + 1))
      if(i == j) {
        # diagonal panel: just the parameter name
        par(new = TRUE)
        plot(rng[, pj], rng[, pi], axes = FALSE, xlab = "", ylab = "",
             type = "n")
        op <- par(usr = c(-1, 1, -1, 1))
        text(0, 0, pi, cex = cex, adj = 0.5)
        par(op)
      } else {
        col <- colours
        if(i < j) col <- col[2:1]      # swap colours above the diagonal
        if(!is.null(parvals[[pj]])) {
          par(new = TRUE)
          # smoothed trace of parameter i while profiling parameter j
          plot(spline(x <- parvals[[pj]][, pj],
                      y <- parvals[[pj]][, pi]), type = "l", xlim = rng[, pj],
               ylim = rng[, pi], axes = FALSE, xlab = "", ylab = "",
               col = col[2])
          pu <- par("usr")
          smidge <- 2/100 * (pu[4] - pu[3])
          segments(x, pmax(pu[3], y - smidge), x, pmin(pu[4], y + smidge))
        } else
          plot(rng[, pj], rng[, pi], axes = FALSE, xlab = "", ylab = "",
               type = "n")
        if(!is.null(parvals[[pi]])) {
          # trace of parameter j while profiling parameter i
          lines(x <- parvals[[pi]][, pj], y <- parvals[[pi]][, pi],
                type = "l", col = col[1])
          pu <- par("usr")
          smidge <- 2/100 * (pu[2] - pu[1])
          segments(pmax(pu[1], x - smidge), y, pmin(pu[2], x + smidge), y)
        }
        points(coefs[pj], coefs[pi], pch = 3, cex = 3)
      }
      # axes only on the outer edges of the grid
      if(i == npar) axis(1)
      if(j == 1) axis(2)
      if(i == 1) axis(3)
      if(j == npar) axis(4)
    }
  }
  par(fig = c(0, 1, 0, 1))
  invisible(x)
}
# file MASS/qda.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Generic for quadratic discriminant analysis; dispatches on the class
# of its first argument.
qda <- function(x, ...)
{
    ## In R, UseMethod() accepts at most the generic's name and the object
    ## to dispatch on -- the old S-PLUS form UseMethod("qda", x, ...) is an
    ## error.  class(x) is also never NULL in R, so the old preamble
    ## assigning data.class(x) is unnecessary.
    UseMethod("qda")
}

# qda method for formulas: builds the model frame and model matrix,
# strips any intercept column, then delegates to qda.default.  Factor
# levels, contrasts and NA-handling information are recorded on the
# result so predict.qda can rebuild newdata consistently.
qda.formula <- function(formula, data = NULL, ...,
			subset, na.action = na.fail)
{
    m <- match.call(expand.dots = FALSE)
    # a matrix 'data' argument must be converted for model.frame()
    if(is.matrix(eval(m$data, sys.frame(sys.parent()))))
        m$data <- as.data.frame(data)
    m$... <- NULL
    m[[1]] <- as.name("model.frame")
    m <- eval(m, sys.frame(sys.parent()))
    Terms <- attr(m, "terms")
    grouping <- model.extract(m, "response")
    x <- model.matrix(Terms, m)
    # record the levels of any factor predictors (for prediction later)
    xvars <- as.character(attr(Terms, "variables"))[-1]
    if ((yvar <- attr(Terms, "response")) > 0) xvars <- xvars[-yvar]
    xlev <- if (length(xvars) > 0) {
        xlev <- lapply(m[xvars], levels)
        xlev[!sapply(xlev, is.null)]
    }
    # qda works on the predictors only: drop the intercept column
    xint <- match("(Intercept)", colnames(x), nomatch=0)
    if(xint > 0) x <- x[, -xint, drop=FALSE]
    res <- qda.default(x, grouping, ...)
    res$terms <- Terms
    res$call <- match.call()
    res$contrasts <- attr(x, "contrasts")
    res$xlevels <- xlev
    attr(res, "na.message") <- attr(m, "na.message")
    if(!is.null(attr(m, "na.action"))) res$na.action <- attr(m, "na.action")
    res
}

# qda method for data frames: coerce to a plain numeric matrix and
# delegate to the matrix method, recording this call on the result.
qda.data.frame <- function(x, ...)
{
    mx <- structure(data.matrix(x), class = "matrix")
    ans <- qda.matrix(mx, ...)
    ans$call <- match.call()
    ans
}

# qda method for "Matrix" objects: convert to a base matrix and delegate
# to the matrix method, recording this call on the result.
qda.Matrix <- function(x, ...)
{
    ans <- qda.matrix(as.matrix(x), ...)
    ans$call <- match.call()
    ans
}

# qda method for matrices: applies subset and na.action to x and the
# grouping vector jointly, then passes control on (via NextMethod) to
# qda.default.
qda.matrix <- function(x, grouping, ...,
		       subset, na.action = na.fail)
{
    if(!missing(subset)) {
        x <- x[subset, , drop = FALSE]
        grouping <- grouping[subset]
    }
    if(!missing(na.action)) {
        # bundle grouping and x so NA rows are removed from both together
        dfr <- na.action(structure(list(g = grouping, x = x),
                                   class = "data.frame"))
        grouping <- dfr$g
        x <- dfr$x
    }
    res <- NextMethod("qda")
    res$call <- match.call()
    res
}

# Quadratic discriminant analysis work-horse.
# x: data matrix (cases x variables); grouping: class label per case;
# prior: class prior probabilities (default: observed proportions);
# method: covariance estimator -- "moment" (divisor n_k - 1), "mle"
#   (divisor n_k), "mve" (robust, via cov.mve) or "t" (robust, assuming a
#   multivariate t distribution on nu degrees of freedom);
# CV = TRUE: return leave-one-out cross-validated classes and posterior
#   probabilities instead of a fitted "qda" object ("moment"/"mle" only).
qda.default <-
  function(x, grouping, prior = proportions,
           method = c("moment", "mle", "mve", "t"), CV = FALSE, nu = 5, ...)
{
    if(is.null(dim(x))) stop("x is not a matrix")
    x <- as.matrix(x)
    n <- nrow(x)
    p <- ncol(x)
    if(n != length(grouping)) stop("nrow(x) and length(grouping) are different")
    g <- as.factor(grouping)
    lev <- levels(g)
    counts <- as.vector(table(g))
    names(counts) <- lev
    # every within-group covariance needs more cases than variables
    if(any(counts < p+1)) stop("some group is too small for qda")
    proportions <- counts/length(g)
    ng <- length(proportions)
# allow for supplied prior
    if(any(prior < 0) || round(sum(prior), 5) != 1) stop("invalid prior")
    if(length(prior) != ng) stop("prior is of incorrect length")
    names(prior) <- lev
# means by group (rows) and variable (columns)
    group.means <- tapply(x, list(rep(g, ncol(x)), col(x)), mean)
    # scaling[,,i] is a whitening transform for group i; ldet[i] holds the
    # log-determinant of that group's covariance estimate
    scaling <- array(dim=c(p,p,ng))
    ldet <- numeric(ng)
    method <- match.arg(method)
    if(CV && !(method == "moment" || method == "mle"))
        stop(paste("Cannot use leave-one-out CV with method", method))
    for (i in 1:ng){
        if(method == "mve") {
            # bug fix: was 'X[...]' with X undefined in this branch
            cX <- cov.mve(x[unclass(g) == i, ], , FALSE)
            # bug fix: assign the whole row of group means, not element i
            group.means[i, ] <- cX$center
            sX <- svd(cX$cov, nu=0)
            scaling[, , i] <- sX$v %*% diag(sqrt(1/sX$d),,p)
            # bug fix: sX$d is already a vector of singular values; the old
            # diag(sX$d) built a p x p matrix whose off-diagonal zeros made
            # the sum of logs -Inf
            ldet[i] <- sum(log(sX$d))
        } else if(method == "t") {
            # robust fit assuming a multivariate t on nu d.f.: iterate
            # weighted means/covariances until the weights stabilize
            if(nu <= 2) stop("nu must exceed 2")
            m <- counts[i]
            X <- x[unclass(g) == i, ]
            w <- rep(1, m)
            repeat {
                w0 <- w
                W <- scale(X, group.means[i, ], FALSE)
                sX <- svd(sqrt((1 + p/nu) * w/m) * W, nu=0)
                W <- W %*% sX$v %*% diag(1/sX$d,, p)
                w <- 1/(1 + drop(W^2 %*% rep(1, p))/nu)
                #         print(summary(w))
                group.means[i,] <- apply(w*X, 2, sum)/sum(w)
                if(all(abs(w - w0) < 1e-2)) break
            }
            qx <- qr(sqrt(w)*scale(X, group.means[i, ], FALSE))
            if(qx$rank < p) stop(paste("Rank deficiency in group", lev[i]))
            qx <- qx$qr* sqrt((1 + p/nu)/m)
            scaling[, , i] <- backsolve(qx[1:p,  ], diag(p))
            ldet[i] <- 2*sum(log(abs(diag(qx))))
        } else {
            # "moment" uses divisor n_k - 1, "mle" uses n_k
            if(method == "moment") nk <- counts[i] - 1 else nk <- counts[i]
            X <- scale(x[unclass(g) == i, ], group.means[i, ], FALSE)/sqrt(nk)
            qx <- qr(X)
            if(qx$rank < p) stop(paste("Rank deficiency in group", lev[i]))
            qx <- qx$qr
            scaling[, , i] <- backsolve(qx[1:p, ], diag(p))
            # log|Sigma_i| from the triangular factor of the scaled QR
            ldet[i] <- 2*sum(log(abs(diag(qx))))
        }
    }
    if(CV) {
        # leave-one-out corrections applied analytically to each case's
        # own-group Mahalanobis distance and log-determinant
        NG <- if(method == "mle") 0 else 1
        dist <- matrix(0, n, ng)
        Ldet <- matrix(0, n, ng)
        for(i in 1:ng) {
            dev <- ((x - matrix(group.means[i,  ], nrow(x),
                                p, byrow = TRUE)) %*% scaling[,,i])
            dist[, i] <- apply(dev^2, 1, sum)
            Ldet[, i] <- ldet[i]
        }
        nc <- counts[g]
        ind <- cbind(1:n, g)
        Ldet[ind] <- log(1 - nc/(nc-1)/(nc-NG) * dist[ind]) +
            p * log((nc-NG)/(nc-1-NG)) + Ldet[ind]
        dist[ind] <- dist[ind] * (nc^2/(nc-1)^2) * (nc-1-NG)/(nc-NG) /
            (1 - nc/(nc-1)/(nc-NG) * dist[ind])
        dist <- 0.5 * dist + 0.5 * Ldet - matrix(log(prior), n, ng, byrow=TRUE)
        dist <- exp(-(dist - min(dist, na.rm=TRUE)))
        posterior <- dist/drop(dist %*% rep(1, length(prior)))
        cl <- factor(max.col(posterior), levels=seq(along=lev), labels=lev)
        dimnames(posterior) <- list(rownames(x), lev)
        return(list(class = cl, posterior = posterior))
    }
    if(is.null(dimnames(x)))
        dimnames(scaling) <- list(NULL, as.character(1:p), lev)
    else {
        dimnames(scaling) <- list(colnames(x), as.character(1:p), lev)
        dimnames(group.means)[[2]] <- colnames(x)
    }
    res <- list(prior = prior, counts = counts, means = group.means,
                scaling = scaling, ldet = ldet, lev = lev, N = n,
                call = match.call())
    class(res) <- "qda"
    res
}

# Classify observations with a fitted "qda" object.
# method: "plug-in" (classical quadratic discriminant scores),
# "predictive" (t-style predictive density), "debiased" (plug-in with a
# bias correction to the log-determinant term), or "looCV" (leave-one-out
# cross-validation on the training data; newdata is then not allowed).
# Returns a list with the predicted classes and the posterior matrix.
predict.qda <- function(object, newdata, prior = object$prior,
			method = c("plug-in", "predictive", "debiased",
                          "looCV"), ...)
{
    if(!inherits(object, "qda")) stop("object not of class qda")
    method <- match.arg(method)
    if(method == "looCV" && !missing(newdata))
        stop("Cannot have leave-one-out CV with newdata")
    if(is.null(mt <- object$call$method)) mt <- "moment"
    if(method == "looCV" && !(mt == "moment" || mt == "mle"))
        stop(paste("Cannot use leave-one-out CV with method", mt))
    if(!is.null(Terms <- object$terms)) {
    # formula fit
        # rebuild the design matrix from the stored terms/levels/contrasts
        if(missing(newdata)) newdata <- model.frame(object)
        else {
            newdata <- model.frame(as.formula(delete.response(Terms)),
                                   newdata, na.action=function(x) x,
                                   xlev = object$xlevels)
        }
        x <- model.matrix(delete.response(Terms), newdata,
                          contrasts = object$contrasts)
        xint <- match("(Intercept)", colnames(x), nomatch=0)
        if(xint > 0) x <- x[, -xint, drop=FALSE]
        if(method == "looCV") g <- model.extract(newdata, "response")
    } else { #
    # matrix or data-frame fit
        if(missing(newdata)) {
            # re-evaluate the original x (and grouping) from the stored call,
            # re-applying any subset and na.action
            if(!is.null(sub <- object$call$subset)) {
                newdata <- eval(parse(text=paste(deparse(object$call$x),"[",
                                      deparse(sub),",]")), sys.frame(sys.parent()))
                g <- eval(parse(text=paste(deparse(object$call[[3]]),"[",
                                deparse(sub),"]")), sys.frame(sys.parent()))
            } else {
                newdata <- eval(object$call$x, sys.frame(sys.parent()))
                g <- eval(object$call[[3]], sys.frame(sys.parent()))
            }
            if(!is.null(nas <- object$call$na.action)) {
                df <- data.frame(g = g, X = newdata)
                df <- eval(call(nas, df))
                g <- df$g
                newdata <- df$X
            }
            g <- as.factor(g)
        }
        if(is.null(dim(newdata)))
            dim(newdata) <- c(1, length(newdata))  # a row vector
        x <- as.matrix(newdata)		# to cope with dataframes
    }
    p <- ncol(object$means)
    if(ncol(x) != p) stop("wrong number of variables")
    if(length(colnames(x)) > 0 &&
       any(colnames(x) != dimnames(object$means)[[2]]))
        warning("Variable names in newdata do not match those in object")
    ngroup <- length(object$prior)
    # dist[, i]: discriminant "distance" of each case from group i
    dist <- matrix(0, nrow = nrow(x), ncol = ngroup)
    if(method == "plug-in") {
        for(i in 1:ngroup) {
            dev <- ((x - matrix(object$means[i,  ], nrow(x),
                                ncol(x), byrow = TRUE)) %*% object$scaling[,,i])
            dist[, i] <- 0.5 * apply(dev^2, 1, sum) + 0.5 * object$ldet[i] - log(object$prior[i])
        }
        # subtract the minimum before exponentiating, for numerical stability
        dist <- exp(-(dist - min(dist, na.rm=TRUE)))
    } else if(method == "looCV") {
        # analytic leave-one-out adjustment of each training case's
        # own-group distance and log-determinant
        n <- nrow(x)
        NG <- 1
        if(mt == "mle") NG <- 0
        ldet <- matrix(0, n, ngroup)
        for(i in 1:ngroup) {
            dev <- ((x - matrix(object$means[i,  ], nrow(x), p, byrow = TRUE))
                    %*% object$scaling[,,i])
            dist[, i] <- apply(dev^2, 1, sum)
            ldet[, i] <- object$ldet[i]
        }
        nc <- object$counts[g]
        ind <- cbind(1:n, g)
        ldet[ind] <- log(1 - nc/(nc-1)/(nc-NG) * dist[ind]) +
            p * log((nc-NG)/(nc-1-NG)) + ldet[ind]
        dist[ind] <- dist[ind] * (nc^2/(nc-1)^2) * (nc-1-NG)/(nc-NG) /
            (1 - nc/(nc-1)/(nc-NG) * dist[ind])
        dist <- 0.5 * dist + 0.5 * ldet -
            matrix(log(object$prior), n, ngroup, byrow=TRUE)
        dist <- exp(-(dist - min(dist, na.rm=TRUE)))
    } else if(method == "debiased") {
        for(i in 1:ngroup) {
            nk <- object$counts[i]
            # Bm: bias correction to the log-determinant term
            Bm <- p * log((nk-1)/2) - sum(digamma(0.5 * (nk - 1:ngroup)))
            dev <- ((x - matrix(object$means[i,  ], nrow = nrow(x),
                                ncol = ncol(x), byrow = TRUE)) %*% object$scaling[,,i])
            dist[, i] <- 0.5 * (1 - (p-1)/(nk-1)) * apply(dev^2, 1, sum) +
                0.5 * object$ldet[i] - log(object$prior[i]) + 0.5 * Bm - p/(2*nk)
        }
        dist <- exp(-(dist - min(dist, na.rm=TRUE)))
    } else {
        # "predictive": t-like predictive density for each group
        N <- object$N
        for(i in 1:ngroup) {
            nk <- object$counts[i]
            dev <- ((x - matrix(object$means[i,  ], nrow = nrow(x),
                                ncol = ncol(x), byrow = TRUE))
                    %*% object$scaling[,,i])
            dev <- 1 + apply(dev^2, 1, sum)/(nk+1)
            dist[, i] <- object$prior[i] * exp(-object$ldet[i]/2) *
                dev^(-nk/2) * (1 + nk)^(-p/2)
        }
    }
    # normalize rows to posterior probabilities and pick the modal class
    posterior <- dist/drop(dist %*% rep(1, length(object$prior)))
    cl <- factor(max.col(posterior), levels=seq(along=object$lev),
                 labels=object$lev)
    dimnames(posterior) <- list(rownames(x), object$lev)
    list(class = cl, posterior = posterior)
}

# Print method for "qda" objects: shows the call, the prior
# probabilities and the group means.  Returns x invisibly.
print.qda <- function(x, ...)
{
    cl <- x$call
    if(!is.null(cl)) {
        names(cl)[2] <- ""      # suppress the name of the first argument
        cat("Call:\n")
        dput(cl)
    }
    cat("\nPrior probabilities of groups:\n")
    print(x$prior, ...)
    cat("\nGroup means:\n")
    print(x$means, ...)
    invisible(x)
}

# qda stores its model information the same way lda does, so reuse the
# lda model-frame recovery method (model.frame.lda is defined earlier in
# the package -- presumably MASS/lda.q).
model.frame.qda <-  model.frame.lda
# file MASS/rlm.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Generic for robust fitting of linear models; dispatches on the class of x.
rlm <- function(x, ...) UseMethod("rlm")

# rlm method for formulas: builds the model frame and design matrix, then
# delegates to rlm.default.  method = "model.frame" returns the model
# frame without fitting; model/x.ret/y.ret control which components are
# retained on the result; wt.method says whether `weights` are case
# weights or inverse variances.
rlm.formula <-
    function(formula, data = NULL, weights, ...,
             subset, na.action = na.fail,
             method = c("M", "MM", "model.frame"),
             wt.method = c("case", "inv.var"),
             model = TRUE, x.ret = TRUE, y.ret = FALSE, contrasts = NULL)
{
    # rebuild the call as a model.frame() call, dropping non-model arguments
    mf <- match.call(expand.dots = FALSE)
    mf$method <- mf$model <- mf$x <- mf$y <- mf$contrasts <- mf$... <- NULL
    mf[[1]] <- as.name("model.frame")
    mf <- eval(mf, sys.frame(sys.parent()))
    method <- match.arg(method)
    wt.method <- match.arg(wt.method)
    if(method == "model.frame") return(mf)
    mt <- attr(mf, "terms")
    y <- model.extract(mf, "response")
    x <- model.matrix(mt, mf, contrasts)
    # record factor levels for the predictors (used for later prediction)
    xvars <- as.character(attr(mt, "variables"))[-1]
    if ((yvar <- attr(mt, "response")) > 0)
        xvars <- xvars[-yvar]
    xlev <- if (length(xvars) > 0) {
        xlev <- lapply(mf[xvars], levels)
        xlev[!sapply(xlev, is.null)]
    }
    weights <- model.extract(mf, weights)
    if(!length(weights)) weights <- rep(1, nrow(x))  # default: unit weights
    fit <- rlm.default(x, y, weights, method = method,
                       wt.method = wt.method, ...)
    fit$terms <- mt
    fit$call <- match.call()
    fit$contrasts <- attr(x, "contrasts")
    fit$xlevels <- xlev
    # propagate any NA-handling information from the model frame
    attr(fit, "na.message") <- attr(mf, "na.message")
    if(!is.null(attr(mf, "na.action"))) fit$na.action <- attr(mf, "na.action")
    if(model) fit$model <- mf
    if(!x.ret) fit$x <- NULL
    if(y.ret) fit$y <- y
    fit
}

# Robust linear-model fitting by M- or MM-estimation via iteratively
# reweighted least squares.
# x, y: design matrix and response; weights: case weights or inverse
# variances (see wt.method); w: initial IRLS weights; init: initial fit
# ("ls", "lts", a coefficient vector, or a list with $coef); psi: weight
# function (returning its derivative when called with deriv = 1);
# scale.est: scale estimator for M-estimation; k2: Huber tuning constant
# for the "proposal 2" scale; maxit/acc: IRLS control; test.vec: quantity
# ("resid", "coef" or "w") monitored for convergence.
rlm.default <-
  function(x, y, weights, ..., w = rep(1, nrow(x)),
           init = "ls", psi = psi.huber,
           scale.est = c("MAD", "Huber", "proposal 2"), k2 = 1.345,
           method = c("M", "MM"), wt.method = c("case", "inv.var"),
           maxit = 20, acc = 1e-4, test.vec = "resid")
{
    # relative change in the monitored vector between IRLS iterations
    irls.delta <- function(old, new)
        sqrt(sum((old - new)^2)/max(1e-20, sum(old^2)))
    # fallback criterion: largest weighted correlation of residuals with x
    irls.rrxwr <- function(x, w, r)
    {
        w <- sqrt(w)
        max(abs((matrix(r * w, 1, length(r)) %*% x)/
                sqrt(matrix(w, 1, length(r)) %*% (x^2))))/sqrt(sum(w * r^2))
    }

    method <- match.arg(method)
    wt.method <- match.arg(wt.method)
    nmx <- deparse(substitute(x))
    if(is.null(dim(x))) {
        x <- as.matrix(x)
        colnames(x) <- nmx
    } else x <- as.matrix(x)
    if(is.null(colnames(x)))
        colnames(x) <- paste("X", seq(ncol(x)), sep="")
    if(qr(x)$rank < ncol(x))
        stop("x is singular: singular fits are not implemented in rlm")

    if(!(any(test.vec == c("resid", "coef", "w", "NULL"))
         || is.null(test.vec))) stop("invalid testvec")
    ## deal with weights
    w2 <-  rep(1, nrow(x))
    if(!missing(weights)) {
        if(length(weights) != nrow(x))
            stop("Length of weights must equal number of observations")
        if(any(weights < 0)) stop("Negative weights value")
        w <- w * weights
        if(wt.method == "inv.var") w2 <- 1/sqrt(weights)
    }

    if(method == "M") {
        scale.est <- match.arg(scale.est)
        if(!is.function(psi)) psi <- get(psi, mode="function")
        ## match any ... args to those of psi.
        arguments <- list(...)
        if(length(arguments)) {
            pm <- pmatch(names(arguments), names(formals(psi)), nomatch = 0)
            if(any(pm == 0)) warning(paste("some of ... do not match"))
            pm <- names(arguments)[pm> 0]
            formals(psi)[pm] <- unlist(arguments[pm])
        }
        if(is.character(init)) {
            if(init == "ls")
                temp <- lm.wfit(x, y, w, method="qr")
            else if(init == "lts")
                temp <- lqs.default(x, y, intercept=FALSE, nsamp=200)
            else stop("init method is unknown")
            coef <- temp$coef
            resid <- temp$resid
        } else {
            if(is.list(init)) coef <- init$coef
            else coef <- init
            resid <- y - x %*% coef
        }
    } else if(method == "MM") {
        # MM-estimation: high-breakdown S-estimate of coefficients and
        # scale, refined by bisquare M-estimation at that fixed scale
        scale.est <- "MM"
        temp <- lqs.default(x, y, intercept=FALSE, method="S", k0 = 1.548)
        coef <- temp$coef
        resid <- temp$resid
        psi <- psi.bisquare
        if(length(arguments <- list(...)))
            if(match("c", names(arguments), nomatch = 0)) {
                c0 <- arguments$c
                if (c0 > 1.548) {
                    # bug fix: set the tuning constant via formals(), as the
                    # M branch does; 'psi$c <- c0' is invalid for a closure
                    # in R ("object of type 'closure' is not subsettable")
                    formals(psi)$c <- c0
                } else warning("c must be at least 1.548 and has been ignored")
            }
        scale <- temp$scale
    } else stop("method is unknown")

    done <- FALSE
    conv <- NULL
    n1 <- nrow(x) - ncol(x)
    if(scale.est != "MM") scale <- mad(resid, 0)
    # consistency constant for the Huber "proposal 2" scale estimate
    theta <- 2*pnorm(k2)-1
    gamma <- theta + k2^2 * (1 - theta) - 2 * k2 * dnorm(k2)
    for(iiter in 1:maxit) {
        # snapshot of the monitored quantity before this iteration's refit
        if(!is.null(test.vec)) testpv <- get(test.vec)
        if(scale.est != "MM") {
            if(scale.est == "MAD") scale <- median(abs(resid))/0.6745
            else scale <- sqrt(sum(pmin(resid^2, (k2 * scale)^2))/(n1*gamma))
            if(scale == 0) {       # exact fit: finished
                done <- TRUE
                break
            }
        }
        w <- psi(w2*resid/scale)
        if(!missing(weights)) w <- w * weights
        temp <- lm.wfit(x, y, w, method="qr")
        coef <- temp$coef
        resid <- temp$residuals
        if(!is.null(test.vec)) convi <- irls.delta(testpv, get(test.vec))
        else convi <- irls.rrxwr(x, w, resid)
        conv <- c(conv, convi)
        done <- (convi <= acc)
        if(done) break
    }
    if(!done) warning(paste("rlm failed to converge in", maxit, "steps"))
    if(!missing(weights)) {
        # report IRLS weights net of the user-supplied case weights
        tmp <- (weights != 0)
        w[tmp] <- w[tmp]/weights[tmp]
    }
    fit <- list(coefficients = coef, residuals = resid, effects = temp$effects,
                rank = temp$rank, fitted.values = temp$fitted.values,
                assign = temp$assign,  qr = temp$qr, df.residual = NA, w = w,
                s = scale, psi = psi, k2 = k2,
                conv = conv, converged = done, x = x, call = match.call())
    class(fit) <- c("rlm", "lm")
    fit
}

# Print method for "rlm" fits: shows the call, the convergence status,
# the coefficients, the degrees of freedom and the scale estimate.
# Returns its argument invisibly.
print.rlm <- function(x, ...)
{
    the.call <- x$call
    if(!is.null(the.call)) {
        cat("Call:\n")
        dput(the.call)
    }
    iters <- length(x$conv)
    if(x$converged)
        cat("Converged in", iters, "iterations\n")
    else cat("Ran", iters, "iterations without convergence\n")
    cf <- x$coef
    cat("\nCoefficients:\n")
    print(cf, ...)
    n.obs <- length(x$resid)
    cat("\nDegrees of freedom:", n.obs, "total;", n.obs - length(cf),
        "residual\n")
    cat("Scale estimate:", format(signif(x$s, 3)), "\n")
    invisible(x)
}

# Summary method for "rlm" fits: builds a coefficient table with robust
# standard errors derived from the psi function, plus the unscaled
# covariance and (optionally) the correlation matrix of the coefficients.
# method = "XtWX" weights the design matrix by the final IRLS weights
# when forming the covariance.
# (correlation default changed from the reassignable shorthand T to TRUE.)
summary.rlm <- function(object, method=c("XtX", "XtWX"), correlation=TRUE, ...)
{
    method <- match.arg(method)
    s <- object$s
    coef <- object$coef
    cnames <- names(coef)
    ptotal <- length(coef)
    resid <- object$resid
    n <- length(resid)
    if(any(na <- is.na(coef))) coef <- coef[!na]
    p <- length(coef)
    rdf <- n - p
    rinv <- diag(p)
    dimnames(rinv) <- list(cnames, cnames)
    w <- object$psi(resid/s)            # final psi weights
    S <- sum((resid*w)^2)/rdf           # variance of the psi-weighted residuals
    psiprime <- object$psi(resid/s, deriv=1)
    mn <- mean(psiprime)
    kappa <- 1 + p*var(psiprime)/(n*mn^2)   # finite-sample correction
    stddev <- sqrt(S)*(kappa/mn)
    X <- object$x
    if(method == "XtWX")  X <- X * sqrt(w/mean(w))
    R <- qr(X)$qr
    R <- R[1:p, 1:p, drop = FALSE]
    R[lower.tri(R)] <- 0                # keep only the R factor of the QR
    rinv <- solve(R, rinv)              # R^{-1}
    dimnames(rinv) <- list(cnames, cnames)
    rowlen <- (rinv^2 %*% rep(1, p))^0.5
    names(rowlen) <- cnames
    if(correlation) {
        correl <- rinv * array(1/rowlen, c(p, p))
        correl <- correl %*% t(correl)  # coefficient correlation matrix
    } else correl <- NULL
    coef <- array(coef, c(p, 3))
    dimnames(coef) <- list(cnames, c("Value", "Std. Error", "t value"))
    coef[, 2] <- rowlen %o% stddev
    coef[, 3] <- coef[, 1]/coef[, 2]
    # keep only the call, then attach the summary components
    object <- object["call"]
    object$residuals <- resid
    object$coefficients <- coef
    object$sigma <- s
    object$stddev <- stddev
    object$df <- c(p, rdf, ptotal)
    object$r.squared <- NA
    object$cov.unscaled <- rinv %*% t(rinv)
    object$correlation <- correl
    object$terms <- NA
    class(object) <- "summary.rlm"
    object
}

# Print method for "summary.rlm": shows the call, a residual summary
# (quantiles when there are enough residual degrees of freedom), the
# coefficient table, the scale estimate and, if present, the lower
# triangle of the coefficient correlation matrix.  Returns x invisibly.
print.summary.rlm <-
function(x, digits = max(3, .Options$digits - 3), ...)
{
    cat("\nCall: ")
    dput(x$call)
    res <- x$residuals
    degf <- x$df
    rdf <- degf[2]
    if(rdf > 5) {
        cat("Residuals:\n")
        lab <- c("Min", "1Q", "Median", "3Q", "Max")
        if(length(dim(res)) == 2) {
            rq <- apply(t(res), 1, quantile)
            dimnames(rq) <- list(lab, colnames(res))
        } else {
            rq <- quantile(res)
            names(rq) <- lab
        }
        print(rq, digits = digits, ...)
    } else if(rdf > 0) {
        cat("Residuals:\n")
        print(res, digits = digits, ...)
    }
    nsingular <- degf[3] - degf[1]
    if(nsingular)
        cat("\nCoefficients: (", nsingular,
            " not defined because of singularities)\n", sep = "")
    else cat("\nCoefficients:\n")
    print(format(round(x$coef, digits = digits)), quote = FALSE, ...)
    cat("\nResidual standard error:", format(signif(x$sigma, digits)),
        "on", rdf, "degrees of freedom\n")
    correl <- x$correlation
    if(!is.null(correl)) {
        p <- dim(correl)[2]
        if(p > 1) {
            cat("\nCorrelation of Coefficients:\n")
            ll <- lower.tri(correl)
            correl[ll] <- format(round(correl[ll], digits))
            correl[!ll] <- ""
            print(correl[-1, -p, drop = FALSE], quote = FALSE,
                  digits = digits, ...)
        }
    }
    invisible(x)
}

# Huber psi weight function: weight 1 for |u| <= k, k/|u| beyond.
# With deriv = 1, returns the derivative indicator (|u| <= k).
psi.huber <- function(u, k = 1.345, deriv=0)
{
    if(deriv) return(abs(u) <= k)
    pmin(1, k / abs(u))
}

# Hampel three-part redescending psi weight function with knots a < b < c:
# full weight up to a, then declining, reaching zero at c.
# With deriv = 1, returns the piecewise slope of the psi function
# (zero outside |u| <= c).
psi.hampel <- function(u, a = 2, b = 4, c = 8, deriv=0)
{
    absu <- pmin(abs(u) + 1e-50, c)     # tiny offset avoids division by zero
    if(deriv) {
        slope <- ifelse(absu <= a, 1, ifelse(absu <= b, 0, -a/(c-b)))
        return(ifelse(abs(u) <= c, slope, 0))
    }
    num <- ifelse(absu <= a, absu, ifelse(absu <= b, a, a*(c-absu)/(c-b)))
    num/absu
}

# Tukey bisquare psi weight function: (1 - (u/c)^2)^2 for |u| < c, zero
# beyond.  With deriv = 1, returns the derivative of the psi function.
psi.bisquare <- function(u, c = 4.685, deriv=0)
{
    r <- pmin(1, abs(u/c))
    if(!deriv) return((1 - r^2)^2)
    tt <- (u/c)^2
    ifelse(tt < 1, (1 - tt) * (1 - 5 * tt), 0)
}
# file MASS/rms.curv.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
"rms.curv"<-
function(obj, fit.val = get.fit.val(obj, data), data = obj$call$data)
{
  # RMS relative curvature measures of nonlinearity for a fitted
  # nonlinear regression: parameter-effects (ct) and intrinsic (ci)
  # curvatures, plus the same scaled by sqrt(F(0.95; p, n-p)) as
  # pe and ic.  fit.val must carry "gradient" and "hessian"
  # attributes (as from a deriv3-style model function).
  # NOTE(review): explicitly marked not yet implemented in this port.
  .NotYetImplemented()
  # Re-evaluate the RHS of the model formula at the fitted
  # coefficients so the derivative attributes are available.
  get.fit.val <- function(obj, data)
  {
    if(is.null(data)) data <- sys.parent(2)
    if(is.numeric(data)) data <- sys.frame(data)
    if(is.name(data)) data <- get(data)
    class(data) <- NULL
    pp <- as.list(b <- coef(obj))
    np <- names(b)
    data[np] <- pp
    eval(as.expression(obj$formula[3]), local = data)
  }
  v <- attr(fit.val, "gradient")    # n x p first-derivative matrix
  if(is.null(v)) stop("gradient attribute missing")
  a <- attr(fit.val, "hessian")     # n x p x p second derivatives
  if(is.null(a)) stop("hessian attribute missing")
  p <- ncol(v)
  n <- nrow(v)
  s <- sqrt(sum((obj$residuals)^2)/(n - p))   # residual std. error
  sp <- s * sqrt(p)
  # Stack the gradient with the unique (lower-triangle) hessian
  # columns, then orthogonalise the combined matrix via QR.
  D <- v
  for(j in 1:p) D <- cbind(D, a[, 1:j, j])
  qrd <- qr(D)
  Q <- qr.Q(qrd)
  rnk <- qrd$rank
  if(rnk <= p) warning("regression apparently linear")
  Q1 <- Q[, 1:rnk]
  # Project each hessian face onto the orthogonal basis.
  C <- array(0, c(rnk, p, p))
  for(j in 1:p) C[,  , j] <- crossprod(Q1, a[,  , j])
  C <- aperm(C, c(2, 3, 1))
  r11i <- solve(qr.R(qrd)[1:p, 1:p])
  # Faces 1..p accumulate the parameter-effects curvature ...
  ct <- 0
  for(j in 1:p) {
    C[,  , j] <- crossprod(r11i, C[,  , j]) %*% r11i * sp
    ct <- ct + 2 * sum(C[,  , j]^2) + sum(diag(C[,  , j]))^2
  }
  # ... and faces p+1..rnk the intrinsic curvature.
  ci <- 0
  for(j in (p + 1):rnk) {
    C[,  , j] <- crossprod(r11i, C[,  , j]) %*% r11i * sp
    ci <- ci + 2 * sum(C[,  , j]^2) + sum(diag(C[,  , j]))^2
  }
  ct <- sqrt(ct/(p * (p + 2)))
  ci <- sqrt(ci/(p * (p + 2)))
  # Scale by sqrt of the 95% F quantile for a confidence-region
  # interpretation.
  pe <- ct * sqrt(qf(19/20, p, n - p))
  ic <- ci * sqrt(qf(19/20, p, n - p))
  val <- list(pe = pe, ic = ic, ct = ct, ci = ci, C = C)
  class(val) <- "rms.curv"
  val
}
"print.rms.curv"<-
function(x, ...)
{
  ## Print method for "rms.curv" objects: report the two scaled
  ## curvature summaries, then return the object invisibly so the
  ## method composes cleanly in pipelines.
  pe <- round(x$pe, 4)
  ic <- round(x$ic, 4)
  cat("Parameter effects: c^theta x sqrt(F) =", pe, "\n",
      "       Intrinsic: c^iota  x sqrt(F) =", ic, "\n",
      ...)
  invisible(x)
}
# file MASS/sammon.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
sammon <- function(d, y, k=2, niter=100, trace=TRUE, magic=0.2, tol=1e-4)
{
   # Sammon's non-linear mapping: find a k-dimensional configuration
   # whose inter-point distances approximate the dissimilarities d,
   # minimising Sammon's stress with the compiled routine VR_sammon.
   #
   # d     : a "dist" object or a square symmetric distance matrix
   # y     : optional n x k starting configuration (default: cmdscale)
   # niter : maximum number of iterations
   # magic : step-size ("magic") factor passed to the optimiser
   # tol   : convergence tolerance
   # Returns list(points, stress, call).
   call <- match.call()
   if(any(!is.finite(as.vector(d))))
      stop("NAs/Infs not allowed in d")
   # Accept either a "dist" object (has a "Size" attribute) or a
   # square matrix; expand a dist into a full symmetric matrix.
   if(is.null(n <- attr(d, "Size"))) {
      x <- as.matrix(d)
      if((n <- nrow(x)) != ncol(x))
        stop("Distances must be result of dist or a square matrix")
   }
   else {
      x <- matrix(0, n, n)
      x[row(x) > col(x)] <- d
      x <- x + t(x)
   }
   # Sammon stress divides by each distance, so zero/negative
   # off-diagonal entries are fatal: report the first offending pair.
   if (any(ab <- x[row(x) < col(x)]<=0)) {
	aa <- cbind(as.vector(row(x)), as.vector(col(x)))[row(x) < col(x),]
	aa <- aa[ab,,drop=FALSE]
	stop(paste("zero or negative distance between objects", aa[1,1],
	 "and", aa[1,2]))
	}
   if(missing(y)) y <- cmdscale(d, k=k)
   if(any(dim(y) != c(n, k)) ) stop("invalid initial configuration")
   # The C routine expects double storage.
   storage.mode(x) <- "double"
   storage.mode(y) <- "double"
   if(!is.loaded(symbol.C("VR_sammon")))
     stop("Compiled code has not been dynamically loaded")
   # y is updated in place by the C code; e receives the final stress.
   z <- .C("VR_sammon",
      x = x,
      as.integer(n),
      as.integer(k),
      y = y,
      as.integer(niter),
      e = double(1),
      as.integer(trace),
      as.double(magic),
      as.double(tol)
      )
   list(points=z$y, stress=z$e, call=call)
}

# file MASS/stdres.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
lmwork <- function(object)
{
    ## Diagnostic working quantities for a fitted "lm" object.
    ##
    ## Returns a list with components:
    ##   stdedv  - residual standard deviation (historical misspelled
    ##             name, retained so existing callers keep working)
    ##   stddev  - the same value under the correctly spelled name
    ##   stdres  - standardized (internally studentized) residuals
    ##   studres - (externally) studentized residuals
    resid <- object$resid
    hat <- lm.influence(object)$hat
    ## drop missing residuals, warning about how many were removed;
    ## switch(0, ...) silently returns NULL when nothing is missing
    ok <- !(is.na(resid))
    n.miss <- sum(!ok)
    switch(ifelse(n.miss > 2, 2, n.miss),
           warning("1 missing observation deleted"),
           warning(paste(n.miss, "missing observations deleted")))
    ## NOTE(review): 'hat' is not subset by 'ok'; this assumes the
    ## lengths agree (true for the default na.action) -- confirm for
    ## fits made with na.exclude.
    resid <- resid[ok]
    n <- length(resid)
    p <- object$rank
    rdf <- object$df.resid
    if(is.null(rdf))
        rdf <- n - p
    if(!is.null(object$weights)) {
        ## weighted fit: work with sqrt(w) * residuals and drop
        ## zero-weight rows from the residual degrees of freedom
        wt <- object$weights[ok]
        resid <- resid * wt^0.5
        excl <- wt == 0
        if(any(excl)){
            warning(paste(sum(excl),
                          "rows with zero weights not counted"))
            resid <- resid[!excl]
            if(is.null(object$df.resid))
                rdf <- rdf - sum(excl)
        }
    }
    stdres <- studres <- resid
    if(n > p) {
        stddev <- sqrt(sum(resid^2)/rdf)
        sr <- resid/(sqrt(1 - hat) * stddev)
        stdres <- sr
        ## deletion (external) studentization of the residuals
        studres <- sr/sqrt((n-p-sr^2)/(n-p-1))
        ## pad back to the original observation length if rows were
        ## dropped by the fit's na.action
        if(!is.null(object$na.action)) {
            stdres <- naresid(object$na.action, stdres)
            studres <- naresid(object$na.action, studres)
        }
    }
    else stddev <- stdres[] <- studres[] <- NA
    ## keep the historical misspelled component and add the correct
    ## spelling so new code need not propagate the typo
    list(stdedv = stddev, stddev = stddev,
         stdres = stdres, studres = studres)
}
## Convenience accessor: standardized residuals from lmwork().
stdres <- function(object) lmwork(object)[["stdres"]]
## Convenience accessor: studentized residuals from lmwork().
studres <- function(object) lmwork(object)[["studres"]]
# file MASS/stepAIC.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
stepAIC <-
  function(object, scope, scale = 0,
           direction = c("both", "backward", "forward"),
           trace = 1, keep = NULL, steps = 1000, use.start = FALSE, k = 2, ...)
{
  # Stepwise model selection by AIC (penalty k per parameter; k = 2
  # gives classical AIC, k = log(n) BIC).  At each step the single
  # term addition/deletion that most reduces the criterion is taken;
  # the search stops when no move improves it or 'steps' is exhausted.
  #
  # object : a fitted model with extractAIC/addterm/dropterm methods
  # scope  : a formula, or list(lower=, upper=) bounding the search
  # keep   : optional function(fit, aic) whose results are collected
  # Returns the final fit, with the step path in its $anova component.

  # R's update() does not expand "." in formulae, so rebuild the
  # formula explicitly from the terms object.
  fixFormulaObject <- function(object) {
    tt <- terms(object)
    tmp <- attr(tt, "term.labels")
    if (!attr(tt, "intercept")) tmp <- c(tmp, "0")
    if (!length(tmp)) tmp <- "1"
    rhs <- paste(tmp, collapse = " + ")
    tmp <- paste(deparse(formula(object)[[2]]), "~", rhs)
    if (length(offset <- attr(tt, "offset")))
      tmp <- paste(tmp, deparse(attr(tt, "variables")[offset + 1]),
                   sep = " + ")
    formula(tmp)
  }
  # Deviance, falling back to the unpenalised criterion for fits
  # (e.g. lm) whose deviance() is NULL.
  mydeviance <- function(x, ...)
  {
    dev <- deviance(x)
    if(!is.null(dev)) dev else extractAIC(x, k=0)[2]
  }

  # Indent the continuation lines of a deparsed formula for display.
  cut.string <- function(string)
  {
    if(length(string) > 1)
      string[-1] <- paste("\n", string[-1], sep = "")
    string
  }

  # Assemble the per-step 'keep' results into a matrix (steps as
  # columns).
  re.arrange <- function(keep)
  {
    namr <- names(k1 <- keep[[1]])
    namc <- names(keep)
    nc <- length(keep)
    nr <- length(k1)
    array(unlist(keep, recursive = FALSE), c(nr, nc), list(namr, namc))
  }

  # Build the "Stepwise Model Path" anova table summarising the moves
  # taken, and attach it to the final fit.
  step.results <- function(models, fit, object, usingCp=FALSE)
  {
    change <- sapply(models, "[[", "change")
    rd <- sapply(models, "[[", "deviance")
    dd <- c(NA, abs(diff(rd)))
    rdf <- sapply(models, "[[", "df.resid")
    ddf <- c(NA, abs(diff(rdf)))
    AIC <- sapply(models, "[[", "AIC")
    heading <- c("Stepwise Model Path \nAnalysis of Deviance Table",
                 "\nInitial Model:", deparse(as.vector(formula(object))),
                 "\nFinal Model:", deparse(as.vector(formula(fit))),
                 "\n")
    aod <-
      if(usingCp)
        data.frame(Step = change, Df = ddf, Deviance = dd,
                   "Resid. Df" = rdf, "Resid. Dev" = rd,
                   Cp = AIC, check.names = FALSE)
      else data.frame(Step = change, Df = ddf, Deviance = dd,
                      "Resid. Df" = rdf, "Resid. Dev" = rd,
                      AIC = AIC, check.names = FALSE)
    attr(aod, "heading") <- heading
    class(aod) <- c("Anova", "data.frame")
    fit$anova <- aod
    fit
  }

  # need to fix up . in formulae in R
  object$formula <- fixFormulaObject(object)
  Terms <- object$formula
  object$call$formula <- object$formula
  attributes(Terms) <- attributes(object$terms)
  object$terms <- Terms
  if(use.start) warning("use.start cannot be used with R's version of glm")
  if(missing(direction)) direction <- "both"
  else direction <- match.arg(direction)
  backward <- direction == "both" | direction == "backward"
  forward <- direction == "both" | direction == "forward"
  # Translate 'scope' into factor matrices bounding what may be
  # dropped (fdrop) and what may be added (fadd).
  if(missing(scope)) {
    fdrop <- numeric(0)
    fadd <- NULL
  } else {
    if(is.list(scope)) {
      fdrop <- if(!is.null(fdrop <- scope$lower))
	attr(terms(update.formula(object, fdrop)), "factors")
	else numeric(0)
      fadd <- if(!is.null(fadd <- scope$upper))
	attr(terms(update.formula(object, fadd)), "factors")
    } else {
      fadd <- if(!is.null(fadd <- scope))
	attr(terms(update.formula(object, scope)), "factors")
      fdrop <- numeric(0)
    }
  }
  # With no upper scope, only backward elimination makes sense.
  if(is.null(fadd)) {
    backward <- TRUE
    forward <- FALSE
  }
  models <- vector("list", steps)
  if(!is.null(keep)) {
    keep.list <- vector("list", steps)
    nv <- 1
  }
  ## watch out for partial matching here.
  if(is.list(object) && (nm <- match("nobs", names(object), 0)) > 0)
    n <- object[[nm]]
  else n <- length(residuals(object))
  fit <- object
  bAIC <- extractAIC(fit, scale, k = k, ...)
  edf <- bAIC[1]
  bAIC <- bAIC[2]
  nm <- 1
  Terms <- fit$terms
  if(trace)
    cat("Start:  AIC=", format(round(bAIC, 2)), "\n",
        cut.string(deparse(as.vector(formula(fit)))), "\n\n")

  models[[nm]] <- list(deviance = mydeviance(fit), df.resid = n - edf,
		       change = "", AIC = bAIC)
  if(!is.null(keep)) keep.list[[nm]] <- keep(fit, bAIC)
  usingCp <- FALSE
  # Main search loop: each iteration evaluates all single-term moves
  # and takes the best one, or stops when "<none>" wins.
  while(steps > 0) {
    steps <- steps - 1
    AIC <- bAIC
    bfit <- fit
    ffac <- attr(Terms, "factors")
    scope <- factor.scope(ffac, list(add = fadd, drop = fdrop))
    aod <- NULL
    change <- NULL
    if(backward && length(scope$drop)) {
      aod <- dropterm(fit, scope$drop, scale = scale,
                      trace = max(0, trace - 1), k = k, ...)
      rn <- row.names(aod)
      row.names(aod) <- c(rn[1], paste("-", rn[-1], sep=" "))
      # drop all zero df terms first.
      if(any(aod$Df == 0, na.rm=TRUE)) {
        zdf <- aod$Df == 0 & !is.na(aod$Df)
        change <- paste((rownames(aod))[zdf])
      }
    }
    if(is.null(change)) {
      if(forward && length(scope$add)) {
        aodf <- addterm(fit, scope$add, scale = scale,
                        trace = max(0, trace - 1), k = k, ...)
        rn <- row.names(aodf)
        row.names(aodf) <- c(rn[1], paste("+", rn[-1], sep=" "))
        if(is.null(aod)) aod <- aodf
        else {
          # combine drop and add tables, keeping a single "<none>" row
          aod <- rbind(aod, aodf[-1, , drop=FALSE])
        }
      }
      attr(aod, "heading") <- NULL
      if(is.null(aod) || ncol(aod) == 0) break
      # need to remove any terms with zero df from consideration
      nzdf <- if(!is.null(aod$Df)) aod$Df != 0 | is.na(aod$Df)
      aod <- aod[nzdf, ]
      if(is.null(aod) || ncol(aod) == 0) break
      # order by Cp if present (Mallows' Cp mode), else by AIC
      nc <- match(c("Cp", "AIC"), names(aod))
      nc <- nc[!is.na(nc)][1]
      o <- order(aod[, nc])
      if(trace) print(aod[o,  ])
      # "<none>" (row 1) best means no move improves the criterion
      if(o[1] == 1) break
      change <- rownames(aod)[o[1]]
    }
    usingCp <- match("Cp", names(aod), 0) > 0
    # apply the chosen move and refresh formula/terms bookkeeping
    fit <- update(fit, paste("~ .", change))
    fit$formula <- fixFormulaObject(fit)
    Terms <- fit$formula
    attributes(Terms) <- attributes(fit$terms)
    fit$terms <- Terms
    bAIC <- extractAIC(fit, scale, k = k, ...)
    edf <- bAIC[1]
    bAIC <- bAIC[2]
    if(trace)
      cat("\nStep:  AIC=", format(round(bAIC, 2)), "\n",
          cut.string(deparse(as.vector(formula(fit)))), "\n\n")
    # guard against cycling: stop if the criterion did not improve
    if(bAIC >= AIC) break
    nm <- nm + 1
    models[[nm]] <-
      list(deviance = mydeviance(fit), df.resid = n - edf,
           change = change, AIC = bAIC)
    if(!is.null(keep)) keep.list[[nm]] <- keep(fit, bAIC)
  }
  if(!is.null(keep)) fit$keep <- re.arrange(keep.list[seq(nm)])
  step.results(models = models[seq(nm)], fit, object, usingCp)
}


extractAIC.loglm <- function(fit, scale, k = 2, ...)
{
  ## extractAIC method for "loglm" fits: equivalent degrees of
  ## freedom and the penalised deviance.  'scale' is accepted for
  ## interface compatibility but never evaluated.
  edf <- fit$n - fit$df
  aic <- fit$deviance + k * edf
  c(edf, aic)
}
# file MASS/truehist.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
"truehist"<-
function(data, nbins = nclass.scott(data), h, x0 = -h/1000, breaks, prob = TRUE,
	 xlim = range(breaks), ymax = max(est),
	 col = 5,
	 xlab = deparse(substitute(data)), bty = "n", ...)
{
  # Plot a "true" histogram with rect(): when prob = TRUE bar heights
  # are scaled so total area is one (a density estimate).
  # Note the deliberate lazy defaults: x0 depends on h (possibly
  # computed below), and xlim/ymax depend on breaks/est which are only
  # assigned later in the body -- they are evaluated at point of use.
  eval(xlab)   # force the default label before 'data' is modified
  data <- data[!is.na(data)]
  if(missing(breaks)) {
    # equal-width bins of width h, anchored just below x0
    if(missing(h)) h <- diff(pretty(data, nbins))[1]
    first <- floor((min(data) - x0)/h)
    last <- ceiling((max(data) - x0)/h)
    breaks <- x0 + h * c(first:last)
  }
  if(any(diff(breaks) <= 0)) stop("breaks must be strictly increasing")
  if(min(data) < min(breaks) || max(data) > max(breaks))
     stop("breaks do not cover the data")
  db <- diff(breaks)
  # raw counts over uneven bins misrepresent the distribution
  if(!prob && sqrt(var(db)) > mean(db)/1000)
    warning("Uneven breaks with prob = F will give a misleading plot")
  bin <- cut(data, breaks, include.lowest = TRUE)
  est <- tabulate(bin, length(levels(bin)))
  # convert counts to density: count / (bin width * n)
  if(prob) est <- est/(diff(breaks) * length(data))
  n <- length(breaks)
  plot(xlim, c(0, ymax), type = "n", xlab = xlab, ylab = "", bty = bty)
  rect(breaks[-n], 0, breaks[-1], est, col = col, ...)
  invisible()
}
# file MASS/ucv.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#

width.SJ <- function(x, nb=1000, lower=0.1*hmax, upper=hmax,
		     method = c("ste", "dpi"))
{
  # Sheather-Jones bandwidth selection for kernel density estimation,
  # by either "solve-the-equation" (ste, the default) or direct
  # plug-in (dpi), using binned data and compiled helper routines.
  # Returns a bandwidth scaled by 4 (for the kernels used elsewhere
  # in this package).

  # fixed point equation for the "ste" method: its root is the
  # selected bandwidth
  fSD <- function(h, x, alph2, c1, n, d)
    (c1/SDh(x, alph2 * h^(5/7), n, d))^(1/5) - h
  # binned estimate of the integrated squared 2nd density derivative
  SDh <- function(x, h, n, d)
    .C("VR_phi4_bin",
       as.integer(n),
       as.integer(length(x)),
       as.double(d),
       x,
       as.double(h),
       u = double(1))$u
  # binned estimate involving the 3rd density derivative
  TDh <- function(x, h, n, d)
    .C("VR_phi6_bin",
       as.integer(n),
       as.integer(length(x)),
       as.double(d),
       x,
       as.double(h),
       u = double(1))$u

  method <- match.arg(method)
  n <- length(x)
  if(!is.loaded(symbol.C("VR_phi4_bin")))
    stop("Compiled code has not been dynamically loaded")
  storage.mode(x) <- "double"
  n <- length(x)
  # bin the data into nb counts with bin width d
  Z <- .C("VR_den_bin",
	  as.integer(n),
	  as.integer(nb),
	  d = double(1),
	  x,
	  cnt = integer(nb)
	  )
  d <- Z$d; cnt <- as.integer(Z$cnt)
  hmax <- 1.144 * sqrt(var(x)) * n^(-1/5)
  # robust scale estimate: min of sd and normalised IQR
  scale <- min(sqrt(var(x)), IQR(x)/1.349)
  a <- 1.24 * scale * n^(-1/7)
  b <- 1.23 * scale * n^(-1/9)
  c1 <- 1/(2*sqrt(pi)*n)
  TD  <- -TDh(cnt, b, n, d)
  alph2 <- 1.357*(SDh(cnt, a, n, d)/TD)^(1/7)
  if(method == "dpi")
    res <- (c1/SDh(cnt,(2.394/(n * TD))^(1/7) , n, d))^(1/5)
  else {
    # root-bracketing check before uniroot on the ste equation
    if (fSD(lower, cnt, alph2, c1, n, d) *
	fSD(upper, cnt, alph2, c1, n, d) > 0)
      stop("No solution in the specified range of bandwidths")
    res <- uniroot(fSD, c(lower, upper), tol=0.1*lower,
	      x=cnt, alph2=alph2, c1=c1, n=n, d=d)$root
  }
  4 * res
}


ucv <- function(x, nb=1000, lower=0.1*hmax, upper=hmax)
{
  # Unbiased (least-squares) cross-validation bandwidth selection for
  # kernel density estimation, minimising the UCV criterion computed
  # by the compiled routine VR_ucv_bin on binned data.
  fucv <- function(h, x, n, d)
    .C("VR_ucv_bin",
       as.integer(n),
       as.integer(length(x)),
       as.double(d),
       x,
       as.double(h),
       u = double(1))$u

  n <- length(x)
  # normal-reference upper bound for the search interval
  hmax <- 1.144 * sqrt(var(x)) * n^(-1/5) * 4
  if(!is.loaded(symbol.C("VR_den_bin")))
    stop("Compiled code has not been dynamically loaded")
  storage.mode(x) <- "double"
  # bin the data into nb counts with bin width d
  Z <- .C("VR_den_bin",
	  as.integer(n),
	  as.integer(nb),
	  d = double(1),
	  x,
	  cnt = integer(nb)
	  )
  d <- Z$d; cnt <- as.integer(Z$cnt)
  h <- optimize(fucv, c(lower, upper), tol=0.1*lower,
		x=cnt, n=n, d=d)$minimum
  # a boundary minimum suggests the true optimum lies outside the range
  if(h < 1.1*lower | h > upper-0.1*lower)
    warning("minimum occurred at one end of the range")
  h
}


bcv <- function(x, nb=1000, lower=0.1*hmax, upper=hmax)
{
  # Biased cross-validation bandwidth selection for kernel density
  # estimation, minimising the BCV criterion computed by the compiled
  # routine VR_bcv_bin on binned data.
  fbcv <- function(h, x, n, d)
    .C("VR_bcv_bin",
       as.integer(n),
       as.integer(length(x)),
       as.double(d),
       x,
       as.double(h),
       u = double(1))$u

  n <- length(x)
  # normal-reference upper bound for the search interval
  hmax <- 1.144 * sqrt(var(x)) * n^(-1/5) * 4
  if(!is.loaded(symbol.C("VR_den_bin")))
    stop("Compiled code has not been dynamically loaded")
  storage.mode(x) <- "double"
  # bin the data into nb counts with bin width d
  Z <- .C("VR_den_bin",
	  as.integer(n),
	  as.integer(nb),
	  d = double(1),
	  x,
	  cnt = integer(nb)
	  )
  d <- Z$d; cnt <- as.integer(Z$cnt)
  h<- optimize(fbcv, c(lower, upper), tol=0.1*lower,
	       x=cnt, n=n, d=d)$minimum
  # a boundary minimum suggests the true optimum lies outside the range
  if(h < 1.1*lower | h > upper-0.1*lower)
    warning("minimum occurred at one end of the range")
  h
}
# file MASS/vcov.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
# Generic: variance-covariance matrix of a fitted model's estimated
# parameters; dispatched on class(object).
vcov <- function(object, ...) UseMethod("vcov")

vcov.nls <- function(object)
{
    ## vcov method for "nls" fits: the summary's unscaled covariance
    ## matrix rescaled by the squared residual standard error.
    s <- summary(object)
    s$sigma^2 * s$cov.unscaled
}

vcov.glm <- function(object)
{
    ## vcov method for "glm" fits: the summary's unscaled covariance
    ## matrix multiplied by the estimated dispersion.
    sm <- summary(object, corr=FALSE)
    sm$cov.unscaled * sm$dispersion
}

vcov.lm <- function(object)
{
    ## vcov method for "lm" fits: the summary's unscaled covariance
    ## matrix rescaled by the squared residual standard error.
    sm <- summary(object, corr=FALSE)
    sm$cov.unscaled * sm$sigma^2
}

# Residual sum-of-squares of an "nls" fit, from its model object's
# deviance() accessor.
deviance.nls <- function(object) object$m$deviance()

vcov.nlregb <- function(object, method=c("Fisher", "observed", "Huber"),
   scale=object$scale, eps=1e-3, tol=1)
{
    # Variance-covariance matrix for an "nlregb" (nonlinear
    # least-squares under bounds) fit.  Three estimators:
    #   Fisher   - s^2 * (J'J)^{-1} from the Jacobian only
    #   observed - adds the second-derivative (residual x curvature)
    #              term to the information matrix
    #   Huber    - sandwich form s^2 * Jinv K Jinv
    # Derivatives are obtained from the stored Jacobian when
    # available, otherwise by finite differences of the residual
    # function retrieved from the original call.
    vcovscale <- function(devfn, par, scale, tol, ...)
    {
        # find a suitable initial scaling
        p <- length(par)
        ind <- 1:p
        v0 <- sum(devfn(par, ...)^2)
        scale <- sqrt(tol)/scale
        for (i in ind) {
            eps <- scale[i]
            inc <- ind == i
            v <- sum(devfn(par + eps*inc, ...)^2)
            if(v - v0 > tol) {
                # step too big: shrink until the objective change
                # falls below tol
                repeat {
                    eps <- eps/3
                    v <- sum(devfn(par + eps*inc, ...)^2)
                    if(v - v0 < tol) break
                }
            } else {
                # step too small: grow until the change exceeds tol,
                # then back off one notch
                repeat {
                    eps <- eps*3
                    if(eps > 1000*scale[i])
                        stop(paste("scaling on var",i,"is too small"))
                    v <- sum(devfn(par + eps*inc, ...)^2)
                    if(v - v0 > tol) {eps <- eps/3; break}
                }
            }
            scale[i] <- eps
        }
        scale
    }

    method <- match.arg(method)
    par <- object$param
    n <- length(object$resid)
    p <- length(object$param)
    if(length(scale) == 1) scale <- rep(scale, p)
    s2 <- sum(object$resid^2)/(n-p)   # residual variance estimate
    if(!is.null(gr <- object$jacobian)) {
        # gradient supplied
        K <- t(gr) %*% gr
        if(method == "Fisher")
            H2 <- 0
        else {
            # observed information: add sum_i r_i * d2f_i/dtheta2,
            # approximated by differencing the Jacobian function
            # recovered from the original call's frame
            r <- t(object$resid)
            grfn <- eval(substitute(object$call$jacobian))
            grfn <- eval(as.expression(grfn), sys.frame(sys.parent()))
            if(!length(grfn)) stop("Jacobian fn not found")
            sc <- eps * pmin(1, abs(par)) * sign(par)
            argnames <- names(grfn)
            argnames <- argnames[2:(length(argnames) - 1)]
            addargs <- object$aux[argnames]
            g <- object$jacobian
            H2 <- matrix(0, p, p)
            for(i in 1:p) {
                p1 <- par
                p1[i] <- p1[i] + sc[i]
                g1 <- do.call("grfn", c(list(p1), addargs))
                H2[i, ] <- r %*% (g1 - g)/sc[i]
            }
            # symmetrise the finite-difference approximation
            H2 <- 0.5*(H2 + t(H2))
        }
        Jinv <- solve(K + H2)
        if(method != "Huber") V <- s2 * Jinv
        else V <- s2 * Jinv %*% K %*% Jinv
    } else {
        # no gradient supplied
        if(method != "Fisher")
            warning("Only Fisher information is available without gradient information")
        fn <- eval(substitute(object$call$residuals))
        fn <- eval(as.expression(fn), sys.frame(sys.parent()))
        if(!length(fn)) stop("objective fn not found")
        argnames <- names(fn)
        argnames <- argnames[-c(1, length(argnames))]
        addargs <- object$aux[argnames]
        scale <- do.call("vcovscale",
                         c(list(fn, par, scale=scale, tol=tol*s2), addargs))
        # central finite differences of the residual vector, one
        # column per parameter
        ind <- 1:p
        H <- matrix(, n, p)
        for (j in 1:p)
            H[,j] <- do.call("fn", c(list(par + scale[j]*(ind==j)), addargs)) -
                do.call("fn", c(list(par - scale[j]*(ind==j)), addargs))
        J <- 0.25*crossprod(H)/outer(scale, scale)
        V <- s2 * solve(J)
    }
    # warn if the +/- 2 s.e. interval touches a bound from the call
    v <- 2*sqrt(diag(V))
    upper <- eval(substitute(object$call$upper))
    if(is.null(upper))
        upper <- Inf
    else upper <- eval(upper, sys.frame(sys.parent()))
    lower <- eval(substitute(object$call$lower))
    if(is.null(lower))
        lower <- -Inf else
    lower <- eval(lower, sys.frame(sys.parent()))
    if(any(par - v <= lower) || any(par + v >= upper))
        warning("estimate is near the boundary: the estimated variance matrix may not be valid")
    V
}

vcov.nlminb <- function(object, tol=1, scale=object$scale, eps=1e-3, eps0=1)
{
    # Variance-covariance matrix for an "nlminb" fit, taken as the
    # inverse of a (re)computed Hessian of the objective at the
    # optimum.  The Hessian comes from, in order of preference: the
    # user's hessian-in-gradient function, a separate hessian
    # function, finite differences of the gradient, or pure
    # finite differences of the objective (vcov0).
    vcovscale <- function(devfn, par, scale, tol, ...)
    {
        # find a suitable initial scaling
        p <- length(par)
        ind <- 1:p
        v0 <- devfn(par, ...)
        scale <- sqrt(tol)/scale
        for (i in ind) {
            eps <- scale[i]
            inc <- ind == i
            v <- devfn(par + eps*inc, ...)
            if(v - v0 > tol) {
                # step too big: shrink until the objective change
                # falls below tol
                repeat {
                    eps <- eps/3
                    v <- devfn(par + eps*inc, ...)
                    if(v - v0 < tol) break
                }
            } else {
                # step too small: grow until the change exceeds tol,
                # then back off one notch
                repeat {
                    eps <- eps*3
                    if(eps > 1000*scale[i])
                        stop(paste("scaling on var",i,"is too small"))
                    v <- devfn(par + eps*inc, ...)
                    if(v - v0 > tol) {eps <- eps/3; break}
                }
            }
            scale[i] <- eps
        }
        scale
    }

    # Hessian by second-order finite differences of the objective
    # alone (no derivative functions available).
    vcov0 <- function(devfn, par, scale = rep(1, p), ...)
    {
        p <- length(par)
        ind <- 1:p
        v0 <- devfn(par, ...)
        h <- numeric(p)
        H <- matrix(, p, p)
        for (i in 1:p)
            h[i] <- devfn(par + scale[i]*(ind==i), ...) - v0
        if(p > 1) for (i in 2:p) {
            inc <- scale[i] * (ind == i)
            for(j in 1:(i-1))
                H[i, j] <- H[j, i] <-
                    devfn(par + inc + scale[j]*(ind==j), ...) - v0
        }
        # mixed differences minus the one-sided pieces give the
        # off-diagonal second derivatives; diagonal from 2*h
        H <- H - outer(h, h, "+")
        diag(H) <- 2*h
        H/outer(scale, scale)
    }

    par <- object$param
    p <- length(par)
    # recover the objective function from the original call's frame
    fn <- eval(substitute(object$call$objective))
    fn <- eval(as.expression(fn), sys.frame(sys.parent()))
    if(!length(fn)) stop("objective fn not found")
    argnames <- names(fn)
    argnames <- argnames[-c(1, length(argnames))]
    addargs <- object$aux[argnames]
    if(length(scale) == 1) scale <- rep(scale, p)
    hessian <- object$call$hessian
    # object$hessian appears to lie, so we redo the calculation
    if(is.logical(hessian) && hessian) {
        # hessian in gradient
        grfn <- eval(substitute(object$call$gradient))
        grfn <- eval(as.expression(grfn), sys.frame(sys.parent()))
        if(!length(grfn)) stop("gradient fn not found")
        hh <- do.call("grfn", c(list(par), addargs))$hessian
        # expand the packed upper triangle into a full symmetric matrix
        H <- matrix(0, p, p)
        H[row(H) <= col(H)] <- hh
        H[row(H) > col(H)] <- t(H)[row(H) > col(H)]
    } else if(!is.logical(hessian) && !is.null(hessian)) {
        # separate Hessian function
        hfn <- eval(substitute(object$call$hessian))
        hfn <- eval(as.expression(hfn), sys.frame(sys.parent()))
        if(!length(hfn)) stop("hessian fn not found")
        hh <- do.call("hfn", c(list(par), addargs))
        # expand the packed upper triangle into a full symmetric matrix
        H <- matrix(0, p, p)
        H[row(H) <= col(H)] <- hh
        H[row(H) > col(H)] <- t(H)[row(H) > col(H)]
    } else {
        scale <- do.call("vcovscale",
                         c(list(fn, par, scale=scale, tol=tol), addargs))
        if(!is.null(object$call$gradient)) {
            # gradient but no Hessian
            grfn <- eval(substitute(object$call$gradient))
            grfn <- eval(as.expression(grfn), sys.frame(sys.parent()))
            if(!length(grfn)) stop("gradient fn not found")
            sc <- eps * scale
            g <- do.call("grfn", c(list(par), addargs))
            # Hessian rows from central differences of the gradient
            H <- matrix(0, p, p)
            for(i in 1:p) {
                g1 <- do.call("grfn", c(list(par - sc[i]*(1:p==i)), addargs))
                g2 <- do.call("grfn", c(list(par + sc[i]*(1:p==i)), addargs))
                H[i, ] <- 0.5*(g2 - g1)/sc[i]
            }
        } else {
            # No derivatives available
            H <- do.call("vcov0", c(list(fn, par, scale=eps0*scale), addargs))
        }
    }
    # symmetrise before inverting, then sanity-check the variances
    V <- solve(0.5*(H + t(H)))
    if(any(diag(V) < 0)) stop("Estimated variances are negative")
    # warn if the +/- 2 s.e. interval touches a bound from the call
    v <- 2*sqrt(diag(V))
    upper <- eval(substitute(object$call$upper))
    if(is.null(upper)) upper <- Inf else upper <- eval(upper, sys.frame(sys.parent()))
    lower <- eval(substitute(object$call$lower))
    if(is.null(lower)) lower <- -Inf else lower <- eval(lower, sys.frame(sys.parent()))
    if(any(par - v <= lower) || any(par + v >= upper))
        warning("estimate is near the boundary: the estimated variance matrix may not be valid")
    V
}
# file MASS/write.matrix.q
# copyright (C) 1994-9 W. N. Venables and B. D. Ripley
#
write.matrix <- function(x, file = "", sep = " ")
{
    ## Write a matrix (or matrix-like object) as a plain text table,
    ## one row per line with fields separated by 'sep'.  When x has
    ## column names they are emitted as a header line first.
    x <- as.matrix(x)
    nc <- ncol(x)
    ## recycle: 'sep' between fields, newline after the last field
    seps <- c(rep(sep, nc - 1), "\n")
    cat(colnames(x), format(t(x)), file = file, sep = seps)
}
spec.taper <- function(x, p = 0.1) {
  ## Apply a split-cosine-bell taper to the ends of one or more time
  ## series.
  ##
  ## x: univariate or multivariate series (coerced with as.ts).
  ## p: proportion tapered at EACH end, in [0, 0.5]; a single value
  ##    is recycled across series, otherwise one value per column.
  ## Returns x with the first and last floor(nrow * p) observations
  ## of each series damped by raised-cosine weights; all attributes
  ## (tsp, class, dimnames) are preserved.
  ##
  ## Fix: previously floor(nr * p[i]) == 0 (e.g. p = 0, which the
  ## range check allows, or tiny p with a short series) made
  ## seq(1, -1, by = 2) signal an error; such columns are now left
  ## untouched, as a zero-length taper should be.
  if (any(p < 0) || any(p > 0.5))
    stop("p must be between 0 and 0.5")
  x <- as.ts(x)
  a <- attributes(x)
  x <- as.matrix(x)
  nc <- ncol(x)
  if (length(p) == 1)
    p <- rep(p, nc)
  else if (length(p) != nc)
    stop("length of p must be 1 or equal the number of columns of x")
  nr <- nrow(x)
  for (i in 1 : nc) {
    m <- floor(nr * p[i])
    if (m > 0) {
      ## raised-cosine weights rising from near 0 to near 1 over the
      ## first m points; mirrored at the far end
      w <- 0.5 * (1 - cos(pi * seq(1, 2 * m - 1, by = 2) / (2 * m)))
      x[, i] <- c(w, rep(1, nr - 2 * m), rev(w)) * x[, i]
    }
  }
  attributes(x) <- a
  x
}

## Huber weight function: 1 inside [-c, c], c/|u| outside.
wt.huber <- function(u, c = 1.345)
{
  au <- abs(u)
  ifelse(au < c, 1, c / au)
}

# Package load hook (pre-namespace R): load the package's compiled
# code so the .C entry points used above become available.
.First.lib <- function(lib, pkg) {
  library.dynam("MASS", pkg, lib)
}

# Generic dispatcher for profile(); class-specific methods are
# defined elsewhere.
profile <- function(fitted, ...) UseMethod("profile")
