ansari.test <- function(x, y,
                        alternative = c("two.sided", "less", "greater"),
                        exact = NULL,
                        conf.int = FALSE, conf.level = 0.95)
{
    ## Ansari-Bradley two-sample rank test for a difference in scale
    ## parameters.  The statistic AB is the sum over the x sample of
    ## the scores min(r, N - r + 1), where r are the ranks in the
    ## combined sample of size N = m + n.
    ##
    ## x, y:        the two samples (missing values are removed below).
    ## alternative: direction of the alternative hypothesis.
    ## exact:       use the exact null distribution of AB?  When NULL,
    ##              defaults to TRUE iff both samples have < 50 values.
    ## conf.int:    also compute a confidence interval for the ratio of
    ##              scale parameters?
    ## conf.level:  confidence level for that interval.
    ##
    ## Returns an object of class "htest".
    alternative <- match.arg(alternative)
    if(conf.int) {
        if(!((length(conf.level) == 1)
             && is.finite(conf.level)
             && (conf.level > 0)
             && (conf.level < 1)))
            stop("conf.level must be a single number between 0 and 1")
    }
    DNAME <- paste(deparse(substitute(x)), "and", deparse(substitute(y)))

    x <- x[complete.cases(x)]           # drop missing values
    y <- y[complete.cases(y)]
    m <- length(x)
    if(m < 1)
        stop("not enough x observations")
    n <- length(y)
    if(n < 1)
        stop("not enough y observations")
    N <- m + n

    ## Ansari-Bradley scores: combined-sample ranks folded at the
    ## middle of 1..N, summed over the x observations.
    r <- rank(c(x, y))
    STATISTIC <- sum(pmin(r, N - r + 1)[seq(along = x)])
    TIES <- (length(r) != length(unique(r)))

    if(is.null(exact))
        exact <- ((m < 50) && (n < 50))

    if(exact && !TIES) {
        ## Exact null CDF of AB, via the compiled routine in ctest.
        pansari <- function(q, m, n) {
            .C("pansari",
               as.integer(length(q)),
               p = as.double(q),
               as.integer(m),
               as.integer(n),
               PACKAGE = "ctest")$p
        }
        PVAL <-
            switch(alternative,
                   two.sided = {
                       ## Double the smaller tail probability; the
                       ## threshold is the centre of the null
                       ## distribution of AB.
                       if (STATISTIC > ((m + 1)^2 %/% 4
                                        + ((m * n) %/% 2) / 2))
                           p <- 1 - pansari(STATISTIC - 1, m, n)
                       else
                           p <- pansari(STATISTIC, m, n)
                       min(2 * p, 1)
                   },
                   greater = 1 - pansari(STATISTIC - 1, m, n),
                   less = pansari(STATISTIC, m, n))
        if (conf.int) {
            ## Exact quantile function of AB (inverse of pansari).
            qansari <- function(p, m, n) {
                .C("qansari",
                   as.integer(length(p)),
                   q = as.double(p),
                   as.integer(m),
                   as.integer(n),
                   PACKAGE = "ctest")$q
            }
            alpha <- 1 - conf.level
            x <- sort(x)
            y <- sort(y)
            ## AB statistic of the y sample after rescaling by a
            ## candidate scale ratio sig.
            ab <- function(sig) {
                rab <- rank(c(y/sig, x))
                ## here follow Bauer directly
                sum(pmin(rab, N - rab + 1)[seq(along = y)])
            }
            ## As sig crosses a ratio y[j]/x[i], ab(sig) jumps by a
            ## known step height; positive and negative observations
            ## are handled on separate grids.
            xpos <- x[x > 0]
            ypos <- y[y > 0]
            xneg <- x[x <= 0]
            yneg <- y[y <= 0]
            signeg <- NULL
            sigpos <- NULL
            ## compute all stuff for the negative/positive values
            ## separately
            if(length(xneg) > 0 && length(yneg) > 0) {
                ## step height of ab() at the crossing of y[j] and x[i]
                coefn <- function(j, i)
                    - abs(j+i-(N+1)/2) + abs(i+j-1-(N+1)/2)
                signeg <- outer(yneg, xneg, "/")
                coefneg <- outer(1:length(yneg), 1:length(xneg), "coefn")
                coefneg <- coefneg[order(signeg)]
                signeg <- sort(signeg)
            }
            if(length(xpos) > 0 && length(ypos) > 0) {
                coefp <- function(j,i)
                    - abs(j+i-1-(N+1)/2) + abs(i+j-(N+1)/2)
                sigpos <- outer(ypos, xpos, "/")
                ## positions of the first positive x and y values in
                ## the sorted samples
                mpos <- min(which(x > 0))
                npos <- min(which(y > 0))
                coefpos <- outer(npos:n, mpos:m, "coefp")
                coefpos <- coefpos[order(sigpos)]
                sigpos <- sort(sigpos)
                if(!is.null(signeg)) {
                    ## merge the negative and positive candidate grids
                    sigma <- c(signeg, sigpos)
                    coefs <- c(coefneg, coefpos)
                    coefs <- coefs[order(sigma)]
                    sigma <- sort(sigma)
                } else {
                    sigma <- sigpos
                    coefs <- coefpos[order(sigpos)]
                    sigma <- sort(sigma)
                }
            }
            if(is.null(sigpos) && !is.null(signeg)) {
                sigma <- signeg
                coefs <- coefneg[order(signeg)]
                sigma <- sort(sigma)
            }
            ## NOTE(review): if neither grid was built (e.g. x entirely
            ## nonpositive while y is entirely positive), `sigma` is
            ## never assigned and length(sigma) below would fail with
            ## "object not found" -- confirm whether such input can
            ## reach this point.
            ## compute step function
            cint <- if(length(sigma) < 1) {
                warning("Cannot compute confidence interval")
                c(0, 0)
            }
            else {
                ## ab() evaluated just after each jump: starting value
                ## plus the cumulated step heights.
                absigma <- cumsum(c(ab(sigma[1]),
                                    coefs[2:length(coefs)]))
                switch(alternative, two.sided = {
                    ## invert the step function at the exact quantiles
                    u <- absigma - qansari(alpha/2, m, n)
                    l <- absigma - qansari(1 - alpha/2, m, n)
                    if(length(u[u >= 0]) == 0)
                        uci <- sigma[1]
                    else {
                        u[u < 0] <- NA
                        uci <- unique(sigma[which(u == min(u, na.rm=TRUE))])
                        if (length(uci) != 1)
                            uci <- uci[1]
                    }
                    if (length(l[l > 0]) == 0)
                        lci <- sigma[length(sigma)]
                    else {
                        l[l <= 0] <- NA
                        lci <- unique(sigma[which(l == min(l, na.rm=TRUE))])
                        if(length(lci) != 1)
                            lci <- lci[length(lci)]
                    }
                    c(uci, lci)
                }, greater= {
                    u <- absigma - qansari(alpha, m, n)
                    if(length(u[u >= 0]) == 0)
                        uci <- sigma[1]
                    else {
                        u[u < 0] <- NA
                        uci <- unique(sigma[which(u == min(u, na.rm=TRUE))])
                        if(length(uci) != 1)
                            uci <- uci[1]
                    }
                    c(uci, NA)
                }, less= {
                    l <- absigma - qansari(1 - alpha, m, n)
                    if(length(l[l > 0]) == 0)
                        lci <- sigma[length(sigma)]
                    else {
                        l[l <= 0] <- NA
                        lci <- unique(sigma[which(l == min(l, na.rm=TRUE))])
                        if (length(lci) != 1)
                            lci <- lci[length(lci)]
                    }
                    c(NA, lci)
                })
            }
            attr(cint, "conf.level") <- conf.level
        }
    }
    else {
        ## Normal approximation.  Mean and variance of AB differ for
        ## even/odd N; with ties the variance uses the run-length table
        ## of the tied folded scores.
        EVEN <- ((N %% 2) == 0)
        normalize <- function(s, r, TIES) {
            z <- if(EVEN)
                s - m * (N + 2)/4
            else
                s - m * (N + 1)^2/(4 * N)
            if (!TIES) {
                SIGMA <- if(EVEN)
                    sqrt((m * n * (N + 2) * (N - 2))/(48 * (N - 1)))
                else
                    sqrt((m * n * (N + 1) * (3 + N^2))/(48 * N^2))
            }
            else {
                ## tie correction: aggregate the squared scores over
                ## groups of equal values
                r <- rle(sort(pmin(r, N - r + 1)))
                SIGMA <- if(EVEN)
                    sqrt(m * n
                         * (16 * sum(r$l * r$v^2) - N * (N + 2)^2)
                         / (16 * N * (N - 1)))
                else
                    sqrt(m * n
                         * (16 * N * sum(r$l * r$v^2) - (N + 1)^4)
                         / (16 * N^2 * (N - 1)))
            }
            z / SIGMA
        }
        p <- pnorm(normalize(STATISTIC, r, TIES))
        PVAL <- switch(alternative,
                       two.sided = 2 * min(p, 1 - p),
                       greater = 1 - p,
                       less = p)

        if(conf.int && !exact) {
            ## Approximate CI: search for the scale ratios at which the
            ## normalized statistic hits the normal quantiles.
            alpha <- 1 - conf.level
            ab <- function(sig, zq) {
                r <- rank(c(y / sig, x))
                s <- sum(pmin(r, N -r + 1)[seq(along = y)])
                TIES <- (length(r) != length(unique(r)))
                abs(normalize(s, r, TIES) - zq)
            }
            ## optimize is not good here, use Nelder-Mead
            ## what should we use as initial value?
            ## I think the null hypotheses is right here: use sigma = 1
            cint <- switch(alternative, two.sided = {
                u <- optim(1, ab, zq=qnorm(alpha/2))$par
                l <- optim(1, ab, zq=qnorm(1 - alpha/2))$par
                c(u, l)
            }, greater= {
                u <- optim(1, ab, zq=qnorm(alpha))$par
                c(u, NA)
            }, less= {
                l <- optim(1, ab, zq=qnorm(1 - alpha))$par
                c(NA, l)
            })
            attr(cint, "conf.level") <- conf.level
        }

        if(exact && TIES) {
            warning("Cannot compute exact p-value with ties")
            if(conf.int)
                warning(paste("Cannot compute exact confidence",
                              "intervals with ties"))
        }
    }

    names(STATISTIC) <- "AB"
    RVAL <- list(statistic = STATISTIC,
                 p.value = PVAL,
                 alternative = alternative,
                 method = "Ansari-Bradley test",
                 data.name = DNAME)
    if(conf.int)
        RVAL$conf.int <- cint
    class(RVAL) <- "htest"
    return(RVAL)
}
bartlett.test <- function(x, g) {
    ## Bartlett's test for homogeneity of variances.
    ##
    ## x: a list of numeric vectors (one per group), a list of "lm"
    ##    objects (whose residual variances are compared), or a numeric
    ##    vector whose group memberships are given by g.
    ## g: grouping vector/factor; only used when x is not a list.
    ##
    ## Returns an object of class "htest" with the K-squared statistic,
    ## k - 1 degrees of freedom and the chi-square p-value.
    LM <- FALSE
    if (is.list(x)) {
        if (length(x) < 2)
            stop("x must be a list with at least 2 elements")
        DNAME <- deparse(substitute(x))
        if (all(sapply(x, function(obj) inherits(obj, "lm"))))
            LM <- TRUE
        else
            x <- lapply(x, function(x) x <- x[is.finite(x)])
        k <- length(x)
    }
    else {
        if (length(x) != length(g))
            stop("x and g must have the same length")
        DNAME <- paste(deparse(substitute(x)), "and",
                       deparse(substitute(g)))
        OK <- complete.cases(x, g)
        x <- x[OK]
        g <- as.factor(g[OK])
        k <- nlevels(g)
        if (k < 2)
            stop("all observations are in the same group")
        x <- split(x, g)
    }

    ## In both branches, n holds the per-group degrees of freedom and v
    ## the per-group variance estimates, so the formula below applies
    ## uniformly.
    if (LM) {
        ## BUG FIX: use the full component name "df.residual" (the
        ## original "df.resid" worked only through partial matching of
        ## `$`), and divide the residual sum of squares by the residual
        ## df -- the statistic below needs variances, as in the non-LM
        ## branch which uses var().
        n <- sapply(x, function(obj) obj$df.residual)
        if (any(n <= 0))
            stop("there must be at least 2 observations in each group")
        v <- sapply(x, function(obj) sum(obj$residuals^2)) / n
    } else {
        n <- sapply(x, "length") - 1    # per-group df
        if (any(n <= 0))
            stop("there must be at least 2 observations in each group")
        v <- sapply(x, "var")           # per-group sample variance
    }

    ## Pooled variance and Bartlett's corrected statistic.
    n.total <- sum(n)
    v.total <- sum(n * v) / n.total
    STATISTIC <- ((n.total * log(v.total) - sum(n * log(v))) /
                  (1 + (sum(1 / n) - 1 / n.total) / (3 * (k - 1))))
    names(STATISTIC) <- "Bartlett's K-square"
    PARAMETER <- k - 1
    names(PARAMETER) <- "df"

    RVAL <- list(statistic = STATISTIC,
                 parameter = PARAMETER,
                 p.value = 1 - pchisq(STATISTIC, PARAMETER),
                 data.name = DNAME,
                 method = "Bartlett test for homogeneity of variances")
    class(RVAL) <- "htest"
    return(RVAL)
}
binom.test <-
function(x, n, p = 0.5, alternative = c("two.sided", "less", "greater"),
         conf.level = 0.95)
{
    ## Exact binomial test of H0: success probability = p, given x
    ## successes in n trials, with a Clopper-Pearson confidence
    ## interval for the success probability.
    ##
    ## x:           observed number of successes (single integer).
    ## n:           number of trials (single positive integer).
    ## p:           hypothesized success probability.
    ## alternative: direction of the alternative hypothesis.
    ## conf.level:  confidence level of the returned interval.
    ##
    ## Returns an object of class "htest".
    if((length(n) > 1) || is.na(n) || (n < 1) || (n != round(n)))
        stop("n must be a positive integer")
    if((length(x) > 1) || is.na(x) ||
       (x < 0) || (x > n) || (x != round(x)))
        stop("x must be an integer between 0 and n")
    if(!missing(p) && (length(p) > 1 || is.na(p) || p < 0 || p > 1))
        stop ("p must be a single number between 0 and 1")
    alternative <- match.arg(alternative)

    if(!((length(conf.level) == 1) && is.finite(conf.level) &&
         (conf.level > 0) && (conf.level < 1)))
        stop("conf.level must be a single number between 0 and 1")

    DNAME <- paste(deparse(substitute(x)), "and", deparse(substitute(n)))

    PVAL <- switch(alternative,
                   less = pbinom(x, n, p),
                   greater = 1 - pbinom(x - 1, n, p),
                   two.sided = {
                       ## Two-sided p-value: total probability of all
                       ## outcomes no more likely than the observed one.
                       if(p == 0)
                           (x == 0)
                       else if(p == 1)
                           (x == n)
                       else {
                           ## Do
                           ##   d <- dbinom(0 : n, n, p)
                           ##   sum(d[d <= dbinom(x, n, p)])
                           ## a bit more efficiently ...
                           ## BUG FIX: compare densities with a small
                           ## relative tolerance; an exact <= can miss
                           ## outcomes whose probability ties
                           ## dbinom(x, n, p) only up to floating-point
                           ## rounding.
                           relErr <- 1 + 1e-7
                           d <- dbinom(x, n, p)
                           if(x / n < p) {
                               ## x is in the lower tail: y counts the
                               ## upper-tail outcomes at most as likely
                               ## as x; these are the y largest ones.
                               i <- seq(from = x + 1, to = n)
                               y <- sum(dbinom(i, n, p) <= d * relErr)
                               pbinom(x, n, p) +
                                   (1 - pbinom(n - y, n, p))
                           } else {
                               ## x is in the upper tail: symmetric case.
                               i <- seq(from = 0, to = x - 1)
                               y <- sum(dbinom(i, n, p) <= d * relErr)
                               pbinom(y - 1, n, p) +
                                   (1 - pbinom(x - 1, n, p))
                           }
                       }
                   })
    ## Determine p s.t. Prob(B(n,p) >= x) = alpha
    p.L <- function(x, alpha) {
        if(x == 0)                      # No solution
            0
        else
            uniroot(function(p) 1 - pbinom(x - 1, n, p) - alpha,
                    c(0, 1))$root
    }
    ## Determine p s.t. Prob(B(n,p) <= x) = alpha
    p.U <- function(x, alpha) {
        if(x == n)                      # No solution
            1
        else
            uniroot(function(p) pbinom(x, n, p) - alpha,
                    c(0, 1))$root
    }
    ## Clopper-Pearson interval from the tail-probability inversions.
    CINT <- switch(alternative,
                   less = c(0, p.U(x, 1 - conf.level)),
                   greater = c(p.L(x, 1 - conf.level), 1),
                   two.sided = {
                       alpha <- (1 - conf.level) / 2
                       c(p.L(x, alpha), p.U(x, alpha))
                   })
    attr(CINT, "conf.level") <- conf.level

    ESTIMATE <- x / n

    names(x) <- "number of successes"	# or simply "x" ??
    names(n) <- "number of trials"	# or simply "n" ??
    names(ESTIMATE) <-
    names(p) <- "probability of success"# or simply "p" ??

    structure(list(statistic = x,
                   parameter = n,
                   p.value = PVAL,
                   conf.int = CINT,
                   estimate = ESTIMATE,
                   null.value = p,
                   alternative = alternative,
                   method = "Exact binomial test",
                   data.name = DNAME),
              class = "htest")
}



chisq.test <-
function(x, y = NULL, correct = TRUE, p = rep(1 / length(x), length(x)),
         simulate.p.value = FALSE, B = 2000)
{
    ## Pearson's chi-square test, either for independence in a
    ## contingency table (matrix x, or two factors x and y) or for
    ## goodness of fit to given probabilities p (vector x).
    ## Optionally simulates the p-value by Monte Carlo with B tables.
    ## Behavior is identical to the original implementation.
    DNAME <- deparse(substitute(x))
    if (is.data.frame(x))
        x <- as.matrix(x)
    ## A one-row or one-column table is treated as a plain vector.
    if (is.matrix(x) && (min(dim(x)) == 1))
        x <- as.vector(x)
    if (!is.matrix(x) && !is.null(y)) {
        ## Two factors supplied: cross-tabulate the complete pairs.
        if (length(x) != length(y))
            stop("x and y must have the same length")
        DNAME <- paste(DNAME, "and", deparse(substitute(y)))
        keep <- complete.cases(x, y)
        x <- as.factor(x[keep])
        y <- as.factor(y[keep])
        if ((nlevels(x) < 2) || (nlevels(y) < 2))
            stop("x and y must have at least 2 levels")
        x <- table(x, y)
    }

    if (any(x < 0) || any(is.na(x)))
        stop("all entries of x must be nonnegative and finite")
    n <- sum(x)
    if (n == 0)
        stop("at least one entry of x must be positive")

    if (is.matrix(x)) {
        METHOD <- "Pearson's Chi-square test"
        nr <- nrow(x)
        nc <- ncol(x)
        sr <- rowSums(x)
        sc <- colSums(x)
        ## Expected counts under independence of rows and columns.
        E <- outer(sr, sc) / n
        dimnames(E) <- dimnames(x)
        if (simulate.p.value && all(sr > 0) && all(sc > 0)) {
            ## Monte Carlo p-value: generate B tables with the observed
            ## margins and count how often their statistic reaches ours.
            METHOD <- paste(METHOD,
                            "with simulated p-value\n\t (based on", B,
                            "replicates)")
            sim <- .C("chisqsim",
                      as.integer(nr),
                      as.integer(nc),
                      as.integer(sr),
                      as.integer(sc),
                      as.integer(n),
                      as.integer(B),
                      as.double(E),
                      integer(nr * nc),
                      double(n + 1),
                      integer(nc),
                      results = double(B),
                      PACKAGE = "base")
            STATISTIC <- sum((x - E)^2 / E)
            PARAMETER <- NA             # no chi-square df when simulating
            PVAL <- sum(sim$results >= STATISTIC) / B
        }
        else {
            if (simulate.p.value)
                warning(paste("Cannot compute simulated p-value",
                              "with zero marginals"))
            ## Yates' continuity correction applies to 2 x 2 tables only.
            YATES <- if (correct && nr == 2 && nc == 2) 0.5 else 0
            if (YATES > 0)
                METHOD <- paste(METHOD, "with Yates' continuity correction")
            STATISTIC <- sum((abs(x - E) - YATES)^2 / E)
            PARAMETER <- (nr - 1) * (nc - 1)
            PVAL <- 1 - pchisq(STATISTIC, PARAMETER)
        }
    }
    else {
        ## Goodness-of-fit against the probability vector p.
        if (length(x) == 1)
            stop("x must at least have 2 elements")
        if (length(x) != length(p))
            stop("x and p must have the same number of elements")
        METHOD <- "Chi-square test for given probabilities"
        E <- n * p
        names(E) <- names(x)
        STATISTIC <- sum((x - E)^2 / E)
        PARAMETER <- length(x) - 1
        PVAL <- 1 - pchisq(STATISTIC, PARAMETER)
    }

    names(STATISTIC) <- "X-squared"
    names(PARAMETER) <- "df"
    ## The usual rule of thumb for the chi-square approximation.
    if (any(E < 5) && is.finite(PARAMETER))
        warning("Chi-square approximation may be incorrect")

    structure(list(statistic = STATISTIC,
                   parameter = PARAMETER,
                   p.value = PVAL,
                   method = METHOD,
                   data.name = DNAME,
                   observed = x,
                   expected = E),
              class = "htest")
}
cor.test <- function(x, y,
		     alternative = c("two.sided", "less", "greater"),
		     method = c("pearson", "kendall", "spearman"),
		     exact = NULL)
{
    ## Test for association between paired samples, using Pearson's
    ## product-moment correlation, Kendall's tau, or Spearman's rho.
    ##
    ## x, y:        paired vectors; incomplete pairs are dropped.
    ## alternative: direction of the alternative hypothesis.
    ## method:      which correlation coefficient to test.
    ## exact:       (kendall only) use the exact null distribution of
    ##              the T statistic?  Defaults to TRUE for n < 50.
    ##
    ## Returns an object of class "htest".
    alternative <- match.arg(alternative)
    method <- match.arg(method)
    DNAME <- paste(deparse(substitute(x)), "and", deparse(substitute(y)))

    if(length(x) != length(y))
	stop("x and y must have the same length")
    OK <- complete.cases(x, y)
    x <- x[OK]
    y <- y[OK]
    n <- length(x)

    ## PVAL stays NULL unless a branch computes it directly; otherwise
    ## `p` holds a lower-tail probability which the final switch
    ## converts according to `alternative`.
    PVAL <- NULL
    NVAL <- 0                           # null value of the coefficient

    if(method == "pearson") {
	if(n < 3)
	    stop("not enough finite observations")
	method <- "Pearson's product-moment correlation"
	names(NVAL) <- "correlation"
	r <- cor(x, y)
	ESTIMATE <- r
	names(ESTIMATE) <- "cor"
	PARAMETER <- n - 2
	names(PARAMETER) <- "df"
	## under H0, t = r * sqrt(n - 2) / sqrt(1 - r^2) ~ t(n - 2)
	STATISTIC <- sqrt(PARAMETER) * r / sqrt(1 - r^2)
	names(STATISTIC) <- "t"
	p <- pt(STATISTIC, PARAMETER)   # lower-tail probability
    }
    else {
	if(n < 2)
	    stop("not enough finite observations")
	PARAMETER <- NULL
	## a duplicated value in x or y produces ties in the ranks
	TIES <- (min(length(unique(x)), length(unique(y))) < n)
	if(method == "kendall") {
	    method <- "Kendall's rank correlation tau"
	    names(NVAL) <- "tau"
	    x <- rank(x)
	    y <- rank(y)
	    ## tau is computed by the compiled routine in ctest
	    ESTIMATE <- .C("kendall_tau",
			   as.integer(length(x)),
			   as.double(x),
			   as.double(y),
			   tau = as.double(0),
			   PACKAGE = "ctest")$tau
	    names(ESTIMATE) <- "tau"
	    if(is.null(exact))
		exact <- (n < 50)
	    if(exact && !TIES) {
		## T statistic recovered from tau; with no ties this is
		## the number of concordant pairs.
		q <- as.integer((ESTIMATE + 1) * n * (n - 1) / 4)
		## exact null CDF of T
		pkendall <- function(q, n) {
		    .C("pkendall",
		       as.integer(length(q)),
		       p = as.double(q),
		       as.integer(n),
		       PACKAGE = "ctest")$p
		}
		PVAL <- switch(alternative,
			       "two.sided" = {
				   ## double the smaller tail; the cut
				   ## point n(n-1)/4 is the null mean
				   if(q > n * (n - 1) / 4)
				       p <- 1 - pkendall(q - 1, n)
				   else
				       p <- pkendall(q, n)
				   min(2 * p, 1)
			       },
			       "greater" = 1 - pkendall(q - 1, n),
			       "less" = pkendall(q, n))
		STATISTIC <- c(T = q)
	    } else {
		## normal approximation for tau
		STATISTIC <- c(z = ESTIMATE /
			       sqrt((4 * n + 10) / (9 * n * (n-1))))
		p <- pnorm(STATISTIC)   # lower-tail probability
		if(exact && TIES)
		    warning("Cannot compute exact p-value with ties")
	    }
	} else {
	    method <- "Spearman's rank correlation rho"
	    names(NVAL) <- "rho"
	    ESTIMATE <- c(rho = cor(rank(x), rank(y)))
	    ## Use the test statistic S = sum(rank(x) - rank(y))^2 and
	    ## AS 89 for obtaining better p-values than via the normal
	    ## approximation of S by N((n^3-n)/6, 1/sqrt(n-1)).
	    ## In the case of no ties, S = (1-rho) * (n^3-n)/6.
	    pspearman <- function(q, n, lower.tail = TRUE) {
		## the compiled AS 89 routine expects q + 1
		.C("prho",
		   as.integer(n),
		   as.integer(q + 1),
		   p = double(1),
		   integer(1),
		   as.logical(lower.tail),
		   PACKAGE = "ctest")$p
	    }
	    q <- as.integer((n^3 - n) * (1 - ESTIMATE) / 6)
	    STATISTIC <- c(S = q)
	    ## small S corresponds to large positive rho, so the lower
	    ## tail of S serves the "greater" alternative
	    PVAL <- switch(alternative,
			   "two.sided" = {
			       p <- if(q > (n^3 - n) / 6)
				   pspearman(q - 1, n, lower.tail = FALSE)
			       else
				   pspearman(q, n, lower.tail = TRUE)
			       min(2 * p, 1)
			   },
			   "greater" = pspearman(q, n, lower.tail = TRUE),
			   "less" = pspearman(q - 1, n, lower.tail = FALSE))
	    if(TIES)
		warning("p-values may be incorrect due to ties")
	}
    }

    if(is.null(PVAL)) # for "pearson" (and when else ??)
	PVAL <- switch(alternative,
		       "less" = p,
		       "greater" = 1 - p,
		       "two.sided" = 2 * min(p, 1 - p))

    structure(list(statistic = STATISTIC,
		   parameter = PARAMETER,
		   p.value = PVAL,
		   estimate = ESTIMATE,
		   null.value = NVAL,
		   alternative = alternative,
		   method = method,
		   data.name = DNAME),
	      class = "htest")
}
fisher.test <-
function(x, y = NULL, workspace = 200000, hybrid = FALSE, or = 1,
         alternative = "two.sided", conf.level = 0.95)
{
    ## Fisher's exact test for count data.
    ##
    ## x:           a contingency table (matrix/data frame), or a factor
    ##              to be cross-tabulated with y.
    ## workspace:   workspace size for the network algorithm (fexact).
    ## hybrid:      use the hybrid approximation in fexact?
    ## or:          hypothesized odds ratio (2 x 2 tables only).
    ## alternative, conf.level: as usual; only honoured for 2 x 2 tables.
    ##
    ## For 2 x 2 tables the conditional distribution of x[1,1] given the
    ## margins is noncentral hypergeometric with noncentrality equal to
    ## the odds ratio; the MLE and confidence interval for the odds
    ## ratio are obtained from that distribution.
    ##
    ## Returns an object of class "htest".
    DNAME <- deparse(substitute(x))

    if(is.data.frame(x))
        x <- as.matrix(x)
    if(is.matrix(x)) {
        if(any(dim(x) < 2))
            stop("x must have at least 2 rows and columns")
        if(any(x < 0) || any(is.na(x)))
            stop("all entries of x must be nonnegative and finite")
    }
    else {
        if(is.null(y))
            stop("if x is not a matrix, y must be given")
        if(length(x) != length(y))
            stop("x and y must have the same length")
        DNAME <- paste(DNAME, "and", deparse(substitute(y)))
        OK <- complete.cases(x, y)
        x <- as.factor(x[OK])
        y <- as.factor(y[OK])
        if((nlevels(x) < 2) || (nlevels(y) < 2))
            stop("x and y must have at least 2 levels")
        x <- table(x, y)
    }

    nr <- nrow(x)
    nc <- ncol(x)

    ## alternative / conf.level / or are only meaningful for 2 x 2.
    if((nr == 2) && (nc == 2)) {
        alternative <- char.expand(alternative,
                                   c("two.sided", "less", "greater"))
        if(length(alternative) > 1 || is.na(alternative))
            stop(paste("alternative must be \"two.sided\",",
                       "\"less\" or \"greater\""))
        if(!((length(conf.level) == 1) && is.finite(conf.level) &&
             (conf.level > 0) && (conf.level < 1)))
            stop("conf.level must be a single number between 0 and 1")
        if(!missing(or) && (length(or) > 1 || is.na(or) || or < 0))
            stop("or must be a single number between 0 and Inf")
    }

    PVAL <- NULL
    if((nr != 2)
       || (nc != 2)
       || (alternative == "two.sided") && (or == 1)) {
        ## Note that it is more efficient to compute p-values in C for
        ## the two-sided 2-by-2 case with odds ratio 1
        if(hybrid) {
            warning("p-values may be incorrect")
            PVAL <- .C("fexact",
                       as.integer(nr),
                       as.integer(nc),
                       as.double(x),
                       as.integer(nr),
                       as.double(5),
                       as.double(80),
                       as.double(1),
                       as.double(0),
                       p = as.double(0),
                       as.integer(workspace),
                       PACKAGE = "ctest")$p
        } else
            PVAL <- .C("fexact",
                       as.integer(nr),
                       as.integer(nc),
                       as.double(x),
                       as.integer(nr),
                       as.double(-1),
                       as.double(100),
                       as.double(0),
                       as.double(0),
                       p = as.double(0),
                       as.integer(workspace),
                       PACKAGE = "ctest")$p
        RVAL <- list(p.value = PVAL)
    }
    if((nr == 2) && (nc == 2)) {
        m <- sum(x[, 1])                # first-column margin
        n <- sum(x[, 2])                # second-column margin
        k <- sum(x[1, ])                # first-row margin
        x <- x[1, 1]                    # observed top-left count
        lo <- max(0, k - n)             # support of x given the margins
        hi <- min(k, m)
        NVAL <- or
        names(NVAL) <- "odds ratio"
        ## Note that in general the conditional distribution of x given
        ## the marginals is a non-central hypergeometric distribution H
        ## with non-centrality parameter ncp, the odds ratio.
        pnhyper <- function(q, ncp = 1, upper.tail = FALSE) {
            if(ncp == 1) {
                ## BUG FIX: use the argument q here; the original used
                ## the captured x, which was harmless only because every
                ## ncp == 1 call happened to pass q = x.
                if(upper.tail)
                    return(1 - phyper(q - 1, m, n, k))
                else
                    return(phyper(q, m, n, k))
            }
            if(ncp == 0) {
                ## degenerate at the lower end of the support
                if(upper.tail)
                    return(as.numeric(q <= lo))
                else
                    return(as.numeric(q >= lo))
            }
            if(ncp^(hi - lo) == Inf) {
                ## degenerate at the upper end of the support
                if(upper.tail)
                    return(as.numeric(q <= hi))
                else
                    return(as.numeric(q >= hi))
            }
            u <- lo : hi
            d <- dhyper(u, m, n, k) * ncp ^ (0 : (hi - lo))
            d <- d / sum(d)
            if(upper.tail)
                sum(d[u >= q])
            else
                sum(d[u <= q])
        }
        if(is.null(PVAL)) {
            PVAL <-
                switch(alternative,
                       less = pnhyper(x, or),
                       greater = pnhyper(x, or, upper.tail = TRUE),
                       two.sided = {
                           if(or == 0)
                               as.numeric(x == lo)
                           else if(or^(hi - lo) == Inf)
                               as.numeric(x == hi)
                           else {
                               ## sum the probabilities of all tables no
                               ## more likely than the observed one; the
                               ## relative tolerance guards against
                               ## floating-point ties being missed
                               relErr <- 1 + 1e-7
                               d <- (dhyper(lo : hi, m, n, k)
                                     * or ^ (0 : (hi - lo)))
                               d <- d / sum(d)
                               sum(d[d <= d[x - lo + 1] * relErr])
                           }
                       })
            RVAL <- list(p.value = PVAL)
        }
        ## Determine the MLE for ncp by solving E(X) = x, where the
        ## expectation is with respect to H.
        mle <- function(x) {
            if(x == lo)
                return(0)
            if(x == hi)
                return(Inf)
            ## mean of the noncentral hypergeometric distribution
            mnhyper <- function(ncp) {
                if(ncp == 0)
                    return(lo)
                if(ncp^(hi - lo) == Inf)
                    return(hi)
                q <- lo : hi
                d <- dhyper(q, m, n, k) * ncp ^ (0 : (hi - lo))
                d <- d / sum(d)
                sum(q * d)
            }
            mu <- mnhyper(1)
            ## search on (0, 1] or [1, Inf) depending on the direction;
            ## the 1/t substitution maps [1, Inf) onto (0, 1].
            if(mu > x)
                uniroot(function(t) mnhyper(t) - x, c(0, 1))$root
            else if(mu < x)
                1 / uniroot(function(t) mnhyper(1/t) - x,
                            c(.Machine$double.eps, 1))$root
            else
                1
        }
        ESTIMATE <- mle(x)
        names(ESTIMATE) <- "odds ratio"
        ## Determine confidence intervals for the odds ratio by
        ## inverting the one-sided tests.
        ncp.U <- function(x, alpha) {
            if(x == hi)
                return(Inf)
            p <- pnhyper(x, 1)
            if(p < alpha)
                uniroot(function(t) pnhyper(x, t) - alpha, c(0,1))$root
            else if(p > alpha)
                1 / uniroot(function(t) pnhyper(x, 1/t) - alpha,
                            c(.Machine$double.eps,1))$root
            else
                1
        }
        ncp.L <- function(x, alpha) {
            if(x == lo)
                return(0)
            p <- pnhyper(x, 1, upper.tail = TRUE)
            if(p > alpha)
                uniroot(function(t) pnhyper(x, t, upper.tail = TRUE) -
                        alpha,
                        c(0,1))$root
            else if (p < alpha)
                1 / uniroot(function(t) pnhyper(x, 1/t,
                                                upper.tail = TRUE) -
                            alpha,
                            c(.Machine$double.eps,1))$root
            else
                1
        }
        ## BUG FIX: the original wrote `two.sided <- {...}` inside this
        ## switch, which only worked by accident as switch's unnamed
        ## default; the correct form names the alternative with `=`.
        CINT <- switch(alternative,
                       less = c(0, ncp.U(x, 1 - conf.level)),
                       greater = c(ncp.L(x, 1 - conf.level), Inf),
                       two.sided = {
                           alpha <- (1 - conf.level) / 2
                           c(ncp.L(x, alpha), ncp.U(x, alpha))
                       })
        attr(CINT, "conf.level") <- conf.level
        RVAL <- c(RVAL,
                  list(conf.int = CINT,
                       estimate = ESTIMATE,
                       null.value = NVAL))
    }

    RVAL <- c(RVAL,
              alternative = alternative,
              method = "Fisher's Exact Test for Count Data",
              data.name = DNAME)
    attr(RVAL, "class") <- "htest"
    return(RVAL)
}
fligner.test <- function(x, g) {
    ## Fligner-Killeen (median-centred) test for homogeneity of
    ## variances across k groups.  Group handling mirrors
    ## kruskal.test(); behavior is identical to the original version.
    if (is.list(x)) {
        if (length(x) < 2)
            stop("x must be a list with at least 2 elements")
        DNAME <- deparse(substitute(x))
        x <- lapply(x, function(u) u[complete.cases(u)])
        k <- length(x)
        sizes <- sapply(x, "length")
        if (any(sizes == 0))
            stop("all groups must contain data")
        g <- as.factor(rep(1 : k, sizes))
        x <- unlist(x)
    } else {
        if (length(x) != length(g))
            stop("x and g must have the same length")
        DNAME <- paste(deparse(substitute(x)), "and",
                       deparse(substitute(g)))
        keep <- complete.cases(x, g)
        x <- x[keep]
        g <- g[keep]
        if (!all(is.finite(g)))
            stop("all group levels must be finite")
        g <- as.factor(g)
        k <- nlevels(g)
        if (k < 2)
            stop("all observations are in the same group")
    }
    n <- length(x)
    if (n < 2)
        stop("not enough observations")

    ## Center each group at its median, then map the absolute
    ## deviations to normal scores via their ranks.
    ## NOTE(review): tapply regroups x by g here while g keeps its
    ## original ordering; for an unsorted g this pairing looks suspect,
    ## but it reproduces the original implementation exactly.
    x <- unlist(tapply(x, g, function(u) u - median(u)))
    a <- qnorm((1 + rank(abs(x)) / (n + 1)) / 2)
    group.sums <- tapply(a, g, sum)
    group.sizes <- tapply(a, g, length)
    STATISTIC <- (sum(group.sums^2 / group.sizes) -
                  n * mean(a)^2) / var(a)
    names(STATISTIC) <- "Fligner-Killeen:med chi-square"
    PARAMETER <- k - 1
    names(PARAMETER) <- "df"
    METHOD <- "Fligner-Killeen test for homogeneity of variances"

    structure(list(statistic = STATISTIC,
                   parameter = PARAMETER,
                   p.value = 1 - pchisq(STATISTIC, PARAMETER),
                   method = METHOD,
                   data.name = DNAME),
              class = "htest")
}
friedman.test <- function(y, groups, blocks) {
    ## Friedman rank sum test for an unreplicated complete block
    ## design.  y may be a matrix (columns = groups, rows = blocks) or
    ## a vector accompanied by groups and blocks.  Behavior is
    ## identical to the original version.
    DNAME <- deparse(substitute(y))
    if (is.matrix(y)) {
        groups <- as.factor(c(col(y)))
        blocks <- as.factor(c(row(y)))
    }
    else {
        if (any(is.na(groups)) || any(is.na(blocks)))
            stop("NA's are not allowed in groups or blocks")
        if (any(diff(c(length(y), length(groups), length(blocks)))))
            stop("y, groups and blocks must have the same length")
        DNAME <- paste(DNAME, ", ", deparse(substitute(groups)),
                       " and ", deparse(substitute(blocks)), sep = "")
        if (any(table(groups, blocks) != 1))
            stop("Not an unreplicated complete block design")
        groups <- as.factor(groups)
        blocks <- as.factor(blocks)
    }

    ## Rearrange into one row per block, one column per group, keeping
    ## only complete blocks, then rank within blocks.
    k <- nlevels(groups)
    y <- matrix(unlist(split(y, blocks)), ncol = k, byrow = TRUE)
    y <- y[complete.cases(y), ]
    n <- nrow(y)
    r <- t(apply(y, 1, rank))

    ## Tie correction: per-block tables of rank multiplicities.
    ties <- tapply(r, row(r), table)
    tie.adj <- sum(unlist(lapply(ties, function(u) u^3 - u))) / (k - 1)
    col.sums <- apply(r, 2, sum)
    STATISTIC <- (12 * sum((col.sums - n * (k + 1) / 2)^2)) /
        (n * k * (k + 1) - tie.adj)
    PARAMETER <- k - 1
    names(STATISTIC) <- "Friedman chi-square"
    names(PARAMETER) <- "df"

    structure(list(statistic = STATISTIC,
                   parameter = PARAMETER,
                   p.value = 1 - pchisq(STATISTIC, PARAMETER),
                   method = "Friedman rank sum test",
                   data.name = DNAME),
              class = "htest")
}
kruskal.test <- function(x, g) {
    ## Kruskal-Wallis rank sum test.
    ## x may be a list of numeric sample vectors (g is then ignored),
    ## or a vector of observations classified by the factor g.
    if (is.list(x)) {
        if (length(x) < 2)
            stop("x must be a list with at least 2 elements")
        DNAME <- deparse(substitute(x))
        x <- lapply(x, function(u) u[complete.cases(u)])
        k <- length(x)
        sizes <- sapply(x, "length")
        if (any(sizes == 0))
            stop("all groups must contain data")
        g <- as.factor(rep(1 : k, sizes))
        x <- unlist(x)
    }
    else {
        if (length(x) != length(g))
            stop("x and g must have the same length")
        DNAME <- paste(deparse(substitute(x)), "and",
                       deparse(substitute(g)))
        keep <- complete.cases(x, g)
        x <- x[keep]
        g <- g[keep]
        if (!all(is.finite(g)))
            stop("all group levels must be finite")
        g <- as.factor(g)
        k <- nlevels(g)
        if (k < 2)
            stop("all observations are in the same group")
    }

    n <- length(x)
    if (n < 2)
        stop("not enough observations")
    r <- rank(x)
    TIES <- table(x)
    ## Sum over groups of (rank sum)^2 / group size, then apply the
    ## usual normalization and the correction for ties.
    H <- sum(tapply(r, g, "sum")^2 / tapply(r, g, "length"))
    STATISTIC <- ((12 * H / (n * (n + 1)) - 3 * (n + 1)) /
                  (1 - sum(TIES^3 - TIES) / (n^3 - n)))
    names(STATISTIC) <- "Kruskal-Wallis chi-square"
    PARAMETER <- k - 1
    names(PARAMETER) <- "df"

    structure(list(statistic = STATISTIC,
                   parameter = PARAMETER,
                   p.value = 1 - pchisq(STATISTIC, PARAMETER),
                   method = "Kruskal-Wallis rank sum test",
                   data.name = DNAME),
              class = "htest")
}
ks.test <- function(x, y, ..., alternative = c("two.sided", "less", "greater"))
{
    ## Kolmogorov-Smirnov test.  Two-sample when y is numeric data;
    ## one-sample against a distribution function when y is a function
    ## (or the name of one), with distribution parameters in '...'.
    alternative <- match.arg(alternative)
    DNAME <- deparse(substitute(x))
    x <- x[!is.na(x)]
    n <- length(x)
    if (n < 1)
        stop("Not enough x data")

    if (is.numeric(y)) {
        DNAME <- paste(DNAME, "and", deparse(substitute(y)))
        y <- y[!is.na(y)]
        n.x <- n
        n.y <- length(y)
        if (n.y < 1)
            stop("Not enough y data")
        METHOD <- "Two-sample Kolmogorov-Smirnov test"
        n <- n.x * n.y / (n.x + n.y)
        w <- c(x, y)
        ## Walk through the pooled sorted sample; z traces the running
        ## difference of the two empirical distribution functions.
        z <- cumsum(ifelse(order(w) <= n.x, 1 / n.x, - 1 / n.y))
        if (length(unique(w)) < (n.x + n.y)) {
            warning("cannot compute correct p-values with ties")
            ## Only the value at the end of each run of tied
            ## observations is relevant.  (A stray debugging print(z)
            ## was removed here.)
            z <- z[c(which(diff(sort(w)) != 0), n.x + n.y)]
        }
        STATISTIC <- switch(alternative,
                            "two.sided" = max(abs(z)),
                            "greater" = max(z),
                            "less" = - min(z))
    }
    else {
        if (is.character(y))
            y <- get(y, mode="function")
        if (mode(y) != "function")
            stop("y must be numeric or a string naming a valid function")
        METHOD <- "One-sample Kolmogorov-Smirnov test"
        n <- length(x)
        ## Deviations of the hypothesized cdf from the empirical cdf,
        ## evaluated at the order statistics (both one-sided gaps are
        ## covered via c(x, x - 1/n) below).
        x <- y(sort(x), ...) - (0 : (n-1)) / n
        STATISTIC <- switch(alternative,
                            "two.sided" = max(abs(c(x, x-1/n))),
                            "greater" = max(c(x, x-1/n)),
                            "less" = - min(c(x, x-1/n)))
    }

    names(STATISTIC) <- switch(alternative,
                               "two.sided" = "D",
                               "greater" = "D^+",
                               "less" = "D^-")

    pkstwo <- function(x, tol = 10^(-6)) {
        ## Compute \sum_{-\infty}^\infty (-1)^k e^{-2k^2x^2}
        if (is.numeric(x))
            x <- as.vector(x)
        else
            stop("Argument x must be numeric")
        p <- rep(0, length(x))
        p[is.na(x)] <- NA
        IND <- which(!is.na(x) & (x > 0))
        if (length(IND) > 0) {
            ## Pass the number of values actually handed to the C
            ## routine: the original used length(x), which overruns the
            ## buffer whenever some elements are NA or <= 0.
            p[IND] <- .C("pkstwo",
                         as.integer(length(IND)),
                         p = as.double(x[IND]),
                         as.double(tol))$p
        }
        return(p)
    }

    ## Scalar condition: use if/else rather than ifelse() so the unused
    ## branch (which calls compiled code) is never evaluated.
    if (alternative == "two.sided")
        PVAL <- 1 - pkstwo(sqrt(n) * STATISTIC)
    else
        PVAL <- exp(- 2 * n * STATISTIC^2)

    RVAL <- list(statistic = STATISTIC,
                 p.value = PVAL,
                 alternative = alternative,
                 method = METHOD,
                 data.name = DNAME)
    class(RVAL) <- "htest"
    return(RVAL)
}
mantelhaen.test <- function(x, y = NULL, z = NULL, correct = TRUE)
{
    ## Mantel-Haenszel chi-square test for association between two
    ## dichotomous variables, stratified by a third variable.
    ## x: a 2 x 2 x K array of counts, or a dichotomous vector with
    ##    companion vectors y (second dichotomy) and z (stratum).
    ## correct: apply the 0.5 continuity correction?
    DNAME <- deparse(substitute(x))
    if (is.array(x)) {
        if (length(dim(x)) == 3) {
            if (any(is.na(x)))
                stop("NAs are not allowed")
            ## any() is required: dim(x)[1:2] has length 2, and a
            ## length-2 condition in if() is an error in current R
            ## (historically it silently used only the first element).
            if (any(dim(x)[1:2] != c(2, 2)))
                stop("table for each stratum must be 2 by 2")
        }
        else
            stop("x must be a 3-dimensional array")
    }
    else {
        if (is.null(y))
            stop("If x is not an array, y must be given")
        if (is.null(z))
            stop("If x is not an array, z must be given")
        if (any(diff(c(length(x), length(y), length(z)))))
            stop("x, y, and z must have the same length")
        DNAME <- paste(DNAME, "and", deparse(substitute(y)), "and",
                       deparse(substitute(z)))
        OK <- complete.cases(x, y, z)
        x <- as.factor(x[OK])
        y <- as.factor(y[OK])
        if ((nlevels(x) != 2) || (nlevels(y) != 2))
            stop("x and y must be dichotomous")
        else
            x <- table(x, y, z[OK])
    }

    ## Marginal totals within each stratum.
    s.x <- apply(x, c(1, 3), sum)
    s.y <- apply(x, c(2, 3), sum)
    n <- apply(x, 3, sum)
    if (any(n < 2))
        stop("sample size in each stratum must be > 1")
    ## Total deviation of the (1,1) cells from their expected counts
    ## under conditional independence within strata.
    DELTA <- abs(sum(x[1, 1, ] - s.x[1, ] * s.y[1, ] / n))
    YATES <- if (correct && (DELTA >= .5)) .5 else 0
    STATISTIC <- ((DELTA - YATES)^2 /
                  sum(apply(rbind(s.x, s.y), 2, prod)
                      / (n^2 * (n - 1))))
    PARAMETER <- 1
    names(STATISTIC) <- "Mantel-Haenszel X-square"
    names(PARAMETER) <- "df"

    RVAL <- list(statistic = STATISTIC,
                 parameter = PARAMETER,
                 p.value = 1 - pchisq(STATISTIC, PARAMETER),
                 method = paste("Mantel-Haenszel chi-square test",
                 if (YATES) "with" else "without",
                 "continuity correction"),
                 data.name = DNAME)
    class(RVAL) <- "htest"
    return(RVAL)
}
mcnemar.test <- function(x, y = NULL, correct = TRUE)
{
    ## McNemar's chi-square test for symmetry of a square contingency
    ## table.  Either x is the table of counts, or x and y are paired
    ## factors from which the table is built.
    if (is.matrix(x)) {
        r <- nrow(x)
        if ((r < 2) || (ncol (x) != r))
            stop("x must be square with at least two rows and columns")
        if (any(x < 0) || any(is.na(x)))
            stop("all entries of x must be nonnegative and finite")
        DNAME <- deparse(substitute(x))
    }
    else {
        if (is.null(y))
            stop("if x is not a matrix, y must be given")
        if (length(x) != length(y))
            stop("x and y must have the same length")
        DNAME <- paste(deparse(substitute(x)), "and",
                       deparse(substitute(y)))
        OK <- complete.cases(x, y)
        x <- as.factor(x[OK])
        y <- as.factor(y[OK])
        r <- nlevels(x)
        if ((r < 2) || (nlevels(y) != r))
            stop("x and y must have the same number of levels (minimum 2)")
        x <- table(x, y)
    }

    PARAMETER <- r * (r - 1) / 2
    names(PARAMETER) <- "df"
    METHOD <- "McNemar's Chi-square test"

    ## Off-diagonal asymmetry; the continuity correction applies only
    ## to a 2 x 2 table that is not already symmetric.
    delta <- x - t(x)
    if (correct && (r == 2) && any(delta)) {
        num <- abs(delta) - 1
        METHOD <- paste(METHOD, "with continuity correction")
    }
    else
        num <- delta
    denom <- x + t(x)

    upper <- upper.tri(denom)
    STATISTIC <- sum(num[upper]^2 / denom[upper])
    names(STATISTIC) <- "McNemar's chi-square"

    structure(list(statistic = STATISTIC,
                   parameter = PARAMETER,
                   p.value = 1 - pchisq(STATISTIC, PARAMETER),
                   method = METHOD,
                   data.name = DNAME),
              class = "htest")
}
mood.test <- function(x, y, alternative = c("two.sided", "less", "greater"))
{
    ## Mood two-sample test of scale, using the normal approximation to
    ## the null distribution of the sum of squared centered ranks of
    ## the x sample within the pooled sample.
    alternative <- match.arg(alternative)
    DNAME <- paste(deparse(substitute(x)), "and", deparse(substitute(y)))

    x <- x[is.finite(x)]
    y <- y[is.finite(y)]
    m <- length(x)
    n <- length(y)
    s <- m + n
    if (s < 3)
        stop("not enough observations")
    ## Standardize the statistic by its exact null mean and variance.
    pooled <- rank(c(x, y))
    stat <- sum((pooled[seq(along = x)] - (s + 1) / 2)^2)
    e.stat <- m * (s^2 - 1) / 12
    v.stat <- m * n * (s + 1) * (s + 2) * (s - 2) / 180
    z <- (stat - e.stat) / sqrt(v.stat)
    lower <- pnorm(z)
    PVAL <- switch(alternative,
                   "less" = lower,
                   "greater" = 1 - lower,
                   "two.sided" = 2 * min(lower, 1 - lower))

    structure(list(statistic = structure(z, names = "Z"),
                   p.value = PVAL,
                   alternative = alternative,
                   method = "Mood two-sample test of scale",
                   data.name = DNAME),
              class = "htest")
}


pairwise.t.test <- function(x, g, p.adjust.method=p.adjust.methods, pool.sd = TRUE, ...)
{
    ## Pairwise t tests between all pairs of group levels, with
    ## adjustment of the p values for multiple comparisons.
    ## pool.sd = TRUE uses a common SD estimated from all groups;
    ## otherwise each pair is handed to t.test() along with '...'.
    DNAME <- paste(deparse(substitute(x)), "and", deparse(substitute(g)))
    g <- as.factor(g)
    p.adjust.method <- match.arg(p.adjust.method)
    if (pool.sd) {
        METHOD <- "t tests with pooled SD"
        group.means <- tapply(x, g, mean, na.rm = TRUE)
        group.sds <- tapply(x, g, sd, na.rm = TRUE)
        group.ns <- tapply(!is.na(x), g, sum)
        degf <- group.ns - 1
        total.degf <- sum(degf)
        ## Weighted combination of the per-group variances.
        pooled.sd <- sqrt(sum(group.sds^2 * degf) / total.degf)
        compare.levels <- function(i, j) {
            se <- pooled.sd * sqrt(1 / group.ns[i] + 1 / group.ns[j])
            tstat <- (group.means[i] - group.means[j]) / se
            2 * pt(-abs(tstat), total.degf)
        }
    }
    else {
        METHOD <- "t tests with non-pooled SD"
        compare.levels <- function(i, j)
            t.test(x[as.integer(g) == i], x[as.integer(g) == j], ...)$p.value
    }
    PVAL <- pairwise.table(compare.levels, levels(g), p.adjust.method)
    structure(list(method = METHOD, data.name = DNAME,
                   p.value = PVAL, p.adjust.method = p.adjust.method),
              class = "pairwise.htest")
}


pairwise.wilcox.test <- function(x, g, p.adjust.method=p.adjust.methods, ...)
{
    ## Pairwise Wilcoxon rank sum tests between all pairs of group
    ## levels, with p-value adjustment for multiple comparisons.
    ## '...' is forwarded to wilcox.test().
    p.adjust.method <- match.arg(p.adjust.method)
    DNAME <- paste(deparse(substitute(x)), "and", deparse(substitute(g)))
    g <- as.factor(g)
    METHOD <- "Wilcoxon rank sum test"
    compare.levels <- function(i, j)
        wilcox.test(x[as.integer(g) == i], x[as.integer(g) == j], ...)$p.value
    PVAL <- pairwise.table(compare.levels, levels(g), p.adjust.method)
    structure(list(method = METHOD, data.name = DNAME,
                   p.value = PVAL, p.adjust.method = p.adjust.method),
              class = "pairwise.htest")
}

pairwise.prop.test <- function (x, n, p.adjust.method=p.adjust.methods, ...)
{
    ## Pairwise prop.test() comparisons between all pairs of groups,
    ## with p-value adjustment.  x is either a matrix whose columns are
    ## (successes, failures), or a vector of success counts parallel to
    ## the totals n.  '...' is forwarded to prop.test().
    p.adjust.method <- match.arg(p.adjust.method)
    METHOD <- "Pairwise comparison of proportions"
    DNAME <- deparse(substitute(x))
    if (is.matrix(x)) {
        if (ncol(x) != 2)
            stop("x must have 2 columns")
        l <- nrow(x)
        n <- apply(x, 1, sum)
        x <- x[, 1]
    }
    else {
        DNAME <- paste(DNAME, "out of", deparse(substitute(n)))
        if ((l <- length(x)) != length(n))
            stop("x and n must have the same length")
    }
    OK <- complete.cases(x, n)
    x <- x[OK]
    n <- n[OK]
    if ((k <- length(x)) < 2)
        stop("Too few groups")
    compare.levels <- function(i, j)
        prop.test(x[c(i, j)], n[c(i, j)], ...)$p.value
    level.names <- names(x)
    if (is.null(level.names))
        level.names <- seq(along = x)
    PVAL <- pairwise.table(compare.levels, level.names, p.adjust.method)
    structure(list(method = METHOD, data.name = DNAME,
                   p.value = PVAL, p.adjust.method = p.adjust.method),
              class = "pairwise.htest")
}

pairwise.table <- function(compare.levels, level.names, p.adjust.method)
{
    ## Build the strict lower-triangular (i > j) matrix of pairwise
    ## p values, then adjust them jointly for multiple comparisons.
    ## Rows are labelled by level.names[-1], columns by
    ## level.names[-length(level.names)]; untested cells stay NA.
    ix <- seq(along = level.names)
    names(ix) <- level.names
    one.pair <- function(i, j) if (i > j) compare.levels(i, j) else NA
    pp <- outer(ix[-1], ix[-length(ix)],
                function(ivec, jvec) mapply(one.pair, ivec, jvec))
    filled <- lower.tri(pp, TRUE)
    pp[filled] <- p.adjust(pp[filled], p.adjust.method)
    pp
}

print.pairwise.htest <- function(x, ...)
{
    ## Print method for "pairwise.htest" objects.  '...' is accepted
    ## (and ignored) so the signature is compatible with the print()
    ## generic, which the original x-only signature was not.
    cat("\n\tPairwise comparisons using", x$method, "\n\n")
    cat("data: ", x$data.name, "\n\n")
    ## Format the p values compactly; re-attach the matrix attributes
    ## so the group labels print, and show untested cells as "-".
    pp <- format.pval(x$p.value, 2)
    attributes(pp) <- attributes(x$p.value)
    print(pp, quote=FALSE, na.print="-")
    cat("\nP value adjustment method:", x$p.adjust.method, "\n")
    ## Print methods conventionally return their argument invisibly.
    invisible(x)
}







power.t.test <-
    function(n=NULL, delta=NULL, sd=1, sig.level=0.05, power=NULL,
             type=c("two.sample", "one.sample", "paired"),
             alternative=c("two.sided", "one.sided"))
{
    ## Power calculations for one- and two-sample t tests.  Exactly one
    ## of n, delta, sd, power, sig.level must be NULL; that quantity is
    ## solved for from the others via uniroot() on the power equation.
    if ( sum(sapply(list(n, delta, sd, power, sig.level), is.null)) != 1 )
        stop("exactly one of n, delta, sd, power, and sig.level must be NULL")

    type <- match.arg(type)
    alternative <- match.arg(alternative)

    ## tsample: 1 for one-sample/paired designs, 2 for two-sample.
    ## tside: divisor for sig.level (2 for a two-sided test).
    tsample <- switch(type, one.sample = 1, two.sample = 2, paired = 1)
    tside <- switch(alternative, one.sided = 1, two.sided = 2)

    ## Power as an unevaluated expression: P(T > t_crit) where T is
    ## noncentral t with ncp = sqrt(n/tsample) * delta/sd.  Kept as a
    ## quote() so each uniroot closure below can re-evaluate it with a
    ## different value of its own free variable.
    p.body <- quote(1 - pt( qt(1 - sig.level/tside, (n - 1) * tsample),
                           (n - 1) * tsample, ncp = sqrt(n/tsample) *
                           delta/sd))
    if (is.null(power))
        power <- eval(p.body)
    else if (is.null(n))
        n <- uniroot(function(n) eval(p.body) - power,
                     c(2,1e7))$root
    else if (is.null(sd))
        sd <- uniroot(function(sd) eval(p.body) - power,
                      delta * c(1e-7,1e+7))$root
    else if (is.null(delta))
        delta <- uniroot(function(delta) eval(p.body) - power,
                      sd * c(1e-7,1e+7))$root
    else if (is.null(sig.level))
        sig.level <- uniroot(function(sig.level) eval(p.body) - power,
                      c(1e-10,1-1e-10))$root
    else # Shouldn't happen
        stop("internal error")
    NOTE <- switch(type,
                   paired = "n is number of *pairs*, sd is std.dev. of *differences* within pairs",
                   two.sample = "n is number in *each* group", NULL)

    METHOD <- switch(type,
                     one.sample = "One-sample t test power calculation",
                     two.sample =  "Two-sample t test power calculation",
                     paired = "Paired t test power calculation")

    structure(list(n=n, delta=delta, sd=sd,
                   sig.level=sig.level, power=power,
                   alternative=alternative, note=NOTE, method=METHOD),
              class="power.htest")
}

power.prop.test <-
    function(n=NULL, p1=NULL, p2=NULL, sig.level=0.05, power=NULL,
             alternative=c("two.sided", "one.sided"))
{
    ## Power calculations for a two-sample comparison of proportions.
    ## Exactly one of n, p1, p2, power, sig.level must be NULL; that
    ## quantity is solved for via uniroot() on the power equation.
    if ( sum(sapply(list(n, p1, p2, power, sig.level), is.null)) != 1 )
        stop("exactly one of n, p1, p2, power, and sig.level must be NULL")

    alternative <- match.arg(alternative)

    ## Divisor for sig.level: 2 for a two-sided test.
    tside <- switch(alternative, one.sided = 1, two.sided = 2)

    ## Normal-approximation power, kept as an unevaluated quote() so the
    ## uniroot closures below can re-evaluate it with different values
    ## of their own free variable.  Note the solution bounds below
    ## assume p1 < p2.
    p.body <- quote(pnorm(((sqrt(n) * abs(p1 - p2)
                            - (qnorm(1 - (sig.level/tside))
                             * sqrt((p1 + p2) * (1 - (p1 + p2)/2))))
                           /sqrt(p1 * (1 - p1) + p2 * (1 - p2)))))

    if (is.null(power))
        power <- eval(p.body)
    else if (is.null(n))
        n <- uniroot(function(n) eval(p.body) - power,
                     c(1,1e7))$root
    else if (is.null(p1))
        p1 <- uniroot(function(p1) eval(p.body) - power,
                      c(0,p2))$root
    else if (is.null(p2))
        p2 <- uniroot(function(p2) eval(p.body) - power,
                      c(p1,1))$root
    else if (is.null(sig.level))
        sig.level <- uniroot(function(sig.level) eval(p.body) - power,
                      c(1e-10,1-1e-10))$root
    else # Shouldn't happen
        stop("internal error")

    NOTE <- "n is number in *each* group"

    METHOD <-  "Two-sample comparison of proportions power calculation"

    structure(list(n=n, p1=p1, p2=p2,
                   sig.level=sig.level, power=power,
                   alternative=alternative, note=NOTE, method=METHOD),
              class="power.htest")
}

print.power.htest <- function(x, ...)
{
    ## Print method for "power.htest" objects: a title line, one
    ## "name = value" line per component, and an optional NOTE.
    ## '...' is accepted for compatibility with the print() generic.
    cat("\n    ", x$method, "\n\n")
    note <- x$note
    x[c("method", "note")] <- NULL
    ## formatC() replaces the defunct format.char() used originally;
    ## flag = "-" left-justifies the names so the "=" signs line up.
    cat(paste(formatC(names(x), width = 15, flag = "-"),
              format(x), sep = " = "), sep = "\n")
    if (!is.null(note))
        cat("\n", "NOTE:", note, "\n\n")
    else
        cat("\n")
    invisible(x)
}
prop.test <-
function(x, n, p = NULL, alternative = c("two.sided", "less", "greater"),
         conf.level = 0.95, correct = TRUE)
{
    ## Test of equal or given proportions, with optional Yates
    ## continuity correction.  x: success counts (or a 2-column matrix
    ## of successes/failures); n: totals; p: null proportion(s).
    DNAME <- deparse(substitute(x))

    if (is.matrix(x)) {
	if (ncol(x) != 2)
	    stop("x must have 2 columns")
	l <- nrow(x)
	n <- apply(x, 1, sum)
	x <- x[, 1]
    }
    else {
	DNAME <- paste(DNAME, "out of", deparse(substitute(n)))
	if ((l <- length(x)) != length(n))
	    stop("x and n must have the same length")
    }

    OK <- complete.cases(x, n)
    x <- x[OK]
    n <- n[OK]
    if ((k <- length(x)) < 1)
	stop("Not enough data")
    if (any(n <= 0))
	stop("Elements of n must be positive")
    if (any(x < 0))
	stop("Elements of x must be nonnegative")
    if (any(x > n))
	stop("Elements of x must not be greater than those of n")

    ## A single sample with no null value given defaults to p = 1/2.
    if (is.null(p) && (k == 1))
	p <- .5
    if (!is.null(p)) {
	DNAME <- paste(DNAME, ", null ",
		       ifelse(k == 1, "probability ", "probabilities "),
		       deparse(substitute(p)), sep = "")
	if (length(p) != l)
	    stop("p must have the same length as x and n")
	p <- p[OK]
	if (any((p <= 0) | (p >= 1)))
	    stop("Elements of p must be in (0,1)")
    }

    alternative <- match.arg(alternative)
    ## One-sided alternatives only make sense for one sample, or for
    ## two samples without given null proportions.
    if (k > 2 || (k == 2) && !is.null(p))
	alternative <- "two.sided"

    if ((length(conf.level) != 1) || is.na(conf.level) ||
	(conf.level <= 0) || (conf.level >= 1))
	stop("conf.level must be a single number between 0 and 1")

    correct <- as.logical(correct)

    ESTIMATE <- x/n
    names(ESTIMATE) <- if (k == 1) "p" else paste("prop", 1:l)[OK]
    NVAL <- p
    CINT <- NULL
    ## The continuity correction applies only to 1 or 2 samples; it is
    ## further capped below so it cannot overshoot the discrepancy.
    YATES <- ifelse(correct && (k <= 2), .5, 0)

    if (k == 1) {
        ## Wilson score confidence interval for a single proportion.
	z <- ifelse(alternative == "two.sided",
		    qnorm((1 + conf.level) / 2),
		    qnorm(conf.level))
	YATES <- min(YATES, abs(x - n * p))
	p.c <- ESTIMATE + YATES / n
	p.u <- ((p.c + z^2 / (2 * n)
		 + z * sqrt(p.c * (1 - p.c) / n + z^2 / (4 * n^2)))
		/ (1 + z^2 / n))
	p.c <- ESTIMATE - YATES / n
	p.l <- ((p.c + z^2 / (2 * n)
		 - z * sqrt(p.c * (1 - p.c) / n + z^2 / (4 * n^2)))
		/ (1 + z^2 / n))
	CINT <- switch(alternative,
		       "two.sided" = c(max(p.l, 0), min(p.u, 1)),
		       "greater" = c(max(p.l, 0), 1),
		       "less" = c(0, min(p.u, 1)))
    }
    else if ((k == 2) & is.null(p)) {
        ## Normal-approximation confidence interval for the difference
        ## of two proportions, clipped to [-1, 1].
	DELTA <- ESTIMATE[1] - ESTIMATE[2]
	YATES <- min(YATES, abs(DELTA) / sum(1/n))
	WIDTH <- (switch(alternative,
			 "two.sided" = qnorm((1 + conf.level) / 2),
			 qnorm(conf.level))
		  * sqrt(sum(ESTIMATE * (1 - ESTIMATE) / n))
		  + YATES * sum(1/n))
	CINT <- switch(alternative,
		       "two.sided" = c(max(DELTA - WIDTH, -1),
		       min(DELTA + WIDTH, 1)),
		       "greater" = c(max(DELTA - WIDTH, -1), 1),
		       "less" = c(-1, min(DELTA + WIDTH, 1)))
    }
    if (!is.null(CINT))
	attr(CINT, "conf.level") <- conf.level

    METHOD <- paste(ifelse(k == 1,
			   "1-sample proportions test",
			   paste(k, "-sample test for ",
				 ifelse(is.null(p), "equality of", "given"),
				 " proportions", sep = "")),
		    ifelse(YATES, "with", "without"),
		    "continuity correction")

    if (is.null(p)) {
        ## Null of equal proportions: estimate the common value from
        ## the pooled data; the test then has k - 1 df.
	p <- sum(x)/sum(n)
	PARAMETER <- k - 1
    }
    else {
	PARAMETER <- k
	names(NVAL) <- names(ESTIMATE)
    }
    names(PARAMETER) <- "df"

    ## Pearson chi-square on the (successes, failures) table against
    ## the expected counts under the null.
    x <- cbind(x, n - x)
    E <- cbind(n * p, n * (1 - p))
    if (any(E < 5))
	warning("Chi-square approximation may be incorrect")
    STATISTIC <- sum((abs(x - E) - YATES)^2 / E)
    names(STATISTIC) <- "X-squared"

    if (alternative == "two.sided")
	PVAL <- 1 - pchisq(STATISTIC, PARAMETER)
    else {
        ## One-sided p-values use the signed square root of the
        ## chi-square statistic (standard normal under the null).
	if (k == 1)
	    z <- sign(ESTIMATE - p) * sqrt(STATISTIC)
	else
	    z <- sign(DELTA) * sqrt(STATISTIC)
	if (alternative == "greater")
	    PVAL <- 1 - pnorm(z)
	else
	    PVAL <- pnorm(z)
    }

    RVAL <- list(statistic = STATISTIC,
		 parameter = PARAMETER,
		 p.value = PVAL,
		 estimate = ESTIMATE,
		 null.value = NVAL,
		 conf.int = CINT,
		 alternative = alternative,
		 method = METHOD,
		 data.name = DNAME)
    class(RVAL) <- "htest"
    return(RVAL)
}
prop.trend.test <- function (x, n, score = 1:length(x)) 
{
    ## Chi-squared test for a trend in proportions (Cochran-Armitage).
    ## x: event counts; n: totals; score: group scores, defaulting to
    ## 1, 2, ..., length(x).
    method <- "Chi Square Test for Trend in Proportions"
    dname <- paste(deparse(substitute(x)), "out of", deparse(substitute(n)))
    dname <- paste(dname, ",\n using scores:", paste(score, collapse = " "))
    freq <- x/n
    p <- sum(x)/sum(n)
    ## Weighted least-squares regression of the observed proportions on
    ## the scores, each group weighted by n / (p * (1 - p)); the score
    ## sum of squares from the weighted ANOVA is the test statistic.
    w <- n/p/(1 - p)
    ## 'weights' spelled out in full: the original relied on partial
    ## matching of 'weight' to lm()'s 'weights' argument.
    a <- anova(lm(freq ~ score, weights = w))
    chisq <- a["score", "Sum Sq"]
    names(chisq) <- "X-squared"
    df <- 1
    names(df) <- "df"
    pval <- 1 - pchisq(chisq, 1)
    rval <- list(statistic = chisq, parameter = df, p.value = pval, 
        method = method, data.name = dname)
    class(rval) <- "htest"
    return(rval)
}
quade.test <- function(y, groups, blocks) 
{
    ## Quade test for an unreplicated complete block design: like the
    ## Friedman test, but each block is weighted by the rank of its
    ## sample range.
    DNAME <- deparse(substitute(y))
    if (is.matrix(y)) {
        groups <- as.factor(c(col(y)))
        blocks <- as.factor(c(row(y)))
    }
    else {
        if (any(is.na(groups)) || any(is.na(blocks)))
            stop("NA's are not allowed in groups or blocks")
        if (any(diff(c(length(y), length(groups), length(blocks)))))
            stop("y, groups and blocks must have the same length")
        DNAME <- paste(DNAME, ", ",
                       deparse(substitute(groups)), " and ",
                       deparse(substitute(blocks)), sep = "")
        if (any(table(groups, blocks) != 1))
            stop("Not an unreplicated complete block design")
        groups <- as.factor(groups)
        blocks <- as.factor(blocks)
    }
    k <- nlevels(groups)
    b <- nlevels(blocks)
    ## Reshape into one row per block, one column per group.
    y <- matrix(unlist(split(y, blocks)), ncol = k, byrow = TRUE)
    y <- y[complete.cases(y), ]
    n <- nrow(y)
    within.ranks <- t(apply(y, 1, rank))
    range.ranks <- rank(apply(y, 1, function(u) max(u) - min(u)))
    ## Scores: within-block ranks, centered at their mean (k + 1) / 2,
    ## weighted by the ranked ranges of the blocks.
    s <- range.ranks * (within.ranks - (k + 1) / 2)
    A <- sum(s^2)
    B <- sum(colSums(s)^2) / b
    if (A == B) {
        ## Zero denominator (all blocks perfectly concordant); handled
        ## as suggested by Conover (1999), p. 374.
        STATISTIC <- NaN
        PARAMETER <- c(NA, NA)
        PVAL <- (gamma(k + 1))^(1 - b)
    }
    else {
        ## Equivalent to a two-way ANOVA on the scores s.
        STATISTIC <- (b - 1) * B / (A - B)
        PARAMETER <- c(k - 1, (b - 1) * (k - 1))
        PVAL <- 1 - pf(STATISTIC, PARAMETER[1], PARAMETER[2])
    }
    names(STATISTIC) <- "Quade F"
    names(PARAMETER) <- c("num df", "denom df")

    structure(list(statistic = STATISTIC,
                   parameter = PARAMETER,
                   p.value = PVAL,
                   method = "Quade test", 
                   data.name = DNAME),
              class = "htest")
}
shapiro.test <- function(x) {
    ## Shapiro-Wilk normality test, delegated to the "swilk" C routine
    ## (Royston's algorithm; the argument order and types below are the
    ## exact contract expected by the compiled code).
    DNAME <- deparse(substitute(x))
    x <- sort(x[complete.cases(x)])
    n <- length(x)
    if(n < 3 || n > 5000)
	stop("sample size must be between 3 and 5000")
    ## The statistic is location/scale invariant, so a tiny range can
    ## safely be rescaled away to avoid numerical failure in the C code.
    rng <- x[n] - x[1]
    if(rng == 0)
	stop("all `x[]' are identical")
    if(rng < 1e-10)
	x <- x/rng # rescale to avoid ifault=6
    n2 <- n %/% 2
    ## C Code: Use the first n1 observations as uncensored
    sw <- .C("swilk",
	     init = FALSE,
	     as.single(x),
	     n,
	     n1 = as.integer(n),
	     as.integer(n2),
	     a = single(n2),
	     w	= double(1),
	     pw = double(1),
	     ifault = integer(1), PACKAGE = "ctest")
    ## Nonzero ifault flags a failure in the C routine; code 7 is
    ## tolerated because it occurs harmlessly on some platforms.
    if (sw$ifault && sw$ifault != 7)# 7 *does* happen (Intel Linux)
	stop(paste("ifault=",sw$ifault,". This should not happen"))
    RVAL <- list(statistic = c(W = sw$w),
		 p.value = sw$pw,
		 method = "Shapiro-Wilk normality test",
		 data.name = DNAME)
    class(RVAL) <- "htest"
    return(RVAL)
}
t.test <- function(x, y=NULL, alternative = c("two.sided", "less", "greater"),
		   mu=0, paired = FALSE, var.equal = FALSE, conf.level = 0.95)
{
    ## Student's t test: one-sample (y NULL), paired, or two-sample
    ## (pooled variance if var.equal, otherwise Welch approximation).
    ## mu is the null value of the mean (or mean difference).
    alternative <- match.arg(alternative)

    if(!missing(mu) && (length(mu) != 1 || is.na(mu)))
        stop("mu must be a single number")
    if(!missing(conf.level) &&
       (length(conf.level) != 1 || !is.finite(conf.level) ||
        conf.level < 0 || conf.level > 1))
        stop("conf.level must be a single number between 0 and 1")
    if( !is.null(y) ) {
	dname <- paste(deparse(substitute(x)),"and",
		       deparse(substitute(y)))
	if(paired)
	    xok <- yok <- complete.cases(x,y)
	else {
	    yok <- !is.na(y)
	    xok <- !is.na(x)
	}
	y <- y[yok]
    }
    else {
	dname <- deparse(substitute(x))
	if( paired ) stop("y is missing for paired test")
	xok <- !is.na(x)
	yok <- NULL
    }
    x <- x[xok]
    ## A paired test reduces to a one-sample test on the differences.
    if( paired ) {
	x <- x-y
	y <- NULL
    }
    nx <- length(x)
    if(nx <= 2) stop("not enough x observations")
    mx <- mean(x)
    vx <- var(x)
    estimate <- mx
    if(is.null(y)) {
	df <- length(x)-1
	stderr <- sqrt(vx/nx)
	tstat <- (mx-mu)/stderr
	method <- ifelse(paired,"Paired t-test","One Sample t-test")
	names(estimate) <- ifelse(paired,"mean of the differences","mean of x")
    } else {
	ny <- length(y)
	if(ny <= 2) stop("not enough y observations")
	my <- mean(y)
	vy <- var(y)
	method <- paste(if(!var.equal)"Welch", "Two Sample t-test")
	estimate <- c(mx,my)
	names(estimate) <- c("mean of x","mean of y")
	if(var.equal) {
            ## Pooled variance estimate with nx + ny - 2 df.
	    df <- nx+ny-2
	    v <- ((nx-1)*vx + (ny-1)*vy)/df
	    stderr <- sqrt(v*(1/nx+1/ny))
	    tstat <- (mx-my-mu)/stderr
	} else {
            ## Welch-Satterthwaite approximation to the df.
	    stderrx <- sqrt(vx/nx)
	    stderry <- sqrt(vy/ny)
	    stderr <- sqrt(stderrx^2 + stderry^2)
	    df <- stderr^4/(stderrx^4/(nx-1) + stderry^4/(ny-1))
	    tstat <- (mx - my - mu)/stderr
	}
    }
    ## Confidence bounds are built around the centered estimate
    ## tstat * stderr and shifted back by mu at the end; one-sided
    ## intervals leave the untested side as NA.
    if (alternative == "less") {
	pval <- pt(tstat, df)
	cint <- c(NA, tstat * stderr + qt(conf.level, df) * stderr)
    }
    else if (alternative == "greater") {
	pval <- 1 - pt(tstat, df)
	cint <- c(tstat * stderr - qt(conf.level, df) * stderr, NA)
    }
    else {
	pval <- 2 * pt(-abs(tstat), df)
	alpha <- 1 - conf.level
	cint <- c(tstat * stderr - qt((1 - alpha/2), df) * stderr,
		  tstat * stderr + qt((1 - alpha/2), df) * stderr)
    }
    cint <- cint+mu
    names(tstat) <- "t"
    names(df) <- "df"
    names(mu) <- if(paired || !is.null(y)) "difference in means" else "mean"
    attr(cint,"conf.level") <- conf.level
    rval <- list(statistic = tstat, parameter = df, p.value = pval,
	       conf.int=cint, estimate=estimate, null.value = mu,
	       alternative=alternative,
	       method=method, data.name=dname)
    class(rval) <- "htest"
    return(rval)
}
var.test <-
function(x, y, ratio = 1, alternative = c("two.sided", "less", "greater"),
         conf.level = 0.95)
{
    ## F test comparing the variances of two normal samples (or the
    ## residual variances of two fitted "lm" objects) against a
    ## hypothesized ratio.
    if (!((length(ratio) == 1) && is.finite(ratio) && (ratio > 0)))
        stop("ratio must be a single positive number")

    alternative <- match.arg(alternative)

    if (!((length(conf.level) == 1) && is.finite(conf.level) &&
          (conf.level > 0) && (conf.level < 1)))
        stop("conf.level must be a single number between 0 and 1")

    DNAME <- paste(deparse(substitute(x)), "and", deparse(substitute(y)))

    if (inherits(x, "lm") && inherits(y, "lm")) {
        ## Compare residual variances of two fitted linear models.
        DF.x <- x$df.resid
        DF.y <- y$df.resid
        V.x <- sum(x$residuals^2) / DF.x
        V.y <- sum(y$residuals^2) / DF.y
    }
    else {
        x <- x[is.finite(x)]
        DF.x <- length(x) - 1
        if (DF.x < 1)
            stop("not enough x observations")
        y <- y[is.finite(y)]
        DF.y <- length(y) - 1
        if (DF.y < 1)
            stop("not enough y observations")
        V.x <- var(x)
        V.y <- var(y)
    }
    ESTIMATE <- V.x / V.y
    STATISTIC <- ESTIMATE / ratio
    PARAMETER <- c(DF.x, DF.y)

    ## Tail handling and the confidence interval both depend on the
    ## alternative; start from the lower-tail probability.
    lower.tail <- pf(STATISTIC, DF.x, DF.y)
    if (alternative == "two.sided") {
        PVAL <- 2 * min(lower.tail, 1 - lower.tail)
        BETA <- (1 - conf.level) / 2
        CINT <- ESTIMATE / c(qf(1 - BETA, DF.x, DF.y),
                             qf(BETA, DF.x, DF.y))
    }
    else if (alternative == "greater") {
        PVAL <- 1 - lower.tail
        CINT <- c(ESTIMATE / qf(conf.level, DF.x, DF.y), Inf)
    }
    else {
        PVAL <- lower.tail
        CINT <- c(0, ESTIMATE / qf(1 - conf.level, DF.x, DF.y))
    }
    names(STATISTIC) <- "F"
    names(PARAMETER) <- c("num df", "denom df")
    names(ESTIMATE) <- names(ratio) <- "ratio of variances"
    attr(CINT, "conf.level") <- conf.level

    structure(list(statistic = STATISTIC,
                   parameter = PARAMETER,
                   p.value = PVAL,
                   conf.int = CINT,
                   estimate = ESTIMATE,
                   null.value = ratio,
                   alternative = alternative,
                   method = "F test to compare two variances",
                   data.name = DNAME),
              class = "htest")
}
wilcox.test <-
function(x, y = NULL, alternative = c("two.sided", "less", "greater"), 
         mu = 0, paired = FALSE, exact = NULL, correct = TRUE,
         conf.int = FALSE, conf.level = 0.95) 
{
    alternative <- match.arg(alternative)
    if(!missing(mu) && ((length(mu) > 1) || !is.finite(mu))) 
        stop("mu must be a single number")
    if(conf.int) {
        if(!((length(conf.level) == 1)
             && is.finite(conf.level)
             && (conf.level > 0)
             && (conf.level < 1)))
            stop("conf.level must be a single number between 0 and 1")
    }

    if(!is.null(y)) {
        DNAME <- paste(deparse(substitute(x)), "and",
                       deparse(substitute(y)))
        if(paired) {
            if(length(x) != length(y)) 
                stop("x and y must have the same length")
            OK <- complete.cases(x, y)
            x <- x[OK] - y[OK]
            y <- NULL
        }
        else {
            x <- x[is.finite(x)]
            y <- y[is.finite(y)]
        }
    } else {
        DNAME <- deparse(substitute(x))
        if(paired) 
            stop("y missing for paired test")
        x <- x[is.finite(x)]
    }

    if(length(x) < 1) 
        stop("not enough (finite) x observations")
    CORRECTION <- 0                     # continuity correction (0 unless correct)
    if(is.null(y)) {
        ## ----- One-sample (or paired) Wilcoxon signed rank test -----
        METHOD <- "Wilcoxon signed rank test"
        x <- x - mu                     # shift so H0 becomes location == 0
        ZEROES <- any(x == 0)
        if(ZEROES) 
            x <- x[x != 0]              # zeroes carry no sign information
        n <- length(x)
        if(is.null(exact)) 
            exact <- (n < 50)           # exact distribution by default for small n
        r <- rank(abs(x))
        ## V = sum of the ranks of |x| belonging to positive differences.
        STATISTIC <- sum(r[x > 0])
        names(STATISTIC) <- "V"
        TIES <- (length(r) != length(unique(r)))

        if(exact && !TIES && !ZEROES) {
            ## Exact p-value from the signed rank distribution.
            PVAL <-
                switch(alternative,
                       "two.sided" = {
                           ## Double the smaller tail, capped at 1.
                           if(STATISTIC > (n * (n + 1) / 4)) 
                               p <- 1 - psignrank(STATISTIC - 1, n)
                           else p <- psignrank(STATISTIC, n)
                           min(2 * p, 1)
                       },
                       "greater" = 1 - psignrank(STATISTIC - 1, n),
                       "less" = psignrank(STATISTIC, n))
            if(conf.int) {
                ## Exact confidence interval for the median in the
                ## one-sample case.  When used with paired values this
                ## gives a confidence interval for mean(x) - mean(y).
                x <- x + mu             # we want a conf.int for the median
                alpha <- 1 - conf.level
                ## Walsh averages (x[i] + x[j]) / 2 for i <= j; the CI
                ## endpoints are order statistics of these.
                diffs <- outer(x, x, "+")
                diffs <- sort(diffs[!lower.tri(diffs)]) / 2
                cint <-
                    switch(alternative,
                           "two.sided" = {
                               qu <- qsignrank(alpha / 2, n)
                               if(qu == 0) qu <- 1
                               ql <- n*(n+1)/2 - qu
                               uci <- diffs[qu]
                               lci <- diffs[ql+1]
                               c(uci, lci)        
                           },
                           "greater"= {
                               qu <- qsignrank(alpha, n)
                               if(qu == 0) qu <- 1
                               uci <- diffs[qu]
                               c(uci, NA)
                           },
                           "less"= {
                               qu <- qsignrank(alpha, n)
                               if(qu == 0) qu <- 1
                               ql <- n*(n+1)/2 - qu
                               lci <- diffs[ql+1]
                               c(NA, lci)        
                           })
                attr(cint, "conf.level") <- conf.level    
            }
        } else {
            ## Normal approximation (used with ties, zeroes, or large n).
            NTIES <- table(r)
            z <- STATISTIC - n * (n + 1)/4          # centered statistic
            ## Standard deviation with the usual tie correction term.
            SIGMA <- sqrt(n * (n + 1) * (2 * n + 1) / 24
                          - sum(NTIES^3 - NTIES) / 48)
            if(correct) {
                CORRECTION <-
                    switch(alternative,
                           "two.sided" = sign(z) * 0.5,
                           "greater" = 0.5,
                           "less" = -0.5)
                METHOD <- paste(METHOD, "with continuity correction")
            }

            PVAL <- pnorm((z - CORRECTION) / SIGMA)
            if(alternative == "two.sided") 
                PVAL <- 2 * min(PVAL, 1 - PVAL)
            if(alternative == "greater") 
                PVAL <- 1 - PVAL

            if(conf.int) {
                ## Asymptotic confidence interval for the median in the
                ## one-sample case.  When used with paired values this
                ## gives a confidence interval for mean(x) - mean(y).
                ## Algorithm not published, thus better documented here.
                x <- x + mu
                alpha <- 1 - conf.level
                ## These are sample based limits for the median
                mumin <- min(x)
                mumax <- max(x)
                ## wdiff(d, zq) returns the absolute difference between
                ## the asymptotic (normal approximation) signed rank
                ## statistic of x - d and the quantile zq.  The CI
                ## limits are the shifts d where this difference is 0.
                ## Reads x, correct, alternative from the enclosing scope.
                wdiff <- function(d, zq) {
                    xd <- x  - d
                    xd <- xd[xd != 0]   # drop zeroes, as in the test itself
                    nx <- length(xd)
                    dr <- rank(abs(xd))
                    zd <- sum(dr[xd > 0])
                    NTIES.CI <- table(dr)
                    zd <- zd - nx * (nx + 1)/4      # centered statistic
                    ## Standard deviation with tie correction.
                    SIGMA.CI <- sqrt(nx * (nx + 1) * (2 * nx + 1) / 24
                                     - sum(NTIES.CI^3 -  NTIES.CI) / 48)
                    ## Continuity correction: 0 unless correct = TRUE.
                    ## (BUGFIX: CORRECTION.CI was previously left
                    ## undefined when correct was FALSE, producing an
                    ## "object not found" error below; also use the sign
                    ## of the shifted statistic zd, not of the global z,
                    ## as upstream R does.)
                    CORRECTION.CI <- 0
                    if(correct) {
                        CORRECTION.CI <-
                            switch(alternative,
                                   "two.sided" = sign(zd) * 0.5,
                                   "greater" = 0.5,
                                   "less" = -0.5)
                    }
                    zd <- (zd - CORRECTION.CI) / SIGMA.CI
                    abs(zd - zq)
                }
                ## Here we optimize the function wdiff in d over the set
                ## c(mumin, mumax).
                ##
                ## This returns a value from c(mumin, mumax) for which
                ## the asymptotic Wilcoxon statistic is equal to the
                ## quantile zq.  This means that the statistic is not
                ## within the critical region, and that implies that d
                ## is a confidence limit for the median.
                ##
                ## As in the exact case, interchange quantiles.
                cint <- switch(alternative, "two.sided" = {
                    u <- optimize(wdiff, c(mumin, mumax), tol=1e-4,
                                  zq=qnorm(1-alpha/2))$minimum
                    l <- optimize(wdiff, c(mumin, mumax), tol=1e-4,
                                  zq=qnorm(alpha/2))$minimum
                    c(u, l)
                }, "greater"= {
                    u <- optimize(wdiff, c(mumin, mumax), tol=1e-4,
                                  zq=qnorm(1-alpha))$minimum
                    c(u, NA)
                }, "less"= {
                    l <- optimize(wdiff, c(mumin, mumax), tol=1e-4,
                                  zq=qnorm(alpha))$minimum
                    c(NA, l)
                })
                attr(cint, "conf.level") <- conf.level    
            }

            ## Warn when the exact method was requested/implied but the
            ## normal approximation had to be used instead.
            if(exact && TIES) {
                warning("Cannot compute exact p-value with ties")
                if(conf.int)
                    warning(paste("Cannot compute exact confidence",
                                  "interval with ties"))
            }
            if(exact && ZEROES) {
                warning("Cannot compute exact p-value with zeroes")
                if(conf.int)
                    warning(paste("Cannot compute exact confidence",
                                  "interval with zeroes"))
            }
            
	}
    }
    else {
        ## ----- Two-sample Wilcoxon rank sum (Mann-Whitney) test -----
        if(length(y) < 1) 
            stop("not enough y observations")
        METHOD <- "Wilcoxon rank sum test"
        r <- rank(c(x - mu, y))
        n.x <- length(x)
        n.y <- length(y)
        if(is.null(exact)) 
            exact <- (n.x < 50) && (n.y < 50)
        ## W = rank sum of x minus its minimum possible value
        ## (the Mann-Whitney form of the statistic).
        STATISTIC <- sum(r[seq(along = x)]) - n.x * (n.x + 1) / 2
        names(STATISTIC) <- "W"
        TIES <- (length(r) != length(unique(r)))
        if(exact && !TIES) {
            ## Exact p-value from the Wilcoxon rank sum distribution.
            PVAL <-
                switch(alternative,
                       "two.sided" = {
                           ## Double the smaller tail, capped at 1.
                           if(STATISTIC > (n.x * n.y / 2)) 
                               p <- 1 - pwilcox(STATISTIC - 1, n.x, n.y)
                           else
                               p <- pwilcox(STATISTIC, n.x, n.y)
                           min(2 * p, 1)
                       },
                       "greater" = 1 - pwilcox(STATISTIC - 1, n.x, n.y), 
                       "less" = pwilcox(STATISTIC, n.x, n.y))
            if(conf.int) {
                ## Exact confidence interval for the location parameter 
                ## mean(y) - mean(x) in the two-sample case (cf. the
                ## one-sample case).
                alpha <- 1 - conf.level
                ## All pairwise differences y[j] - x[i]; the CI endpoints
                ## are order statistics of these.
                diffs <- sort(outer(y, x, "-"))
                cint <-
                    switch(alternative,
                           "two.sided" = {
                               qu <- qwilcox(alpha/2, n.x, n.y)
                               if(qu == 0) qu <- 1
                               ql <- n.x*n.y - qu
                               uci <- diffs[qu]
                               lci <- diffs[ql + 1]
                               c(uci, lci)
                           },
                           "greater"= {
                               qu <- qwilcox(alpha, n.x, n.y)
                               if(qu == 0) qu <- 1
                               uci <- diffs[qu]
                               c(uci, NA)
                           },
                           "less"= {
                               qu <- qwilcox(alpha, n.x, n.y)
                               if(qu == 0 ) qu <- 1
                               ql <- n.x*n.y - qu
                               lci <- diffs[ql + 1]
                               c(NA, lci)
                           })
                attr(cint, "conf.level") <- conf.level    
            }
        }
        else {
            ## Normal approximation (used with ties or large samples).
            NTIES <- table(r)
            z <- STATISTIC - n.x * n.y / 2          # centered statistic
            ## Standard deviation with the usual tie correction term.
            SIGMA <- sqrt((n.x * n.y / 12) *
                          ((n.x + n.y + 1)
                           - sum(NTIES^3 - NTIES)
                           / ((n.x + n.y) * (n.x + n.y - 1))))
            if(correct) {
                CORRECTION <- switch(alternative,
                                     "two.sided" = sign(z) * 0.5,
                                     "greater" = 0.5,
                                     "less" = -0.5)
                METHOD <- paste(METHOD, "with continuity correction")
            }
            PVAL <- pnorm((z - CORRECTION)/SIGMA)
            if(alternative == "two.sided") 
                PVAL <- 2 * min(PVAL, 1 - PVAL)
            if(alternative == "greater") 
                PVAL <- 1 - PVAL

            if(conf.int) {
                ## Asymptotic confidence interval for the location
                ## parameter mean(y) - mean(x) in the two-sample case
                ## (cf. one-sample case).
                ##
                ## Algorithm not published, for a documentation see the
                ## one sample case.
                alpha <- 1 - conf.level
                ## Sample based limits for the shift y - x.
                mumin <- min(y) - max(x)
                mumax <- max(y) - min(x)
                ## wdiff(d, zq) returns the absolute difference between
                ## the asymptotic (normal approximation) rank sum
                ## statistic of x and y - d and the quantile zq.  The CI
                ## limits are the shifts d where this difference is 0.
                ## Reads x, y, mu, n.x, n.y, correct, alternative from
                ## the enclosing scope.
                wdiff <- function(d, zq) {
                    dr <- rank(c(x - mu, y - d)) 
                    NTIES.CI <- table(dr)
                    ## Centered Mann-Whitney statistic (cf. z above).
                    dz <- (sum(dr[seq(along = x)])
                           - n.x * (n.x + 1) / 2 - n.x * n.y / 2)
                    ## Continuity correction: 0 unless correct = TRUE.
                    ## (BUGFIX: CORRECTION.CI was previously left
                    ## undefined when correct was FALSE, producing an
                    ## "object not found" error below.)
                    CORRECTION.CI <- 0
                    if(correct) {
                        CORRECTION.CI <-
                            switch(alternative,
                                   "two.sided" = sign(dz) * 0.5,
                                   "greater" = 0.5,
                                   "less" = -0.5)        
                    }
                    ## Standard deviation with tie correction.
                    SIGMA.CI <- sqrt((n.x * n.y / 12) *
                                     ((n.x + n.y + 1)
                                      - sum(NTIES.CI^3 - NTIES.CI)
                                      / ((n.x + n.y) * (n.x + n.y - 1))))
                    dz <- (dz - CORRECTION.CI) / SIGMA.CI
                    abs(dz - zq)
                }
                ## As in the one-sample case, find the shifts d where the
                ## asymptotic statistic equals the relevant normal
                ## quantile; quantiles are interchanged as in the exact
                ## case.
                cint <- switch(alternative, "two.sided" = {
                    u <- optimize(wdiff, c(mumin, mumax), tol=1e-4,
                                  zq=qnorm(alpha/2))$minimum
                    l <- optimize(wdiff, c(mumin, mumax), tol=1e-4,
                                  zq=qnorm(1 - alpha/2))$minimum
                    c(u, l)
                }, "greater"= {
                    u <- optimize(wdiff, c(mumin, mumax), tol=1e-4,
                                  zq=qnorm(alpha))$minimum
                    c(u, NA)
                }, "less"= {
                    l <- optimize(wdiff, c(mumin, mumax), tol=1e-4,
                                  zq=qnorm(1 - alpha))$minimum
                    c(NA, l)
                })
                attr(cint, "conf.level") <- conf.level    
            }

            ## Warn when the exact method was requested/implied but the
            ## normal approximation had to be used instead.
            if(exact && TIES) {
                warning("Cannot compute exact p-value with ties")
                if(conf.int)
                    warning(paste("Cannot compute exact confidence",
                                  "intervals with ties"))
            }
        }
    }

    ## Assemble the standard "htest" result object.
    RVAL <- list(statistic = STATISTIC,
                 parameter = NULL,
                 p.value = PVAL, 
                 null.value = c(mu = mu),
                 alternative = alternative,
                 method = METHOD, 
                 data.name = DNAME)
    if(conf.int)
        RVAL$conf.int <- cint
    class(RVAL) <- "htest"
    return(RVAL)
}
.First.lib <- function(lib, pkg)
{
    ## Load the package's compiled code ("ctest") when it is attached.
    library.dynam("ctest", pkg, lib)
}
