"acf" <-
function (x, lag.max = NULL, plot = FALSE, type = c("correlation", 
    "covariance", "partial")) 
{
    # Estimate the autocovariance, autocorrelation or partial
    # autocorrelation function of a (possibly multivariate) series.
    #
    # x       : univariate or multivariate time series (coerced to matrix).
    # lag.max : maximum lag; defaults to 10*(log10(n) - log10(nser)).
    # plot    : if TRUE, plot via plot.acf instead of returning.
    # type    : which function to estimate (match.arg'd).
    #
    # Returns an object of class "acf": list(acf, type, n.used, lag,
    # series, snames).  acf is a (lag x nser x nser) array.
    type <- match.arg(type)
    series <- deparse(substitute(x))
    x.freq <- frequency(as.ts(x))
    x <- as.matrix(x)
    sampleT <- nrow(x)
    nser <- ncol(x)
    if (is.null(lag.max)) 
        lag.max <- floor(10 * (log10(sampleT) - log10(nser)))
    lag.max <- min(lag.max, sampleT - 1)
    # Remove the column means before computing (cross-)covariances.
    x <- sweep(x, 2, apply(x, 2, mean))
    # Sign matrix: below-diagonal panels get negative lags.
    lag <- matrix(1, nser, nser)
    lag[lower.tri(lag)] <- -1
    if (type == "partial") {
        # BUG FIX: the original called ar(xb, ...), but "xb" is not
        # defined anywhere in this function -- the argument is "x".
        acf <- ar(x, order.max = lag.max)$partialacf
        lag <- outer(1:lag.max, lag/x.freq)
    }
    else {
        acf <- array(NA, c(lag.max + 1, nser, nser))
        # Zero-pad to a highly composite length so the FFT-based
        # circular convolution equals the linear cross-covariance
        # for lags 0..lag.max.
        xp <- rbind(x, matrix(0, ncol = nser, nrow = nextn(sampleT + 
            lag.max) - sampleT))
        for (i in 1:nser) for (j in 1:nser) {
            acf[, i, j] <- convolve(xp[, i], xp[, j])[1:(lag.max + 
                1)]/sampleT
        }
        if (type == "correlation") {
            # Normalise by the lag-0 (co)variances.
            var0 <- diag(acf[1, , ], nrow = nser)
            acf0 <- sqrt(var0 %*% t(var0))
            acf <- sweep(acf, c(2, 3), acf0, "/")
        }
        lag <- outer(0:lag.max, lag/x.freq)
    }
    acf.out <- structure(.Data = list(acf = acf, type = type, 
        n.used = sampleT, lag = lag, series = series, snames = colnames(x)), 
        class = "acf")
    if (plot) 
        plot.acf(acf.out)
    else return(acf.out)
}
"plot.acf" <-
function (x, ci = 0.95, type = "h", xlab = "Lag", ylab = NULL, 
    ylim = NULL, main = NULL, ...) 
{
    # Plot method for "acf" objects: one panel per (i, j) pair of
    # series, with an optional white-noise confidence band.
    # Graphical parameters changed here are restored on exit.
    opar <- NULL
    on.exit(par(opar))
    nser <- ncol(x$lag)
    opar <- c(opar, par(mfrow = rep(min(nser, 5), 2)))
    if (is.null(ylab)) 
        ylab <- switch(x$type, correlation = "ACF", covariance = "ACF", 
            partial = "Partial ACF")
    if (is.null(snames <- x$snames)) {
        snames <- if (nser == 1) 
            paste("Series", x$series)
        else paste("Series", 1:nser)
    }
    # No confidence band for covariances or when ci == 0.
    with.ci <- (ci != 0) && (x$type != "covariance")
    # The band is identical for every panel, so compute it once
    # (the original recomputed it inside the double loop).
    clim <- if (with.ci) 
        qnorm((1 + ci)/2)/sqrt(x$n.used)
    else c(0, 0)
    for (i in 1:nser) for (j in 1:nser) {
        # BUG FIX: the original assigned into the "ylim" argument on
        # the first pass, so every later panel inherited panel
        # (1,1)'s limits.  Use per-panel limits unless the caller
        # supplied ylim explicitly.
        ylim.panel <- if (is.null(ylim)) 
            c(min(c(x$acf[, i, j], -clim)), max(c(x$acf[, i, j], 
                clim)))
        else ylim
        plot(x$lag[, i, j], x$acf[, i, j], type = type, xlab = xlab, 
            ylab = ylab, ylim = ylim.panel, ...)
        if (with.ci) 
            abline(h = c(clim, -clim), col = "red", lty = 2)
        abline(h = 0)
        if (!is.null(main)) 
            title(main)
        else if (i == j) 
            title(snames[i])
        else title(paste(snames[i], "&", snames[j]))
    }
}
"ar" <-
function (x, aic = TRUE, order.max = NULL, method=c("yule-walker","burg"))
{
    # Fit an autoregressive model, dispatching on the requested
    # estimation method.  Only the Yule-Walker estimator is
    # implemented so far; "burg" is reserved.
    chosen <- match.arg(method)
    if (chosen == "burg") 
        stop("burg method for ar not yet implemented.")
    ar.yw(x, aic = aic, order.max = order.max)
}

"ar.yw" <-
function (x, aic = TRUE, order.max = NULL) 
{
    # Fit an AR model by solving the Yule-Walker equations with a
    # multivariate generalization of the Durbin-Levinson recursion
    # (Whittle's algorithm).
    #
    # x         : univariate or multivariate series (coerced to matrix,
    #             column means removed).
    # aic       : if TRUE, choose the order minimising the AIC over
    #             0..order.max; otherwise fit order.max exactly.
    # order.max : maximum order; defaults to floor(10*log10(n)).
    #
    # Returns a named list: order, ar (coefficient array), var.pred,
    # aic (relative to the minimum), n.used, order.max, partialacf,
    # resid, method, series.
    series <- deparse(substitute(x))
    x <- as.matrix(x)
    x <- sweep(x, 2, apply(x, 2, mean))
    n.used <- nrow(x)
    nser <- ncol(x)
    order.max <- if (is.null(order.max)) 
        floor(10 * log10(n.used))
    else floor(order.max)
    if (order.max < 1) 
        stop("order.max must be >= 1")
    xacf <- acf(x, type = "covariance", lag.max = order.max)$acf
    # A/B hold the forward/backward prediction coefficients; EA/EB the
    # corresponding prediction variance matrices, updated in place by
    # solve.yw at each order.
    A <- B <- array(0, dim = c(order.max + 1, nser, nser))
    A[1, , ] <- B[1, , ] <- diag(nser)
    EA <- EB <- xacf[1, , , drop = TRUE]
    partialacf <- array(dim = c(order.max, nser, nser))
    xaic <- numeric(order.max + 1)
    solve.yw <- function(m) {
        # Solve Yule-Walker equations with (a multivariate
        # generalization of) the Durbin Levinson algorithm
        # (Whittle's recursion).  Updates A, B, EA, EB in the
        # enclosing frame via <<-.
        betaA <- betaB <- 0
        for (i in 0:m) {
            betaA <- betaA + A[i + 1, , ] %*% xacf[m + 2 - i, 
                , ]
            betaB <- betaB + B[i + 1, , ] %*% t(xacf[m + 2 - 
                i, , ])
        }
        # KA/KB are the forward/backward reflection (partial
        # correlation) matrices for order m + 1.
        KA <- -t(qr.solve(t(EB), t(betaA)))
        KB <- -t(qr.solve(t(EA), t(betaB)))
        EB <<- (diag(nser) - KB %*% KA) %*% EB
        EA <<- (diag(nser) - KA %*% KB) %*% EA
        Aold <- A
        Bold <- B
        for (i in 1:(m + 1)) {
            A[i + 1, , ] <<- Aold[i + 1, , ] + KA %*% Bold[m + 
                2 - i, , ]
            B[i + 1, , ] <<- Bold[i + 1, , ] + KB %*% Aold[m + 
                2 - i, , ]
        }
    }
    cal.aic <- function() {
        # AIC = n*log(det(var.pred)) + 2 * (number of parameters),
        # using the current order m and var.pred from the loop below.
        det <- abs(prod(diag(qr(var.pred)$qr)))
        #S-PLUS compatible(ish) answer, but wrong surely?
        #return(n.used * log(det) +  m * nser * nser)
        return(n.used * log(det) + 2 * m * nser * nser)
    }
    cal.var.pred <- function() {
        # Prediction variance for the current order m, with an
        # S-PLUS-compatible degrees-of-freedom correction.
        vp <- 0
        for (i in 0:m) for (j in 0:m) {
            R <- xacf[abs(i - j) + 1, , ]
            if (j < i) 
                R <- t(R)
            vp <- vp + A[i + 1, , ] %*% R %*% t(A[j + 1, , ])
        }
        #Splus compatibility fix
        vp <- vp * n.used/(n.used - nser * (m + 1))
        return(vp)
    }
    cal.resid <- function() {
        # One-step-ahead residuals for the selected order; the first
        # "order" rows stay NA because no prediction is available.
        resid <- array(dim = c(n.used, nser))
        fitted <- array(0, dim = c(n.used - order, nser))
        if (order > 0) {
            A <- array(dim = c(nser, nser))
            for (i in 1:order) {
                A[, ] <- ar[i, , , drop = FALSE]
                fitted <- fitted + x[(order - i + 1):(n.used - 
                  i), , drop = FALSE] %*% t(A)
            }
        }
        resid[(order + 1):n.used, ] <- x[(order + 1):n.used, 
            , drop = FALSE] - fitted
        return(resid)
    }
    ar.list <- vector("list", order.max)
    for (m in 0:order.max) {
        var.pred <- cal.var.pred()
        xaic[m + 1] <- cal.aic()
        if (m < order.max) {
            solve.yw(m)
            partialacf[m + 1, , ] <- -A[m + 2, , ]
            ar.list[[m + 1]] <- -A[2:(m + 2), , , drop = FALSE]
        }
    }
    # Report AIC relative to the best order.
    xaic <- xaic - min(xaic)
    # Take the first minimiser so exact AIC ties pick the lowest order.
    order <- if (aic) 
        (0:order.max)[xaic == 0][1]
    else order.max
    ar <- if (order > 0) 
        ar.list[[order]]
    else array(0, dim = c(1, nser, nser))
    resid <- cal.resid()
    # BUG FIX: the original used the S-style multi-argument
    # return(order, ar, ...), which is an error in R.  Return the
    # equivalent named list instead (callers use e.g. $partialacf).
    return(list(order = order, ar = ar, var.pred = var.pred, 
        aic = xaic, n.used = n.used, order.max = order.max, 
        partialacf = partialacf, resid = resid, method = "Yule-Walker", 
        series = series))
}
"spectrum" <- 
function (x, method = c("pgram", "ar"), plot = TRUE, ...)
{
    # Front-end for the spectral estimation routines.  The AR
    # method (spec.ar) is not implemented yet, so only the
    # periodogram route is available.
    method <- match.arg(method)
    if (method == "ar") 
        stop("spec.ar not implemented yet")
    spec.pgram(x, plot = plot, ...)
}
"spec.pgram" <-
function (x, spans = 1, taper = 0.1, demean = FALSE, detrend = TRUE, 
    pad = 0, plot = FALSE) 
{
    # Estimate spectral density from smoothed periodogram.
    # (we could farm out padding and tapering to other functions)
    #
    # x       : univariate or multivariate series (coerced to matrix).
    # spans   : odd span(s) of the modified Daniell smoother(s).
    # taper   : proportion of data tapered at each end (split cosine
    #           bell), in [0, 0.5].
    # demean  : subtract column means (only when detrend = FALSE).
    # detrend : remove a linear trend (implies demeaning).
    # pad     : proportion of zero padding added before the FFT.
    # plot    : if TRUE, plot via plot.spec instead of returning.
    #
    # Returns an object of class "spec" (spec in dB; coh/phase for
    # multivariate input).
    series <- deparse(substitute(x))
    xfreq <- frequency(x)
    x <- as.matrix(x)
    N <- nrow(x)
    nser <- ncol(x)
    if (detrend) {
        # Least-squares linear detrend; the centred time index makes
        # the normal equations diagonal.  (Renamed from "t" to avoid
        # shadowing the transpose function.)
        tcent <- 1:N - (N + 1)/2
        sumt2 <- N * (N^2 - 1)/12
        for (i in 1:ncol(x)) {
            x[, i] <- x[, i] - mean(x[, i]) - sum(x[, i] * tcent) * tcent/sumt2
        }
    }
    else if (demean) {
        x <- sweep(x, 2, apply(x, 2, mean))
    }
    if (taper > 0.5 || taper < 0) 
        stop("taper must be between 0 and 0.5")
    else if (taper > 0) {
        # Split-cosine-bell taper on the first and last round(N*taper)
        # observations; the weight vector recycles down each column.
        w <- rep(1, N)
        n <- max(round(N * taper), 1)
        w[N:(N - n + 1)]  <- w[1:n] <- sin(((1:n - 0.5) * pi)/(2 * n))^2
        x <- x * w
    }
    if (pad > 0) {
        x <- rbind(x, matrix(0, nrow = N * pad, ncol = ncol(x)))
        N <- nrow(x)
    }
    # Zero-pad to a highly composite length for the FFT.
    NewN <- nextn(N)
    x <- rbind(x, matrix(0, nrow = (NewN - N), ncol = ncol(x)))
    N <- nrow(x)
    Nspec <- floor(N/2)
    freq <- seq(from = xfreq/N, by = xfreq/N, length = Nspec)
    # Drop the first (DC) and last FFT rows.
    xfft <- mvfft(x)[2:(N - 1), , drop = FALSE]
    pgram <- array(NA, dim = c(nrow(x) - 2, ncol(x), ncol(x)))
    for (i in 1:ncol(x)) {
        for (j in 1:ncol(x)) {
            # (Cross-)periodogram of series i and j.
            pgram[, i, j] <- xfft[, i] * Conj(xfft[, j])/(N*xfreq)
        }
    }
    # Build the smoothing kernel: one modified Daniell filter per
    # span, convolved together.
    filter.list <- vector("list", length(spans))
    for (i in 1:length(spans)) {
        m <- floor(spans[i]/2)
        spans[i] <- 2 * m + 1
        filter.list[[i]] <- if (m > 0) 
            c(0.5, rep(1, 2 * m - 1), 0.5)/(2 * m)
        else 1
    }
    filter <- filter.list[[1]]
    if (length(spans) > 1) 
        for (i in 2:length(spans)) filter <- convolve(filter.list[[i]], 
            filter, type="open")
    if (length(filter) > 1) {
        # Smooth each (cross-)periodogram by circular convolution
        # with the (wrapped) kernel.
        ndiff <- nrow(pgram) - length(filter)
        m <- floor(length(filter)/2)
        if (ndiff < 0) 
            stop("filter too long!")
        else for (i in 1:ncol(x)) for (j in 1:ncol(x)) {
            pgram[, i, j] <- convolve(pgram[, i, j], c(filter[(m + 
                1):(2 * m + 1)], rep(0, ndiff), filter[1:m]))
        }
    }
    # Equivalent degrees of freedom and bandwidth of the smoother.
    df <- 2/sum(filter^2)
    m <- floor(length(filter)/2)
    bandwidth <- sqrt(sum((1/12 + (0:(2 * m) - m)^2) * filter)) * xfreq/nrow(x)
    spec <- matrix(NA, nrow = Nspec, ncol = nser)
    for (i in 1:nser) spec[, i] <- Re(pgram[1:Nspec, i, i])
    if (nser == 1) {
        coh <- phase <- NULL
        spec <- drop(spec)
    }
    else {
        coh <- phase <- matrix(NA, nrow = Nspec, ncol = nser * 
            (nser - 1)/2)
        for (i in 1:(nser - 1)) {
            for (j in (i + 1):nser) {
                # BUG FIX: squared coherency divides by the product
                # of both spectra, spec[, i] * spec[, j]; the
                # original used spec[j], a single matrix element.
                coh[, i + (j - 1) * (j - 2)/2] <- Mod(pgram[1:Nspec, 
                  i, j])^2/(spec[, i] * spec[, j])
                # Unwrap the phase so successive jumps stay below pi.
                ph <- Arg(pgram[1:Nspec, i, j])
                dph <- diff(ph)
                ph <- c(0, sign(dph) * (abs(dph)%%pi)) + ph[1]
                phase[, i + (j - 1) * (j - 2)/2] <- ph
            }
        }
    }
    # Report the spectra in decibels.
    spec <- 10 * log10(spec)
    spg.out <- list(freq = freq, spec = spec, coh = coh, phase = phase, 
        spans = spans, filter = filter, df = df, bandwidth = bandwidth, 
        n.used = nrow(x), series = series, method = ifelse(length(filter) > 
            1, "Smoothed Periodogram", "Raw Periodogram"), taper = taper, 
        pad = pad, detrend = detrend, demean = demean)
    class(spg.out) <- "spec"
    if(plot)
	plot.spec(spg.out)
    else return(spg.out)
}
"plot.spec" <-
function (x, add = FALSE, ci = 0.95, xlab = "frequency", 
    ylab = "spectrum (dB)", type = "l", main = NULL, sub = NULL, ...) 
{
    # Plot method for "spec" objects: spectra (in dB) against
    # frequency, with an error bar showing the confidence interval
    # and the smoother bandwidth.  Returns x invisibly.
    matplot(x$freq, x$spec, xlab = xlab, ylab = ylab, type = type, 
        add = add, ...)
    if (ci == 0 || add) {
        #No confidence limits
        ci.text <- ""
    }
    else {
        # The position of the error bar has no meaning: only the width
        # and height. It is positioned in the top right hand corner.
        #
        conf.lim <- spec.ci(x, coverage = ci)
        conf.y <- max(x$spec) - conf.lim[2]
        conf.x <- max(x$freq) - x$bandwidth
        lines(rep(conf.x, 2), conf.y + conf.lim)
        lines(conf.x + c(-0.5, 0.5) * x$bandwidth, rep(conf.y, 
            2))
        # BUG FIX: the label hard-coded "95%" even when the caller
        # requested a different coverage; derive it from ci (output
        # is unchanged at the default ci = 0.95).
        ci.text <- paste(paste0(round(100 * ci), "% C.I. is ("), 
            paste(format(conf.lim, digits = 3), collapse = ","), 
            ")dB")
    }
    if (is.null(main)) 
        main <- paste(paste("Series:", x$series), x$method, sep = "\n")
    if (is.null(sub)) 
        sub <- paste("bandwidth=", format(x$bandwidth, digits = 3), 
            ci.text)
    title(main = main, sub = sub)
    invisible(x)
}
"spec.ci" <-
function (spec.obj, coverage = 0.95) 
{
    # A utility function for plot.spec which calculates the confidence
    # interval (centred around zero, in dB). We use a conditional
    # argument to ensure that the ci always contains zero.
    #
    # spec.obj : a "spec" object (only $df, the equivalent degrees of
    #            freedom, is used).
    # coverage : coverage probability in [0, 1).
    #
    # Returns c(lower, upper) offsets in dB, with lower < 0 < upper.
    if (coverage < 0 || coverage >= 1)
        stop("coverage probability out of range [0,1)")
    df <- spec.obj$df
    # (The original allocated an unused "limits" vector here.)
    upper.quantile <- 1 - (1 - coverage) * (1 - pchisq(df, df))
    lower.quantile <- (1 - coverage) * pchisq(df, df)
    -10 * log10(qchisq(c(upper.quantile, lower.quantile), df)/df)
}
