"COR"<-
function(dat)
{
	m <- ncol(dat)
	hold <- c(1, 1)
	temp <- matrix(NA, nrow = m, ncol = m)
	temp2 <- matrix(NA, nrow = m, ncol = m)
	ntemp <- matrix(0, nrow = m, ncol = m)
	hold <- NULL
	hold.stat <- stats(dat)
	for(k in 1:m) {
		for(j in 1:k) {
			hold <- pair.na(dat[, j], dat[, k])
			n <- nrow(hold)
			ntemp[j, k] <- ntemp[k, j] <- n
			if(nrow(hold) > 0) {
				cv <- cor(hold[, 1], hold[, 2])
				temp[j, k] <- temp[k, j] <- cv
			}
		}
		cat(k, " ", fill = T)
	}
	list(cor = temp, N = ntemp, sd = c(hold.stat[3,  ]), mean = c(hold.stat[
		2,  ]))
}
"EOF.cov"<-
function(x1, x2, marginal = F, obj)
{
	symetric <- F | marginal
	if(missing(x2)) {
		symetric <- T
		x2 <- x1
	}
	M <- obj$M
	PHI1 <- matrix(0, ncol = M, nrow(x1))
	PHI2 <- matrix(0, ncol = M, nrow(x2))
	for(k in 1:M) {
		PHI1[, k] <- interp.surface(obj$fits[[k]], x1)
		if(!symetric) PHI2[, k] <- interp.surface(obj$fits[[k]], x2)	
	#		cat(k, " ")
	}
	if(marginal) {
#	print(PHI1)
		temp1 <- (t(sqrt(obj$delta[1:M]) * t(PHI1))^2) %*% rep(1, M)
		return(temp1)
	}
	if(!symetric) {
		temp2 <- PHI1 %*% (obj$delta[1:M] * t(PHI2))
		return(temp2)
	}
	else {
		temp2 <- PHI1 %*% ((obj$delta[1:M]) * t(PHI1))
		return(temp2)
	}
}
"EXP.cov"<-
function(x1, x2, theta = rep(1, ncol(x1)), p = 1, C = NA)
{
	if(!is.loaded(symbol.For("expbs"))) {
          stop("Compiled code has not been dynamically loaded")
          #temp <- dyn.load(paste(FUNFITS.BIN, "TPS.o", sep = "/"), 2)
          #temp2 <- dyn.load(paste(FUNFITS.BIN, "EXP.o", sep = "/"), 2)
	}
	if(!is.matrix(x1))
		x1 <- as.matrix(x1)
	if(missing(x2))
		x2 <- x1
	if(!is.matrix(x2))
		x2 <- as.matrix(x2)	#
	if(length(theta) == 1)
		theta <- rep(theta, ncol(x1))
	d <- ncol(x1)
	n1 <- nrow(x1)
	n2 <- nrow(x2)	# scale both X's to reflect theta parameter
	x1 <- t(t(x1)/theta)
	x2 <- t(t(x2)/theta)	#	make.gaspb(x1, x2, p)
	par <- p
	if(is.na(C[1])) {
#
#
# return the full covariance matrix
		exp( - rdist(x1, x2)^p)
	}
	else {
###
##
# return the covaraince matrix multiplied by the vector C
#
		.Fortran("multeb",
			nd = as.integer(d),
			x1 = as.double(x1),
			n1 = as.integer(n1),
			x2 = as.double(x2),
			n2 = as.integer(n2),
			par = as.double(par),
			c = as.double(C),
			h = as.double(rep(0, n1)),
			work = as.double(rep(0, n2)))$h
	}
}
"FEXP"<-
function(x1, x2, C, theta = rep(1, ncol(x1)), load = T)
{
	if(!is.loaded(symbol.For("m2deb")) | load) {
          stop("Compiled code has not been dynamically loaded")
          #temp2 <- dyn.load(paste(FUNFITS.BIN.NEW, "FEXP.o", sep = "/"), 
          #                  2)
	}
	if(length(theta) == 1)
		theta <- rep(theta, ncol(x1))
	d <- ncol(x1)
	n1 <- nrow(x1)
	n2 <- nrow(x2)	# scale both X's to reflect theta parameter
	x1 <- t(t(x1/theta))
	x2 <- t(t(x2/theta))
	par <- rep(1, d)
	if(!is.matrix(C)) {
		C <- matrix(C, ncol = 1)
	}
	nc <- ncol(C)
	if(nrow(C) != n2) stop("number of rows in C does not match\nnumber of locations in x2"
			)	#8
# return the covaraince matrix multiplied by the vector C
#
	matrix(.Fortran("m2deb",
		nd = as.integer(d),
		x1 = as.double(x1),
		n1 = as.integer(n1),
		x2 = as.double(x2),
		n2 = as.integer(n2),
		par = as.double(par),
		c = as.double(C),
		nc = as.integer(nc),
		h = as.double(rep(0, n1 * nc)),
		work = as.double(rep(0, nc)))$h, ncol = nc)
}
"FUNFITS.version"<-function()
  {
    cat("Funfits version 2.0  Copyright 1995, 1996 \n") 
    cat("Douglas Nychka, Perry Haaland, Michael OConnell,\n")
    cat("Barbara Bailey and Stephen Ellner", "Thu Feb  6 14:36:44 EST 1997\n")
    cat("Ported to R: Albrecht Gebhardt, Sun Mar 15 15:55:54 CET 1998\n")
}
"GaSP.cov"<-
function(x1, x2, theta = rep(1, ncol(x1)), p = rep(1, ncol(x1)), C = NA)
{
        if(!is.loaded(symbol.For("gaspbs"))) {
                stop("Compiled code has not been dynamically loaded")
                #temp <- dyn.load(paste(FUNFITS.BIN, "TPS.o", sep = "/"), 2)
                #temp2 <- dyn.load(paste(FUNFITS.BIN, "GASP.o", sep = "/"), 2)
        }
        if(!is.matrix(x1))
                x1 <- as.matrix(x1)
        if(missing(x2))
                x2 <- x1
        if(!is.matrix(x2))
                x2 <- as.matrix(x2)     #
        d <- ncol(x1)
        n1 <- nrow(x1)
        n2 <- nrow(x2)  # scale both X's to reflect theta parameter
        x1 <- t(t(x1)/theta)
        x2 <- t(t(x2)/theta)    #       make.gaspb(x1, x2, p)
        par <- p
        if(is.na(C[1])) {
#
#
# return the full covariance matrix
                temp <- .Fortran("gaspbs",
                        nd = as.integer(d),
                        x1 = as.double(x1),
                        n1 = as.integer(n1),
                        x2 = as.double(x2),
                        n2 = as.integer(n2),
                        par = as.double(par),
                        k = as.double(rep(0, n1 * n2)))
                matrix(temp$k, ncol = n2, nrow = n1)
        }
        else {
###
##
# return the covaraince matrix multiplied by the vector C
#
                .Fortran("multgb",
                        nd = as.integer(d),
                        x1 = as.double(x1),
                        n1 = as.integer(n1),
                        x2 = as.double(x2),
                        n2 = as.integer(n2),
                        par = as.double(par),
                        c = as.double(C),
                        h = as.double(rep(0, n1)),
                        work = as.double(rep(0, n2)))$h
        }
}

"INTERP"<-
function(x, y, z, ...)
{
        ind <- !(dup(x) & dup(y))
        x <- x[ind]
        y <- y[ind]
        z <- z[ind]
        interp(x, y, z, ...)
}

"add.netfit.model"<-
function(fita, fitb)
{
	if(fita$d != fitb$d)
		stop(" mismatch in dimensions of two models")
	d <- fita$d
	ka <- fita$k
	kb <- fitb$k
	temp.a <- summary.netfit(fita, F)
	temp.b <- summary.netfit(fitb, F)
	temp <- list()
	class(temp) <- "netfit"
	temp$np <- fita$np + fitb$np - 1
	temp$d <- d
	temp$k <- ka + kb
	theta <- c(temp.a$beta[1] + temp.b$beta[1], temp.a$beta[2:(ka + 1)], 
		temp.b$beta[2:(kb + 1)], temp.a$mu, temp.b$mu, c(t(rbind(temp.a$
		gamma, temp.b$gamma))))
	temp$theta <- theta
	temp$xm <- rep(0, d)
	temp$xsd <- rep(1, d)
	temp$ym <- 0
	temp$ysd <- 1
	temp
}
"addreg"<-
function(x, y, lam, nback = 20, tol = 1.0e-05, start, cost = 1)
{
#	if(!is.loaded(symbol.For("addreg"))) {
#		temp <- dyn.load("addreg.sub.o", 2)
#		cat("addreg  FORTRAN subroutines dynamically loaded:", temp, 
#			fill = T)
#	}
	if(!is.loaded(symbol.For("css"))) {
          stop("Compiled code has not been dynamically loaded")
	}
	if(!is.loaded(symbol.For("addreg"))) {
          stop("Compiled code has not been dynamically loaded")
        }
	out <- NULL
	x <- as.matrix(x)
	M <- ncol(x)
	N <- length(y)
	maxit <- 8
	nstep <- 10
	if(nrow(x) != length(y))
		stop(" X and Y do not match")
	if(!missing(lam)) {
		if(length(lam) == 1) {
			lam <- rep(lam, M)
		}
		lam <- ifelse(is.na(lam), -1, lam)
	}
	if(missing(lam)) {
		lam <- rep(-1, M)
	}
	if(missing(start)) {
		use.start <- 0
		start <- rep(0, N)
	}
	else {
		use.start <- 1
	}
	gcv.grid <- matrix(0, nrow = 4, ncol = M)
	a <- .Fortran("addreg",
		x = as.double(x),
		as.integer(N),
		as.integer(M),
		as.integer(N),
		y = as.double(y),
		as.double(rep(1, N)),
		as.double(lam),
		trace = as.double(rep(0, M)),
		sxy = as.double(matrix(0, ncol = M, nrow = N)),
		dsxy = as.double(matrix(0, ncol = M, nrow = N)),
		sy = as.double(start),
		din = as.double(c(tol, nback, maxit, nstep, cost)),
		dout = as.double(rep(0, 2 + M)),
		job = as.integer(use.start),
		ierr = as.integer(0))
	if(a$ierr != 0) {
		cat("Error in call to addreg")
		return(a)
	}
	if(sum(a$trace) > length(y))
		cat(" WARNING: Effective number of parameters exceeds the nmbe of observations",
			fill = T)
	out$x <- matrix(a$x, ncol = M)
	out$y <- a$y
	out$residuals <- a$y - a$sy
	out$fitted.values <- a$sy
	out$predicted.comp <- matrix(a$sxy, ncol = M)
	out$trace <- a$trace
	out$lambda <- a$dout[(1:M) + 2]
	out$converge <- c(a$dout[1:2])	
	#	list(x = matrix(a$x, ncol = M), y = a$y, residuals = a$y - a$sy, 
#	predicted = a$sy, predicted.comp = matrix(a$sxy, ncol = M), 
#	predicted.comp.d = matrix(a$dsxy, ncol = M), trace = a$trace, 
#	lambda = lam, converge = c(a$dout[1:2]))
	class(out) <- "addreg"
        out
}
"as.EOF.cov"<-
function(loc, sigma, grid.list = NA, nx = 50, ny = 50, M = ncol(sigma))
{
#
        if(is.na(grid.list)) {
                grid.list <- list(seq(min(loc[, 1]), max(loc[, 1]),  , nx), seq(
                        min(loc[, 2]), max(loc[, 2]),  , ny))
        }
###
        grid <- make.surface.grid(grid.list)
        hold <- eigen(sigma)
        print(hold$values)
        fits <- as.list(1:M)
        for(k in 1:M) {
                cat(k, " ")
                temp <- predict(tps(loc, c(hold$vectors[, k]), 0, 
                        return.matrices = F), grid)
                cat(k, " ")
                fits[[k]] <- as.surface(grid, temp)
        }
#        Now make up weights 
        temp.e <- hold$values[1:M]
        call <- match.call()
        list(loc = loc, delta = temp.e, fits = fits, M = M, call = match.call()
                )
}

"as.cov.isocor"<-
function(loc, sigma, grid.list = NA, nx = 50, ny = 50, lim.theta = NA, M = ncol(
        sigma), nugget = F, ...)
{
        if(!nugget) rho <- 1    #
        if(is.na(grid.list)) {
                grid.list <- list(seq(min(loc[, 1]), max(loc[, 1]),  , nx), seq(
                        min(loc[, 2]), max(loc[, 2]),  , ny))
        }
###
        out <- list()
        grid <- make.surface.grid(grid.list)
        temp <- (tps(loc, c(sqrt(diag(sigma))), return.matrices = F))
        tps.sum <- (temp)
        print(tps.sum)
        temp2 <- (predict(temp, grid))^2
        temp2 <- as.surface(grid, temp2)
        dst <- rdist.earth(loc, loc)
        ind <- col(dst) > row(dst)
        dst <- dst[ind]
        temp <- 1/sqrt(diag(sigma))
        sigma <- diag(temp) %*% sigma %*% diag(temp)
        print(sigma[1:4, 1:4])
        y <- sigma[ind]
        if(is.na(lim.theta[1])) {
                t1 <- min(dst)
                t2 <- max(dst)
        }
        else {
                t1 <- lim.theta[1]
                t2 <- lim.theta[2]
        }
        theta <- seq(t1, t2,  , 80)
        ss <- rep(NA, length(theta))
        for(k in 1:length(theta)) {
                temp <- exp( - c(dst)/theta[k])
                if(nugget) {
                        rho <- sum(temp * y)/sum(temp^2)
                }
                ss[k] <- sum((y - rho * temp)^2)
        }
        theta.est <- theta[min(ss) == ss]
        list(loc = loc, var = temp2, ss = ss, theta = theta, theta.est = 
                theta.est, rho = rho, summary = tps.sum, dst = dst, y = y, call
                 = match.call())
}

"as.surface" <-
function (grid.list, z, order.variables = "xy") 
{
        ##
        # arg 1 can either be the grid.list or the actual surface.grid object
        #
        if (is.null(class(grid.list)) | (class(grid.list) != 
                "surface.grid")) {
                hold <- make.surface.grid(grid.list, info.list = T)
        }
        else {
                hold <- attributes(grid.list)$surface.info
        }
        if (hold$nx * hold$ny != length(z)) 
                stop("Problems\nmatching grid info with your z vector. Check your grid dimensions!")
        if (hold$nvar > 2) {
                main.title <- paste(names(hold$fixed.variables), 
                        " = ", unlist(hold$fixed.variables), 
                        sep = "")
                main.title <- paste(main.title, collapse = " ")
        }
        else {
                main.title <- NULL
        }
        if (order.variables == "xy") {
                out <- list(x = hold$x, y = hold$y, z = matrix(z, 
                        ncol = hold$ny, nrow = hold$nx), xlab = hold$xlab, 
                        ylab = hold$ylab, main = main.title)
                class(out) <- "surface"
        }
        else {
                out <- list(x = hold$y, y = hold$x, z = t(matrix(z, 
                        ncol = hold$ny, nrow = hold$nx)), xlab = hold$ylab, 
                        ylab = hold$xlab, main = main.title)
                class(out) <- "surface"
        }
        out
}
"as.tiles"<-
function(grid.list, FUN)
{
        grid <- make.surface.grid(grid.list)
        g.info <- make.surface.grid(grid.list, info.list = T)   
        # grid has the nu*nv in the right order for reformatting as a matrix
# persp plotting. ( X varies fastest Y held fixed)
#
        xyz <- FUN(grid)        
        # Ok now is the fun  part. Need to take the xyz values and repeat to
#make the tiles. 
#
#  reformat as a matrix to make the indexing easier. 
#
        nu <- g.info$nx
        nv <- g.info$ny #
#     order for constructing tile is     1 2
#                                        4 3
#
        temp <- matrix(xyz[, 1], ncol = nv, nrow = nu)  
        # Now X is a vector in groups of 4 coordinates representing the x
#coordinates of teh tiel corners. The NA is there to end the polygon
#
#
        X <- (rbind(c(temp[1:(nu - 1), 1:(nv - 1)]), c(temp[2:(nu), 1:(nv - 1)]
                ), c(temp[2:(nu), 2:(nv)]), c(temp[1:(nu - 1), 2:nv]))) 
        #  Now do the same thing for the y  and z
        temp <- matrix(xyz[, 2], ncol = nv, nrow = nu)
        Y <- (rbind(c(temp[1:(nu - 1), 1:(nv - 1)]), c(temp[2:(nu), 1:(nv - 1)]
                ), c(temp[2:(nu), 2:(nv)]), c(temp[1:(nu - 1), 2:nv]))) #
        temp <- matrix(xyz[, 3], ncol = nv, nrow = nu)
        Z <- (rbind(c(temp[1:(nu - 1), 1:(nv - 1)]), c(temp[2:(nu), 1:(nv - 1)]
                ), c(temp[2:(nu), 2:(nv)]), c(temp[1:(nu - 1), 2:(nv)])))       
#
        one <- rep(0.25, 4)
        list(X = t(X)[, c(1:4, 1)], 
             Y = t(Y)[, c(1:4, 1)], 
             Z = t(Z)[, c(1:4, 1)], 
             uv = grid, 
             center = cbind(t(X) %*% one, t(Y) %*% one, t(Z) %*% one))
}

"bisection.search"<-
function(x1, x2, f, tol = 9.9999999999999995e-08, niter = 25, f.extra = NA, 
	upcross.level = 0)
{
	f1 <- f(x1, f.extra) - upcross.level
	f2 <- f(x2, f.extra) - upcross.level
	if(f1 > f2)
		stop(" f1 must be < f2 ")
	iter <- niter
	for(k in 1:niter) {
		xm <- (x1 + x2)/2
		fm <- f(xm, f.extra) - upcross.level
		if(fm < 0) {
			x1 <- xm
			f1 <- fm
		}
		else {
			x2 <- xm
			f2 <- fm
		}
		if(abs(fm) < tol) {
			iter <- k
			break
		}
	}
	xm <- (x1 + x2)/2
	fm <- f(xm, f.extra) - upcross.level
	list(x = xm, fm = fm, iter = iter)
}
"bplot"<-
function(x, ..., xpos = NA, width, label, by, srt = 0, add = F, space = 0.25, 
        sort.names = T, xlab = "", ylab = "")
{
# Draws Horizontal Boxplots. Andreas Krause, Dec 1991.
# Doug Nychka April 28, 1992
        if(is.matrix(x)) data <- data.frame(x)
        if(data.class(x) == "numeric")
                data <- list(x, ...)
        if(is.list(x))
                data <- x
        if(!missing(by)) data <- cat.to.list(unlist(data), by)  
        # at this point the data should be in list form regradless of how the
# pieces were originally passed
        quant <- c(0.050000000000000003, 0.25, 0.5, 0.75, 0.94999999999999996)
        cols <- length(data)
        range.data <- range(as.numeric(unlist(data)), na.rm = T)
        if(is.na(xpos[1])) {
                xpos <- 1:cols
        }
        if(missing(width)) {
                width <- min(diff(sort(xpos))) * space
                if(cols == 1)
                        width <- space
        }
        if(length(width) == 1)
                width <- rep(width, cols)
        if(!add) {
                plot(range(c(xpos - (0.5 * width)/space, xpos + (0.5 * width)/
                        space)), range.data, type = "n", xaxt = "n", ylab = 
                        ylab, xlab = xlab)
        }
        for(i in 1:cols) {
                temp <- data[[i]]
                temp <- temp[!is.na(temp)]
                bb <- quantile(temp, quant)
                mid <- xpos[i]  #  i - 0.5
                low <- mid - width[i] * 0.5
                high <- mid + width[i] * 0.5
                if(length(temp) > 5) {
                        y <- c(bb[1], bb[1], NA, bb[1], bb[2], NA, bb[2], bb[2],
                                bb[4])
                        x <- c(high, low, NA, mid, mid, NA, high, low, low)
                        y <- c(y, bb[4], bb[2], bb[3], bb[3], NA, bb[4], bb[5], 
                                bb[5], bb[5])
                        x <- c(x, high, high, high, low, NA, mid, mid, high, 
                                low)
                        lines(x, y)
                }
                if(length(temp) > 5) {
                        outlier <- temp[(temp < bb[1]) | (temp > bb[5])]
                }
                else outlier <- temp
                olen <- length(outlier)
                if(olen > 0)
                        points(rep(mid, olen), outlier)
        }
        if(missing(label)) {
                if(is.null(names(data)))
                        label <- format(1:cols)
                else label <- names(data)
        }
        if(length(label) > 7)
                srt <- 90
        axis(1, xpos, label, tick = F, srt = srt, adj = 1)
        invisible()
}

"cat.matrix"<-
function(mat, digits = 8)
{
	nc <- ncol(mat)
	temp <- matrix(match(c(signif(mat, digits)), unique(c(signif(mat, 
		digits)))), ncol = nc)
	temp2 <- format(temp[, 1])
	if(nc > 1) {
		for(k in 2:nc) {
			temp2 <- paste(temp2, temp[, k], sep = "X")
		}
	}
	match(temp2, unique(temp2))
}
"coef.nnreg"<-
function(out, model = out$best.model)
{
	fit <- out$model[[model]]
	d <- fit$d
	if(is.null(fit$xm)) {
		fit$xm <- rep(0, fit$d)
		fit$xsd <- 1
	}
	if(is.null(fit$ym)) {
		fit$ym <- 0
		fit$ysd <- 1
	}
	k <- fit$k
	theta <- fit$theta
	beta <- theta[1:(k + 1)]
	mu <- theta[(1:k) + k + 1]
	gamma <- matrix(theta[(1:(d * k)) + 2 * k + 1], ncol = d, nrow = k, 
		byrow = T)
	dimnames(gamma) <- list(paste("gamma", 1:k, sep = ""), NULL)
	names(beta) <- paste("beta", 0:k, sep = "")
	names(mu) <- paste("mu", 1:k, sep = "")
	return(model, x.center = fit$xm, x.scale = fit$xsd, y.center = fit$ym, 
		y.scale = fit$ysd, d, k, beta, mu, gamma)
}
"d.squasher.nnreg"<-
function(u)
{
	au <- abs(u)
	su <- ifelse(u < 0, -1, 1)
	N <- (u * (1 + 0.5 * au))
	D <- (2 + au + 0.5 * au * au)
	((1 + au) * D - N * (su + u))/(D * D)
}
"describe"<-
function(x)
{
	lab <- c("N", "mean", "Std.Dev.", "min", "Q1", "median", "Q3", "max", 
		"missing values")
	if(missing(x)) {
		return(lab)
	}
	temp <- rep(0, length(lab))
	xt <- x[!is.na(x)]
	ix <- order(xt)
	n <- length(xt)
	if(!is.numeric(xt) || all(is.na(x))) {
		return(c(n, rep(NA, length(lab) - 2), length(x) - length(xt)))
	}
	if(n == 1) {
		return(c(n, xt[1], NA, rep(xt[1], 5), length(x) - length(xt)))
	}
	else {
		return(c(n, mean(xt), sqrt(var(xt)), min(xt), quantile(xt, c(
			0.25, 0.5, 0.75)), max(xt), length(x) - length(xt)))
	}
}
"dup" <-
function (dat) 
{
        dat <- match(dat, unique(dat))
        id <- order(dat)
        look <- c(F, ifelse(diff(sort(dat)) == 0, T, F))
        dat[id] <- look
        as.logical(dat)
}
"dup.matrix" <-
function (mat) 
{
        ## pdh -- convert mat to a matrix
        mat <- as.matrix(mat)
        nc <- ncol(mat)
        temp <- matrix(match(c(mat), unique(c(mat))), ncol = nc)
        temp2 <- format(temp[, 1])
        if (nc > 1) {
                for (k in 2:nc) {
                        temp2 <- paste(temp2, temp[, k], sep = "X")
                }
        }
        dup(temp2)
}
"exp.cov"<-
function(x1, x2, theta = 1)
{
	exp( - rdist(x1, x2)/theta)
}
"gcv"<-
function(x, ...)
UseMethod("gcv")
"gcv.krig"<-
function(out, lambda = NA, cost = 1, nstep.cv = 80, verbose = F)
{
	nt <- out$nt
	np <- out$np
	N <- out$N
	D <- out$matrices$D
	u <- out$matrices$u
	pure.ss <- out$pure.ss
	if(is.na(lambda)) {
		l1 <- 1/D[np - nt - 1]
		tr <- np	########## find upper value of lambda
##########
		for(k in 1:8) {
			tr <- sum(1/(1 + l1 * D))
			if(tr < (nt + 0.050000000000000003))
				break
			l1 <- l1 * 2
		}
########## find lower lambda
##########
		l2 <- 1/D[1]
		for(k in 1:8) {
			tr <- sum(1/(1 + l2 * D))
			if(tr > (np * 0.94999999999999996))
				break
			l2 <- l2/2
		}
		lambda <- exp(seq(log(l2), log(l1),  , nstep.cv))
	}
	nl <- length(lambda)
	nd <- length(D)	#
	if(verbose) print(lambda)	#
## In S the fastest way to take a weighted sum of the columns of a matrix
##  is by  matrix multiplication
#
## A big matrix that is the product of the lambdas and D's
#
	big.lD <- matrix(D, nrow = nl, ncol = nd, byrow = T) * matrix(lambda, 
		ncol = nd, nrow = nl)	#
#
#
	if(verbose)
		print(pure.ss)
	RSS <- ((big.lD/(1 + big.lD))^2) %*% u^2
	if(verbose)
		print(RSS)
	RSS <- RSS + pure.ss
	MSE <- RSS/N	#
	trA <- (1/(1 + big.lD)) %*% rep(1, np)
	if(verbose)
		print(trA)	#	V <- MSE/(1 - (cost * (trA - nt) + nt)/N)^2
	denom <- (1 - (cost * (trA - nt) + nt)/N)
	V <- ifelse(denom > 0, MSE/denom^2, 1e+20)	#
## find global minimum of the GCV function
#
	gcv.grid <- data.frame(lambda, trA, V, sqrt(RSS/(N - trA)))
	names(gcv.grid) <- c("lambda", "trA", "GCV", "shat")
	il <- order(gcv.grid$GCV)[1]
	lambda.best <- gcv.grid$lambda[il]
	gcv.grid$GCV[gcv.grid$GCV == 1e+20] <- NA
	list(gcv.grid = gcv.grid, lambda.best = lambda.best)
}
"gcv.tps"<-
function(out, lambda.grid = NA, cost = 1, nstep.cv = 80, rmse = NA, verbose = F,
	tol = 1.0000000000000001e-05)
{
	nt <- out$nt
	np <- out$np
	N <- length(out$y)
	D <- out$matrices$D
	u <- out$matrices$u
	shat.pure.error <- out$shat.pure.error
	pure.ss <- out$pure.ss	#
# create a reasonable grid for the GCV search if not supplied
#
	if(is.na(lambda.grid[1])) {
		l1 <- 1/D[np - nt - 1]
		tr <- np	#
########## find upper value of lambda
#
		for(k in 1:8) {
			tr <- sum(1/(1 + l1 * D))
			if(tr < (nt + 0.050000000000000003))
				break
			l1 <- l1 * 2
		}
#
########## find lower lambda
#
		l2 <- 1/D[1]
		for(k in 1:8) {
			tr <- sum(1/(1 + l2 * D))
			if((tr > (np * 0.94999999999999996)) | ((1 - (cost * (
				tr - nt) + nt)/N) <= 0))
				break
			l2 <- l2/2
		}
		lambda.grid <- exp(seq(log(l2), log(l1),  , nstep.cv))
	}
#
# done with finding a good default range for lambda
#
	nl <- length(lambda.grid)
	nd <- length(D)	#
#
## In S the fastest way to take a weighted sum of the columns of a matrix
##  is by  matrix multiplication
#
## Now make a big matrix that is the product of the lambdas and D's
#
	big.lD <- matrix(D, nrow = nl, ncol = nd, byrow = T) * matrix(
		lambda.grid, ncol = nd, nrow = nl)	#
#
#
	RSS <- pure.ss + ((big.lD/(1 + big.lD))^2) %*% u^2
	MSE <- RSS/N	#
	trA <- (1/(1 + big.lD)) %*% rep(1, np)
	den <- (1 - (cost * (trA - nt) + nt)/N)	#
# If the denominator is negative then flag this as a bogus case
# by making the GCV function "infinity": 10^20
#
	V <- ifelse(den > 0, MSE/den^2, 1e+20)	#
#
## find global minimum of the GCV function on the grid
#
#
	gcv.grid <- data.frame(lambda.grid, trA, V, sqrt(RSS/(N - trA)))
	names(gcv.grid) <- c("lambda", "trA", "GCV", "shat")	#
# il is the index of the smallest value in the grid 
#
	il <- order(gcv.grid$GCV)[1]
	lambda.gcv <- gcv.grid$lambda[il]	#
#
#
	gcv.raw <- min(gcv.grid$GCV)
	if(verbose) {
		cat("GCV coarse search:", gcv.raw)
	}
# Now switch the 1e20 in V to NA's ( these are cases where the 
# the denominator is negative or zero due to the cost being greater than 1. 
#
	gcv.grid$GCV[den < 0] <- NA	#
#
#
#  create a mini tps object list with the information needed
# for further refinements of this estimate and the others
#
	info <- list(matrices = list(D = D, u = u), N = N, nt = nt, cost = cost,
		pure.ss = pure.ss)	#       
	if(verbose) print(info)	#
#
# do a golden section refined search for minimizing lamdda
# if the minimum is in interior of the grid search. 
#
	lambda.est <- matrix(ncol = 4, nrow = 3, dimnames = list(c("GCV", 
		"RMSE", "pure error"), names(gcv.grid)))
	lambda.est[1, 1] <- gcv.grid[il, 1]	#
#
	if((il > 1) & (il < nstep.cv)) {
#
# now do the Golden section refinement
# tolerance for convergence scaled with respect to GCV from the coarse search
#
		out <- golden.section.search(lambda.grid[il - 1], lambda.grid[
			il], lambda.grid[il + 1], tps.fgcv, f.extra = info, tol
			 = tol * gcv.raw)
		lambda.gcv <- out$x
		lambda.est[1, 1] <- lambda.gcv
	}
	else {
		warning("GCV search gives a minumum at the endpoints of the grid search"
			)
	}
	lambda.rmse <- NA
	lambda.pure.error <- NA
	if(!is.na(rmse)) {
		guess <- max(gcv.grid$lambda[gcv.grid$shat < rmse])
		if(verbose) {
			print(rmse)
			print(guess)
		}
		if(!is.na(guess)) {
			lambda.rmse <- find.upcross(tps.fs2hat, info, 
				upcross.level = rmse^2, guess = guess, tol = 
				tol * rmse^2)
			lambda.est[2, 1] <- lambda.rmse
		}
		else {
			warning("Value of rmse is outside possible range")
		}
	}
#
##
#
	if(!is.na(shat.pure.error)) {
		guess <- max(gcv.grid$lambda[gcv.grid$shat < shat.pure.error])
		if(!is.na(guess)) {
			lambda.pure.error <- find.upcross(tps.fs2hat, info, 
				upcross.level = shat.pure.error^2, guess = 
				guess, tol = tol * shat.pure.error^2)
			lambda.est[3, 1] <- lambda.pure.error
		}
		else {
			warning("Value of pure error estimate  is outside possible range"
				)
		}
	}
#
#
# fill in other stuff for each estimate of lambda
	for(k in 1:3) {
		lam <- lambda.est[k, 1]
		if(!is.na(lam)) {
			lambda.est[k, 2] <- tps.ftrace(lam, D)
			lambda.est[k, 3] <- tps.fgcv(lam, info)
			lambda.est[k, 4] <- sqrt(tps.fs2hat(lam, info))
		}
	}
	list(gcv.grid = gcv.grid, lambda.est = lambda.est, lambda.best = 
		lambda.gcv)
}
"golden.section.search"<-
function(ax, bx, cx, f, niter = 25, f.extra = NA, 
	tol = 1.0000000000000001e-05)
{
	r <- 0.61803399000000003
	con <- 1 - r
	x0 <- ax
	x3 <- cx
	if(abs(cx - bx) > abs(bx - ax)) {
		x1 <- bx
		x2 <- bx + con * (bx - ax)
	}
	else {
		x2 <- bx
		x1 <- bx - con * (bx - ax)
	}
	f1 <- f(x1, f.extra)
	f2 <- f(x2, f.extra)
	iter <- niter
	for(k in 1:niter) {
		if(f2 < f1) {
			x0 <- x1
			x1 <- x2
			x2 <- r * x1 + con * x3
			f0 <- f1
			f1 <- f2
			f2 <- f(x2, f.extra)
		}
		else {
			x3 <- x2
			x2 <- x1
			x1 <- r * x2 + con * x0
			f3 <- f2
			f2 <- f1
			f1 <- f(x1, f.extra)
		}
		if(abs(f2 - f1) < tol) {
			iter <- k
			break
		}
	}
	if(f1 < f2) {
		golden <- f1
		xmin <- x1
	}
	else {
		golden <- f2
		xmin <- x2
	}
	list(x = xmin, fmin = golden, iter = iter)
}
"interp.grid" <-
function (loc, grid) 
{
        xg <- unique(great.lakes.rom.grid[, 1])
        yg <- unique(great.lakes.rom.grid[, 2])
        nx <- length(xg)
        ny <- length(yg)
        xa <- min(xg)
        xb <- max(xg)
        xr <- xb - xa
        ya <- min(yg)
        yb <- max(yg)
        yr <- yb - ya
        lx <- ((nx - 1) * (loc[, 1] - xa))/xr + 1
        ly <- ((ny - 1) * (loc[, 2] - ya))/yr + 1
        lx1 <- ifelse(lx == nx, nx - 1, trunc(lx))
        ly1 <- ifelse(ly == ny, ny - 1, trunc(ly))
        ex <- lx - lx1
        # simple linear interpolation of the rom grid
        ey <- ly - ly1
        #
        #
        (grid[cbind(lx1, ly1)] * (1 - ex) * (1 - ey) + grid[cbind(lx1 + 
                1, ly1)] * (ex) * (1 - ey) + grid[cbind(lx1, 
                ly1 + 1)] * (1 - ex) * (ey) + grid[cbind(lx1 + 
                1, ly1 + 1)] * ex * ey)
}
"interp.surface" <-
function (obj, loc) 
{
        # if(missing(loc))
        # loc <- make.surface.grid(list(range(obj$x), range(obj$y)))
        xg <- unique(obj$x)
        yg <- unique(obj$y)
        nx <- length(xg)
        ny <- length(yg)
        xa <- min(xg)
        xb <- max(xg)
        xr <- xb - xa
        ya <- min(yg)
        yb <- max(yg)
        yr <- yb - ya
        lx <- ((nx - 1) * (loc[, 1] - xa))/xr + 1
        ly <- ((ny - 1) * (loc[, 2] - ya))/yr + 1
        lx1 <- ifelse(lx == nx, nx - 1, trunc(lx))
        ly1 <- ifelse(ly == ny, ny - 1, trunc(ly))
        lx1 <- ifelse(lx1 < 1 | lx1 > nx, NA, lx1)
        ly1 <- ifelse(ly1 < 1 | ly1 > ny, NA, ly1)
        ex <- lx - lx1
        # simple linear interpolation of the rom obj$z
        ey <- ly - ly1
        #
        #
        # if(class(loc) == "surface.grid")
        temp <- (obj$z[cbind(lx1, ly1)] * (1 - ex) * (1 - ey) + 
                obj$z[cbind(lx1 + 1, ly1)] * (ex) * (1 - ey) + 
                obj$z[cbind(lx1, ly1 + 1)] * (1 - ex) * (ey) + 
                obj$z[cbind(lx1 + 1, ly1 + 1)] * ex * ey)
        temp
}
"krig"<-
function(x, Y, cov.function = exp.cov, lambda = NA, cost = 1, knots, weights = 
	rep(1, length(Y)), m = 2, return.matrices = T, nstep.cv = 80, 
	scale.type = "user", x.center = rep(0, ncol(x)), x.scale = rep(1, ncol(
	x)), rho = NA, sigma2 = NA, method = "GCV", verbose = F, ...)
{
  out<-list(call=NULL)
	out$call <- match.call()	#
	out$N <- length(Y)	# reshuffle arguments if x nd y are passed as a list
	class(out) <- c("krig", "funfits")
#
###
### add passed theta arguments to the covariance function
	temp <- list(...)
	ntemp <- names(temp)
	if(length(temp) > 0) {
		for(k in 1:length(ntemp)) {
			cov.function[ntemp[k]] <- temp[ntemp[k]]
		}
		cov.function <- as.function(cov.function)
	}
	out$cov.function <- cov.function	#
# S function to find minizier of 
#  || Y- Xb||^2 + lambda b^T H b where H is a 
# covariance matrix found from cov.function
# Solution for b is  b= (X^T*X + lambda*H)^(-1) X^T*Y
#  H is the covariance matrix 
# First set up some constants
	x <- as.matrix(x)
	Y <- c(Y)	# make sure Y is a vector!
	out$y <- Y
	out$x <- x
	out$weights <- weights	
	## if knots are missing then use the set of unique x vexctors.
	if(missing(knots))
		knots <- x[!dup.matrix(x),  ]
	knots <- as.matrix(knots)	##
	out$knots <- knots	##
#
## scale x and knots 
	x <- transformx(x, scale.type, x.center, x.scale)
	transform <- attributes(x)
	knots <- scale(knots, center = transform$x.center, scale = transform$
		x.scale)
	out$transform <- transform	##
##
##  use value of lambda implied by rho and simga2 if these are passed
##
	if(!is.na(lambda))
		method <- "user"
	if(!is.na(rho) & !is.na(sigma2)) {
		lambda <- sigma2/rho
		method <- "user"
	}
	just.solve <- (lambda[1] == 0)
	if(is.na(just.solve))
		just.solve <- F
	d <- ncol(x)	# make up the T and K matrices
# find the QR decopmposition of T matrix  that spans null space with
# respect to the knots 
	qr.T <- qr(make.tmatrix(knots, m))
	X <- cbind(make.tmatrix(x, m), qr.yq2(qr.T, cov.function(x, knots)))
	np <- ncol(X)	# the number of parameters
	nr <- nrow(X)
	N <- nr
	nt <- qr.T$rank	# number of para. in NULL space
	nk <- np - nt	#
	out$np <- np
	out$nt <- nt	#   construct the covariance matrix 
#functions and Qr decomposition of T
#
	H <- matrix(0, ncol = np, nrow = np)
	temp <- qr.yq2(qr.T, cov.function(knots, knots))
	temp <- qr.q2ty(qr.T, temp)
	mean.var <- mean(diag(temp))
	H[(nt + 1):np, (nt + 1):np] <- temp	#
#
# if lambda = 0 then just solve the system 
	if(just.solve) {
		beta <- qr.coef(qr(X), Y)
	}
	else {
#
#   do all the heavy decompositions if lambda is not = 0
#   or if it is omitted
#
#
# inverse symetric square root of X^T W  X
#
		temp <- svd(diag(sqrt(weights)) %*% (X))[c("v", "d")]
		cond.matrix <- max(temp$d)/min(temp$d)
		if(cond.matrix > 10000000000)
			stop("Covarinace matrix is clsoe\nto singular")
		B <- temp$v %*% diag(1/(temp$d)) %*% t(temp$v)	#
#   eigenvalue eigenvector decomposition of BHB
#
		temp <- svd(B %*% H %*% B)
		U <- temp$u	#	cat(diag(U %*% t(U)), fill = T)
		D <- temp$d	#
		if(verbose) {
			cat("singular values:", fill = T)
			print(D)
		}
#   We know that H has atleast nt zero singular values ( see how H is
#   filled)
#   So make these identically zero.
#   the singular values are returned from largest to smallest.
#
		D[(1:nt) + (np - nt)] <- 0
		G <- B %*% U	#
#   with these these decompositions it now follows that 
#     b= B*U( I + lambda*D)^(-1) U^T * B * X^T*Y
#      = G*( I + lambda*D)^(-1) G^T* X^T*Y
#	
# Now tranform  Y based on this last equation
#
		u <- t(G) %*% t(X) %*% (weights * Y)	#
#
#   So now we have   
#
#    b= G*( I + lambda*D)^(-1)*u 
#   Note how in this form we can rapidly solve for b for any lambda
#
# save matrix decopositions in out list
#
# find the pure error sums of sqaures. 
#
		out$pure.ss <- sum(weights * (Y - X %*% G %*% u)^2)
		if(verbose) {
			cat("pure.ss", fill = T)
			print(out$pure.ss)
		}
		out$matrices <- list(B = B, U = U, u = u, D = D, G = G, qr.T = 
			qr.T)
		gcv.out <- gcv.krig(out, nstep.cv = nstep.cv, verbose = verbose
			)
		gcv.grid <- gcv.out$gcv.grid	
	# To solve for the coefficients,  recall: b= G*( I + lambda*D)^(-1)*u
		if(method == "user") {
			lambda.best <- lambda
		}
		else {
			lambda.best <- gcv.out$lambda.best
		}
		beta <- G %*% ((1/(1 + lambda.best * D)) * u)
		out$gcv.grid <- gcv.grid
		if(verbose) {
			print(out$gcv.grid)
		}
	}
	out$cost <- cost
	out$m <- m
	if(!just.solve) {
		out$eff.df <- sum(1/(1 + lambda.best * D))
	}
	else {
		out$eff.df <- out$np
	}
	out$fitted.values <- c(X %*% beta)
	out$residuals <- Y - out$fitted.values
	if(just.solve)
		out$lambda <- lambda
	else out$lambda <- lambda.best
	out$yname <- substitute(Y)	##
##
## wipe out big matrices if they are not to be returned
##
##
## add some more stuff to put object
##
	out$beta <- beta
	out$d <- beta[1:nt]	#
# funny conversions are in case nt is equal to 1 and X is just a vector
#
	out$fitted.values.null <- as.matrix(X[, 1:nt]) %*% out$d	#
##
#
# tranform the beta into the parameter associated with the covariance
# function
# basis set. 
#  into the c parameter vector. 
#
	out$trace <- out$eff.df
	if(verbose) {
		cat("trace of A", fill = T)
		print(out$trace)
	}
	temp <- c(rep(0, nt), beta[(nt + 1):np])
	if(verbose)
		print(temp)
	out$c <- c(qr.qy(qr.T, temp))	
	#	out$coefficients <- c(beta[1:nt], out$c)
	if(verbose)
		print(out$c)
	out$just.solve <- just.solve	#
	out$shat.GCV <- sqrt(sum(out$weights * out$residuals^2)/(length(Y) - 
		out$trace))	#
# fill in the linear parameters of the covariance function in 
# the output object
#
# the next formula is pretty strange. It follows from solving the
# system of equations for the basis coefficients. 
#       
	out$rhohat <- sum(out$c * out$y)/(N - nt)	#
	if(is.na(rho)) {
		out$rho <- out$rhohat
	}
	else out$rho <- rho
	if(is.na(sigma2))
		sigma2 <- out$rho * out$lambda
	out$sigma2 <- sigma2	#
	out$shat.MLE <- sqrt(out$rhohat * out$lambda)	#
	out$best.model <- c(out$lambda, out$sigma2, out$rho)
	if(!return.matrices) {
		out$x <- NULL
		out$y <- NULL
		out$matrices <- NULL
		out$weights <- NULL
	}
##
##
	out
}
# Generic for local Lyapunov exponent (LLE) estimation; S3 dispatch on the
# class of the first argument (methods lle.default and lle.nlar below).
"lle"<-
function(x, ...)
UseMethod("lle")
"lle.default"<-
function(jac, lags = NA, nprod = c(5, 10, 20, 40, 80), skip = NA, statevector
	 = F, clean = T, verbose = F)
{
	# Local and global Lyapunov exponent estimates from a matrix of
	# Jacobians (one row per time point).  For each entry of nprod the
	# external program driven by make.lle returns a 3-column matrix of
	# local estimates (stored here as local.svd, local.qr, local.11);
	# a final call with nprod = -1 gives the global exponent.
	#
	# skip: columns of jac (e.g. covariates) to drop before the analysis.
	# Guard on skip[1] since make.lags returns skip as a vector.
	if(!is.na(skip[1]))
		jac <- as.matrix(jac[, -1 * skip])
	n <- nrow(jac)
	m <- length(nprod)
	# pad the jacobian matrix with zero columns for lags not appearing in
	# the model (i.e. some lags have a partial derivative of zero)
	if(!is.na(lags[1])) {
		hold <- jac	# fill up a matrix with zeroes
		jac <- matrix(0, ncol = max(lags), nrow = n)
		# replace nonzero columns with columns of the passed jacobian
		jac[, lags] <- hold
	}
	if(!is.na(nprod[1])) {
		temp1 <- matrix(nrow = n, ncol = m)
		temp2 <- matrix(nrow = n, ncol = m)
		temp3 <- matrix(nrow = n, ncol = m)
		dimnames(temp1) <- list(NULL, paste(format(nprod), "steps"))
		dimnames(temp2) <- list(NULL, paste(format(nprod), "steps"))
		dimnames(temp3) <- list(NULL, paste(format(nprod), "steps"))
		for(k in 1:length(nprod)) {
			# Bug fix: the assignments below were previously
			# outside this guard, so a step count >= n reused a
			# stale `temp` (or failed on the first iteration).
			# Now such columns are simply left as NA.
			if(nprod[k] < n) {
				temp <- make.lle(jac, nprod[k], statevector = 
				  statevector, clean = clean, verbose = verbose
				  )
				temp1[1:(n - nprod[k] + 1), k] <- temp[, 1]
				temp2[1:(n - nprod[k] + 1), k] <- temp[, 2]
				temp3[1:(n - nprod[k] + 1), k] <- temp[, 3]
			}
		}
	}
	else {
		temp1 <- NA
		temp2 <- NA
		temp3 <- NA
	}
	# global exponent over the full series (QR-based column of make.lle)
	glb <- make.lle(jac, -1, statevector = statevector, clean = clean, 
		verbose = verbose)[, 2]
	out <- list(local.svd = temp1, local.qr = temp2, local.11 = temp3, 
		nprod = nprod, glb = glb)
	class(out) <- "lle"
	out
}
"lle.nlar"<-
function(obj, model = NA, nprod = c(5, 10, 20, 40, 80), verbose = F, clean = T, 
	...)
{
	# lle method for nlar (nonlinear autoregression) fits: evaluate the
	# fitted model's Jacobian along the series and hand it to lle.default.
	# (Removed an unused local `statevector <- F` that was never passed
	# on; lle.default defaults to statevector = F anyway.)
	if(is.na(model[1])) {
		model <- obj$fit$best.model
	}
	# first-derivative (Jacobian) matrix of the fitted autoregression
	jac <- predict(obj$fit, derivative = 1, model = model, ...)	#
# omit columns of the Jacobian that are not state variables (skip), and
# zero-pad lags that do not appear in the model (lags) -- see lle.default
#
	lle.default(jac, lags = obj$lags, skip = obj$skip, nprod = nprod, clean
		 = clean, verbose = verbose)
}
# Generic for the smoother ("hat") matrix A(lambda) of a fit; S3 dispatch on
# the class of the fitted object (methods for krig and tps below).
"make.Amatrix"<-
function(object, ...)
UseMethod("make.Amatrix")
"make.Amatrix.krig"<-
function(out, x0 = out$x, lambda)
{
	# Smoother ("hat") matrix for a krig fit: returns A such that
	# A %*% y gives predictions at the points x0, using the saved
	# decomposition (G, D, qr.T) from the fit; lambda defaults to the
	# value chosen by the fit.
	if(missing(lambda)) {
		lambda <- out$lambda
	}
	# rescale data, knots and prediction points exactly as the fit did
	xc <- out$transform$x.center
	xs <- out$transform$x.scale
	x <- scale(out$x, xc, xs)
	knots <- scale(out$knots, xc, xs)
	x0 <- scale(x0, xc, xs)
	# basis at the data: polynomial (null-space) columns plus covariance
	# columns projected by qr.yq2 -- same construction as in krig itself
	X <- cbind(make.tmatrix(x, out$m), qr.yq2(out$matrices$qr.T, out$
		cov.function(x, knots)))
	# coefficient map: G (I + lambda D)^(-1) G^T X^T W
	# (matches beta = G (1 + lambda*D)^(-1) u with u = G^T X^T W y)
	temp <- (out$matrices$G) %*% diag(1/(1 + lambda * out$matrices$D))
	temp <- temp %*% t(out$matrices$G) %*% t(X)
	temp <- temp %*% diag(out$weights)	#
#
	# premultiply by the same basis evaluated at the prediction points x0
	temp <- cbind(make.tmatrix(x0, out$m), qr.yq2(out$matrices$qr.T, out$
		cov.function(x0, knots))) %*% temp	#
#
	return(temp)
}
"make.Amatrix.tps"<-
function(out, x0 = out$x, lambda, diagonal = F)
{
	# Smoother ("hat") matrix for a tps fit: A such that A %*% y gives
	# predictions at x0.  With diagonal = T only the diagonal of A at the
	# data points is returned (computed without forming the full matrix).
	if(missing(lambda)) {
		lambda <- out$lambda
	}
	# rescale as the fit did
	xc <- out$transform$x.center
	xs <- out$transform$x.scale
	x <- scale(out$x, xc, xs)
	knots <- scale(out$knots, xc, xs)
	# basis at the data: polynomial part plus projected radial basis
	X <- cbind(make.tmatrix(x, out$m), qr.yq2(out$matrices$qr.T, make.rb(x, 
		knots, p = out$power, with.constant = out$with.constant)))
	if(!diagonal) {
		x0 <- scale(x0, xc, xs)
		# coefficient map G (I + lambda D)^(-1) G^T X^T W, then the
		# basis at x0 on the left -- same algebra as make.Amatrix.krig
		temp <- (out$matrices$G) %*% diag(1/(1 + lambda * out$matrices$
			D))
		temp <- temp %*% t(out$matrices$G) %*% t(X)
		temp <- temp %*% diag(out$weights)	#
#
		temp <- cbind(make.tmatrix(x0, out$m), qr.yq2(out$matrices$qr.T,
			make.rb(x0, knots, p = out$power, with.constant = out$
			with.constant))) %*% temp	#
#
	}
	else {
		# diag(A) = rowSums of (X G (I+lambda D)^(-1/2))^2, weighted
		temp <- X %*% out$matrices$G %*% sqrt(diag(1/(1 + lambda * out$
			matrices$D)))
		temp <- c((temp^2) %*% rep(1, ncol(X))) * out$weights
	}
	return(temp)
}
"make.Kc"<-
function(x1, x2, C, p = 1, with.log = T, with.constant = T)
{
	# Multiply the radial-basis matrix K(x1, x2) by the vector C without
	# forming K explicitly (FORTRAN subroutine "multrb"); returns the
	# length-nrow(x1) vector A(m,d) * K %*% C.
	#
	# Consistency fix: stop() when the compiled code is not loaded, like
	# the other compiled-code wrappers in this file (make.rb, nkden, ...),
	# instead of dyn.load'ing from FUNFITS.BIN.
	if(!is.loaded(symbol.For("radbas"))) {
		# temp <- dyn.load(paste(FUNFITS.BIN, "TPS.o", sep = "/"), 2)
		stop("Compiled code has not been dynamically loaded")
	}
	if(!is.matrix(x1)) {
		x1 <- as.matrix(x1)
	}
	if(!is.matrix(x2)) {
		x2 <- as.matrix(x2)
	}
	d <- ncol(x1)
	n1 <- nrow(x1)
	n2 <- nrow(x2)	
	#      subroutine multrb( nd,x1,n1, x2,n2, par, c,h,work)
#       implicit double precision (a-h,o-z)
#       integer nd,n1,n2,ic
	# par = (p/2, log flag): the log term applies only in even dimensions
	# and only when requested -- same convention as make.rb
	par <- c(p/2, ifelse((d %% 2 == 0) & (with.log), 1, 0))
	temp <- .Fortran("multrb",
		nd = as.integer(d),
		x1 = as.double(x1),
		n1 = as.integer(n1),
		x2 = as.double(x2),
		n2 = as.integer(n2),
		par = as.double(par),
		c = as.double(C),
		h = as.double(rep(0, n1)),
		work = as.double(rep(0, n2)))
	# thin-plate normalizing constant: 2m - d = p so m = (d + p)/2
	if(with.constant) {
		m <- (d + p)/2
		Amd <- radbas.constant(m, d)
	}
	else {
		Amd <- 1
	}
	Amd * temp$h
}
"make.lags"<-
function(x, lags, cov = NA, nobs = 3500)
{
	# Build a lagged-embedding design matrix from a (possibly
	# multivariate) time series x.  For each lag in `lags` the series is
	# shifted and bound in as a block of columns; y holds the unlagged
	# response.  Optional covariates `cov` are appended and their column
	# positions returned in `skip` so downstream code can omit them.
	# The usable length is capped at `nobs` rows.
	if(is.null(x)) stop("x should exist!")
	x <- as.matrix(x)
	xd <- ncol(x)
	m <- length(lags)
	# rows available after losing max(lags) start-up values
	N.avail <- nrow(x) - max(lags)
	n <- min(nobs, N.avail)
	# Bug fix: the old test compared min(nobs, N.avail) > nobs, which can
	# never be true, so the truncation warning never fired.
	if(N.avail > nobs)
		warning(" series length truncated to\ndefault length in make.lags"
			)
	start <- max(lags) + 1
	temp <- matrix(0, ncol = xd * (length(lags)), nrow = n)
	for(k in 1:length(lags)) {
		a <- start - lags[k]
		b <- a + n - 1
		temp[, (1:xd) + (k - 1) * xd] <- x[(a:b),  ]
	}
	a <- start
	b <- a + n - 1
	# column labels: "lag<j>" for a univariate series, "<var>lag<j>" for
	# multivariate (bug fix: the multivariate paste() had a spurious empty
	# first argument, which is a runtime error in R)
	if(xd == 1)
		lab <- format(paste("lag", rep(lags, rep(xd, length(lags))), 
			sep = ""))
	else lab <- format(paste(rep(1:xd, length(lags)), "lag", rep(lags, 
			rep(xd, length(lags))), sep = ""))
	dimnames(temp) <- list(NULL, lab)
	skip <- NA
	if(!is.na(cov[1])) {
		cov <- as.matrix(cov)
		temp <- cbind(temp, cov[a:b,  ])
		cat(a, b)
		skip <- (1:ncol(cov)) + m * xd
	}
	if(xd == 1)
		y <- c(x[a:b])
	else y <- x[a:b,  ]
	list(x = temp, y = y, nvar = m, lags = lags, skip = skip, start = a, 
		end = b)
}
"make.lle"<-
function(jac, nprod, statevector = F, verbose = T, clean = T)
{
	# Run the external lle.x program on a matrix of Jacobians: parameters
	# go to "lle.par", the Jacobian rows to "temp.lle", and the program's
	# output ("lle.out") is scanned back as a 3-column matrix (used by
	# lle.default as the svd / qr / 1,1-element estimates).  nprod is the
	# number of Jacobian products; a negative value is used by
	# lle.default for the global exponent.
	nc <- ncol(jac)
	# clear any leftovers from a previous (possibly failed) run
	remove.file("lle.par")
	remove.file("temp.lle")
	remove.file("lle.out")
	if(verbose)
		cat(" removed UNIX temp files", fill = T)
	if(statevector) {
		# rows are full emd x emd Jacobians, so the column count must
		# be a perfect square
		state <- 1
		emd <- sqrt(nc)
		test <- emd - round(emd)
		# NOTE(review): this only prints a message -- execution
		# continues with a non-square column count; confirm whether a
		# hard stop() was intended.
		if(test != 0)
			cat("Stop, no. of col of jac is not a perfect \nsquare",
				fill = T)
	}
	else {
		state <- 0
		emd <- nc
	}
	write(c(nc, emd, nprod, state), "lle.par")
	write(t(jac), "temp.lle", ncol = nc)
	#unix(paste(FUNFITS.BIN, "/lle.x < temp.lle > lle.out", sep = ""))
	unix(paste(system.file("exec/lle.x")," < temp.lle > lle.out", sep = ""))
	if(verbose)
		cat(" reading in matrix of LLE's", fill = T)
	temp <- matrix(scan("lle.out"), ncol = 3, byrow = T)
	# remove the working files unless the caller wants to inspect them
	if(clean) {
		remove.file("lle.par")
		remove.file("temp.lle")
		remove.file("lle.out")
		remove.file("lle.warnings")
	}
	temp
}
"make.rb"<-
function(x1, x2, p = 1, with.log = T, with.constant = T)
{
	# Radial (thin-plate) basis matrix between the rows of x1 and x2 via
	# the FORTRAN subroutine "radbas"; returns an nrow(x1) x nrow(x2)
	# matrix scaled by the constant A(m,d) unless with.constant = F.
	if(!is.loaded(symbol.For("radbas"))) {
#		temp <- dyn.load(paste(FUNFITS.BIN, "TPS.o", sep = "/"), 2)
          stop("Compiled code has not been dynamically loaded")
	}
	if(!is.matrix(x1))
		x1 <- as.matrix(x1)
	if(!is.matrix(x2))
		x2 <- as.matrix(x2)
	d <- ncol(x1)
	n1 <- nrow(x1)
	n2 <- nrow(x2)	# 2m-d =p so m= ( d+p)/2
	m <- (d + p)/2
	# par = (p/2, log flag); the extra log term is used only in even
	# dimensions, and only when with.log is requested
	par <- c(p/2, ifelse((d %% 2 == 0) & (with.log), 1, 0))
	temp <- .Fortran("radbas",
		nd = as.integer(d),
		x1 = as.double(x1),
		n1 = as.integer(n1),
		x2 = as.double(x2),
		n2 = as.integer(n2),
		par = as.double(par),
		k = as.double(rep(0, n1 * n2)))
	if(with.constant) {
		Amd <- radbas.constant(m, d)
	}
	else {
		Amd <- 1
	}
	# basis values come back as a flat vector in column order
	Amd * matrix(temp$k, ncol = n2, nrow = n1)
}
"make.surface.grid" <-
function (grid.list, X, nx = 30, ny = 30, info.list = F, FUN = median) 
{
        # Expand a grid.list description into a matrix of grid points for
        # surface evaluation/plotting.  Exactly two components of grid.list
        # may vary (the "x" and "y" axes, each given as a vector of values,
        # a 2-vector range, or the strings "x"/"y" when X is supplied); all
        # other components are held fixed (at their given value, or at
        # FUN(X[, k]) for components marked "c").  With info.list = T only
        # the axis/label metadata is returned instead of the point matrix.
        #
        # what array tells where in the grid.list the x and y variable are. 
        #
        # initialize the what array
        #
        what <- rep(NA, 2)
        # If a X matrix is supplied fill out the grid.list with all the names
        #  of the variables
        if (!is.list(grid.list)) 
                stop("Must supply a list to describe grid limits")
        #
        #
        # at this point the grid.list has been fixed up so that all components
        # are numeric.
        #   The what array has been filled in if the x and y indicators have
        #   been found  
        #
        # ind variable keeps track of where x and y variables are .
        if (!missing(X)) {
                # resolve column names of X (integer names if none)
                if (data.class(X) != "data.frame") {
                        names.of.X <- dimnames(X)[[2]]
                        if ((is.null(names.of.X))) {
                                names.of.X <- format(1:ncol(X))
                        }
                }
                else names.of.X <- names(X)
                #
                m <- length(names.of.X)
                #print(names.of.X)
                # add integer names to the grid.list if they are missing. 
                #
                #
                # make sure that any component of the grid.list also appears in the 
                # names of X
                #   default integer names have been added at this point so this should
                #work even for grid.lists and X matrices without names
                if (is.null(names(grid.list))) {
                        if (length(grid.list) < m) 
                                stop(" grid.list must be as long as the number of columns of X!")
                        names(grid.list) <- format(1:length(grid.list))
                }
                test <- match(names(grid.list), names.of.X)
                #
                #
                #   default is to center at vairbales that do not appear in the grid
                # specification
                #
                if (!(all(!is.na(test)))) {
                        print("names in grid.list")
                        print(names(grid.list))
                        print("names for columns of X matrix")
                        print(names.of.X)
                        stop(" some of the\ngrid.list names are not found in the names of the X columns")
                }
                # every X column starts as "c" (center with FUN) and is then
                # overridden by whatever the caller put in grid.list
                temp <- as.list(rep("c", m))
                names(temp) <- names.of.X
                # now add the grid list info to this master list 
                #
                #
                temp[names(grid.list)] <- grid.list
                for (k in 1:length(temp)) {
                        test <- temp[[k]]
                        if (length(test) == 1) {
                                if (test == "c") 
                                 temp[[k]] <- FUN(X[, k])
                                if (test == "x") {
                                 temp[[k]] <- seq(min(X[, k]), 
                                  max(X[, k]), , nx)
                                 what[1] <- k
                                }
                                if (test == "y") {
                                 temp[[k]] <- seq(min(X[, k]), 
                                  max(X[, k]), , ny)
                                 what[2] <- k
                                }
                        }
                }
                #
                # Now update the orignal grid.list
                #
                grid.list <- temp
        }
        #
        ind <- unlist(lapply(grid.list, length))
        #  check to make sure that the grid list has only two components that
        #have more than 1 value ( i.e the X and Y grid info)
        #
        if (sum(ind > 1) > 2) {
                stop("Only two components can have more than one\nvalue in the grid list")
        }
        #
        nl <- length(grid.list)
        #  fill in what vector in the case when X has not been passed
        #
        if (is.na(what[1])) {
                what <- (1:nl)[ind > 1]
        }
        x1 <- grid.list[[what[1]]]
        x2 <- grid.list[[what[2]]]
        # a 2-vector component is a range: expand it to nx/ny grid values
        if (length(x1) == 2) {
                x1 <- seq(min(x1), max(x1), , nx)
        }
        if (length(x2) == 2) {
                x2 <- seq(min(x2), max(x2), , ny)
        }
        nx <- length(x1)
        ny <- length(x2)
        # OK at this point x1 and x2 are the real grids in the right order
        nr <- nx * ny
        # Now fill in the constant levels for the other variables
        # 
        if (!info.list) {
                xg <- matrix(NA, ncol = nl, nrow = nr)
                #
                dimnames(xg) <- list(NULL, names(grid.list))
                # attributes contain enough information to reformat the grid for
                # surface plotting or anything else you might think of
                #
                attr(xg, "format") <- cbind(what, ind[what])
                attr(xg, "surface.info") <- list(x = x1, y = x2, 
                        nx = nx, ny = ny, xlab = names(grid.list)[what[1]], 
                        ylab = names(grid.list)[what[2]], fixed.variables = grid.list[-what], 
                        nvar = nl)
                #
                attr(xg, "grid.list") <- grid.list
                # stuff the set of grid points into the right columns of the matrix 
                # (x1 varies fastest, x2 in blocks -- the usual grid order)
                #
                xg[, what] <- cbind(rep(x1, ny), rep(x2, rep(nx, 
                        ny)))
                for (k in 1:nl) {
                        if (ind[k] == 1) {
                                xg[, k] <- rep(grid.list[[k]], 
                                 nr)
                        }
                }
                class(xg) <- "surface.grid"
                return(xg)
        }
        else {
                #
                # return surface.info component of attributes
                #
                return(list(x = x1, y = x2, nx = nx, ny = ny, 
                        xlab = names(grid.list)[what[1]], ylab = names(grid.list)[what[2]], 
                        fixed.variables = grid.list[-what], grid.list = grid.list, 
                        nvar = nl))
        }
}
"make.tmatrix"<-
function(x, m = 2)
{
	# Matrix of polynomial basis functions spanning the null space of the
	# order-m penalty, evaluated at the rows of x.  Uses the FORTRAN
	# routines "mkpoly" (number of terms) and "dmaket" (evaluation); the
	# "ptab" attribute carries the table of powers for each term.
	if(!is.loaded(symbol.For("radbas"))) {
	#	temp <- dyn.load(paste(FUNFITS.BIN, "TPS.o", sep = "/"), 2)
          stop("dynamic code not loaded!")
	}
	d <- ncol(x)	#
	n <- nrow(x)	#cat(d, n, fill = T)	
#      subroutine dmaket(m,n,dim,des,lddes,npoly,t,ldt, wptr,info)
#      integer m,n,dim,lddes,npoly,ldt,wptr(dim),info
#      double precision des(lddes,dim),t(ldt,*)
#   m			order of the derivatives in the penalty
#   n			number of rows in des
	# how many polynomial terms for order m in dimension d
	nterms <- .Fortran("mkpoly",
		as.integer(m),
		as.integer(d),
		nterms = as.integer(0))$nterms
	temp <- .Fortran("dmaket",
		m = as.integer(m),
		n = as.integer(n),
		dim = as.integer(d),
		des = as.double(x),
		lddes = as.integer(n),
		npoly = as.integer(nterms),
		tmatrix = as.double(rep(0, n * (nterms))),
		ldt = as.integer(n),
		wptr = as.integer(rep(0, d * m)),
		info = as.integer(0),
		ptab = as.integer(rep(0, nterms * d)),
		ldptab = as.integer(nterms))
	# reshape the flat result into the n x nterms basis matrix
	temp2 <- matrix(temp$tmatrix, nrow = n)
	attr(temp2, "ptab") <- matrix(temp$ptab, nrow = nterms, ncol = d)
	temp2
}
"netfit.reformat.greedy"<-
function(model)
{
	# Convert a list of greedily-fit netfit models into cumulative form:
	# element j is element j-1 combined with model j via
	# add.netfit.model; element 1 is the first model unchanged.
	n.models <- length(model)
	out <- vector("list", n.models)
	out[[1]] <- model[[1]]
	if(n.models > 1) {
		for(j in 2:n.models) {
			out[[j]] <- add.netfit.model(out[[j - 1]], model[[j]])
		}
	}
	out
}
"nkden"<-
function(data, bandwidth, n.points, grid)
{
	# Kernel density estimate via the FORTRAN routine "nkden", evaluated
	# at `grid` (default: a grid over the data in 1-d, the data points
	# themselves in higher dimensions) for each bandwidth in `bandwidth`.
	# Returns list(x = grid, y = density values, h = bandwidths); y is a
	# matrix with one column per bandwidth (a vector if only one).
	if(!is.loaded(symbol.For("nkden"))) {
          stop("Compiled code has not been dynamically loaded")
	#	temp <- dyn.load(paste(FUNFITS.BIN, "nkden.o", sep = "/"), 2)	
	#		cat("nkden.o  FORTRAN subroutines dynamically loaded:", temp, 
#		fill = T)
	}
# who wants to keep typing bandwidth!]
	if(data.class(data) == "data.frame")
		data <- as.matrix(data)
	h <- bandwidth	# reformat 1-d vector as column matrix
	if(!is.matrix(data))
		m <- 1
	else m <- ncol(data)
	# default evaluation points when no grid is supplied
	if(missing(grid)) {
		if((m == 1))
			if(!missing(n.points))
				grid <- seq(min(data), max(data),  , n.points)
			else grid <- sort(data)
		else {
			grid <- data
		}
	}
	if(data.class(grid) == "data.frame")
		grid <- as.matrix(grid)
	if(m > 1) {
		if(m != ncol(grid))
			stop("dimension of data and grid do not agree")
		p <- nrow(grid)
		n <- nrow(data)
	}
	else {
		p <- length(grid)
		n <- length(data)
	}
	nh <- length(h)
	# one column of density values per bandwidth (-99 = fill value
	# overwritten by the FORTRAN call)
	f <- matrix(rep(-99, p * nh), ncol = nh)
	out <- list()
	for(k in 1:nh) {
		out <- .Fortran("nkden",
			as.double(h[k]),
			as.integer(n),
			as.integer(m),
			as.double(data),
			as.integer(p),
			as.double(grid),
			as.double(rep(-99, p)))
		f[, k] <- out[[7]]
	}
	if(nh == 1)
		f <- c(f)
	list(x = grid, y = f, h = h)
}
"nnreg"<-
function(x, y, k1, k2, start, ngrind = 250, ntries = 100, npol = 20, tol1 = 
	9.9999999999999995e-07, tol2 = 1.0000000000000001e-09, itmax1 = 250, 
	itmax2 = 10000, derivative = F, fout = "nnreg.out", run = T, just.setup
	 = F, just.read = F, fitted.values = F, all.fits = F, greedy = F, seed, 
	clean = T)
{
	# Fit single-hidden-layer neural network regressions (k1 to k2 hidden
	# units) by writing data and parameter files, running the external
	# nnreg.x program in the shell, and reading its output back in.
	# just.setup writes the files and stops; just.read skips the run and
	# only parses an existing output file `fout`; clean removes the
	# working files afterwards.
	lags <- NA
	call <- match.call()
	y <- c(y)	# make sure y is just a vector!
	if(missing(seed))
		seed <- as.integer(runif(1) * 125000)
	# a make.lags-style list in x carries y, the design and the lags;
	# the k1/k2 arguments then shift one position left
	if(is.list(x)) {
		k1 <- y
		k2 <- k1
		lags <- x$lags
		y <- x$y
		x <- x$x
	}
	x <- as.matrix(x)
	d <- ncol(x)
	jac.list <- NA
	if(!just.read) {
		# response in column 1, predictors after -- one value per line
		write(t(cbind(y, x)), "nnreg.dat", ncol = 1)	#
# A negative grid number is the switch to indicate a single fit based
# on the start values. In this case only one specification of the
#hidden units makes sense.
#
		if(!missing(start)) {
			ngrind <- -1
		}
		if(all.fits)
			iprint <- 1
		else iprint <- 0
		if(greedy)
			igreed <- 1
		else igreed <- 0
		write(c("nnreg.dat", "nnreg.sum"), "nnreg.par", ncol = 1)
		temp <- c(length(y), ncol(x), ngrind, ntries, npol, iprint, 
			igreed, seed, tol1, tol2, itmax1, itmax2, k1, k2)
		write(temp, "nnreg.par", ncol = 1, append = T)
		if(ngrind < 0) {
#
# extract the parameters from the object passed as start. 
#
			if(class(start)[1] == "nnreg") start <- start$model[[
				  start$best.model]]$theta
			if(class(start)[1] == "netfit")
				start <- start$theta
			write(unlist(start), "nnreg.str", ncol = 1)
		}
		if(just.setup) {
			cat("Input file and data files have been constructed for nnreg",
				fill = T)
			cat("Run nnreg in the UNIX shell or in DOS by:     nnreg.x > outfile",
				fill = T)
			cat("the S data set FUNFITS has the path to nnreg.x", 
				fill = T)
			return()
		}
		if(run == T) {
			cat("Running nnreg in the shell", fill = T)
			remove.file(fout)
			#unix(paste(.Library,"/funfits/exec", "/nnreg.x  > ", fout, sep = "")
			unix(paste(system.file("exec/nnreg.x")," > ", fout, sep = "")
				)
		}
	}
# end of run and set block
	if(!just.setup) {
		# parse the program output into a list of fitted models and
		# assemble the nnreg object
		temp <- list()
		cat("Reading in results from output file", fill = T)
		temp$model <- read.nnreg(fout)	# read in summary from LENNS
		class(temp) <- c("nnreg", "funfits")
		if(greedy)
			temp$model <- netfit.reformat.greedy(temp$model)
		temp$summary <- scan("nnreg.sum", what = "a", sep = "\n")
		class(temp$summary) <- "text"	#end
		nfits <- length(temp$model)
		cat(nfits, " models read in from ", fout, fill = T)
		temp$fitted.values <- matrix(NA, ncol = nfits, nrow = length(y)
			)
#                print(temp)
		if(clean) {
			remove.file("nnreg.dat")
			remove.file("nnreg.par")
			remove.file("nnreg.sum")
			remove.file("nnreg.str")
			remove.file(fout)
		}
		if(!all.fits | fitted.values) {
			for(k in 1:nfits) {
				temp$fitted.values[, k] <- predict(temp$model[[k
				  ]], x)
			}
			temp$residuals <- y - temp$fitted.values
		}
		temp$call <- call
		temp$x <- x
		temp$y <- y
		temp$n <- length(y)
		temp$nfits <- nfits
		temp$lags <- lags
		temp$seed <- seed
		# best model = smallest value in column 6 of the summary table
		hold <- summary(temp, noprint = T)
		temp$best.model <- order(hold[, 6])[1]
		return(temp)
	}
	else {
		invisible()
	}
}
"nnregCI"<-
function(fit, model = fit$best.model, ngrind = 250, ntries = 100, npol = 20, 
	clevel = 0.94999999999999996, cut1 = NA, cut2 = NA, nfits = 500, tol1
	 = 9.9999999999999995e-07, tol2 = 1.0000000000000001e-09, itmax1 = 250, 
	itmax2 = 10000, fdata, fout = "nnci.out", seed)
{
	# Confidence-set refits for an nnreg model: writes parameter/data
	# files and runs the external nnregci.x program, which collects up to
	# `nfits` refits whose RMS falls under the cutoffs, then reads the
	# resulting models back into an nnreg-classed object.
	call <- match.call()
	if(missing(seed))
		seed <- as.integer(runif(1) * 125000)
	y <- fit$y
	x <- as.matrix(fit$x)
	k <- fit$model[[model]]$k
	d <- ncol(x)
	rms <- fit$model[[model]]$rms
	np <- fit$model[[model]]$np
	if(missing(fdata)) {
		fdata <- "nnci.dat"
		write(t(cbind(y, x)), "nnci.dat", ncol = 1)
	}
	# default RMS cutoff from an F-based confidence level at clevel,
	# with cut2 pulled 20% back toward the fitted rms
	if(is.na(cut1))
		cut1 <- rms * sqrt((1 + qf(clevel, np, length(y) - np) * (np/(
			length(y) - np))))
	if(is.na(cut2))
		cut2 <- cut1 - 0.20000000000000001 * (cut1 - rms)
	remove.file("nnci.sum")
	write(c(fdata, "nnci.sum"), "nnci.par", ncol = 1)
	temp <- c(length(y), ncol(x), ngrind, ntries, npol, rms, cut1, cut2, 
		nfits, seed, tol1, tol2, itmax1, itmax2, k)
	write(temp, "nnci.par", ncol = 1, append = T)
	cat("Running nnregci in the shell", fill = T)
	remove.file(fout)
	#unix(paste(FUNFITS.BIN, "/nnregci.x  > ", fout, sep = ""))	
        unix(paste(system.file("exec/nnregci.x")," > ", fout, sep = ""))
	# end of run and set block
	temp <- list(model=NULL)
	class(temp) <- c("nnreg", "funfits")
	cat("Reading in results from output file", fill = T)
	temp$model <- read.nnreg(fout)	# read in summary from LENNS
	temp$summary <- scan("nnci.sum", what = "a", sep = "\n")
	class(temp$summary) <- "text"
	nfits <- length(temp$model)
	cat(nfits, " models read in from ", fout, fill = T)
	temp$call <- call
	temp$x <- x
	temp$y <- y
	temp$n <- length(y)
	temp$nfits <- nfits
	temp$seed <- seed
	return(temp)
}
"pair.na"<-
function(temp, b)
{
	# Pairwise-complete rows: drop every row of a two-column matrix in
	# which either entry is NA.  The pair may also be given as two
	# separate vectors (temp, b), which are bound into columns first;
	# cbind keeps the argument names as column labels.
	if(!missing(b))
		temp <- cbind(temp, b)
	keep <- !(is.na(temp[, 1]) | is.na(temp[, 2]))
	temp[keep,  ]
}
"plot.addreg" <-
function (out, ...) 
{
        # Diagnostic plots for an additive-regression fit: residuals vs.
        # fitted values, a residual histogram, then one partial-residual
        # plot per component with the estimated ridge function overlaid.
        old.par <- par("mfrow", "oma")
        on.exit(par(old.par))
        m <- ncol(out$predicted.comp)
        set.panel(min(m/2 + 1.5, 3), 2)
        plot(out$fitted.values, out$residuals, ...)
        hist(out$residuals)
        for (k in (1:m)) {
                # NOTE(review): this if() has no braces, so it guards only
                # the order() call; when out$lam[k] == 0 the plot below
                # reuses a stale (or missing) `ix` from an earlier
                # iteration -- confirm whether the whole iteration was
                # meant to be skipped for lam == 0 components.
                if (out$lam[k] != 0) 
                        ix <- order(out$x[, k])
                # partial residuals: y minus all other fitted components
                ytemp <- out$y - out$fitted.values + out$predicted.comp[, 
                        k]
                plot(out$x[ix, k], ytemp[ix], xlab = paste("variable", 
                        k), ylab = "estimated ridge function", 
                        pch = ".", ...)
                lines(out$x[ix, k], out$predicted.comp[ix, k])
        }
}
"plot.krig"<-
function(out, main = NA, digits = 4, graphics.reset = T, ...)
{
	# Four-panel diagnostic display for a krig fit: observed vs. fitted
	# (with R^2), residuals vs. fitted (with RMSE), and -- when a GCV
	# grid search was done -- the GCV curve against effective parameters
	# and against lambda.
	old.par <- par("mfrow", "oma")
	if(graphics.reset) {
		on.exit(par(old.par))
		par(xpd = T)
	}
	set.panel(2, 2, T)
	temp <- summary(out)
	plot(out$fitted.values, out$y, ylab = "Y", xlab = " predicted values", 
		bty = "n", ...)
	abline(0, 1)
	hold <- par("usr")
	text(hold[1], hold[4], paste(" R**2 = ", format(round(100 * temp$
		covariance, 2)), "%", sep = ""), cex = 0.80000000000000004, adj
		 = 0)
	plot(out$fitted.values, out$residuals, ylab = "residuals", xlab = 
		" predicted values", bty = "n", ...)
	yline(0)
	hold <- par("usr")
	text(hold[1], hold[4], paste(" RMSE =", format(signif(sqrt(sum(out$
		residuals^2)/(temp$num.observation - temp$enp)), digits))), cex
		 = 0.80000000000000004, adj = 0)
	if(nrow(out$gcv.grid) > 1) {
# trim off + infinity due to pole in the denominator of GCV function
#with cost
		ind <- out$gcv.grid[, 3] < 1e+19
		out$gcv.grid <- out$gcv.grid[ind,  ]
		plot(out$gcv.grid[, 2], (out$gcv.grid[, 3]), xlab = 
			"Effective number of parameters", ylab = 
			" estimated (EASE) + sigma**2 ", bty = "n")
		xline(out$eff.df)
		hold <- par("usr")
		text(out$eff.df, hold[4], paste(" Eff. df. =", format(round(out$
			eff.df, 1)), "\n Res. df. =", format(round(temp$
			num.observation - temp$enp, 1))), cex = 
			0.80000000000000004, adj = 0)
		title("GCV", cex = 0.59999999999999998)
		plot(out$gcv.grid[, 1], out$gcv.grid[, 3], xlab = "Lambda", 
			ylab = "GCV", log = "x", bty = "n")
		xline(out$lambda)
		hold <- par("usr")
		text(out$lambda, hold[4], paste(" Lambda =", format(round(out$
			lambda, 2))), cex = 0.80000000000000004, adj = 0)
		title("GCV", cex = 0.59999999999999998)
	}
	# overall title: the fitting call, unless a main string was given
	if(is.na(main))
		mtext(deparse(out$call), cex = 1.3, outer = T, line = -2)
	else mtext(main, cex = 1.3, outer = T, line = -2)
}
"plot.lle"<-
function(out)
{
	# Plot the local QR-based Lyapunov exponent estimates grouped by
	# number of steps (via bplot, positioned on a log10 scale), with the
	# global exponent marked by a horizontal line.
	bplot(out$local.qr, xpos = log10(out$nprod), label = format(out$nprod), 
		xlab = "Number of Steps")
	yline(out$glb)
}
"plot.nnreg"<-
function(out, model = out$best.model, main = NA, digits = 4, graphics.reset = T,
	...)
{
	# Four-panel diagnostic display for an nnreg fit: observed vs.
	# fitted and residuals vs. fitted for the selected model, GCV/GCV2
	# against the number of parameters across all fitted models, and
	# RMSE against the number of parameters.
	old.par <- par("mfrow", "oma")
	if(graphics.reset) {
		on.exit(par(old.par))
		par(xpd = T)
	}
	if(model == out$best.model & (ncol(out$residuals) > 1)) {
		cat("Note: there is more than one model in the nnreg output object",
			fill = T)
	}
	set.panel(2, 2, T)
	temp <- summary(out, noprint = T)
	plot(out$fitted.values[, model], out$y, ylab = "Y", xlab = 
		"predicted values", bty = "n")
	abline(0, 1)
	hold <- par("usr")
	text(hold[1], hold[4], paste(" R**2 =", format(round(100 * cor(out$
		fitted.values[, model], out$y)^2, 2)), "%", sep = ""), cex = 
		0.80000000000000004, adj = 0)
	plot(out$fitted.values[, model], out$residuals[, model], ylab = 
		"residuals", xlab = "predicted values", bty = "n")
	yline(0)
	hold <- par("usr")
	text(hold[1], hold[4], paste(" RMSE =", format(signif(temp[model, 4], 
		digits))), cex = 0.80000000000000004, adj = 0)
	# GCV panel: scatter when several models were fit, labelled points
	# for a single model
	if(ncol(out$residuals) > 1) {
		matplot(temp[, 2], temp[, 5:6], ylab = "GCV (1) and GCV2 (2)", 
			xlab = "Number of Parameters", bty = "n", col = 1)
		xline(temp[model, 2])
		hold <- par("usr")
		text(temp[model, 2], hold[4], paste(" # par =", format(temp[
			model, 2]), "\n # units =", format(temp[model, 1])), 
			cex = 0.80000000000000004, adj = 0)
		title("GCV and GCV2", cex = 0.59999999999999998)
	}
	else {
		matplot(rep(temp[, 2], 2), temp[, 5:6], ylab = 
			"GCV (1) and GCV2 (2)", xlab = "Number of Parameters", 
			pty = 7, bty = "n", col = 1, type = "n")
		text(rep(temp[, 2], 2), temp[, 5:6], labels = c("1", "2"))
		xline(temp[model, 2])
		hold <- par("usr")
		text(temp[model, 2], hold[4], paste(" # par =", format(temp[
			model, 2]), "\n # units =", format(temp[model, 1])), 
			cex = 0.80000000000000004, adj = 0)
		title("GCV and GCV2", cex = 0.59999999999999998)
	}
	matplot(temp[, 2], temp[, 4], ylab = "RMSE", xlab = 
		"Number of Parameters", pch = "*", bty = "n")
	title("Root Mean Squared Error", cex = 0.59999999999999998)
	if(is.na(main))
		mtext(deparse(out$call), cex = 1.3, outer = T, line = -2)
	else mtext(main, cex = 1.3, outer = T, line = -2)
	invisible()
}
"plot.predict.tpsreg"<-
function(x, model = 1, digits = 2)
{
	# Observed vs. predicted plot for a predict.tpsreg result, with a
	# 45-degree reference line and an R^2 / df / RMSE summary title.
	lims <- range(c(x$y, x$fit))
	n <- length(x$y)
	plot(x$y, x$fit, ylab = "Predicted", xlab = "Observed", xlim = lims, 
		ylim = lims, type = "n")
	abline(0, 1)
	# Bug fix for R: points() takes pch=, not the S-Plus argument mark=
	# (mark is silently absorbed by ... and the symbol was wrong).
	points(x$y, x$fit, pch = 4)
	rmse <- sqrt(sum(x$res^2)/(n - x$eff.df))
	title(paste("TPSREG Results", "; Obs. =", length(x$y), "; R.sq =", 
		round(100 * cor(x$y, x$fit)^2, 1), "%", "\n", "DFr =", round(x$
		eff.df, 1), "; DFe =", round(length(x$y) - x$eff.df, 1), 
		"; RMSE =", signif(rmse, digits)))
}
"plot.surface"<-
function(obj, main = NULL, type = "b", zlab = NULL, xlab = NULL, ylab = NULL, 
	levels = NULL, zlim = NULL, graphics.reset = T, ...)
{
	# Plot a surface object (list with x, y, z and optional labels).
	# type = "p" gives an image plot (persp is not used in this R port),
	# "c" a contour plot, "b" both (contours overlaid on the image).
	old.par <- par()
        # error in R 0.61.3
        old.par$fin <- NULL
	if(graphics.reset)
		on.exit(par(old.par))
	# axis labels default to those stored on the object, then to X/Y/Z
	if(is.null(xlab)) {
		if(is.null(obj$xlab))
			xlab <- "X"
		else xlab <- obj$xlab
	}
	if(is.null(ylab)) {
		if(is.null(obj$ylab))
			ylab <- "Y"
		else ylab <- obj$ylab
	}
	if(is.null(zlab)) {
		if(is.null(obj$zlab))
			zlab <- "Z"
		else zlab <- obj$zlab
	}
	if(is.null(main))
		if(!is.null(obj$main))
			main <- obj$main
#	if(type == "b") # makes no sense without persp
#		set.panel(2, 1, T)
	if(type == "p" | type == "b") {
##	par(mar = c(3, 0, 0, 0))
#		if(is.null(zlim)) persp(obj, xlab = xlab, ylab = ylab, zlab = 
#				zlab, ...)
#                else persp(obj, xlab = xlab, ylab = 
#				ylab, zlab = zlab, zlim = zlim, ...)
          cat("Warning: persp not supported in R, using image instead.\n")
		# NOTE(review): image() has no zlab argument -- it is passed
		# through ... and R warns that it is not a graphical
		# parameter; confirm whether it should simply be dropped here.
		if(is.null(zlim))
                  image(obj$x,obj$y,obj$z, xlab = xlab, ylab = ylab,
                        zlab = zlab, ...)
                else
                  image(obj$x, obj$y, obj$z, xlab = xlab, ylab = ylab,
                        zlab = zlab, zlim = zlim, ...)
		if(!is.null(main))
                  title(main)
	}
	if(type == "c" | type == "b") {
##par(mar = c(3, 0, 0, 0))
		# default contour levels from the range of z (NAs ignored)
		if(is.null(levels)) levels <- pretty(range(obj$z,na.rm=T), 5)
                if(type=="b")
#                  contour(obj$x,obj$y,obj$z, xlab = xlab, ylab = ylab, levels = levels,add=T, ...)
                  contour(obj$x,obj$y,obj$z, levels = levels,add=T, ...)
                else
                  contour(obj$x,obj$y,obj$z, xlab = xlab, ylab = ylab, levels = levels, ...)
		if((!is.null(main)) & type != "b")
			title(main)
	}
	invisible()
}
"plot.tps"<-
function(out, main = NA, digits = 4, graphics.reset = T, ...)
{
	# Four-panel diagnostic display for a tps fit: observed vs. fitted
	# (R^2 and Q^2), residuals vs. fitted (RMSE and pure error when
	# available), and -- when a GCV grid search was done -- the GCV
	# curve against effective parameters and against lambda, with the
	# candidate lambda estimates marked.
## pdh 8/16/96 - added Q^2, pure error, changed some labels and
##   locations of text, add a gcvmin line if fit.pure.error=T
## DWN 9/26/96 changed arguments to fit in with new tps object
## pdh 10/29/96 - added drop=F to GCV matrix
## pdh 11/14/96 - x and y axes turned around on fit vs obs plot
## pdh 11/25/96 - uses r.squared instead of covariance if available
	old.par <- par("mfrow", "oma")
	if(graphics.reset) {
		on.exit(par(old.par))
		par(xpd = T)
	}
	set.panel(2, 2, T)
	temp <- summary(out)
	par1 <- par(pty = "s")
	lims <- range(out$fitted.values, out$y)
	plot(out$fitted.values, out$y, xlim = lims, ylim = lims, ylab = 
		"Observed Values", xlab = "Predicted Values", bty = "n", ...)
	abline(0, 1)
	hold <- par("usr")
	if(!is.null(temp$r.square))
		r.square <- temp$r.square
	else r.square <- temp$covariance
	text(hold[1], hold[4], paste(" R^2 = ", format(round(100 * r.square, 2)
		), "%", "\n", " Q^2 = ", format(round(100 * temp$q2, 2)), "%", 
		sep = ""), cex = 0.80000000000000004, adj = 0)
	par(par1)
	maxres <- max(abs(out$residuals))
	plot(out$fitted.values, out$residuals, ylim = c( - maxres, maxres), 
		ylab = "Residuals", xlab = "Predicted values", bty = "n", ...)
	yline(0)
	hold <- par("usr")
	if(!is.na(out$shat.pure.error))
		text(hold[1], hold[4], paste(" RMSE =", format(signif(out$shat, 
			digits)), "\n", "Pure Error =", format(signif(out$
			shat.pure.error, digits))), cex = 0.80000000000000004, 
			adj = 0)
	else text(hold[1], hold[4], paste(" RMSE =", format(signif(out$shat, 
			digits))), cex = 0.80000000000000004, adj = 0)
	if(nrow(out$gcv.grid) > 1) {
## trim off + infinity due to pole in the denominator of GCV function
##with cost
		ind <- out$gcv.grid[, 3] < 1e+19
		out$gcv.grid <- out$gcv.grid[ind,  ]
		plot(out$gcv.grid[, 2], (out$gcv.grid[, 3]), xlab = 
			"Effective number of parameters", ylab = 
			"Estimated (EASE) + sigma**2 ", bty = "n")
		xline(out$eff.df)
		hold <- par("usr")	##    text(out$eff.df, hold[4], 
		text(hold[1], hold[4], paste(" Eff. df. =", format(round(out$
			eff.df, 1)), "\n Res. df. =", format(round(temp$
			num.observation - temp$enp, 1))), cex = 
			0.80000000000000004, adj = 0)
		title("GCV", cex = 0.59999999999999998)
		plot(out$gcv.grid[, 1], out$gcv.grid[, 3], xlab = "Lambda", 
			ylab = "GCV", log = "x", bty = "n")
		# keep only the lambda estimates that were actually found
		temp <- out$lambda.est[!is.na(out$lambda.est[, "lambda"]),  , 
			drop = F]	## temp <- out$lambda.est
		hold <- par("usr")
		lam <- temp[, 1]
		names(lam) <- row.names(temp)	##		print(lam)
		xline(lam)
		# NOTE(review): `mark =` is the S-Plus argument; R's points()
		# uses pch= -- confirm and fix as in plot.predict.tpsreg.
		points(lam, temp[, "GCV"], mark = 1, cex = 1.1000000000000001)
		title(paste("GCV", "\n", " Lambda =", format(round(out$lambda, 
			5))), cex = 0.59999999999999998)
	}
	if(is.na(main))
		mtext(deparse(out$call), cex = 1.3, outer = T, line = -2)
	else mtext(main, cex = 1.3, outer = T, line = -2)
	invisible()
}
"plot.tpsreg"<-
function(out)
{
	# Diagnostic plots for a tpsreg fit: the GCV curve against effective
	# number of parameters (when a grid search was done) and residuals
	# vs. predicted values.
	old.par <- par()
        # error in R 0.61.3
        old.par$fin <- NULL
	if(length(out$gcv.grid[, 1]) > 1) {
		set.panel(2, 1)
		plot(out$gcv.grid[, 3], out$gcv.grid[, 2], type = "l", xlab = 
			"Effective number of parameters", ylab = "GCV function"
			)
		title(" Generalized Cross-validation function")
	}
	plot(out$fitted.value, out$residual, xlab = " Predicted values", ylab
		 = "Residuals")
	par(old.par)
	invisible()
}
"predict.addreg" <-
function (out, xnew = out$x, model = NA) 
{
        # Predict from an additive-regression fit: interpolate each fitted
        # component function (via splint) from the training points to the
        # new points, column by column, and sum the components.
        pred <- rep(0, nrow(xnew))
        for (j in seq_len(ncol(out$x))) {
                pred <- pred + splint(out$x[, j], out$predicted.comp[, 
                        j], xnew[, j])
        }
        pred
}
"predict.krig"<-
function(out, x, lambda = NA, model = NA)
{
	if(missing(x)) {
		x <- out$x
	}
	x <- as.matrix(x)	#
# scale the x values 
# using information from the output object
# scaling is (0,1) by default
#
	if(is.null(out$transform)) {
		xc <- rep(0, ncol(x))
		xs <- rep(1, ncol(x))
	}
	else {
		xc <- out$transform$x.center
		xs <- out$transform$x.scale
	}
	x <- scale(x, xc, xs)
	knots <- scale(out$knots, xc, xs)	#
# find out if fast multiplication routine for covarinace is available
#
# if cov.function allows an argument for C then use this typ of call 
# if the C argument is not present in teh function do it the long way 
# using matrix multiplcation explicitly
#
#
	if(!is.na(model)) {
		lambda <- model[1]
	}
	if(!is.na(lambda)) {
# use a different lambda so we need to get the new out$d and out$c
#coefficietns
		beta <- out$matrices$G %*% ((1/(1 + lambda * out$matrices$D)) * 
			out$matrices$u)
		nt <- out$nt
		np <- out$np
		out$d <- beta[1:nt]
		temp <- c(rep(0, nt), beta[(nt + 1):np])	#
#
# tranform the beta into the parameter associated with the covariance
# function  basis set into the c parameter vector.
#
		out$c <- c(qr.qy(out$matrices$qr.T, temp))
	}
##
###
# decide whether to use the fast multiplication routines for the
#covariance function
#	if(is.null(out$cov.function[["C"]])) {
	if(is.null(formals(out$cov.function)$C)) {
		c(make.tmatrix(x, out$m) %*% out$d + out$cov.function(x, out$
			knots) %*% out$c)
	}
	else {
		c(make.tmatrix(x, out$m) %*% out$d + out$cov.function(x, out$
			knots, C = out$c))
	}
}
"predict.netfit"<-
function(fit, x, derivative = 0, type = "full")
{
	nx <- nrow(x)
	d <- fit$d
	if(is.null(fit$xm)) {
		fit$xm <- rep(0, fit$d)
		fit$xsd <- 1
	}
	if(is.null(fit$ym)) {
		fit$ym <- 0
		fit$ysd <- 1
	}
	if(d != ncol(x)) stop(" columns of X not equal to d!")	
	# standardize the X's 
	u <- (x - matrix(fit$xm, ncol = d, nrow = nx, byrow = T))
	u <- u/matrix(fit$xsd, ncol = d, nrow = nx, byrow = T)
	k <- fit$k
	theta <- fit$theta
	beta <- theta[1:(k + 1)]
	mu <- theta[(1:k) + k + 1]
	gamma <- matrix(theta[(1:(d * k)) + 2 * k + 1], ncol = d, nrow = k, 
		byrow = T)
	pu <- cbind(rep(1, nx), u) %*% t(cbind(mu, gamma))
	if(derivative == 0) {
# find predicted values and transform to original scale
		if(type == "full") {
			yhat <- fit$ysd * (squasher.nnreg(pu) %*% beta[2:(k + 1
				)] + beta[1]) + fit$ym
		}
		if(type == "terms") {
			temp <- dim(pu)
			yhat <- list(u = pu, yhat = fit$ysd * (squasher.nnreg(
				pu)) * matrix(beta[2:(k + 1)], nrow = temp[1], 
				ncol = temp[2], byrow = T), constant = fit$ysd * 
				beta[1] + fit$ym)
		}
		return(yhat)
	}
	else {
		if(type == "terms")
			stop("derviative not available for individual\nhidden units"
				)
		jac <- fit$ysd * (d.squasher.nnreg(pu) %*% (gamma/matrix(fit$
			xsd, ncol = d, nrow = k, byrow = T) * matrix(beta[2:(k + 
			1)], ncol = d, nrow = k)))
		return(jac)
	}
}
"predict.nnreg"<-
function(out, x = out$x, model = NA, derivative = 0, type = "full")
{
	if(is.na(model)) {
		model <- out$best.model
	}
	if(model == out$best.model & out$nfits > 1) {
		cat("Note: the nnreg output data object fit has more than one model",
			fill = T)
		cat(" just the best one is being evaluated.", fill = T)
	}
	fit <- out$model[[model]]
	predict(fit, x, derivative, type = type)
}
"predict.se"<-
function(object, ...)
UseMethod("predict.se")
"predict.se.krig"<-
function(out, x, cov.function, rho, sigma2, stationary = T)
{
	if(missing(x))
		x <- out$x
	x <- as.matrix(x)	
	########## if covariance function and parameters are missing
########## extract them from the krig object
	if(missing(cov.function)) {
		fun <- out$cov.function
	}
	else {
		fun <- cov.function
	}
	if(missing(sigma2)) {
		sigma2 <- out$sigma2
	}
#
# OK now fix the right value for sigma
#
	if(missing(rho)) {
		rho <- out$rho
	}
#
	if(!is.null(fun$marginal)) stationary <- F	#
	lambda <- sigma2/rho
	if(out$lambda != lambda) {
		warning("lambda value used is different from the one in the krig object"
			)
	}
	nx <- nrow(out$x)	
	# wght.vec are the linear combinations of the data that give the 
# correpsonding estimates of the function at the points x
	wght.vec <- t(make.Amatrix(out, x, lambda))	
	# Cy is the observed covariance matrix of the data vector
	Cy <- rho * fun(out$x, out$x) + sigma2 * diag(1/out$weights)
	temp2 <- c(t(wght.vec * (Cy %*% wght.vec)) %*% rep(1, nx))
	temp1 <- rho * c(t(wght.vec * fun(out$x, x)) %*% rep(1, nx))	#
#
#
	if(stationary) {
		x0 <- matrix(0, ncol = ncol(x), nrow = 1)
		return(sqrt(rho * fun(x0, x0) - 2 * temp1 + temp2))
	}
	else {
		if(is.null(fun$marginal)) {
#
#
# if covariance is not stationary then loop through each point to get
# the variance of field at that point. 
#
#
			temp <- rep(0, nrow(x))
			for(k in 1:nrow(x)) {
				x0 <- matrix(x[k,  ], nrow = 1)
				temp[k] <- rho * fun(x0, x0) - 2 * temp1[k] + 
				  temp2[k]
			}
		}
		else {
#
# marginal variances available by single call
#
			temp <- rho * fun(x, marginal = T) - 2 * temp1 + temp2
		}
		return(sqrt(temp))
	}
}
"predict.surface" <-
function (out, grid.list = NA, extrap = F, chull.mask, model = NA, 
        nx=30,ny=30) 
{
        if ((length(grid.list) == 1) | (is.na(grid.list)[1])) {
                grid.list <- as.list(rep("c", ncol(out$x)))
                grid.list[[1]] <- "x"
                grid.list[[2]] <- "y"
                ##print(grid.list)
                temp <- dimnames(out$x)[[2]]
                if (!(is.null(temp))) {
                        if (!(temp[1] == "")) 
                                names(grid.list) <- temp
                }
        }
        if (is.null(out$x)) 
                xg <- make.surface.grid(grid.list, nx=nx, ny=ny)
        else xg <- make.surface.grid(grid.list, X = out$x, nx=nx,ny=ny)
        out2 <- as.surface(xg, predict(out, xg, model = model))
        if (!extrap) {
                if (missing(chull.mask)) {
                        ind <- c(attr(xg, "format")[, 1])
                        chull.mask <- out$x[, ind]
                }
                chull.mask <- unique.matrix(chull.mask)
                mask.temp <- interp(chull.mask[, 1], chull.mask[, 
                        2], rep(1, length(chull.mask[, 1])), 
                        xo = out2$x, yo = out2$y)$z
                out2$z <- ifelse(mask.temp == 1, out2$z, NA)
                out2$zlab <- out$yname
        }
        out2
}
"predict.tps"<-
function(out, x, y, lambda, df, omega, derivative = 0, model = NA)
{
# model is a generic argument that may be used to pass a different lambda
	if(!is.na(model)) lambda <- model
	if(out$tag != 1)
		stop("This is an old tps object please rerun\n\ntps to get right coefficients"
			)
	if(missing(x))
		x <- out$x
	x <- as.matrix(x)
	xc <- out$transform$x.center
	xs <- out$transform$x.scale
	n <- nrow(x)
	p <- ncol(x)
	x <- scale(x, xc, xs)
	knots <- scale(out$knots, xc, xs)
	nt <- out$nt
	np <- out$np	#
# these are the estimated coefficients to use from the tps object
#
	dtemp <- out$d
	ctemp <- out$c	#
# recompute the omega vector if somehting is different
#
	if(!missing(lambda) | !missing(df) | !missing(y)) {
		if(missing(lambda))
			lambda <- out$lambda
		if(!missing(df))
			lambda <- tps.df.to.lambda(df, out$matrices$D)
		if(!missing(y)) {
			u <- t(out$matrices$X %*% out$matrices$G) %*% (y * out$
				weights)
		}
		else {
			u <- out$matrices$u
		}
		omega <- out$matrices$G %*% ((1/(1 + lambda * out$matrices$D)) * 
			u)
		dtemp <- omega[1:nt]
		temp <- c(rep(0, nt), omega[(nt + 1):np])
		ctemp <- c(qr.qy(out$matrices$qr.T, temp))
	}
	if(!missing(omega)) {
		dtemp <- omega[1:nt]
		temp <- c(rep(0, nt), omega[(nt + 1):np])
		ctemp <- c(qr.qy(out$matrices$qr.T, temp))
	}
#
# at this point dtemp and ctemp are the right coefficients for the splines
#
	if(derivative == 0) {
		return(make.tmatrix(x, out$m) %*% dtemp + make.Kc(x, knots, 
			ctemp, p = out$power, with.constant = out$with.constant
			))
	}
	if(derivative == 1) {
		temp <- matrix(1/xs, ncol = p, nrow = n, byrow = T)
		return((make.DTd(x, dtemp, m = out$m) + make.DKc(x, knots, 
			ctemp, p = out$power, with.constant = out$with.constant
			)) * temp)
	}
}
"predict.tpsreg"<-
function(out, grid, clean = T)
{
	if(missing(grid)) grid <- out$x	
	#	cat("Writing grid out to an input file for ev.x", fill = T)
	write(t(grid), "tpsgrid.in", ncol = 1)
	write(out$coefficients, "tps.ev", ncol = 1)	
	#	cat("Running ev.x to evalute spline on  grid", fill = T)
	#unix(paste(FUNFITS.BIN, "/tpsregev.x ", " < tpsgrid.in > tpsev.out", 
	unix(paste(system.file("exec/tpsregev.x"), " < tpsgrid.in > tpsev.out", 
		sep = ""))	# read output of ev.x back into S
	fit <- scan("tpsev.out")
	if(clean) {
		remove.file("tpsgrid.in")
		remove.file("tpsev.out")
		remove.file("tps.ev")
	}
	fit
}
"print.krig"<-
function(x, ...)
{
	digits <- 4
	c1 <- "Number of Observations:"
	c2 <- length(x$residuals)
	c1 <- c(c1, "Degree of polynomial null space ( base model):")
	c2 <- c(c2, x$m - 1)
	c1 <- c(c1, "Number of parameters in the null space")
	c2 <- c(c2, x$nt)
	c1 <- c(c1, "Model degrees of freedom:")
	c2 <- c(c2, format(round(x$eff.df, 1)))
	c1 <- c(c1, "Residual degrees of freedom:")
	c2 <- c(c2, format(round(length(x$residuals) - x$eff.df, 1)))
	c1 <- c(c1, "GCV estimate for sigma:")
	c2 <- c(c2, format(signif(x$shat.GCV, digits)))
	c1 <- c(c1, "MLE for sigma:")
	c2 <- c(c2, format(signif(x$shat.MLE, digits)))
	c1 <- c(c1, "lambda")
	c2 <- c(c2, format(signif(x$lambda, 2)))
	c1 <- c(c1, "rho")
	c2 <- c(c2, format(signif(x$rho, digits)))
	c1 <- c(c1, "sigma^2")
	c2 <- c(c2, format(signif(x$sigma2, digits)))
	sum <- cbind(c1, c2)
	dimnames(sum) <- list(rep("", dim(sum)[1]), rep("", dim(sum)[2]))
	cat("Call:\n")
	dput(x$call)
	print(sum, quote = F)
	invisible(x)
}
"print.nnreg"<-
function(fit)
{
#	cat(" Call: ", fit$call,sep="\n", fill = T)	
	cat(" Call: ")
        print(fit$call)
	#	cat("Dimension of surface:", fit$d, fill = T)
#out <- cbind(fit$k, fit$np)
#	dimnames(out) <- list(rep(" ", length(fit$k)), c("# units", 
#		"#  parameters   "))
#print(out)
	cat("Detailed output from FORTRAN program", " \n \n", fill = T)
	print.text(fit$summary)
	invisible()
}
"print.text"<-
function(txt)
{
	tf <- tempfile()
	write(txt, file = tf, ncol = 1)
	unix(paste("cat ", tf), output = F)
	invisible()
}
"print.tps"<-
function(x, ...)
{
	digits <- 4
	c1 <- "Number of Observations:"
	c2 <- length(x$residuals)
	c1 <- c(c1, "Degree of polynomial null space ( base model):")
	c2 <- c(c2, x$m - 1)
	c1 <- c(c1, "Number of parameters in the null space")
	c2 <- c(c2, x$nt)
	c1 <- c(c1, "Effective degrees of freedom:")
	c2 <- c(c2, format(round(x$eff.df, 1)))
	c1 <- c(c1, "Residual degrees of freedom:")
	c2 <- c(c2, format(round(length(x$residuals) - x$eff.df, 1)))
	c1 <- c(c1, "Root Mean Square Error:")
	c2 <- c(c2, format(round(x$shat, digits)))
	c1 <- c(c1, "Log10(lambda)")
	c2 <- c(c2, format(round(log10(x$lambda), 2)))
	sum <- cbind(c1, c2)
	dimnames(sum) <- list(rep("", dim(sum)[1]), rep("", dim(sum)[2]))
	cat("Call:\n")
	dput(x$call)
	print(sum, quote = F)
	invisible(x)
}
"print.tpsreg"<-
function(fit)
{
	cat(" Call: ", "\n", as.character(fit$call), fill = T)
	cat("Dimension of surface:", fit$parameter[2], fill = T)
	print.text(fit$summary)
	invisible()
}
"qr.q2ty"<-
function(qr, y)
{
	qrqr <- qr$qr
	if(is.null(qrqr))
		stop("First argument should be a qr object")
	qra <- qr$qraux
	rank <- qr$rank
	dq <- dim(qrqr)
	if(is.matrix(y)) {
		y <- as.matrix(y)
		dy <- dim(y)
	}
	else {
		dy <- c(length(y), 1)
		y <- as.matrix(y)
	}
	if(dy[1] != dq[1])
		stop("y and qr$qr should have same number of rows")
	if(!(cmplx <- mode(qra) == "complex"))
		storage.mode(y) <- "double"
	else mode(y) <- "complex"
        # orig:
	#.Fortran(if(!cmplx) "dqrsl1" else "zqrsl1",
	#	qrqr,
	#	as.integer(dq),
	#	qra,
	#	as.integer(rank), 
	#	y,
	#	as.integer(dy[2]),
	#	qy = y,
	#	if(!cmplx) 0 else 0i,
	#	as.integer(1000),
	#	as.integer(1))$qy[(rank + 1):dy[1],  ]
        matrix(.Fortran("dqrqty",
                 as.double(qrqr),
                 as.integer(dq[1]),
                 as.integer(rank),
                 as.double(qra),
                 as.double(y),
                 as.integer(dy[2]),
                 qy = as.double(y)
                 )$qy,dy[1],dy[2])[(rank + 1):dy[1], ]
}
"qr.q2y"<-
function(qr, y)
{
	c(qr.qy(qr, c(rep(0, qr$rank), y)))
}
"qr.yq2"<-
function(qr, y)
{
	y <- t(y)
	qrqr <- qr$qr
	if(is.null(qrqr))
		stop("First argument should be a qr object")
	qra <- qr$qraux
	rank <- qr$rank
	dq <- dim(qrqr)
	if(is.matrix(y)) {
		y <- as.matrix(y)
		dy <- dim(y)
	}
	else {
		dy <- c(length(y), 1)
		y <- as.matrix(y)
	}
	if(dy[1] != dq[1])
		stop("y and qr$qr should have same number of rows")
	if(!(cmplx <- mode(qra) == "complex"))
		storage.mode(y) <- "double"
	else mode(y) <- "complex"
        # qr.yq2:
	#t(.Fortran(if(!cmplx) "dqrsl1" else "zqrsl1",
	#	qrqr,
	#	as.integer(dq),
	#	qra,
	#	as.integer(rank),
	#	y,
	#	as.integer(dy[2]),
	#	qy = y,
	#	if(!cmplx) 0 else 0i,
	#	as.integer(1000),
	#	as.integer(1))$qy)[, (rank + 1):dy[1]]
        matrix(.Fortran("dqrqty",
                   as.double(qrqr),
                   as.integer(dq[1]),
                   as.integer(rank),
                   as.double(qra),
                   as.double(y),
                   as.integer(dy[2]),
                   qy = as.double(y)
                   )$qy,dy[2],dy[1],byrow=T)[,(rank + 1):dy[1]]
}
"radbas.constant"<-
function(m, d)
{
	if(d %% 2 == 0) {
		Amd <- (((-1)^(1 + m + d/2)) * (2^(1 - 2 * m)) * (pi^( - d/2)))/
			(gamma(m) * gamma(m - d/2 + 1))
	}
	else {
		Amd <- (gamma(d/2 - m) * (2^(-2 * m)) * (pi^( - d/2)))/gamma(m)
	}
	Amd
}
"rdist"<-
function(x1, x2)
{
	if(!is.loaded(symbol.For("radbas"))) {
		temp <- dyn.load(paste(FUNFITS.BIN, "TPS.o", sep = "/"), 2)
	}
	if(!is.matrix(x1))
		x1 <- as.matrix(x1)
	if(!is.matrix(x2))
		x2 <- as.matrix(x2)
	d <- ncol(x1)
	n1 <- nrow(x1)
	n2 <- nrow(x2)
	par <- c(1/2, 0)
	matrix((.Fortran("radbas",
#
#
		nd = as.integer(d),
		x1 = as.double(x1),
		n1 = as.integer(n1),
		x2 = as.double(x2),
		n2 = as.integer(n2),
		par = as.double(par),
		k = as.double(rep(0, n1 * n2)))$k), 	#
#
	ncol = n2, nrow = n1)
}
"read.nnreg"<-
function(fname = "nnreg.out")
{
	temp <- scan(fname)
	ntemp <- length(temp)
	out <- list()	#	cmd <- paste("grep '#' ", fname, " > nnreg.temp")
#	unix(cmd)
#	out$summary <- c(scan("nnreg.temp", what = "a", sep = "\n"), 
#scan("nnreg.summary", what = "a", sep = "\n"))
	i <- 1
	loc <- 0
	out <- list()
	while(loc < ntemp) {
#print(" start model loc=")
#print(loc)
		if(loc + 3 > ntemp) break
		d <- temp[1 + loc]
		k <- temp[2 + loc]
		rms <- temp[3 + loc]
		loc <- loc + 3
		if(loc + 2 * (d + 1) > ntemp)
			break
		xm <- temp[(1:d) + loc]
		loc <- loc + d
		xsd <- temp[(1:d) + loc]
		loc <- loc + d
		ym <- temp[loc + 1]
		ysd <- temp[loc + 2]
		loc <- loc + 2
		np <- 1 + k * (d + 2)
		if(loc + np > ntemp)
			break
		theta <- temp[loc + (1:np)]
		loc <- loc + np
		out[[i]] <- list(d = d, k = k, xm = xm, ym = ym, xsd = xsd, ysd
			 = ysd, np = np, theta = theta, rms = rms)
		class(out[[i]]) <- "netfit"
		i <- i + 1	#	print(loc)
	}
	if(loc != length(temp))
		cat("incomplete information in ", fname, " output file", fill
			 = T)
	out
}
"remove.file"<-
function(file.name)
{
	touch.file(file.name)
	unix(paste("rm ", file.name))
	invisible()
}
"rtps"<-
function(..., max.iter = 20, acc = 10 * .Machine$single.eps^0.5, finish = F, 
	conv.method = "lambda")
{
## take the standard tps function but use iterative reweighted
## least squares
## first fit the regular tps to get initial weights
## then do huber until convergence
## if finish=T, then finish off with two rounds of bisquare
	fit1 <- tps(...)
	resid0 <- fit1$resid
	lambda0 <- fit1$lambda
	converged <- F
	status <- "converged"
	conv <- NULL
	if(max.iter > 0) {
		for(i in 1:max.iter) {
			if(i == 1) {
				scale <- median(abs(resid0))/
				  0.67449999999999999
				w <- w0 <- wt.huber(resid0/scale)
			}
			else {
				w0 <- w
				lambda0 <- lambda
			}
			fit1 <- tps(..., weights = w)
			resid <- fit1$resid
			scale <- median(abs(resid))/0.67449999999999999
			w <- wt.huber(resid/scale)
			lambda <- fit1$lambda
			if(conv.method == "lambda")
				convi <- abs(lambda - lambda0)
			if(conv.method == "weights")
				convi <- sum((w - w0)^2)
			conv <- c(conv, convi)
			converged <- convi <= acc
			if(converged)
				break
		}
		if(!converged)
			warning(status <- paste("failed to converge in", 
				max.iter, "steps"))
	}
	if(finish) {
## now finish off with two iterations of the bisquare
		for(i in 1:2) {
			scale <- median(abs(resid))/0.67449999999999999
			wts <- wt.bisquare(resid/scale)
			fit1 <- tps(..., weights = wts)
			resid <- fit1$resid
		}
	}
	class(fit1) <- c("rtps", "tps", "funfits")
	fit1$call <- match.call()
	invisible(fit1)
}
"std.factor.names"<-
function(n)
{
	ll <- LETTERS[ - c(6, 20)]
	if((m <- ceiling(n/length(ll))) > 1)
		ll <- c(ll, outer(ll, ll[1:(m - 1)], function(x, y)
		paste(x, y, sep = "")))
	ll[1:n]
}
 
"unix" <-
function (command, input, output.to.S = T) 
{
        if (!missing(input)) {
                file <- tempfile("unix")
                on.exit(unlink(file))
                cat(input, file = file, sep = "\n")
                command <- paste("<", file, command)
        }
        system(command, output.to.S)
}
"set.panel" <-
function (m = 1, n = 1, relax = F) 
{
        temp <- par()
        single.plot <- (temp$mfg[3] == 1 & temp$mfg[4] == 1)
        if (!relax | single.plot | ((m == 1) & (n == 1))) {
                par(mfrow = c(m, n))
                cat("plot window will lay out plots in a", m, 
                        "by", n, "matrix ", fill = T)
        }
        invisible()
}
"splint" <-
function (x, y, xgrid, derivative = 0) 
{
        # This is a lean and mean cubic spline interpolation routine
        # although this is the same subroutie that will also smooth 
        # data the correct set of job codes with provide a simple
        # and fast interpolation wiothout much overhead
        #
        if (!is.loaded(symbol.For("css"))) {
          stop("Compiled code has not been dynamically loaded")
                # 
#                temp <- dyn.load(paste(FUNFITS.BIN, "css.o", 
#                        sep = "/"), 2)
        }
        if (is.matrix(x)) {
                xgrid <- y
                y <- x[, 2]
                x <- x[, 1]
        }
        # remove duplicate X's
        if (is.list(x)) {
                xgrid <- y
                y <- x$y
                x <- x$x
        }
        ind <- !dup(x)
        x <- x[ind]
        y <- y[ind]
        if ((derivative > 2) | (derivative < 0)) 
                stop("derivative must be 0,1,2")
        if (length(x) != length(y)) 
                stop("Lengths of x and y must match")
        n <- length(x)
        #       subroutine css(h,npoint,x,y,wght,sy,trace,diag,vlam,  
        #    +                  ngrid,xg,yg,job,ideriv,ierr)  
        .Fortran("css", as.double(0), as.integer(n), as.double(x), 
                as.double(y), as.double(rep(0, n)), as.double(rep(1, 
                        n)), as.double(1), as.double(1), as.double(1), 
                as.integer(length(xgrid)), as.double(xgrid), 
                ygrid = as.double(rep(0, length(xgrid))), as.integer(c(2, 
                        3, 0)), as.integer(derivative), as.integer(0))$ygrid
}
"squasher.nnreg"<-
function(u)
{
	au <- abs(u)
	(u * (1 + 0.5 * au))/(2 + au + 0.5 * au * au)
}
"stats"<-
function(x, by)
{
	if(!missing(by)) {
		x <- cat.to.list(c(x), by)
	}
# coerce into matrix or list format
	if(!is.list(x) & !is.matrix(x))
		x <- matrix(x, ncol = 1)
	if(is.list(x)) {
		ncol <- length(x)
		out <- matrix(NA, ncol = ncol, nrow = length(describe()))
		dimnames(out) <- list(describe(), names(x))
		for(j in (1:ncol)) {
			if(is.numeric(x[[j]])) {
				out[, j] <- describe(x[[j]])
			}
		}
		return(out)
	}
	if(is.matrix(x)) {
		nc <- ncol(x)
		out <- matrix(NA, ncol = nc, nrow = length(describe()))
		dimnames(out) <- list(describe(), dimnames(x)[[2]])
		for(j in (1:nc)) {
			out[, j] <- describe(x[, j])
		}
		return(out)
	}
}
"summary.krig"<-
function(x, digits = 4, ...)
{
	summary <- list(call = x$call, num.observation = length(x$residuals), 
		enp = x$trace, nt = x$nt, res.quantile = quantile(x$residuals, 
		seq(0, 1, 0.25)), shat.MLE = x$shat.MLE, shat.GCV = x$shat.GCV, 
		rhohat = x$rhohat, m = x$m, lambda = x$lambda, cost = x$cost, 
		gcvmin = min(x$gcv.grid[, 3]), rho = x$rho, sigma2 = x$sigma2)
	class(summary) <- "summary.krig"
	summary$covariance <- cor(x$fitted.values * sqrt(x$weights), (x$y) * 
		sqrt(x$weights))^2
	hold <- (sum((x$y - mean(x$y))^2) - sum(x$residuals^2))/(sum((x$y - 
		mean(x$y))^2))
	summary$adjr2 <- 1 - ((length(x$residuals) - 1)/(length(x$residuals) - 
		x$eff.df)) * (1 - hold)
	summary$digits <- digits
	summary
}
"summary.lle"<-
function(obj, digits = 5)
{
	cat("estimated global exponent", obj$glb, fill = T)
	cat("summary of QR estimate", fill = T)
	temp <- t(stats(obj$local.qr))
	temp <- signif(temp, digits)
	temp <- temp[, c(1, 2, 3, 5, 6, 7)]
	temp
}
"summary.netfit"<-
function(fit, standardized = F)
{
	d <- fit$d
	k <- fit$k
	theta <- fit$theta
	beta <- theta[1:(k + 1)]
	mu <- theta[(1:k) + k + 1]
	gamma <- matrix(theta[(1:(d * k)) + 2 * k + 1], ncol = d, nrow = k, 
		byrow = T)
	if(standardized == F) {
		gamma <- gamma * matrix(1/fit$xsd, ncol = fit$d, nrow = fit$k, 
			byrow = T)
		temp <- (gamma * matrix(fit$xm, ncol = fit$d, nrow = fit$k, 
			byrow = T)) %*% rep(1, fit$d)
		mu <- mu - c(temp)
		beta <- beta * fit$ysd
		beta[1] <- beta[1] + fit$ym
	}
	list(beta = beta, mu = mu, gamma = gamma, standardized = standardized)
}
"summary.nlar"<-
function(obj, ...)
{
	cat("Call:", fill = T)
	print(obj$call)
	cat("summary of fitted model(s)", fill = T)
	summary(obj$fit, ...)
}
"summary.nnreg"<-
function(out, noprint = F, digits = 4)
{
	n <- out$n
	nfits <- out$nfits
	temp <- matrix(0, ncol = 6, nrow = nfits)
	for(j in 1:nfits) {
		fit <- out$model[[j]]
		temp[j, 1] <- fit$k
		ss <- sum((out$y - predict(fit, out$x))^2)
		temp[j, 2] <- fit$np
		temp[j, 3] <- n - fit$np
		temp[j, 4] <- signif(sqrt(ss/temp[j, 3]), digits)
		temp[j, 5] <- signif((ss/n)/(1 - temp[j, 2]/n)^2, digits)
		temp[j, 6] <- signif((ss/n)/(1 - (2 * temp[j, 2])/n)^2, digits)
	}
	dimnames(temp) <- list(format(1:nfits), c("# hidden units", "DF model", 
		"DF residuals", "Root MSE", "GCV", "GCV cost=2"))
	if(!noprint) {
		cat("Summary of outout from neural net fit", fill = T)
		cat("see the component summary in the output list for", fill = 
			T)
		cat("more details of the fitting process", fill = T)
		cat("call to nnreg :", fill = T)
		print(out$call)
		temp
	}
	else {
		return(temp)
	}
}
"summary.tps"<-
function(x, digits = 4, ...)
{
## pdh - 7/16/96: added q2 and press
## pdh - 8/16/96: added rmse.press, pure.error, pure.df, fit.pure.error
## pdh - 11/25/97: added r.square as a pass through if available
##      and changed name of covariance to r.square
	summary <- list(call = x$call, num.observation = length(x$residuals), 
		enp = x$trace, nt = x$nt, res.quantile = quantile(x$residuals, 
		seq(0, 1, 0.25)), shat = x$shat, m = x$m, lambda = x$lambda, 
		form = x$form, power = x$power, cost = x$cost, gcvmin = min(x$
		gcv.grid[, 3]), press = x$press, r.square = x$r.square, q2 = x$
		q2, pure.error = x$shat.pure.error, pure.df = x$pure.df, method
		 = x$method, gcv.pure.error = x$gcv.pure.error, gcv.rmse = x$
		GCV, rmse.press = x$rmse.press)
	if(is.null(summary$method))
		summary$method <- "gcvmin"
	class(summary) <- "summary.tps"
	if(is.null(summary$r.square))
		summary$r.square <- cor(x$fitted.values * sqrt(x$weights), (x$y
			) * sqrt(x$weights))^2
	hold <- (sum((x$y - mean(x$y))^2) - sum(x$residuals^2))/(sum((x$y - 
		mean(x$y))^2))
	summary$adjr2 <- 1 - ((length(x$residuals) - 1)/(length(x$residuals) - 
		x$eff.df)) * (1 - hold)
	summary$digits <- digits
	summary
}
"summary.tpsreg"<-
function(fit)
{
	print(fit$call)
	print(fit$summary)
	invisible()
}
"surface" <-
function (x, grid.list = NA, extrap = F, xlab = NULL, ylab = NULL, 
        zlab = NULL, main = NULL, levels = NULL, zlim = NULL, nx=30, ny=30,
        ...) 
UseMethod("surface")
"surface.addreg" <-
function (obj, grid.list = NA, extrap = F, graphics.reset = T, 
        ...) 
{
        old.par <- par("mfrow", "oma")
        if (graphics.reset) 
                on.exit(par(old.par))
        out.p <- predict.surface(obj, grid.list = grid.list, 
                extrap = extrap)
        # was plot:
        surface(out.p, type = "b", graphics.reset = graphics.reset, 
                ...)
        invisible()
}
"surface.krig"<-
function(obj, grid.list = NA, extrap = F, graphics.reset = T, xlab = NULL, ylab
	 = NULL, main = NULL, zlab = NULL, zlim = NULL, levels = NULL,
         nx=30, ny=30, ...)
{
## modified so that you can give main, and ylab as arguments
## in ... and have them passed correctly
	old.par <- par("mfrow", "oma")
	if(graphics.reset)
		on.exit(par(old.par))
	out.p <- predict.surface(obj, grid.list = grid.list, extrap = extrap,
                                 nx=nx, ny=ny)
	if(!is.null(ylab))
		out.p$ylab <- ylab
	if(!is.null(xlab))
		out.p$xlab <- xlab
	if(!is.null(zlab))
		out.p$zlab <- zlab
	if(!is.null(main)) out.p$main <- main	##    else
##      out.p$main <- NULL
	plot(out.p, type = "b", graphics.reset = graphics.reset, levels = 
		levels, zlim = zlim, ...)
	invisible()
}
"surface.nnreg"<-
function(obj, grid.list = NA, extrap = F, graphics.reset = T, xlab = NULL, ylab
	 = NULL, main = NULL, zlab = NULL, zlim = NULL, levels = NULL, model = 
	NA, ...)
{
## modified so that you can give main, and ylab as arguments
## in ... and have them passed correctly
	old.par <- par("mfrow", "oma")
	if(graphics.reset)
		on.exit(par(old.par))
	out.p <- predict.surface(obj, grid.list = grid.list, extrap = extrap, 
		model = model)
	if(!is.null(ylab))
		out.p$ylab <- ylab
	if(!is.null(xlab))
		out.p$xlab <- xlab
	if(!is.null(zlab))
		out.p$zlab <- zlab
	if(!is.null(main)) out.p$main <- main	##    else
##      out.p$main <- NULL
	plot(out.p, type = "b", graphics.reset = graphics.reset, levels = 
		levels, zlim = zlim, ...)
	invisible()
}
"surface.surface"<-
function(obj, lab = NA, type = "b", zlab, xlab, ylab, graphics.reset = T, ...)
{
## Plot a "surface" list object (components x, y, z plus optional labels).
##   type "p" - image (persp is not supported in this R port)
##   type "c" - contour plot
##   type "b" - both: image with contours overlaid
## xlab/ylab/zlab override the labels stored in obj; lab (or obj$main) is
## used as the plot title.
	old.par <- par()
        # error in R 0.61.3 ?
        old.par$fin<-NULL
	if(graphics.reset) {
		on.exit(par(old.par))
	}
	if(is.null(obj$xlab))
		obj$xlab <- "X"
	if(is.null(obj$ylab))
		obj$ylab <- "Y"
	if(missing(zlab)) {
		zlab <- "Z"
	}
	if(!missing(xlab)) {
		obj$xlab <- xlab
	}
	if(!missing(ylab)) {
		## BUG FIX: previously assigned to obj$xlab, clobbering the
		## x-axis label and ignoring the requested y-axis label.
		obj$ylab <- ylab
	}
	if(is.na(lab) && !is.null(obj$main)) {
		lab <- paste("Fixed Variables:  ", obj$main)
	}
	if(type == "p" || type == "b") {
## persp() was used in the S-plus original; fall back to image() here.
## NOTE(review): image() has no zlab argument, so zlab is passed through
## "..." and may produce a harmless warning.
          cat("Warning: persp not supported in R, using image instead.\n")
          image(obj$x,obj$y,obj$z, xlab = obj$xlab, ylab = obj$ylab, zlab = zlab, ...)
		if(!is.na(lab)) {
			title(lab)
		}
	}
	if(type == "c" || type == "b") {
          if(type == "b")
## overlay contours on the image drawn above
            contour(obj$x,obj$y,obj$z, add=T, ...)
          else
            contour(obj$x,obj$y,obj$z, ...)
		if(!is.na(lab) && type != "b") {
			title(lab)
		}
	}
	invisible()
}
"surface.tps"<-
function(obj, grid.list = NA, extrap = F, graphics.reset = T, xlab = NULL, ylab
	 = NULL, main = NULL, zlab = NULL, zlim = NULL, levels = NULL, model = 
	NA, ...)
{
## Surface plot of a fitted tps object: evaluate the fit over a grid with
## predict.surface, then draw it with plot (type "b": image plus contours).
## Labels/title supplied here override those built by predict.surface.
	old.par <- par("mfrow", "oma")
	if(graphics.reset)
		on.exit(par(old.par))
	out.p <- predict.surface(obj, grid.list = grid.list, extrap = extrap, 
		model = model)
	## copy any user-supplied labels onto the surface object
	for(lab.name in c("ylab", "xlab", "zlab", "main")) {
		lab.val <- get(lab.name)
		if(!is.null(lab.val))
			out.p[[lab.name]] <- lab.val
	}
	plot(out.p, type = "b", graphics.reset = graphics.reset, levels = 
		levels, zlim = zlim, ...)
	invisible()
}
"surface.tpsreg"<-
function(tpsreg.object, vary, granularity = 25, transform, x.scale, ..., main)
{
## Surface plot for a tpsreg fit: hold all but two predictors fixed,
## evaluate the fitted surface on a granularity x granularity grid over
## the two varying predictors, and draw it with surface.default.
##   vary        - named list, one entry per predictor: "v" (vary),
##                 "c" (hold at midrange) or a numeric holding value.
##                 Exactly two entries must be "v".
##   granularity - number of grid points per varying axis.
##   transform   - optional function (by name) applied to predicted values.
##   x.scale     - original unscaled design matrix; used to map held values
##                 and axis ranges back to the original units.
##   main        - plot title; a default describing held variables is built
##                 if missing.
	fnames <- dimnames(tpsreg.object$x)[[2]]	
	## Make sure the vary parameter is usable
	nfactors <- length(fnames)
	if(missing(vary)) {
## default: vary the first two factors, hold the rest constant.
## eval(parse()) is used so the list components acquire the factor names.
		vary <- list()
		for(i in 1:nfactors)
			if(i <= 2) eval(parse(text = paste("vary$", fnames[i], 
				  " <- 'v'", sep = ""))) else eval(parse(text
				   = paste("vary$", fnames[i], " <- 'c'", sep
				   = "")))
	}
	if(length(vary) > nfactors)
		stop("Error: vary has too many components.")
	nmv <- names(vary)
	nvary <- 0
## fill in "c" for any factor not mentioned in vary, and count the "v"s
	for(nm in fnames) {
		if(is.na(charmatch(nm, nmv)))
			eval(parse(text = paste("vary$", nm, " <- 'c'", sep = 
				"")))
		eval(parse(text = paste("if(vary$", nm, 
			" == 'v') nvary <- nvary+1", sep = "")))
	}
	if(nvary != 2) stop("Error: Must have exactly 2 factors varying")	
	## ready to call grid routine 
	vnames <- names(vary)
	index <- charmatch(fnames, vnames)
	index.v <- rep(F, nfactors)
	mf <- tpsreg.object$x
	ranges <- t(apply(mf, 2, range))
	holdat <- rep(0, nfactors)
## decide, per factor, whether it varies or at what value it is held
	for(i in 1:nfactors) {
		if(vary[[index[i]]] == "v")
			index.v[i] <- T
		else if(vary[[index[i]]] == "c") {
			if(missing(x.scale))
				holdat[i] <- mean(ranges[i,  ])
			else holdat[i] <- mean(x.scale[i])
		}
		else {
## numeric holding value; rescale to model units if x.scale supplied
			if(missing(x.scale))
				holdat[i] <- vary[[index[i]]]
			else {
				a <- min(x.scale[, i])
				b <- max(x.scale[, i])
				holdat[i] <- (vary[[index[i]]] - a)/(b - a)
			}
		}
	}
	varies <- (1:nfactors)[index.v]
	if((length(varies) > nfactors) && !all(ii <- (ranges[ - varies, 1] <= 
		holdat[ - varies] && ranges[ - varies, 2] >= holdat[ - varies])
		))
		stop(paste("Fixed variables must be held at a value", 
			"in the range of the model data.", paste("Variable", 
			fnames[ - varies][!ii], "is outside its range", 
			collapse = "\n")))
## build the prediction grid over the two varying factors
	xx <- seq(ranges[varies[1], 1], ranges[varies[1], 2], length = 
		granularity)
	yy <- seq(ranges[varies[2], 1], ranges[varies[2], 2], length = 
		granularity)
	xm <- rep(xx, rep(granularity, granularity))
	ym <- rep(yy, granularity)
	x.pred <- matrix(NA, ncol = nfactors, nrow = granularity * granularity)
	x.pred[, varies[1]] <- xm
	x.pred[, varies[2]] <- ym
	nvaries <- (1:nfactors)[!index.v]
        # error in R !?
        if(length(nvaries)!=0)
          for(i in 1:length(nvaries))
            x.pred[, nvaries[i]] <- rep(holdat[nvaries[i]], granularity * 
                                        granularity)
	dimnames(x.pred) <- list(1:(granularity * granularity), fnames)
	ypred.mat <- predict(tpsreg.object, x.pred)
## reshape predictions into the grid matrix (rows follow xx)
	z.mat <- matrix(as.numeric(ypred.mat), nrow = granularity, ncol = 
		granularity, byrow = T)
	if(!missing(transform))
		eval(parse(text = paste("z.mat <- ", paste(substitute(transform
			)), "(z.mat)", sep = "")))
## back-transform the axis values to original units when x.scale is given
	if(!missing(x.scale)) {
		a <- min(x.scale[, varies[1]])
		b <- max(x.scale[, varies[1]])
		xx <- xx * (b - a) + a
		a <- min(x.scale[, varies[2]])
		b <- max(x.scale[, varies[2]])
		yy <- yy * (b - a) + a
	}
	result <- surface.default(xx, yy, z.mat, ..., xlab = vnames[varies[1]], 
		ylab = vnames[varies[2]])
## build a default title listing the held variables and their values
	if(missing(main)) {
		main <- paste("Surface plot for", tpsreg.object$yname)
		if(length(nvaries) > 0) {
			for(i in 1:length(nvaries)) {
				if(missing(x.scale))
				  holdx <- holdat[nvaries[i]]
				else {
				  a <- min(x.scale[, nvaries[i]])
				  b <- max(x.scale[, nvaries[i]])
				  holdx <- holdat[nvaries[i]] * (b - a) + a
				}
				if(i == 1)
				  main <- paste(main, "\nHolding ", fnames[
				    nvaries[i]], "=", holdx, sep = "")
				else main <- paste(main, ",", fnames[nvaries[i]
				    ], "=", holdx, sep = "")
			}
		}
	}
	title(main = main)
	invisible(result)
}
"touch.file"<-
function(file.name)
{
## Update the modification time of a file (creating it if absent) by
## running the shell "touch" command.
	cmd <- paste("touch ", file.name)
	unix(cmd)
}
"tps"<-
function(x, y, lambda = NA, df = NA, cost = 1, knots, weights = rep(1, length(y
	)), m, power, scale.type = "unit.sd", x.center, x.scale, 
	return.matrices = T, nstep.cv = 80, method = "GCV", rmse = NA, 
	link.matrix = NA, verbose = F, subset = NULL, tol = 0.0001, 
	print.warning = T, yname = NULL)
{
## Fit a thin plate smoothing spline of y on the columns of x.
##   lambda/df  - smoothing parameter given directly or via target
##                effective degrees of freedom (either makes method "user");
##                lambda = 0 interpolates.
##   cost       - multiplier on model degrees of freedom in the GCV criterion.
##   knots      - basis knots; defaults to the unique rows of x.
##   m, power   - spline order and radial-basis exponent (power = 2m - d for
##                a true thin plate spline).
##   scale.type, x.center, x.scale - passed to transformx for scaling x.
##   method     - which lambda estimate to use ("GCV" by default).
##   link.matrix - optional linear transformation applied to the basis.
## Returns a list of class c("tps", "funfits") with coefficients, fitted
## values, GCV information and (optionally) the decomposition matrices.
	out <- list()
	out$tag <- 1
	class(out) <- c("tps", "funfits")
	out$call <- match.call()	##
## S function to find minizier of 
##  || y- Xb||^2 + lambda b^T H b where H is a nonnegative definite 
## matrix
## Solution for b is  b= (X^T*X + lambda*H)^(-1) X^T*Y
##  the  H matrix is consructed to be the thin plate spline roughness
## matrix. (If the power =2m-d) 
##
## First set up some constants
## and some values in the output list
##
	x <- as.matrix(x)
	y <- c(y)	# make sure y is a vector!
	if(!is.null(subset)) {
		x <- x[subset,  ]
		y <- y[subset]
		out$subset <- paste(substitute(subset))
	}
	out$x <- x
	out$y <- y
	N <- length(y)
	out$N <- N
	lambda.est <- NA
	d <- ncol(x)	##
## make sure that 2m-d > 0
##
	out$form <- T
	with.constant <- T	## refers to weird constant for radial basis
	if(missing(m)) {
## smallest m giving 2m - d > 0 (and at least 2)
		m <- max(2, ceiling(d/2 + 0.10000000000000001))
	}
	if(missing(power)) {
		power <- 2 * m - d
		if(power < 1) {
			power <- 1
			out$form <- F
			if(print.warning)
				cat("Warning: Model is not a true thin plate spline",
				  fill = T)
		}
	}
## If not a true tps then do not find the  weird constant for the basis 
## functions
	if(2 * m - d <= 0) {
		with.constant <- F
	}
	if(2 * m - d != power) {
		with.constant <- F
	}
	out$cost <- cost
	out$m <- m
	out$with.constant <- with.constant
	out$trace <- NA
	if(is.null(yname))
		out$yname <- as.character(paste(substitute(y), collapse = ""))
	else out$yname <- yname
	out$weights <- weights	##
## Now find the estimate of sigma based on replicated points if this 
## makes sense
	rep.info <- cat.matrix(x)	## integer tags to indicate replications
	if(verbose)
		print(rep.info)
	if(max(rep.info) == N | !is.na(link.matrix[1])) {
## no replicated x rows (or a link matrix in use): no rep-based sigma
		shat.rep <- NA
		shat.pure.error <- NA
	}
	else {
##
## do a simple 1-way ANOVA to get the rep error
##
		shat.pure.error <- sqrt(fast.1way(rep.info, y, weights)$MSE)
		shat.rep <- shat.pure.error
		out$shat.pure.error <- shat.pure.error
	}
	out$shat.rep <- shat.rep
	out$shat.pure.error <- shat.pure.error
	if(missing(knots))
		knots <- x[!dup(rep.info),  ]
	knots <- as.matrix(knots)
	out$knots <- knots	##
##
## scale the X's 
	x <- transformx(x, scale.type, x.center, x.scale)
	transform <- attributes(x)
	out$transform <- transform	## scale the knots int eh same way
	knots <- scale(knots, center = transform$x.center, scale = transform$
		x.scale)	##
#######################   NOTE        #############################
############ both the x and the knots must be scaled ################
################################################
##
	just.solve <- (lambda[1] == 0)
	if(is.na(just.solve))
		just.solve <- F
	out$power <- power	## make up the T and K matrices
## find the QR decopmposition of T matrix  that spans null space with
## respect to the knots 
	qr.T <- qr(make.tmatrix(knots, m))
	tmat <- make.tmatrix(x, m)
	out$ptab <- attributes(tmat)$ptab
	X <- cbind(tmat, qr.yq2(qr.T, make.rb(x, knots, power, with.constant = 
		with.constant)))
	if(verbose) print(dim(X))	
	## transform the X evalution matrix by a linear transformation if 
## the link matrix has been passed
##
	if(!is.na(link.matrix[1])) X <- link.matrix %*% X	##
	np <- ncol(X)	## the number of parameters
	nr <- nrow(X)
	N <- nr
	nt <- qr.T$rank	## number of para. in NULL space
	nk <- np - nt
	out$np <- np
	out$nt <- nt	
	##   construct the roughness penalty matrix  using radial basis
##functions and Qr decomposition of T
##
	H <- matrix(0, ncol = np, nrow = np)
	temp <- qr.yq2(qr.T, make.rb(knots, knots, power, with.constant = 
		with.constant))
	temp <- qr.q2ty(qr.T, temp)
	H[(nt + 1):np, (nt + 1):np] <- temp	##
## if lambda = 0 then just solve the system 
	if(just.solve) {
#
##  just find the least squares fit using radial basis functions or
## the interpolation if knots are missing. 
##
		out$method <- "interpolation"
		omega <- qr.coef(qr(X), y)
	}
	else {
##
##   do all the heavy decompositions if lambda is not = 0 
##   or if it is omitted
##
##
## inverse symetric square root of X^T W  X
##
		temp <- svd(sqrt(weights) * X)[c("v", "d")]	##
## guard against a (near) singular basis: large condition number
		if(max(temp$d)/min(temp$d) > 10000000000) {
			if(verbose)
				print(temp$d)
			print("Must use a reduced set of\nknots because the radial basis functions are close to being singular"
				)
			out <- NULL
			return(out)
		}
##
##
		B <- temp$v %*% diag(1/(temp$d)) %*% t(temp$v)	##
##   eigenvalue eigenvector decomposition of BHB
##
		temp <- svd(B %*% H %*% B)
		U <- temp$u
		D <- temp$d
		if(verbose) print(D)	
	##   We know that H has at least nt zero singular values ( see how H is
##   filled)
##   So make these identically zero.
##   the singular values are returned from largest to smallest.
##
		D[(1:nt) + (np - nt)] <- 0
		G <- B %*% U	##
##   with these these decompositions it now follows that 
##     b= B*U( I + lambda*D)^(-1) U^T * B * X^T*Y
##      = G*( I + lambda*D)^(-1) G^T* X^T*Y
##	
##
		u <- t(X %*% G) %*% (y * weights)	##
## find the (weighted) pure error sums of squares by calculating 
## predcited values when lambda=0 
## 
		temp1 <- (X %*% G) %*% u
		out$pure.ss <- sum(weights * (y - X %*% G %*% u)^2)
		out$matrices <- list(D = D, G = G, u = u, qr.T = qr.T)	##
## find some estimates of lambda
##
		gcv.out <- gcv.tps(out, cost = cost, nstep.cv = nstep.cv, rmse
			 = rmse, verbose = verbose, tol = tol)	##
		out$gcv.grid <- gcv.out$gcv.grid	##
##
		lambda.est <- gcv.out$lambda.est	##
		if(verbose) print(lambda.est)	##
## find the one specified by the method but first fill in a 
## possible user supplied value
##
##
		if(!missing(lambda) | !missing(df)) {
			method <- "user"	
	## is the df is supplied then find the corresponding lambda
			if(!is.na(df)) {
				lambda <- tps.df.to.lambda(df, D)
			}
			temp <- c(lambda, NA, NA, NA)
			lab <- c(dimnames(lambda.est)[[1]], "user")
			lambda.est <- rbind(lambda.est, temp)
			row.names(lambda.est) <- lab
		}
## find the best one. 
##
		lambda.best <- lambda.est[method, "lambda"]
		if(verbose) print(lambda.best)	##
## To solve for the coefficients,  recall: omega= G*( I + lambda*D)^(-1)*u
## predicted values are X%*% omega
		omega <- G %*% ((1/(1 + lambda.best * D)) * u)
	}
	if(!just.solve) {
## effective degrees of freedom and diagonal of the hat matrix A(lambda)
		out$eff.df <- sum(1/(1 + lambda.best * D))
		out$trA2 <- sum(1/(1 + lambda.best * D)^2)
		temp <- X %*% out$matrices$G %*% sqrt(diag(1/(1 + lambda.best * 
			out$matrices$D)))
		diagA <- c((temp^2) %*% rep(1, ncol(X))) * out$weights
		out$diagA <- diagA
	}
	if(just.solve)
		out$eff.df <- out$np
	out$fitted.values <- c(X %*% omega)
	out$residuals <- y - c(X %*% omega)
	out$trace <- out$eff.df	##
	if(verbose)
		print(out$eff.df)
	if(just.solve) {
		out$lambda <- lambda
		out$gcv.grid <- matrix(c(lambda, rep(NA, 4)), nrow = 1)
	}
	else {
		out$lambda <- lambda.best
		out$method <- method
	}
	out$best.model <- out$lambda
	out$omega <- omega
	out$d <- omega[1:nt]	
	## transform the omegas associated with the radial basis functions back
##  into the c parameter vector. 
## 
##
	temp <- omega
	temp[1:nt] <- 0
	out$c <- c(qr.qy(qr.T, temp))
	out$coefficients <- c(omega[1:nt], out$c)
	out$just.solve <- just.solve
	res.df <- (N - out$trace)	##
##
## find an estimate of the residual standard deviation
## based on fitted spline
	if(res.df > 0) {
		out$GCV <- (sum(out$residuals^2 * weights)/N)/(1 - out$eff.df/N
			)^2
		out$shat <- sqrt(sum(out$residuals^2 * weights)/(res.df))
		if(method == "user") {
## fill in the info for the lambda.est data frame
## for the user supplied value of lambda
			lambda.est["user", 2] <- out$eff.df
			lambda.est["user", 3] <- out$GCV
			lambda.est["user", 4] <- out$shat
		}
	}
	else {
		out$shat <- 0
		out$GCV <- NA
	}
	if(verbose) {
		print("shat")
		print(out$shat)
	}
	if(is.na(link.matrix[1])) {
## weighted R^2 and leave-one-out (PRESS) diagnostics
		r.square <- cor(out$fitted.values * sqrt(out$weights), (out$y) * 
			sqrt(out$weights))^2
		out$r.square <- r.square	## calculate the q2 value
		if(!just.solve) {
#
# don't do this if interplotating
#
			cv.res <- out$residuals/(1 - diagA)
			press <- sum((cv.res)^2)
			rmse.press <- (press/length(cv.res))^0.5
			ss.tot <- sum((y - mean(y))^2)
			q2 <- (ss.tot - press)/ss.tot
			out$q2 <- q2
			out$press <- press
			out$rmse.press <- rmse.press
		}
	}
	else {
		out$press <- NA
		out$rmse.press <- NA
		out$q2 <- NA
	}
	out$lambda.est <- lambda.est
	out$best.model <- out$lambda	#
#zap matrices if no return
#
	if(!return.matrices)
		out$matrices <- NA
	out
}
"tps.df.to.lambda"<-
function(df, D, guess = 1)
{
## Invert the effective-degrees-of-freedom relation
##   df(lambda) = sum 1/(1 + lambda*D)
## for a target df: bracket lambda by doubling/halving from guess, then
## bisect on the log scale using tps.fdf. Returns NA (with a warning) when
## the target df is outside the attainable range.
	if(is.na(df))
		return(NA)
	if(df < sum(D == 0)) {
		warning("df too small to match with a lambda value")
		return(NA)
	}
	if(df > length(D)) {
		warning(" df too large to match a lambda value")
		return(NA)
	}
## bracket from above: double lambda until the trace falls to df or below
	upper <- guess
	for(iter in 1:8) {
		if(sum(1/(1 + upper * D)) <= df)
			break
		upper <- upper * 2
	}
## bracket from below: halve lambda until the trace rises to df or above
	lower <- guess
	for(iter in 1:8) {
		if(sum(1/(1 + lower * D)) >= df)
			break
		lower <- lower/2
	}
	info <- list(D = D, df = df, N = length(D))
	root <- bisection.search(log(upper), log(lower), tps.fdf, tol = 0.0001, 
		f.extra = info)$x
	exp(root)
}
"tps.fdf"<-
function(llam, info)
{
## Root function for matching degrees of freedom: the effective df at
## lambda = exp(llam), minus the target df. Zero when llam hits the target.
	lambda <- exp(llam)
	sum(1/(1 + lambda * info$D)) - info$df
}
"tps.fgcv"<-
function(lam, obj)
{
## Generalized cross-validation criterion at smoothing parameter lam for a
## tps object: MSE divided by the squared (cost-adjusted) model df factor.
	dl <- obj$matrices$D * lam
	rss <- obj$pure.ss + sum(((obj$matrices$u * dl)/(1 + dl))^2)
	mse <- rss/obj$N	#
	trace.A <- sum(1/(1 + dl))
	den <- (1 - (obj$cost * (trace.A - obj$nt) + obj$nt)/obj$N)	#
# If the denominator is negative then flag this as a bogus case
# by making the GCV fucntion "infinity"
#
	ifelse(den > 0, mse/den^2, 1e+20)
}
"tps.fs2hat"<-
function(lam, obj)
{
## Residual-variance estimate at smoothing parameter lam:
## RSS(lam) / (N - trace of the hat matrix).
	dl <- obj$matrices$D * lam
	rss <- obj$pure.ss + sum(((obj$matrices$u * dl)/(1 + dl))^2)
	trace.A <- sum(1/(1 + dl))
	rss/(obj$N - trace.A)
}
"tps.fshat"<-
function(lam, obj)
{
## Residual-variance estimate at smoothing parameter lam (identical in form
## to tps.fs2hat): RSS(lam) / (N - trace of the hat matrix).
	dl <- obj$matrices$D * lam
	rss <- obj$pure.ss + sum(((obj$matrices$u * dl)/(1 + dl))^2)
	trace.A <- sum(1/(1 + dl))
	rss/(obj$N - trace.A)
}
"tps.ftrace"<-
function(lam, D)
{
## Effective degrees of freedom (trace of the smoothing hat matrix) at
## smoothing parameter lam, given the eigenvalues D.
	shrink <- 1/(1 + lam * D)
	sum(shrink)
}
"tpsreg"<-
function(x, y, lambda, m, clean = T)
{
### Fit a thin plate spline by running the external "tpsreg.x" program.
### The data are written to tps.dat / tps.in, the program is run in the
### shell, and its output files (tps.par, tps.spl, tps.gcv, tps.ev,
### tps.sum) are read back into the returned "tpsreg" object.
###   lambda - optional fixed smoothing parameter (otherwise chosen by GCV)
###   m      - order of the spline penalty (default depends on dimension)
###   clean  - remove the intermediate work files afterwards
	out <- list()
	out$call <- match.call()
	class(out) <- "tpsreg"
	if(missing(lambda)) {
		gcv <- 1
	}
	else gcv <- 101
	ncov <- 0	## swh scale x matrix according to user specified options
	x <- as.matrix(x)
	d <- ncol(x)	#print(d)
### pdh 10/12/94 - set a better default for m
	if(missing(m))
		m <- ceiling((d + 2)/2)
	write(t(cbind(x, y)), "tps.dat", ncol = (d + 1))
	write(c(d, m, ncov, gcv), "tps.in")
	if(!missing(lambda))
		write(lambda, "tps.in", append = T)
	write("tps.dat", "tps.in", append = T)
	cat("Running thinplate spline program in the shell", fill = T)
	unix(paste(system.file("exec/tpsreg.x")," < tps.in > tps.sum",
                   sep = ""))
	cat(" Output from tps is in the file  tps.sum", fill = T)	
	## nobs,dim,m,ncov1,(iout(k),k=1,4)
	parms <- scan("tps.par")
	dim <- parms[2]
	spar <- parms[9]	#	print(parms)
	cat(" Reading in estimated spline at the data points", fill = T)
	tps.spline <- matrix(scan("tps.spl"), ncol = 2, byrow = T)
	gcvf <- scan("tps.gcv")
	gcvf <- matrix(gcvf, ncol = 3, byrow = T)	# add to the outout  object 
## pdh 5/25/94
	out$x <- x
	out$y <- y
	out$yname <- paste(substitute(y))
	out$summary <- scan("tps.sum", what = "a", sep = "\n")
	class(out$summary) <- "text"
	out$residual <- tps.spline[, 2]
	out$fitted.values <- tps.spline[, 1]
	out$gcv.grid <- gcvf
	out$parameters <- parms	
	##         read in the coefficients of the  spline          
	out$coefficients <- scan("tps.ev")
	out$spar <- spar
	out$eff.df <- parms[10]
	if(clean) {
## BUG FIX: "tps.in" was previously removed twice; each work file is now
## removed exactly once.
		remove.file("tps.ev")
		remove.file("tps.spl")
		remove.file("tps.sum")
		remove.file("tps.gcv")
		remove.file("tps.in")
		remove.file("tps.dat")
	}
	out
}
"transformx"<-
function(x, scale.type = "unit.sd", x.center, x.scale)
{
## Center and scale the columns of a design matrix x.
## scale.type selects the transformation:
##   "unscaled" - leave x unchanged (center 0, scale 1)
##   "unit.sd"  - subtract column means, divide by column std. deviations
##   "range"    - map each column's observed range onto [0, 1]
##   "user"     - use the supplied x.center / x.scale
## The centering and scaling actually used are attached as attributes
## ("x.center", "x.scale", "x.scale.type") so the same transformation can
## be applied later (e.g. to prediction points).
	if(scale.type == "unscaled") {
		x.center <- rep(0, ncol(x))
		x.scale <- rep(1, ncol(x))
	}
	else if(scale.type == "unit.sd") {
		x.center <- apply(x, 2, mean)
		x.scale <- sqrt(apply(x, 2, var))
		x <- scale(x)
	}
	else if(scale.type == "range") {
		col.min <- apply(x, 2, min)
		x.center <- col.min
		x.scale <- apply(x, 2, max) - col.min
		x <- scale(x, center = x.center, scale = x.scale)
	}
	else if(scale.type == "user") {
		if(missing(x.center))
			x.center <- apply(x, 2, mean)
		if(missing(x.scale) || length(x.scale) != ncol(x))
			stop("Error: x.scale must be a vector of length d")
		x <- scale(x, center = x.center, scale = x.scale)
	}
	else stop(paste("Error: scale.type must be one of", 
			"unit.sd, range, user, unscaled"))
	attr(x, "x.center") <- x.center
	attr(x, "x.scale") <- x.scale
	attr(x, "x.scale.type") <- scale.type
	x
}
"unique.matrix" <-
function (x) 
{
## Drop duplicate rows of a matrix, keeping the first occurrence of each.
        keep <- !dup.matrix(x)
        x[keep, ]
}
"unscale"<-
function(x, x.center, x.scale)
{
## Invert a previous scale(): multiply each column by its x.scale and then
## add back its x.center.
	out <- scale(x, center = F, scale = 1/x.scale)
	out <- scale(out, center =  - x.center, scale = F)
	out
}
"ushell.design"<-
function(n.factors, factor.names.arg = std.factor.names(n.factors), n.shells = 
	1, logx = F, digits = 4)
{
## creates uniform shell designs as described in
## David D. Doehlert (1970). "Uniform Shell Designs," Applied Statistics
##(JRSS, Series C) Vol 19, 231-239
## Returns an "rsm.design" data frame with n.factors columns; each factor
## is mapped onto the range given in factor.names.arg (log scale when the
## corresponding logx entry is TRUE). n.shells > 1 builds nested shells.
	if(length(logx) == 1) logx <- rep(logx, n.factors)
	if(length(logx) != n.factors) stop(paste("Length of", substitute(logx), 
			"can only be 1 or", substitute(n.factors)))	
	## make up the list of generates from p. 233/239 of the paper
##  gen.list <- list(d2=rbind(c(0,0),c(1,0),c(.5,.86602)))
##  gen.list$d3 <- rbind(cbind(gen.list[[1]],rep(0,3)), 
##		       c(.5,.28868,.81650))
##  gen.list$d4 <- rbind(cbind(gen.list[[2]],rep(0,4)), 
##		       c(.5,.28868,.20413,.79057))
	gen.list <- vector("list", length = 10)
	names(gen.list) <- paste("d", 2:11, sep = "")
	gen.list[[1]] <- rbind(c(0, 0), c(1, 0), c(1/2, sqrt(3/4)))
## build the d-dimensional generator from the (d-1)-dimensional one by
## appending a zero column and one new simplex vertex
	for(j in 2:10) {
## j = d-1
		gen.mat <- gen.list[[j - 1]]
		gen.mat <- cbind(gen.mat, rep(0, j + 1))
		new.pt <- gen.mat[j + 1, 1:(j - 1)]
		new.pt <- c(new.pt, 1/sqrt(2 * (j + 1) * j), sqrt((j + 2)/(2 * (
			j + 1))))
		gen.mat <- rbind(gen.mat, new.pt)
		gen.list[[j]] <- gen.mat
	}
	max.factors <- length(gen.list) + 1
	if(n.factors > max.factors)
		stop(paste("\nError: Program will handle only up to", 
			max.factors, "factors right now.\n"))
	gen.mat <- gen.list[[n.factors - 1]]	
	## if there are d factors, there are d+1 rows in the generating
## matrix. When each point is subtracted from every other point, 
## then it creates d new points. So the total number of points
## is (d+1)*d. One point of the design is the origin, and the
## remaining d(d+1) points lie on a sphere of radius 1
	d <- n.factors
	n.gen <- d + 1
	n <- d * (d + 1) + 1
	des.mat <- matrix(NA, nrow = n, ncol = n.factors)
	des.mat[1:n.gen,  ] <- gen.mat
	i <- n.gen + 1
	for(j in 2:n.gen) {
		for(l in 1:n.gen) {
			if(l != j) {
				des.mat[i,  ] <- gen.mat[l,  ] - gen.mat[j,  ]
				i <- i + 1
			}
		}
	}
## if there are multiple shells create them here
	if(n.shells > 1)
		add.shells <- T
	else add.shells <- F
	while(add.shells) {
		n.gen <- nrow(des.mat)
		n <- n.gen + (n.gen - 1)^2
		new.des <- matrix(NA, nrow = n, ncol = d)
		new.des[1:n.gen,  ] <- des.mat
		j <- n.gen + 1	
	## add and subtract the generating points to the existing
## matrix
		for(i in 2:n.gen) {
			for(k in 2:(n.gen)) {
				new.des[j,  ] <- new.des[i,  ] - new.des[k,  ]
				j <- j + 1
			}
		}
		des.mat <- new.des
		des.mat <- des.mat[!dup.matrix(round(des.mat, digits)),  ]	
	## now check to see how many shells there are
		r.vals <- sqrt(apply(des.mat^2, 1, sum))
		r.vals[r.vals <= 10^-6] <- 0
		r.vals <- round(r.vals, digits)
		u.rvals <- sort(unique(r.vals))[-1]
		n.r <- length(u.rvals) - 1	## don't include 0
		if(n.r >= n.shells) add.shells <- F	#      browser()
	}
## now pull off the desired number of shells
	if(n.shells > 1) {
		r.cut <- u.rvals[n.shells]
		des.mat <- des.mat[r.vals <= r.cut,  ]
		r.vals <- u.rvals[1:n.shells]
	}
## now assign the factor names and levels
	if(!is.list(factor.names.arg)) {
## character vector of names: use default ranges (-1,1), or log(0.1,10)
## for factors flagged in logx
		fnames <- as.list(rep(0, n.factors))
		names(fnames) <- factor.names.arg
		for(i in 1:n.factors)
			if(logx[i]) fnames[[i]] <- log(c(0.10000000000000001, 
				  10)) else fnames[[i]] <- c(-1, 1)
	}
	else {
		if(length(factor.names.arg) != n.factors)
			stop(paste(
				"Number of elements in factor.names.arg must equal",
				n.factors))
		if(!all(sapply(factor.names.arg, function(x)
		length(x) == 2)))
			stop(paste("If a list, each component of", 
				"factor.names.arg must have length 2"))
		fnames <- factor.names.arg
		for(i in 1:n.factors)
			if(logx[i]) fnames[[i]] <- log(fnames[[i]])
	}
	des.mat <- des.mat/max(abs(des.mat))
	des <- data.frame(des.mat)
## sort rows lexicographically by all factor columns (built via parse
## because the number of columns is not fixed)
	des <- eval(parse(text = paste("des[order(", paste("des[,", 1:n.factors,
		"]", sep = "", collapse = ","), "),]", sep = "")))
## rescale each column from design units onto the requested factor range
	for(i in 1:n.factors) {
		x <- fnames[[i]]
		y <- des[, i]
		a <- x[1]
		b <- x[2]
		center.x <- 0.5 * (a + b)
		scale.x <- (b - a)/2
		y <- y * scale.x + center.x
		attributes(y) <- list(log = logx[i], scale = scale.x, center = 
			center.x, class = "rsm.factor")
		des[, i] <- y
	}
	dimnames(des) <- list(1:nrow(des), names(fnames))
	class(des) <- c("rsm.design", "design", "data.frame")
	attr(des, "n.shells") <- n.shells
	if(n.shells > 1)
		attr(des, "r.vals") <- r.vals
	des
}
"write.data"<-
function(x, file)
{
## Print the object x to a text file (default: "<name of x>.output").
## The print width option and the sink() diversion are restored via
## on.exit, so the console is not left redirected if print() fails.
	length.old <- options()$length
	options(length = 10000)
	on.exit(options(length = length.old), add = T)
	if(missing(file)) {
		file <- paste(as.character(substitute(x)), ".output", sep = "")
		cat(" The data set is being written to the UNIX file: ", file, 
			fill = T)
	}
	sink(file)
## undo the diversion even on error (previously a failing print left the
## sink active and the option unrestored)
	on.exit(sink(), add = T)
	print(x, quote = F)
	invisible()
}
"wt.huber"<-
function(u, c = 1.345)
{
## Huber weight function: weight 1 for |u| <= c, and c/|u| for |u| > c,
## so large residuals are downweighted. c = 1.345 is the usual tuning
## constant for 95% efficiency at the normal.
	absu <- abs(u)
	outside <- (absu > c)
	wts <- u
	wts[!outside] <- 1
	wts[outside] <- c/absu[outside]
	wts
}
"xline"<-
function(x, ...)
{
## Draw vertical reference line(s) at the given x position(s) on the
## current plot; extra arguments are passed on to abline.
	abline(..., v = x)
}
"yline"<-
function(y, ...)
{
## Draw horizontal reference line(s) at the given y position(s) on the
## current plot; extra arguments are passed on to abline.
	abline(..., h = y)
}
## akima provides the interpolation routines used by the surface plotting
## functions in this package.
require(akima)

## Package load hook: attach the compiled funfits code when the library
## is loaded.
.First.lib <- function(lib, pkg) {
  library.dynam("funfits", pkg, lib)
}

## Very old R (< 0.62) used a library.dynam that took only the chunk name;
## load directly in that case.
if(version$minor < "62")
  library.dynam("funfits")

