Cv.warping <- function(x, delta, kernel, Mstart = 1, Mend)
{
# Cross-validation scores for WARPing density estimation
# (C routine "cvwarping").
# Returns an (Mend - Mstart + 1) x 2 matrix: column 1 holds the
# smoothing parameters M = Mstart..Mend, column 2 the CV score.
# The corresponding bandwidth is h = M * delta.
  x <- x[!is.na(x)]
# default Mend: largest bandwidth about a third of the data range
  if (missing(Mend)) Mend <- floor(0.33 * diff(range(x)) / delta)
  Mnumber <- Mend - Mstart + 1
  numdata <- length(x)
# bin mesh extended by the widest kernel support on both sides
  start <- min(x) - delta * (Mend + 0.1)
  origin <- (floor(start / delta) - 0.5) * delta
  numbin <- ceiling((max(x) + delta * (Mend + 0.1) - origin) / delta) + 1
# non-empty bins are bounded by both the sample size and the bin count
  max.notempty <- min(numdata + 1, numbin)
# workspace vectors are allocated here and filled by the C routine;
# only `result` (one CV value per M) is used afterwards
  z <- .C("cvwarping",
    as.double(x),
    double(numbin),
    as.double(delta),
    as.integer(c(Mstart, Mend)),
    result = double(Mnumber),
    double(Mend),
    as.double(origin),
    integer(numbin),
    integer(max.notempty),
    integer(max.notempty),
    as.integer(numdata),
    as.integer(numbin),
    as.integer(kernel))$result
  matrix(c(Mstart:Mend, z), Mnumber, 2)
}
  
Cvb.warping <- function(x, delta, kernel, Mstart = 1, Mend)
{
# Cross-validation variant for WARPing density estimation
# (C routine "cvbwarping").
# Returns an (Mend - Mstart + 1) x 2 matrix whose first column holds
# M = Mstart..Mend and whose second column holds the score returned
# by the C routine; the bandwidth is h = M * delta.
  x <- x[!is.na(x)]
  if (missing(Mend))
    Mend <- floor(0.33 * diff(range(x)) / delta)
# default Mend: largest bandwidth about a third of the data range
  numdata <- length(x)
  Mnumber <- Mend - Mstart + 1
# bin mesh wide enough for the largest bandwidth at both ends
  start <- min(x) - delta * (Mend + 0.1)
  origin <- (floor(start / delta) - 0.5) * delta
  numbin <- ceiling((max(x) + delta * (Mend + 0.1) - origin) / delta) + 1
  max.notempty <- min(numdata + 1, numbin)
# the C routine fills `result` with one score per M
  score <- .C("cvbwarping",
    as.double(x),
    double(numbin),
    as.double(delta),
    as.integer(c(Mstart, Mend)),
    result = double(Mnumber),
    double(Mend),
    as.double(origin),
    integer(numbin),
    integer(max.notempty),
    integer(max.notempty),
    as.integer(numdata),
    as.integer(numbin),
    as.integer(kernel))$result
  cv <- matrix(0, Mnumber, 2)
  cv[, 1] <- Mstart:Mend
  cv[, 2] <- score
  cv
}
  
G.Warpingreg<-function(x,y,delta,selector=2,kernel=4,Mstart=5,Mend,boundary=0.1)
# S-function calling the C-routine for computation of
# the adjusted prediction error G(M) for WARPing regression
#
# Expected parameters :
# Predictor x (onedimensional) and response y.
# Binwidth delta
# Optional parameters :
# Selector coded 1 to 5
# 1 : SHIBATA'S MODEL SELECTOR
# 2 : GENERALIZED CROSS-VALIDATION (default)
# 3 : AKAIKE'S INFORMATION CRITERION
# 4 : FINITE PREDICTION ERROR
# 5 : RICE'S T
# Kernel ( default 4 Quartic)
# Start and end point for the smoothing parameter M,
# default is 5 (not 1 to reduce binning effects).
# and 0.3*range(data)/delta
# Boundary : roughly the proportion of observations
# at the boundary ignored in the computation.
#
# Returns a list with components M (the evaluated smoothing
# parameters), score (the normalized G(M) values) and h (the
# corresponding bandwidths M*delta).
{
  if(missing(Mend)) Mend<-floor(0.3*diff(range(x))/delta)
  Mnumber<-Mend-Mstart+1
  numdata<-length(x)
# bin mesh extended by the widest kernel support on both sides
  start<-min(x)-delta*(Mend+0.1)
  origin<-(floor(start/delta)-0.5)*delta
  numbin<-ceiling((max(x)+delta*(Mend+0.1)-origin)/delta)+1
# definition of weight function as an indexfunction, which is
# zero for bins at (and outside) the boundary of x
  binboundary<-floor((quantile(x,c(boundary/2,1-boundary/2))-origin)/delta)
  max.notempty<-min(numdata+1,numbin)
# Creation of several vectors needed in the C-routine
# Call of the C-routine; the integer/double vectors below are
# workspace filled by C, only `result` is used afterwards
  z<-.C("Gwarpingreg",
    as.double(x),
    as.double(y),
    as.double(delta),
    as.integer(c(Mstart,Mend)),
    result=double(Mnumber),
    double(Mend),
    as.double(origin),
    integer(numbin),
    integer(max.notempty),
    double(max.notempty),
    double(max.notempty),
    integer(max.notempty),
    as.integer(numdata),
    as.integer(numbin),
    as.integer(kernel),
    as.integer(selector),
    as.integer(binboundary),
    integer(max.notempty))$result
# normalize the raw score by the sample size
  z<-z/numdata
  result<-list(M=c(Mstart:Mend),score=z,h=c(Mstart:Mend)*delta)
  result
}
"GS.Warping" <-
function (x, y, h, locations, samplenumber, M = 10, M.large = 20, 
        kernel = 4) 
# GOLDEN SECTION BOOTSTRAP FOR WARPING REGRESSION
# Expected parameters :
# x predictor, y response, h bandwidth,
# locations, samplenumber, optional : M, kernel
# M.large corresponding to oversmoothing bandwidth
{
        missingflag <- (is.na(x) | is.na(y))
        x <- x[!missingflag]
        y <- y[!missingflag]
        n <- length(x)
        delta <- h/M
        start <- min(x) - delta * (M.large + 0.1)
        origin <- (floor(start/delta) - 0.5) * delta
        numbin <- ceiling((max(x) + delta * (M.large + 0.1) - 
                origin)/delta) + 1
        max.notempty <- min(n + 1, numbin)
        # specification of binmesh like for usual WARPing.
        # range of binmesh determined by M.large
        # This object code contains all C-functions
        #
        # Call of the first C-routine for BINNING THE DATA
        # STEP 1 of the algorithm above
        binned.data <- .C("Hbinning", as.double(x), as.double(y), 
                as.double(start), as.double(delta), integer(numbin), 
                integer(1), integer(max.notempty), double(max.notempty), 
                integer(max.notempty), integer(n), as.integer(n))
        bin <- binned.data[[5]]
        nl <- binned.data[[6]]
        counts <- binned.data[[7]][1:(nl + 1)]
        ysum <- binned.data[[8]][1:(nl + 1)]
        index <- binned.data[[9]][1:(nl + 1)]
        indexobs <- binned.data[[10]]
        # Results of this step : bin, nl, counts, ysum
        # index and indexobs.
        #
        # Creation of wights first time corresponding to M.
        # First part of STEP 2
        kwe <- .C("createweights", result = double(M), as.integer(M), 
                as.integer(kernel))$result
        # Result of this step : kwe corresponding to M
        #
        # Weighting the bins corresponding to M and original data
        # Second part of STEP 2
        estimate <- .C("weightingbins", double(numbin), double(numbin), 
                as.integer(nl), as.integer(counts), as.double(ysum), 
                as.integer(index), as.double(kwe), as.integer(M))
        rM <- estimate[[1]]
        fM <- estimate[[2]]
        mM <- rM/fM
        missingflag <- is.na(mM)
        mM[missingflag] <- 0
        # Missings do not matter
        # Result of this step : mM, fM corresponding to M
        # and original data
        #
        # Computation of residuals, STEP 3
        residuals <- .C("residuals", result = double(n), as.double(y), 
                as.double(mM), as.integer(indexobs), as.integer(n))$result
        # Result of this step : residuals
        #
        # Creating weights corresponding to M.large.
        # First part of STEP 4.
        kwe.M.large <- .C("createweights", result = double(M.large), 
                as.integer(M.large), as.integer(kernel))$result
        # Result of this step : kwe.M.large
        #
        # Weighting bins corresponding to M.large
        # Second part of STEP 4.
        estimate.M.large <- .C("weightingbins", double(numbin), 
                double(numbin), as.integer(nl), as.integer(counts), 
                as.double(ysum), as.integer(index), as.double(kwe.M.large), 
                as.integer(M.large))
        mM.large <- estimate.M.large[[1]]/estimate.M.large[[2]]
        missingflag <- is.na(mM.large)
        mM.large[missingflag] <- 0
        # Result of this step : mM.large
        #
        # Identifying the locations, where the bootstrap
        # observations shall be generated
        locindex <- floor((locations - origin)/delta)
        locindex <- locindex[locindex > 0]
        locindex <- locindex[locindex < numbin]
        locationnumber <- length(locindex)
        #
        # Generation of bootstrap residuals using self-defined
        # S-function twopoint.  Generate blocks of samples such that
        # each block contains app. 10000 observations.
        # First part of STEP 5.
        sampleblock <- floor(10000/n)
        blocknumber <- floor(samplenumber/sampleblock)
        mMstar <- matrix(0, samplenumber, locationnumber)
        j <- 0
        while (j < blocknumber) {
                j <- j + 1
                twopoint <- twopoint.generator(sampleblock * 
                        n)
                resstar <- rep(residuals, sampleblock) * twopoint
                # Result of this step : resstar
                #
                # Compute sum over bootstrap observations in non-empty bins
                # Second part of STEP 5.
                ysumstar <- .C("bootstrapsums", as.double(resstar), 
                        as.double(mM.large), as.integer(bin), 
                        as.integer(indexobs), result = double(nl * 
                                sampleblock + 1), as.integer(n), 
                        as.integer(sampleblock), as.integer(nl))$result
                # Result of this step : ysumstar
                #
                # Weighting bins for bootstrap samples
                # Third part of STEP 5.
                z <- .C("bootstrapestimate", result = double(sampleblock * 
                        locationnumber), double(sampleblock * 
                        locationnumber), as.double(fM), as.integer(nl), 
                        as.double(ysumstar), as.double(kwe), 
                        as.integer(M), as.integer(locindex), 
                        as.integer(locationnumber), as.integer(bin), 
                        as.integer(sampleblock))$result
                mMstar[(1 + (j - 1) * sampleblock):(j * sampleblock), 
                        ] <- z
        }
        # LAST BLOCK
        # Like other blocks, but a different number of samples
        lastblock <- samplenumber - blocknumber * sampleblock
        twopoint <- twopoint.generator(lastblock * n)
        resstar <- rep(residuals, lastblock) * twopoint
        ysumstar <- .C("bootstrapsums", as.double(resstar), as.double(mM.large), 
                as.integer(bin), as.integer(indexobs), result = double(nl * 
                        lastblock + 1), as.integer(n), as.integer(lastblock), 
                as.integer(nl))$result
        z <- .C("bootstrapestimate", result = double(lastblock * 
                locationnumber), double(lastblock * locationnumber), 
                as.double(fM), as.integer(nl), as.double(ysumstar), 
                as.double(kwe), as.integer(M), as.integer(locindex), 
                as.integer(locationnumber), as.integer(bin), 
                as.integer(lastblock))$result
        mMstar[(1 + blocknumber * sampleblock):samplenumber, 
                ] <- z
        #
        # All samples of Regression Curves generated.
        result <- list(locations = (locindex + 0.5) * delta + 
                origin, mM = mM[locindex], mM.large = mM.large[locindex], 
                mMstar = mMstar)
        result
}

twopoint.generator <- function(ndata)
{
        # Golden-section two-point distribution for the wild bootstrap:
        # draws ndata values that are a = (1-sqrt(5))/2 with probability
        # gamma = (5+sqrt(5))/10 and b = (1+sqrt(5))/2 otherwise, so the
        # draws have mean 0 and variance 1.
        lo <- (1 - sqrt(5))/2
        hi <- (1 + sqrt(5))/2
        p.lo <- (5 + sqrt(5))/10
        take.lo <- (runif(ndata) <= p.lo)
        lo * take.lo + hi * (1 - take.lo)
}
histogram <- function(data, h, x0, plot.type = TRUE, ...)
{
# Histogram density estimate with binwidth h and origin x0.
# If plot.type is TRUE the histogram is drawn with a density-scaled
# y-axis; otherwise a list of (extended) bin midpoints and density
# estimates is returned, padded with a zero bin on each side.
#
# Fixes vs. original: TRUE/FALSE instead of T/F, removed the typo'd
# argument `bar.stlye="old"` (unknown to hist(), only produced
# warnings via ...) and a leftover debug print().
  data <- data[!is.na(data)]
  n <- length(data)
# bin mesh covering the data range, anchored at x0
  first <- floor((min(data) - x0) / h)
  last <- ceiling((max(data) - x0) / h)
  binmesh <- x0 + h * c(first:last)
  counts <- hist(data, breaks = binmesh, plot = FALSE)$counts
# density estimate per bin: relative frequency over binwidth
  est <- counts / (n * h)
  if (plot.type == TRUE)
  {
    hist(data, breaks = binmesh, yaxt = "n", cex = 0.8, xlab = "", freq = FALSE, ...)
# relabel the y-axis in density units at five equispaced count levels
    maxheight <- max(counts)
    ymarks <- c(0:4) / 4 * maxheight
    ymarkslabels <- round(ymarks / (n * h), digits = 3)
    box()
    axis(2, at = ymarks, labels = ymarkslabels, srt = 90, cex = 0.8)
  }
  else
  {
# extend by one (empty) bin on each side so a frequency polygon
# drawn through the midpoints returns to zero
    midpoints <- x0 + h * c((first - 0.5):(last + 0.5))
    est <- c(0, est, 0)
    result <- list(midpoints, est)
    result
  }
}


frequency.polygon <- function(data, h, x0)
{
# Frequency polygon: joins the (zero-padded) histogram bin midpoints
# for binwidth h and origin x0 by straight line segments.
  z <- histogram(data, h, x0, FALSE)
  plot(z[[1]], z[[2]], type = "l", xlab = "", ylab = "", cex = 0.8)
}

histogram.normal.ref <- function(data, x0, plot.type = TRUE)
{
# Histogram with the normal-reference binwidth
# h = 3.491 * sd(data) * n^(-1/3) (Scott's rule), origin x0.
# Fixes vs. original: TRUE instead of T, sd() instead of
# sqrt(var()) — identical value, clearer intent.
  data <- data[!is.na(data)]
  n <- length(data)
  s <- sd(data)
  h <- 3.491 * s * n^(-1/3)
  histogram(data, h, x0, plot.type)
}
"k.nn" <-
function (x, y, k) 
{
        missingflag <- (is.na(x) | is.na(y))
        x <- x[!missingflag]
        y <- y[!missingflag]
        # selection of missings
        n <- length(x)
        order.x <- order(x)
        y <- y[order.x]
        x <- x[order.x]
        # sorting x and y
        start <- 1 + floor(k/2)
        mk <- rep(NA, n)
        # fill the vector of estimates with missings
        mk[start] <- sum(y[1:k])/k
        # at x[start] the first k observations are the
        # k nearest neighbours.
        difference <- diff(y, k)/k
        # computes differences of y to the lag k
        mk[(start + 1):(n - start)] <- cumsum(c(mk[start], difference))
        # compute cumulative sums over differences
        result <- list(m = mk, x = x, y = y)
        result
}
kde <- function(data, kernel, h, points = 100)
{
# Kernel density estimation through the C routine "kde".
# data: observations; kernel: kernel code; h: bandwidth;
# points: grid size.  Returns a `points` x 2 matrix with the
# evaluation grid in column 1 and the estimates in column 2.
  data <- data[!is.na(data)]
  n <- length(data)
  out <- .C("kde",
            as.double(data),
            as.integer(n),
            x = double(points),
            y = double(points),
            as.integer(kernel),
            as.double(h),
            as.integer(points),
            as.double(range(data)))
  matrix(c(out$x, out$y), points, 2)
}
kde.old <- function(data, kernel, h, points)
{
# Kernel density estimation using the C routine "kernelestimation",
# which calls back into the selected S kernel function.
# The kernel code 1..7 picks the corresponding function; the
# function object is sent to C wrapped in a list().
  fun <- switch(kernel,
    uniform,
    triangle,
    epanechnikov,
    quartic,
    triweight,
    kgaussian,
    cosinus)
  data <- data[!is.na(data)]
  n <- length(data)
  out <- .C("kernelestimation",
    list(fun),
    as.double(data),
    as.integer(n),
    double(points),
    double(points),
    as.integer(kernel),
    as.double(h),
    as.integer(points),
    as.double(range(data)))
# positions 4 and 5 hold grid and estimates filled by C
  matrix(c(out[[4]], out[[5]]), points, 2)
}

uniform <- function(x)
{
# Uniform (box) kernel: K(x) = 1/2 on [-1, 1] and 0 elsewhere.
  (abs(x) <= 1) / 2
}
triangle<-function(x)
{
# Triangle kernel: K(x) = 1 - |x| on [-1, 1] and 0 elsewhere.
# BUG FIX: the original computed (1 - abs(u)) with an undefined
# variable `u` instead of the argument `x`, so any call failed
# with "object 'u' not found".
  result <- (1 - abs(x)) *(abs(x) <= 1)
  result
}
epanechnikov <- function(x)
{
# Epanechnikov kernel: K(x) = (3/4)(1 - x^2) on [-1, 1], 0 elsewhere.
  inside <- abs(x) <= 1
  0.75 * (1 - x^2) * inside
}
quartic <- function(x)
{
# Quartic (biweight) kernel: K(x) = (15/16)(1 - x^2)^2 on [-1, 1],
# 0 elsewhere (0.9375 = 15/16).
  w <- 1 - x^2
  0.9375 * w^2 * (abs(x) <= 1)
}
triweight <- function(x)
{
# Triweight kernel: K(x) = (35/32)(1 - x^2)^3 on [-1, 1],
# 0 elsewhere (1.09375 = 35/32).
  w <- 1 - x^2
  1.09375 * w^3 * (abs(x) <= 1)
}
kgaussian<-function(x)
{
# Gaussian kernel: the standard normal density.
# FIX: the original used the truncated constant 0.3989 instead of
# 1/sqrt(2*pi) = 0.3989423..., so the kernel did not integrate to 1;
# use the exact constant.
  result <- exp(-0.5*x*x) / sqrt(2*pi)
  result
}
cosinus <- function(x)
{
# Cosine kernel: K(x) = (pi/4) cos(pi x / 2) on [-1, 1], 0 elsewhere.
  inside <- abs(x) <= 1
  0.25*pi * cos(pi*0.5*x) * inside
}
kds.ci <- function(data, h, g, x, alpha)
{
# Simultaneous confidence intervals for a kernel density estimate,
# computed through the C routine "kerneldensityci".
# data: observations; h: bandwidth; g: second bandwidth used by the
# C routine (presumably a pilot bandwidth for the bias term — confirm
# against the C source); x: evaluation points; alpha: overall level.
# Returns an nx x 5 matrix: x, the density estimate, the auxiliary
# estimate used for the bias correction, lower and upper limits.
# (Removed a redundant second `n <- length(data)` from the original.)
  data <- data[!is.na(data)]
  n <- length(data)
  x <- x[!is.na(x)]
  nx <- length(x)
  result <- .C("kerneldensityci",
    as.double(data),
    as.integer(n),
    as.double(h),
    as.double(g),
    double(nx),
    double(nx),
    as.double(x),
    as.integer(nx))
# per-interval confidence level adjusted so that all nx intervals
# hold simultaneously
  alpha.simult <- (1 - alpha/2)^(1/nx)
# corresponding quantile of the standard normal distribution
  u.a.s <- qnorm(alpha.simult)
# rescaled bandwidth; bias and variance estimates follow the
# asymptotic expansion used in the book
  c.h <- h*n^.2
  bias.est <- c.h^2*result[[6]]/2
  var.est <- result[[5]]/(2*sqrt(pi)*c.h)
  ci.lower <- result[[5]]-n^(-0.4)*(bias.est+u.a.s*sqrt(var.est))
  ci.upper <- result[[5]]-n^(-0.4)*(bias.est-u.a.s*sqrt(var.est))
  final.result <- matrix(c(x,result[[5]],result[[6]],
    ci.lower, ci.upper),nx,5)
  final.result
}
# S-function calling the C-routine for the regression smoothing
# by WARPing. Expected parameters : predictor x, response y
# bandwidth h, number of bins M (optional), the chosen kernel, 
# coded by 1 up to 5 (optional): 1 = Uniform, 2 = Triangle, 
# 3 = Epanechnikov, 4 = Quartic, 5 = Triweight
# and the na.handling (optional)
NW.Warping <-function(x,y,h,M=10,kernel=4,na.handling=0)
# Nadaraya-Watson regression smoothing by WARPing (C routine
# "warpingregression").  x: predictor, y: response, h: bandwidth,
# M: number of small bins per bandwidth, kernel: code 1..5,
# na.handling: flag passed through to the C routine.
# Returns a list with the bin midpoints covering range(x), the
# estimates m at those midpoints, and the cleaned data x, y.
{
# drop observations where either coordinate is missing
  missingflag<-(is.na(x) | is.na(y))
  x <-x[!missingflag]
  y <-y[!missingflag]
  n <-length(x)
# binwidth of the small bins
  delta <-h/M
# bin mesh extended by one bandwidth on both sides
  start<-min(x)-h-delta*0.1
  origin<-(floor(start/delta)-0.5)*delta
  numbin<-ceiling((max(x)+h+delta*0.1-origin)/delta)+1
# non-empty bins bounded by sample size and bin count
  max.notempty<-min(n+1,numbin)
# the vectors below are workspace filled by the C routine;
# only `result` is used afterwards
  z<-.C("warpingregression",
    as.double(x),
    as.double(y),
    as.double(h),
    as.integer(M),
    double(numbin),
    double(numbin),
    result=double(numbin),
    double(M),
    as.double(origin),
    integer(numbin),
    integer(max.notempty),
    double(max.notempty),
    integer(max.notempty),
    as.integer(n),
    as.integer(numbin),
    as.integer(kernel),
    as.integer(na.handling))$result
# keep only the bins inside the observed x-range
  indexstart<-floor((min(x)-origin)/delta)
  indexend<-floor((max(x)-origin)/delta)
  result<-list(
    midpoints=origin+(0.5+c(indexstart:indexend))*delta,
    m=z[(indexstart+1):(indexend+1)],
    x=x,
    y=y)
  result
} 


"NW.kernel" <-
function (x, y, h, kernel = 4, points = 100, na.handling = 0) 
{
        help <- (is.na(x) | is.na(y))
        x <- x[!help]
        y <- y[!help]
        n <- length(x)
        result <- .C("kernelregression", as.double(x), as.double(y), 
                as.double(h), double(points), double(points), 
                double(points), double(points), as.integer(n), 
                as.integer(points), as.integer(kernel), as.double(range(x)), 
                as.integer(na.handling))
        indexrange <- ((result[[4]] - min(x)) * (max(x) - result[[4]]) >= 
                0)
        result.list <- list(grid = result[[4]][indexrange], m = result[[7]][indexrange], 
                x = x, y = y)
        result.list
}
ortho.series0.1 <- function(data, N)
{
# Orthogonal (Fourier) series density estimator on [0, 1] with N
# coefficients beyond the constant term.  Even i uses the sine basis
# sqrt(2) sin(pi i x); odd i uses the cosine basis
# sqrt(2) cos(pi (i+1) x), so the bases appear in the order
# cos(2 pi x), sin(2 pi x), cos(4 pi x), ...
# Returns a 101 x 2 matrix: grid 0, 0.01, ..., 1 and the estimate.
  data <- data[!is.na(data)]
  grid <- c(0:100)/100
  coef <- rep(0, N)
  estimate <- rep(1, 101)
  for (i in 1:N) {
    if (i %% 2 == 0) {
      coef[i] <- sqrt(2) * mean(sin(pi * i * data))
      basis <- sqrt(2) * sin(pi * i * grid)
    } else {
      coef[i] <- sqrt(2) * mean(cos(pi * (i + 1) * data))
      basis <- sqrt(2) * cos(pi * (i + 1) * grid)
    }
    estimate <- estimate + coef[i] * basis
  }
  matrix(c(grid, estimate), 101, 2)
}
Simul.conf.interval<-function(x,y,h,alpha=0.2,N=200)
# Simultaneous bootstrap confidence intervals for WARPing regression.
# x: predictor, y: response, h: bandwidth, alpha: overall error
# level, N: number of bootstrap samples.  Locations are placed on a
# grid of spacing 3h/11; the intervals are computed in groups of 7
# neighbouring locations with level alpha/G (G = number of groups),
# then bias-corrected using the oversmoothed estimate.
{
# evaluation locations spaced 3h/11 apart inside range(x)
  locationnumber<-floor(diff(range(x))/h*11/3)-1
  locations<-min(x)+c(1:locationnumber)*3*h/11
  limits<-matrix(0,locationnumber,2)
# bootstrap replications of the regression estimate (M = 11 small
# bins per bandwidth, M.large = 22 for the oversmoothed estimate)
  Bootstrap<-GS.Warping(x,y,h,locations,samplenumber=N,M=11,M.large=22)
# split the locations into G groups of (at most) 7 and distribute
# the error level over the groups
  G<-ceiling(locationnumber/7)
  alpha.G<-alpha/G
  for(j in 1:(G-1))
  {
    group<-Bootstrap$mMstar[,(1+(j-1)*7):(j*7)]
    limits[(1+(j-1)*7):(j*7),]<-Within.group(group,alpha.G)
  }
# last group may contain fewer than 7 locations; keep it a matrix
  lastgroup<-locationnumber-(G-1)*7
  group<-matrix(Bootstrap$mMstar[,(1+(G-1)*7):locationnumber],N,lastgroup)
  limits[(1+(G-1)*7):locationnumber,]<-Within.group(group,alpha.G)
# recenter the bootstrap limits from the oversmoothed estimate to
# the actual estimate (bias correction)
  conf.limits<-limits-Bootstrap$mM.large+Bootstrap$mM
  list(locations=Bootstrap$locations,
    lower.limit=conf.limits[,1],
    upper.limit=conf.limits[,2],
    m=Bootstrap$mM)
}

Within.group<-function(data,beta)
# Simultaneous confidence limits within one group of locations from
# order statistics of the bootstrap samples.
# data: N x L matrix (N bootstrap samples at L locations);
# beta: error level for this group.
# Returns an L x 2 matrix of lower (column 1) and upper (column 2)
# limits, linearly interpolated between the two neighbouring pairs
# of order statistics whose empirical levels bracket beta.
{
  N<-dim(data)[1]
  L<-dim(data)[2]
  order.data<-matrix(0,N,L)
# Compute the ranks of the observations for each location
  for (k in 1:L)
  {
    order.data[,k]<-order(data[,k])
  }
  niveau<-0
  i<-1
  lower.limit<-rep(0,L)
  lower.limit.lower<-rep(0,L)
  lower.limit.upper<-rep(0,L)
  upper.limit<-rep(0,L) 
  upper.limit.lower<-rep(0,L)
  upper.limit.upper<-rep(0,L)
  diff.size<-rep(0,N)
# The N elememts of diff.size correspond to N samples and are
# initially set to 0.  When a sample contains observations outside
# the confidence limits, the corresponding element of diff.size
# is set to 1 using the vector of ranks, which contains exactly
# the information, which sample contains the i-th largest and
# smallest observation.
  while(niveau<beta)
  {
    i<-i+1
# Suppose conf.limits as the i-th and (N-i+1)-th order statistic.
    diff.size[order.data[i-1,]]<-1
    diff.size[order.data[N-i+2,]]<-1
# Then the (i-1)-th and (N-i+2) order statistics lie outside
# these confidence limits.
    alpha.upper<-niveau
    niveau<-sum(diff.size)/N
# Save the old niveau as alpha.upper and compute new niveau.
# If niveau too small take next order statistics.
  }
  for(k in 1 : L)
  {
    lower.limit.upper[k]<-data[order.data[i-1,k],k]
    upper.limit.upper[k]<-data[order.data[N-i+2,k],k]
# Identify confidence limits for the upper niveau with the
# (i-1)-th and (N-i+2)-th order statistics.
  lower.limit.lower[k]<-data[order.data[i,k],k]
  upper.limit.lower[k]<-data[order.data[N-i+1,k],k]
# Identify confidence limits for the lower niveau with the
# i-th and (N-i+1)-th order statistics.
  }
  alpha.lower<-niveau
# Linear interpolation between the two candidate limit pairs so the
# interpolated limits correspond to level beta exactly.
  upper.weight<-(alpha.lower-beta)/(alpha.lower-alpha.upper)
  lower.weight<-1-upper.weight
# Compute the weights of the confidence limits.
  for(k in 1 : L)
  {
    upper.limit[k]<-lower.weight*upper.limit.lower[k] +
    	upper.weight*upper.limit.upper[k]
    lower.limit[k]<-lower.weight*lower.limit.lower[k] +
   	 upper.weight*lower.limit.upper[k]
# Weight the confidence limits.
  }
  interval<-matrix(c(lower.limit,upper.limit),L,2)
  interval
}

Simul.conf.plot <- function(x, y, h, alpha = 0.2, N = 200)
{
# Plot the WARPing regression estimate together with simultaneous
# bootstrap confidence limits (dashed lines).
  ci <- Simul.conf.interval(x, y, h, alpha, N = N)
  ylimits <- c(min(ci$lower.limit), max(ci$upper.limit))
  plot(ci$locations, ci$m, type = "l", cex = 0.6, xlab = "", ylab = "", ylim = ylimits)
  lines(ci$locations, ci$lower.limit, lty = 2)
  lines(ci$locations, ci$upper.limit, lty = 2)
}
# WARPing density estimation in S
# expected parameter : data x, bandwidth h, number of
# small bins M and the chosen kernel, coded by 1 up to 6.
# 1 = Uniform, 2 = Triangle (ASH), 3 = Epanechnikov,
# 4 = Quartic, 5 = Triweight, 6 = Gaussian
#
warping <- function(x, h, M, kernel)
{
# WARPing density estimation (C routine "warp").
# x: data, h: bandwidth, M: number of small bins per bandwidth,
# kernel: code 1..6 (1 Uniform, 2 Triangle/ASH, 3 Epanechnikov,
# 4 Quartic, 5 Triweight, 6 Gaussian).
# Returns a numbin x 2 matrix: bin midpoints and estimates.
	x <- x[!is.na(x)]
	n <- length(x)
	# kernel code 6 rescales h and M by 4 — presumably to cover
	# more of the Gaussian's unbounded support; confirm in C source
	if (kernel == 6) {
		h <- h * 4
		M <- 4 * M
	}
	delta <- h / M          # binwidth of the small bins
	numbin <- floor((max(x) - min(x)) / delta) + 2 * (M + 1 + ceiling(M / 10))
	start <- min(x) - h - delta * 0.1
	origin <- (floor(start / delta) - 0.5) * delta
	max.notempty <- min(n + 1, numbin)
	# workspace vectors filled by C; only `result` is used
	z <- .C("warp",
		as.double(x),
		result = double(numbin),
		as.double(h),
		double(M),
		as.double(origin),
		integer(numbin),
		integer(max.notempty),
		integer(max.notempty),
		as.integer(M),
		as.integer(n),
		as.integer(kernel))$result
	# column 1: bin midpoints, column 2: density estimates
	midpoints <- (0.5 + c(0:(numbin - 1))) * delta + origin
	matrix(c(midpoints, z), numbin, 2)
}

# WARPing density estimation in S
# expected parameter : data x, bandwidth h, number of
# small bins M and the chosen kernel, coded by 1 up to 5.
# 1 = Uniform, 2 = Triangle (ASH), 3 = Epanechnikov,
# 4 = Quartic, 5 = Triweight
#
warping.old <- function(x, h, M, kernel)
{
# Older WARPing density estimator (C routine "warping").
# x: data, h: bandwidth, M: small bins per bandwidth,
# kernel: code 1..5 (1 Uniform, 2 Triangle/ASH, 3 Epanechnikov,
# 4 Quartic, 5 Triweight).
# Returns a numbin x 2 matrix: bin midpoints and estimates.
  x <- x[!is.na(x)]                 # drop missing observations
  n <- length(x)
  delta <- h / M                    # binwidth of the small bins
  numbin <- floor((max(x) - min(x)) / delta) + 2 * (M + 1 + ceiling(M / 10))
# origin = left border of the first bin, chosen so the estimation
# interval begins nearly at min(x) - h and ends nearly at max(x) + h
  start <- min(x) - h - delta * 0.1
  origin <- (floor(start / delta) - 0.5) * delta
# non-empty bins bounded by the sample size and the bin count
  max.notempty <- min(n + 1, numbin)
# workspace vectors filled by the C routine; `result` holds the
# estimates on return
  z <- .C("warping",
          as.double(x),
          result = double(numbin),
          as.double(h),
          double(M),
          as.double(origin),
          integer(numbin),
          integer(max.notempty),
          integer(max.notempty),
          as.integer(M),
          as.integer(n),
          as.integer(kernel))$result
# first column: bin midpoints, second column: the estimates
  warp <- matrix(0, numbin, 2)
  warp[, 1] <- (0.5 + c(0:(numbin - 1))) * delta + origin
  warp[, 2] <- z
  warp
}


"warping.step" <-
function (x, bandwidth, M, kernel, axes = T, ...) 
{
        est <- warping(x, bandwidth, M, kernel)
        step <- est[2, 1] - est[1, 1]
        breaks <- est[, 1] - step/2
        #  barplot(height=est[,2],width=est[,1]-step/2,histo=T,inside=FALSE,cex=0.6,
        #  style.bar="old", ...)
        plot.new()
        plot.window(xlim = range(x), ylim = c(0, max(est[, 2])), 
                "")
        rect(breaks[-length(breaks)], 0, breaks[-1], est[, 2])
        #  n <- dim(est)[1]
        #  index <- round(n/10*c(1,3,5,7,9), digits=0)
        box()
        #  axis(side=1,at=index,labels=signif(est[index,1],digits=2),cex=0.6)
        if (axes) {
                axis(1, ...)
                axis(2, ...)
        }
}
library.dynam("haerdle.so")

