# file MASS/area.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
"area"<-
function(f, a, b, ..., fa = f(a, ...), fb = f(b, ...), limit
	 = 10, eps = 1e-5)
{
	h <- b - a
	d <- (a + b)/2
	fd <- f(d, ...)
	a1 <- ((fa + fb) * h)/2
	a2 <- ((fa + 4 * fd + fb) * h)/6
	if(abs(a1 - a2) < eps)
		return(a2)
	if(limit == 0) {
		warning(paste("iteration limit reached near x = ",
			d))
		return(a2)
	}
	Recall(f, a, d, ..., fa = fa, fb = fd, limit = limit - 1,
		eps = eps) + Recall(f, d, b, ..., fa = fd, fb = 
		fb, limit = limit - 1, eps = eps)
}
"fbeta"<-
function(x, alpha, beta)
{
	x^(alpha - 1) * (1 - x)^(beta - 1)
}
"print.abbrev"<-
function(obj, ...)
{
	if(is.list(obj))
		obj <- unlist(obj)
	NextMethod("print")
}
# file MASS/boxcox.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
"boxcox"<- function(object, ...)
UseMethod("boxcox")

"boxcox.formula"<-
function(object, lambda = seq(-2, 2, 1/10), plotit = length(
	dev.list()) > 0, interp = (plotit && (m < 100)), eps = 1/
	50, xlab = "lambda", ylab = "log-Likelihood", ...)
{
	object <- lm(object, y = T, qr = T, ...)
	result <- NextMethod()
	if(plotit) invisible(result)
	else result
}

"boxcox.lm"<-
function(object, lambda = seq(-2, 2, 1/10), plotit = length(
	dev.list()) > 0, interp = (plotit && (m < 100)), eps = 1/
	50, xlab = "lambda", ylab = "log-Likelihood", ...)
{
	if(is.null(object$y) || is.null(object$qr))
		object <- update(object, y = T, qr = T, ...)
	result <- NextMethod()
	if(plotit) invisible(result)
	else result
}

"boxcox.default"<-
function(object, lambda = seq(-2, 2, 1/10), plotit = length(
	dev.list()) > 0, interp = (plotit && (m < 100)), eps = 1/
	50, xlab = "lambda", ylab = "log-Likelihood", ...)
{
	if(is.null(object$y) || is.null(object$qr))
		stop(paste(deparse(substitute(object)), 
			"does not have both `qr' and `y' components"
			))
	y <- object$y
	n <- length(y)
	if(any(y <= 0))
		stop("Response variable must be positive")
	xqr <- object$qr
	logy <- log(y)
	ydot <- exp(mean(logy))
	xl <- loglik <- as.vector(lambda)
	m <- length(xl)
	for(i in 1:m) {
		if(abs(la <- xl[i]) > eps)
			yt <- (y^la - 1)/la
		else yt <- logy * (1 + (la * logy)/2 * (1 + (la * 
				logy)/3 * (1 + (la * logy)/4)))
		loglik[i] <-  - n/2 * log(sum(qr.resid(xqr, yt/
			ydot^(la - 1))^2))
	}
	if(interp) {
		sp <- spline(xl, loglik, n = 100)
		xl <- sp$x
		loglik <- sp$y
		m <- length(xl)
	}
	if(plotit) {
		mx <- (1:m)[loglik == max(loglik)][1]
		Lmax <- loglik[mx]
		lim <- Lmax - qchisq(19/20, 1)/2
		plot(xl, loglik, xlab = xlab, ylab = ylab, type
			 = "l", ylim = range(loglik, lim))
		plims <- par("usr")
		abline(h = lim, lty = 3)
		y0 <- plims[3]
		scal <- (1/10 * (plims[4] - y0))/par("pin")[2]
		scx <- (1/10 * (plims[2] - plims[1]))/par("pin")[
			1]
		text(xl[1] + scx, lim + scal, " 95%")
		la <- xl[mx]
		if(mx > 1 && mx < m)
			segments(la, y0, la, Lmax, lty = 3)
		ind <- range((1:m)[loglik > lim])
		if(loglik[1] < lim) {
			i <- ind[1]
			x <- xl[i - 1] + ((lim - loglik[i - 1]) * (
				xl[i] - xl[i - 1]))/(loglik[i] - 
				loglik[i - 1])
			segments(x, y0, x, lim, lty = 3)
		}
		if(loglik[m] < lim) {
			i <- ind[2] + 1
			x <- xl[i - 1] + ((lim - loglik[i - 1]) * (
				xl[i] - xl[i - 1]))/(loglik[i] - 
				loglik[i - 1])
			segments(x, y0, x, lim, lty = 3)
		}
	}
	list(x = xl, y = loglik)
}

# file MASS/contr.sdif.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
contr.sdif <- function(n, contrasts = T) {
  ## Successive-difference contrasts: column j of the contrast matrix
  ## estimates level (j+1) minus level j.  `n` is either the number of
  ## levels or a vector of level labels.
  if(is.numeric(n) && length(n) == 1) {
    if(n %% 1 || n < 2)
      stop("invalid degree")
    lev.names <- as.character(seq(n))
  } else {
    lev.names <- as.character(n)
    n <- length(n)
    if(n < 2)
      stop("invalid number of levels")
  }
  if(!contrasts)
    return(structure(diag(n), dimnames = list(lev.names, lev.names)))
  ## Build the n x (n-1) matrix with entry j in every row, subtract n
  ## on and above the diagonal, and scale by 1/n: columns sum to zero.
  cmat <- col(matrix(nrow = n, ncol = n - 1))
  top <- !lower.tri(cmat)
  cmat[top] <- cmat[top] - n
  structure(cmat/n,
            dimnames = list(lev.names,
                            paste(lev.names[-1], lev.names[-n], sep = "-")))
}
# file MASS/corresp.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
"corresp" <- function(x, ...)
{
  if(is.null(class(x))) class(x) <- data.class(x)
  UseMethod("corresp")
}

"corresp.crosstabs" <- function(x, ...)
{
  if((m <- length(dim(x))) > 2)
    stop(paste("Frequency table is", m, "dimensional"))
  corresp.matrix(structure(x, marginals = NULL, call = NULL, class = NULL))
}

"corresp.data.frame" <- function(x, ...)
corresp.matrix(as.matrix(x), ...)

"corresp.default" <- function(x, ...)
stop("invalid table specification")

"corresp.factor" <- function(x, y, ...)
corresp.matrix(table(x, y), ...)

"corresp.formula" <- function(formula, data = sys.parent(), ...)
{
  rhs <- formula[[length(formula)]]
  if(length(rhs[[2]]) > 1 || length(rhs[[3]]) > 1)
    stop("higher way table requested.  Only 2 way allowed")
  tab <- table(eval(rhs[[2]], data), eval(rhs[[3]], data))
  names(dimnames(tab)) <- as.character(c(rhs[[2]], rhs[[3]]))
  corresp.matrix(tab, ...)
}

"corresp.matrix" <- function(x, nf = 1, ...)
{
  if(any(x < 0 | x %% 1 > 10 * sqrt(.Machine$double.eps)))
    warning("negative or non-integer entries in table")
  if((N <- sum(x)) == 0) stop("all frequencies are zero")
  Dr <- drop(x %*% (rep(1/N, ncol(x))))
  Dc <- drop((rep(1/N, nrow(x))) %*% x)
  if(any(Dr == 0) || any(Dc == 0)) stop("empty row or column in table")
  x1 <- x/N - outer(Dr, Dc)
  Dr <- 1/sqrt(Dr)
  Dc <- 1/sqrt(Dc)
  if(is.null(dimnames(x)))
    dimnames(x) <- list(Row = paste("R", 1:nrow(x)), 
			Col = paste("C", 1:ncol(x)))
  if(is.null(names(dimnames(x))))
    names(dimnames(x)) <- c("Row", "Column")
  X.svd <- svd(t(t(x1 * Dr) * Dc))
  dimnames(X.svd$u) <- list(dimnames(x)[[1]], NULL)
  dimnames(X.svd$v) <- list(dimnames(x)[[2]], NULL)
  structure(list(cor = X.svd$d[1:nf], rscore = X.svd$u[, 1:nf] * Dr, 
		 cscore = X.svd$v[, 1:nf] * Dc, Freq = x),
	    class = "correspondence")
}

"plot.correspondence" <- function(x, scale=1, ...)
{
  if(length(x$cor) > 1) return(invisible(biplot(x, ...)))
  Fr <- x$Freq
  rs <- x$rscore
  cs <- x$cscore
  xs <- range(cs)
  xs <- xs + diff(xs) * c(-1/5, 1/5)
  ys <- range(rs)
  ys <- ys + diff(ys) * c(-1/5, 1/5)
  x <- cs[col(Fr)]
  y <- rs[row(Fr)]
  rcn <- names(dimnames(Fr))
  plot(x, y, xlim = xs, ylim = ys, xlab = rcn[2], ylab = rcn[1], pch = 3)
  size <- min(par("pin"))/20 * scale
  symbols(x, y, circles = as.vector(sqrt(Fr)), inches = size, add = T)
  x0 <- (min(cs) + min(xs))/2
  y0 <- (min(rs) + min(ys))/2
  text(cs, y0, names(cs))
  text(x0, rs, names(rs), adj = 1)
  invisible()
}

"print.correspondence" <- function(x, ...)
{
  cat("First canonical correlation(s):", format(x$cor, ...), "\n")
  rcn <- names(dimnames(x$Freq))
  cat("\n", rcn[1], "scores:\n")
  print(x$rscore)
  cat("\n", rcn[2], "scores:\n")
  print(x$cscore)
  invisible(x)
}

## biplot method for correspondence analysis fitted with nf >= 2.
## type = "symmetric": both score sets are scaled by the canonical
## correlations and drawn with biplot.default; "rows"/"columns": that
## set is left in standard coordinates and drawn on the primary axes
## via biplot.bdr.
biplot.correspondence <-
  function(obj, type=c("symmetric", "rows", "columns"), ...)
{
  if(length(obj$cor) < 2) stop("biplot is only possible if nf >= 2")
  type <- match.arg(type)
  X <- obj$rscore[, 1:2]
  if(type != "columns") X <- X %*% diag(obj$cor[1:2])
  dimnames(X)[[2]] <- rep("", 2)
  Y <- obj$cscore[, 1:2]
  if(type != "rows")  Y <- Y %*% diag(obj$cor[1:2])
  switch(type, "symmetric" = biplot.default(X, Y, var.axes=F, ...),
         "rows" = biplot.bdr(X, Y, ...),
         "columns" = biplot.bdr(Y, X, ...))
  invisible()
}

## Biplot helper: overlay an observation cloud (`obs`) and a second set
## of points (`bivars`) on a square plot with equal scales for both,
## labelling the secondary set on the top/right axes in a second
## colour.
biplot.bdr <-
function(obs, bivars, col, cex = rep(par("cex"), 2), 
	olab = NULL, vlab = NULL, xlim = NULL, ylim = NULL, ...)
{
 # for cases where we need equal scales for the two sets of vars.
  ## Ensure 0 is inside the range so axes always cross the origin.
  expand.range <- function(x)
  {
    if(x[1] > 0) x[1] <-  - x[1]
    else if(x[2] < 0) x[2] <-  - x[2]
    x
  }
  n <- dim(obs)[1]
  p <- dim(bivars)[1]
  ## Resolve labels for the second set (default: its row names).
  vlab.real <- dimnames(bivars)[[1]]
  if(is.logical(vlab)) vlab <- vlab.real[vlab]
  else if(length(vlab) != p) vlab <- vlab.real
  else vlab <- as.character(vlab)
  if(!length(vlab)) {
    vlab.real <- vlab <- paste("Var", 1:p)
    dimnames(bivars) <- list(vlab, dimnames(bivars)[[2]])
  }
  ## Observation labels (default: row names, else row numbers).
  if(length(olab)) olab <- rep(as.character(olab), length = n)
  else {
    olab <- dimnames(obs)[[1]]
    if(length(olab) != n) olab <- as.character(1:n)
  }
  if(length(cex) != 2) cex <- rep(cex, length = 2)
  ## Default colours: current colour plus the next palette entry.
  if(missing(col)) {
    col <- par("col")
    if (!is.numeric(col)) col <- match(col, palette())
    col <- c(col, col + 1)
  }
  else if(length(col) != 2) col <- rep(col, length = 2)
  ro1 <- expand.range(range(obs[, 1]))
  ro2 <- expand.range(range(obs[, 2]))
  rv1 <- expand.range(range(bivars[, 1]))
  rv2 <- expand.range(range(bivars[, 2]))
  if(!(length(xlim) || length(ylim)))
    xlim <- ylim <- range(ro1, ro2, rv1, rv2)
  else if(length(ylim)) xlim <- range(ro1, rv1)
  else ylim <- range(ro2, rv2)
  on.exit(par(oldpar))
  oldpar <- par(pty = "s")
  ## First layer: observations; second layer: the other point set with
  ## its own axes on sides 3 and 4.
  plot(obs, type = "n", xlim = xlim, ylim = ylim, col = col[1], ...)
  text(obs, labels=olab, cex = cex[1], col = col[1], ...)
  par(new = T)
  plot(bivars, axes = F, type = "n", xlim = xlim, ylim = 
       ylim, xlab = "", ylab = "", col = col[1], ...)
  axis(3, col = col[2])
  axis(4, col = col[2])
  box(col = col[1])
  text(bivars, labels=vlab, cex = cex[2], col = col[2], ...)#
  invisible()
}

# file MASS/cov.trob.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
cov.trob <- function(x, wt = rep(1, n), cor = F, center = T, nu = 5, maxit
	 = 25, tol = 0.01)
{
  ## Robust estimate of location and covariance assuming a multivariate
  ## t distribution on nu degrees of freedom, fitted by EM (iterative
  ## reweighting).  Returns list(cov, center, [wt,] n.obs, [cor,] call,
  ## iter).  `center` may be T (estimate), F (fix at 0) or a numeric
  ## vector of length ncol(x).

  ## Reject missing or non-finite values up front.
  ## Fix: the original called which.inf()/which.na(), which do not
  ## exist in R; is.finite() covers NA, NaN and +/-Inf.  The message
  ## now names cov.trob rather than cov.wt.
  test.values <- function(x)
  {
    if(any(!is.finite(x)))
      stop(paste("error in cov.trob: missing or infinite values in",
                 deparse(substitute(x))))
  }
  ## Sweep a centre vector out of the rows of an n x p matrix.
  scale.simp <- function(x, center, n, p) x - rep(center, rep.int(n, p))

  x <- as.matrix(x)
  n <- nrow(x)
  p <- ncol(x)
  dn <- dimnames(x)[[2]]
  test.values(x)
  if(!(miss.wt <- missing(wt))) {
    test.values(wt)
    if(length(wt) != n)
      stop("length of wt must equal number of observations")
    if(any(wt < 0)) stop("negative weights not allowed")
    if(!sum(wt)) stop("no positive weights")
  }
  ## Starting location: weighted column means, unless a fixed centre
  ## was supplied or centring was disabled.
  loc <- apply(wt * x, 2, sum)/sum(wt)
  if(is.numeric(center)) {
    if(length(center) != p) stop("center is not the right length")
     loc <- center
  } else if(is.logical(center) && !center) loc <- rep(0, p)
  use.loc <- is.logical(center) && center
  ## EM iterations: case weights w are updated from the Mahalanobis
  ## distances Q under the current fit.
  w <- wt * (1 + p/nu)
  endit <- 0
  for(iter in 1:maxit) {
    w0 <- w
    X <- scale.simp(x, loc, n, p)
    sX <- svd(sqrt(w/n) * X, nu = 0)
    wX <- X %*% sX$v %*% diag(1/sX$d,  , p)
    Q <- drop(wX^2 %*% rep(1, p))
    w <- (wt * (nu + p))/(nu + Q)
    ## NOTE(review): modern MASS divides by sum(w) here, not n --
    ## confirm which behaviour is wanted before changing it.
    if(use.loc) loc <- apply(w * x, 2, sum)/n
    if(all(abs(w - w0) < tol)) break
    endit <- iter
  }
  if(endit == maxit || abs(mean(w) - mean(wt)) > tol ||
     abs(mean(w * Q)/p - 1) > tol)
    warning("Probable convergence failure")
  cov <- crossprod(sqrt(w) * X)/sum(wt)
  if(length(dn)) {
    dimnames(cov) <- list(dn, dn)
    names(loc) <- dn
  }
  if(miss.wt)
    ans <- list(cov = cov, center = loc, n.obs = n)
  else ans <- list(cov = cov, center = loc, wt = wt, n.obs = n)
  if(cor) {
    ## Convert the covariance matrix to a correlation matrix.
    sd <- sqrt(diag(cov))
    cor <- (cov/sd)/rep(sd, rep.int(p, p))
    if(length(dn)) dimnames(cor) <- list(dn, dn)
    ans <- c(ans, list(cor = cor))
  }
  ans$call <- match.call()
  ans$iter <- endit
  ans
}
# file MASS/cpgram.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
## Cumulative periodogram of a time series with Kolmogorov-Smirnov 95%
## bands -- a graphical white-noise test.  `taper` is the split-cosine
## taper proportion applied before the FFT.
cpgram <- function(ts, taper=0.1, 
   main=paste("Series: ", deparse(substitute(ts))) )
{
   ## Force the default title promise now, before ts is used below.
   eval(main)
   x <- as.vector(ts)
   x <- x[!is.na(x)]
   ## Mean-correct and taper, then take the raw periodogram.
   x <- spec.taper(scale(x, T, F), p=taper)
   y <- Mod(fft(x))^2/length(x)
   y[1] <- 0
   n <- length(x)
   x <- (0:(n/2))*frequency(ts)/n
   ## Keep an odd number of ordinates so the Nyquist term is handled
   ## consistently.
   if(length(x)%%2==0) {
      n <- length(x)-1
      y <- y[1:n]
      x <- x[1:n]
   } else y <- y[seq(along=x)]
   xm <- frequency(ts)/2
   mp <- length(x)-1
   ## 95% K-S critical value for mp points.
   crit <- 1.358/(sqrt(mp)+0.12+0.11/sqrt(mp))
   oldpty <- par()$pty
   par(pty="s")
   plot(x, cumsum(y)/sum(y), type="s", xlim=c(0, xm), 
      ylim=c(0, 1), xaxs="i", yaxs="i", xlab="frequency", 
      ylab="", pty="s")
   ## Confidence band: two parallel lines at +/- crit about the
   ## diagonal.
   lines(c(0, xm*(1-crit)), c(crit, 1))
   lines(c(xm*crit, xm), c(0, 1-crit))
   title(main = main)
   par(pty=oldpty)
   invisible()
}
# file MASS/digamma.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
## Digamma function psi(z) = d/dz log Gamma(z), vectorised; valid for
## Re(z) > 0 (elements with Re(z) <= 0 are returned as NA).
## NOTE(review): this masks R's built-in digamma(), which also handles
## negative arguments -- confirm the redefinition is still wanted.
digamma <- function(z)
{
        ## Non-positive real part: NA there, recurse on the remainder.
        if(any(omit <- Re(z) <= 0)) {
                ps <- z
                ps[omit] <- NA
                if(any(!omit))
                        ps[!omit] <- Recall(z[!omit])
                return(ps)
        }
        ## Small |z|: shift into the asymptotic region with the
        ## recurrence psi(z) = psi(z + 5) - 1/z - ... - 1/(z + 4).
        if(any(small <- Mod(z) < 5)) {
                ps <- z
                x <- z[small]
                ps[small] <- Recall(x + 5) - 1/x - 1/(x + 1) - 1/
                        (x + 2) - 1/(x + 3) - 1/(x + 4)
                if(any(!small))
                        ps[!small] <- Recall(z[!small])
                return(ps)
        }
        ## Asymptotic series in powers of 1/z^2 (Bernoulli-number
        ## coefficients), evaluated in nested Horner form.
        x <- 1/z^2
        tail <- ((x * (-1/12 + ((x * (1/120 + ((x * (-1/252 + ((
                x * (1/240 + ((x * (-1/132 + ((x * (691/32760 + (
                (x * (-1/12 + (3617 * x)/8160)))))))))))))))))))
                ))
        log(z) - 1/(2 * z) + tail
}

## Trigamma function psi'(z), vectorised; valid for Re(z) > 0 (NA
## elsewhere).  Same structure as digamma above: recurrence to push
## |z| >= 5, then an asymptotic series.
## NOTE(review): masks R's built-in trigamma() -- confirm still wanted.
trigamma <- function(z)
{
        ## Non-positive real part: NA there, recurse on the remainder.
        if(any(omit <- Re(z) <= 0)) {
                ps <- z
                ps[omit] <- NA
                if(any(!omit))
                        ps[!omit] <- Recall(z[!omit])
                return(ps)
        }
        ## Recurrence psi'(z) = psi'(z + 5) + 1/z^2 + ... + 1/(z + 4)^2.
        if(any(small <- Mod(z) < 5)) {
                ps <- z
                x <- z[small]
                ps[small] <- Recall(x + 5) + 1/x^2 + 1/(x + 1)^2 +
                        1/(x + 2)^2 + 1/(x + 3)^2 + 1/(x + 4)^2
                if(any(!small))
                        ps[!small] <- Recall(z[!small])
                return(ps)
        }
        ## Asymptotic series in 1/z^2, nested Horner form.
        x <- 1/z^2
        tail <- 1 + (x * (1/6 + (x * (-1/30 + (x * (1/42 + (x * (
                -1/30 + (x * (5/66 + (x * (-691/2370 + (x * (7/6 -
                (3617 * x)/510))))))))))))))
        1/(2 * z^2) + tail/z
}
# file MASS/dose.p.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
dose.p <- function(obj, cf = 1:2, p = 0.5) {
  ## Effective dose estimation for a binary-response GLM: for each
  ## target probability p, solve linkfun(p) = b1 + b2 * x for the dose
  ## x, with a delta-method standard error.  `cf` selects the intercept
  ## and slope coefficients.  Returns a "glm.dose" object.
  b <- coef(obj)[cf]
  eta <- family(obj)$linkfun(p)
  x.p <- (eta - b[1])/b[2]
  names(x.p) <- paste("p = ", format(p), ":", sep = "")
  ## Gradient of x.p with respect to (b1, b2) for the delta method.
  grad <- - cbind(1, x.p)/b[2]
  SE <- sqrt(((grad %*% vcov(obj)[cf, cf]) * grad) %*% c(1, 1))
  structure(x.p, SE = SE, p = p, class = "glm.dose")
}

## print method for dose.p() results: tabulates the dose estimates
## alongside their standard errors, then defers to the default print.
print.glm.dose <- function(x, ...)
{
        M <- cbind(x, attr(x, "SE"))
        dimnames(M) <- list(names(x), c("Dose", "SE"))
        ## NextMethod prints the reassigned x (the labelled matrix).
        x <- M
        NextMethod("print")
}
# file MASS/enlist.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
"enlist"<-
function(vec)
{
	x <- as.list(vec)
	names(x) <- names(vec)
	x
}
# file MASS/eqscplot.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
eqscplot <- function(x, y, tol = 0.04, xlim = range(x), ylim = range(y),
		     xlab, ylab, ...)
{
  ## Scatter plot with geometrically equal scales: one user unit spans
  ## the same physical length on both axes.  Accepts a two-column
  ## matrix, an x/y list, or separate x and y vectors.  `tol` expands
  ## both ranges by that fraction on each side.
  if(is.matrix(x)) {
    ## Bug fix: read the column labels *before* x is overwritten by its
    ## first column; the original read dimnames from the dropped
    ## vector, so matrix column labels were always lost.
    if(!is.null(dn <- dimnames(x)[[2]])) {
      xlab0 <- dn[1]
      ylab0 <- dn[2]
    } else {
      xlab0 <- ""
      ylab0 <- ""
    }
    y <- x[, 2]
    x <- x[, 1]
  } else if(is.list(x)) {
    y <- x$y
    x <- x$x
    xlab0 <- "x"; ylab0 <- "y"
  } else {
    xlab0 <- deparse(substitute(x))
    ylab0 <- deparse(substitute(y))
  }
  if(missing(xlab)) xlab <- xlab0
  if(missing(ylab)) ylab <- ylab0
  oldpin <- par("pin")
  ## Expand both ranges symmetrically by `tol`.
  midx <- 0.5 * (xlim[2] + xlim[1])
  xlim <- midx + (1 + tol) * 0.5 * c(-1, 1) * (xlim[2] - xlim[1])
  midy <- 0.5 * (ylim[2] + ylim[1])
  ylim <- midy + (1 + tol) * 0.5 * c(-1, 1) * (ylim[2] - ylim[1])
  ## Inches per user unit in each direction; stretch the limits of the
  ## tighter axis so the two ratios match.
  xr <- oldpin[1]/(xlim[2] - xlim[1])
  yr <- oldpin[2]/(ylim[2] - ylim[1])
  if (yr > xr) {
    ylim <- midy + yr * c(-1, 1) * (ylim[2] - ylim[1])/(2*xr)
  } else {
    xlim <- midx + xr * c(-1, 1) * (xlim[2] - xlim[1])/(2*yr)
  }
  plot(x, y, xlim=xlim, ylim=ylim, xaxs="i", yaxs="i",
       xlab=xlab, ylab=ylab, ...)
}
# file MASS/fractions.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
## Continued-fraction rational approximation, vectorised over x.
## Returns list(rat = matrix with columns (numerator, denominator),
## x = the original input).  The expansion for each element stops when
## its remainder drops below 1/max.denominator or after `cycles` terms.
.rat <- function(x, cycles = 10, max.denominator = 2000)
{
  a0 <- rep(0, length(x))
  A <- matrix(b0 <- rep(1, length(x)))
  B <- matrix(floor(x))
  r <- as.vector(x) - drop(B)
  len <- 0
  ## Accumulate partial quotients column-by-column in A and B, only
  ## updating elements whose remainder is still too large.
  while(any(which <- (r > 1/max.denominator)) && 
	(len <- len + 1) <= cycles) {
    a <- a0
    b <- b0
    a[which] <- 1
    r[which] <- 1/r[which]
    b[which] <- floor(r[which])
    r[which] <- r[which] - b[which]
    A <- cbind(A, a)
    B <- cbind(B, b)
  }
  ## Fold the quotients back into convergents p/q via the standard
  ## three-term recurrence.
  pq1 <- cbind(b0, a0)
  pq <- cbind(B[, 1], b0)
  len <- 1
  while((len <- len + 1) <= ncol(B)) {
    pq0 <- pq1
    pq1 <- pq
    pq <- B[, len] * pq1 + A[, len] * pq0
  }
  list(rat = pq, x = x)
}

rational <- function(x, ...) {
  ## Replace x by its nearest rational approximation (numerically),
  ## preserving all original attributes such as dim and dimnames.
  pq <- .rat(x, ...)$rat
  do.call("structure", c(list(pq[, 1]/pq[, 2]), attributes(x)))
}

fractions <- function(x, ...) {
  ## Attach a "p/q" character representation to x (class "fractions");
  ## exact integers are labelled without the "/1".
  res <- .rat(x, ...)
  labels <- paste(res$rat[, 1], res$rat[, 2], sep = "/")
  whole <- res$rat[, 2] == 1
  labels[whole] <- as.character(res$rat[whole, 1])
  structure(res$x, fracs = labels,
            class = c("fractions", class(res$x)))
}

"t.fractions"<- function(x)
{
  xt <- NextMethod()
  class(xt) <- class(x)
  attr(xt, "fracs") <- t(array(attr(x, "fracs"), dim(x)))
  xt
}

"Math.fractions"<- function(x, ...)
{
  x <- unclass(x)
  fractions(NextMethod())
}

"Ops.fractions"<- function(e1, e2)
{
  e1 <- unclass(e1)
  if(!missing(e2))
    e2 <- unclass(e2)
  fractions(NextMethod(.Generic))
}

"Summary.fractions"<- function(x, ...)
{
  x <- unclass(x)
  fractions(NextMethod())
}

"[.fractions"<- function(x, ...)
{
  x <- unclass(x)
  fractions(NextMethod())
}

"[<-.fractions"<- function(x, ..., value)
{
  x <- unclass(x)
  fractions(NextMethod())
}

"as.character.fractions"<- function(x)
structure(attr(x, "fracs"), dim = dim(x), dimnames = dimnames(x))

"as.fractions"<- function(x)
if(is.fractions(x)) x else fractions(x)

"is.fractions"<- function(f)
inherits(f, "fractions")

"print.fractions"<- function(x, ...)
{
  y <- attr(x, "fracs")
  mc <- max(ncy <- nchar(y))
  if(any(small <- ncy < mc)) {
    blanks <- "    "
    while(nchar(blanks) < mc) blanks <- paste(blanks, blanks)
    blanks <- rep(blanks, sum(small))
    blanks <- substring(blanks, 1, mc - ncy)
    y[small] <- paste(blanks[small], y[small], sep = "")
  }
  att <- attributes(x)
  att$fracs <- att$class <- NULL
  x <- do.call("structure", c(list(y), att))
  NextMethod("print", quote = F)
}

# file MASS/gamma.shape.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
## Generic: ML estimation of the shape parameter of a Gamma GLM.
gamma.shape <- function(object, ...)
{
    UseMethod("gamma.shape")
}

gamma.shape.glm <- function(object, it.lim = 10, 
			    eps.max = .Machine$double.eps^0.25, 
			    verbose = F)
{
  ## Maximum-likelihood estimate of the Gamma shape parameter alpha for
  ## a fitted Gamma GLM, by Newton-Raphson on the profile score.
  ## Returns a "gamma.shape" object: list(alpha, SE).
  if(is.null(object$y))
    object <- update(object, y = T)
  y <- object$y
  wts <- object$prior.weights
  if(is.null(wts)) wts <- rep(1, length(y))
  mu <- fitted(object)
  ## Moment-style starting value from the mean deviance.
  Dbar <- object$deviance/object$df.residual
  alpha <- (6 + 2 * Dbar)/(Dbar * (6 + Dbar))
  if(verbose) cat("Initial estimate:", format(alpha), "\n")
  ## Part of the score that does not depend on alpha (log(y + (y==0))
  ## guards against log(0)).
  const <-  - y/mu - log(mu) + log(wts) + 1 + log(y + (y == 0))
  eps <- 1
  itr <- 0
  repeat {
    ## Same termination rule as the original while-condition:
    ## stop once the step is small or the iteration budget is spent.
    if(abs(eps) <= eps.max) break
    itr <- itr + 1
    if(itr > it.lim) break
    score <- sum(wts * (const + log(alpha) - digamma(wts * alpha)))
    info <- sum(wts * (wts * trigamma(wts * alpha) - 1/alpha))
    eps <- score/info
    alpha <- alpha + eps
    if(verbose) cat("Iter. ", itr, " Alpha:", alpha, "\n")
  }
  if(itr > it.lim) warning("Iteration limit reached")
  ## SE from the observed information at the final step.
  structure(list(alpha = alpha, SE = sqrt(1/info)), class = "gamma.shape")
}

## ML dispersion estimate for a Gamma GLM: the reciprocal of the
## estimated shape parameter.
gamma.dispersion <- function(object, ...)
{
  shape <- gamma.shape(object, ...)[[1]]
  1/shape
}

## print method for "gamma.shape": shows the estimate and its standard
## error as a labelled 2 x 1 array and returns the original object
## invisibly.
print.gamma.shape <- function(x, ...)
{
  y <- x
  ## NextMethod prints the reassigned x (the array), not the list.
  x <- array(unlist(x), dim = 2:1, 
	     dimnames = list(c("Alpha:", "SE:"), ""))
  NextMethod("print")
  invisible(y)
}
# file MASS/hist.scott.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
nclass.scott <- function(x)
{
    ## Number of histogram classes by Scott's rule:
    ## bin width 3.5 * sd(x) * n^(-1/3).
    bw <- 3.5 * sqrt(var(x)) * length(x)^(-1/3)
    ceiling(diff(range(x))/bw)
}
nclass.FD <- function(x)
{
    ## Freedman-Diaconis rule: bin width 2 * IQR * n^(-1/3).
    q <- quantile(x, c(0.25, 0.75))
    names(q) <- NULL            # strip the "25%"/"75%" labels
    bw <- 2 * (q[2] - q[1]) * length(x)^(-1/3)
    ceiling(diff(range(x))/bw)
}
## Histogram with the number of classes chosen by Scott's rule;
## the hist() result is returned invisibly.
hist.scott <- function(x, xlab = deparse(substitute(x)), ...)
{
    invisible(hist(x, nclass.scott(x), xlab = xlab, ...))
}
## Histogram with the number of classes chosen by the
## Freedman-Diaconis rule; the hist() result is returned invisibly.
hist.FD <- function(x, xlab = deparse(substitute(x)), ...)
{
    invisible(hist(x, nclass.FD(x), xlab = xlab, ...))
}

## Frequency polygon: a line through the histogram bin midpoints.
## NOTE(review): hist() is called with probability=T, but the values
## plotted are hst$counts rather than hst$density -- confirm which is
## intended before changing either.
frequency.polygon <- function(x, nclass = nclass.freq(x), 
    xlab="", ylab="", ...)
{
    hst <- hist(x, nclass, probability=T, plot=F, ...)
    midpoints <- 0.5 * (hst$breaks[-length(hst$breaks)] 
                 + hst$breaks[-1])
    plot(midpoints, hst$counts, type="l", xlab=xlab, ylab=ylab)
}
nclass.freq <- function(x)
{
    ## Class count for frequency polygons: bin width
    ## 2.15 * sd(x) * n^(-1/5).
    bw <- 2.15 * sqrt(var(x)) * length(x)^(-1/5)
    ceiling(diff(range(x))/bw)
}

bandwidth.nrd <- function(x)
{
    ## Normal reference rule-of-thumb bandwidth (Silverman-type):
    ## 4 * 1.06 * min(sd, IQR/1.34) * n^(-1/5).
    q <- quantile(x, c(0.25, 0.75))
    spread <- (q[2] - q[1])/1.34
    4 * 1.06 * min(sqrt(var(x)), spread) * length(x)^(-1/5)
}
# file MASS/histplot.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
"histplot" <-
function(formula, data = sys.parent(1),
	 prepanel = prepanel.histplot, panel = panel.histplot, 
	 nbins = 5, h , x0 = -h/1000, breaks, prob = T,
	 aspect = "fill",
	 xlab = deparse(do.formula.trellis(formula)$expr[[1]]), 
	 ylab = if(prob) "Density" else "Counts", 
	 groups = NULL, ..., subset = T)
{
  .NotYetImplemented()
  if(mode(formula) != "call")
    formula <- formula.default(paste("~", deparse(substitute(formula))))
  Z <- do.formula.trellis(formula)
  if(!Z$no.response || is.null(Z$expr))
    stop("formula should be in the form of x or ~ x | g1*g2*...")
  subset <- eval(substitute(subset), data)
  x <- eval(Z$expr[[1]], data)[subset]
  x <- x[!is.na(x)]
  if(missing(breaks)) {
    if(missing(h)) h <- diff(pretty(x, nbins))[1]
    first <- floor((min(x) - x0)/h)
    last <- ceiling((max(x) - x0)/h)
    breaks <- x0 + h * c(first:last)
  }
  if(any(diff(breaks) <= 0)) stop("breaks must be strictly increasing")
  if(min(x) < min(breaks) || max(x) > max(breaks))
     stop("breaks do not cover the data")
  db <- diff(breaks)
  if(!prob && sqrt(var(db)) > mean(db)/1000)
    warning("Uneven breaks with prob = F will give a misleading plot")
  setup.2d.trellis(formula, data = data, prepanel = prepanel,
		   prepanel.arg = list(breaks = breaks, prob = prob),
		   breaks = breaks, prob = prob, panel = panel, 
		   aspect = aspect, xlab = xlab, ylab = ylab, 
		   groups = eval(substitute(groups), data), ..., 
		   subset = subset)
}
"prepanel.histplot" <- 
function(x, y, breaks, prob=T, ...)
{
  data <- x[!is.na(x)]
  x <- breaks
  xl <- max(seq(along=x)[x < min(data)])
  xm <- min(seq(along=x)[x > max(data)])
  x <- x[xl:xm]
  bin <- cut(data, x, include.lowest = T)
  est <- tabulate(bin, length(levels(bin)))
  if (prob) est <- est/(diff(x) * length(data))
  list(xlim = range(x), ylim = c(0, max(est)), dx = diff(x)[-1], dy = diff(est))
}
"panel.histplot" <- 
function(x, y, breaks, prob = T, col = trellis.par.get("bar.fill")$col, ...)
{
  data <- x[!is.na(x)]
  x <- breaks
  xl <- max(seq(along=x)[x < min(data)])
  xm <- min(seq(along=x)[x > max(data)])
  x <- x[xl:xm]
  bin <- cut(data, x, include.lowest = T)
  est <- tabulate(bin, length(levels(bin)))
  if (prob) est <- est/(diff(x) * length(data))
  nx <- length(x)
  X <- as.vector(rbind(x[-1], x[ - nx], x[ - nx], x[-1], NA))
  Y <- as.vector(rbind(0, 0, est, est, NA))
  polygon(X, Y, col = col, border = 1, ...)
}



# file MASS/huber.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
huber <- function(y, k=1.5, tol = 1.0e-6)
{
   ## Huber M-estimator of location, with scale fixed at the MAD.
   ## Observations are winsorised at mu +/- k*s and the mean of the
   ## winsorised sample is iterated to convergence.
   ## Returns list(mu = location, s = MAD scale).
   y <- y[!is.na(y)]
   n <- length(y)
   mu <- median(y)
   s <- mad(y)
   ## Fix: with s == 0 the convergence test abs(mu - mu1) < tol*s can
   ## never succeed, so the original looped forever on constant-ish
   ## samples.  Fail loudly instead.
   if(s == 0)
      stop("cannot estimate scale: MAD is zero for this sample")
   repeat{
      yy <- pmin(pmax(mu-k*s,y),mu+k*s)
      mu1 <- sum(yy)/n
      if(abs(mu-mu1) < tol*s) break
      mu <- mu1
   }
   list(mu=mu,s=s)
}
# file MASS/hubers.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
hubers <- function(y, k=1.5, mu, s, initmu=median(y), tol = 1.0e-6)
{
   ## Huber "Proposal 2" simultaneous M-estimates of location and
   ## scale.  Either may be held fixed by supplying `mu` or `s`.
   ## `beta` is the consistency constant that makes the winsorised
   ## variance unbiased at the normal.  Returns list(mu, s).
   y <- y[!is.na(y)]
   n <- length(y)
   if(missing(mu)) {
      mu0 <- initmu
      n1 <- n - 1
   } else {
       mu0 <- mu
       mu1 <- mu
       n1 <- n
   }
   if(missing(s)) s0 <- mad(y) else {s0 <- s;s1 <- s}
   ## Fix: with a zero (or negative) starting scale the convergence
   ## test abs(mu0 - mu1) < tol*s0 can never succeed, so the original
   ## looped forever.  Fail loudly instead.
   if(s0 <= 0)
      stop("cannot estimate scale: initial scale is zero for this sample")
   th <- 2*pnorm(k)-1
   beta <- th + k^2*(1-th) - 2*k*dnorm(k)
   repeat{
      ## Winsorise at mu0 +/- k*s0, then update whichever of mu and s
      ## is being estimated.
      yy <- pmin(pmax(mu0-k*s0,y), mu0+k*s0)
      if(missing(mu)) mu1 <- sum(yy)/n
      if(missing(s)) {
         ss <- sum((yy-mu1)^2)/n1
         s1 <- sqrt(ss/beta)
      }
      if((abs(mu0-mu1) < tol*s0) && abs(s0-s1) < tol*s0) break
      mu0 <- mu1; s0 <- s1
   }
   list(mu=mu0, s=s0)
}
# file MASS/isoMDS.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
## Kruskal's non-metric multidimensional scaling.  `d` is a
## dissimilarity ("dist" object or square symmetric matrix); `y` is the
## starting configuration (default: classical scaling in 2-d).  The
## STRESS minimisation runs in compiled code (VR_mds_* routines).
## Returns list(points = configuration, stress = final stress).
isoMDS <- function(d, y=cmdscale(d, 2), maxit=50, trace=T)
{
  if(any(!is.finite(d))) stop("NAs/Infs not allowed in d")
  ## Recover the full square dissimilarity matrix.
  if(is.null(n <- attr(d, "Size"))) {
    x <- as.matrix(d)
    if((n <- nrow(x)) != ncol(x))
      stop("Distances must be result of dist or a square matrix")
  } else {
    x <- matrix(0, n, n)
    x[row(x) > col(x)] <- d
    x <- x + t(x)
  }
  ## All off-diagonal dissimilarities must be strictly positive;
  ## report the first offending pair.
  if (any(ab <- x[row(x) < col(x)] <= 0)) {
    aa <- cbind(as.vector(row(x)), as.vector(col(x)))[row(x) < col(x),]
    aa <- aa[ab, , drop=F]
    stop(paste("zero or negative distance between objects",aa[1,1],
	       "and", aa[1,2]))
  }
  dis <- x[row(x) > col(x)]
  ord <- order(dis)
  nd <- length(ord)
  n <- nrow(y)
  k <- ncol(y)
  ## Hand the ranked dissimilarities and start configuration to the
  ## compiled optimiser, run it, and release its state.
  .C("VR_mds_init_data",
     as.integer(nd),
     as.integer(k),
     as.integer(n),
     as.integer(ord - 1),
     as.integer(order(ord) - 1),
     as.double(y)
     )
  tmp <- .C("VR_mds_dovm",
	    val = double(1),
	    as.integer(maxit),
	    as.integer(trace),
	    y = as.double(y)
	    )
  .C("VR_mds_unload")
  list(points = matrix(tmp$y,,k), stress = tmp$val)	
}

## Data for a Shepard diagram: ordered dissimilarities against the
## corresponding configuration distances, plus the fitted monotone
## regression values computed by the compiled VR_mds_fn routine.
Shepard <- function(d, x)
{
#
# Given a dissimilarity d and configuration x, compute Shephard plot
#
  n <- nrow(x)
  k <- ncol(x)
  y <- dist(x)
  ord <- order(d)
  ## Configuration distances in the order of increasing dissimilarity.
  y <- y[ord]
  nd <- length(ord)
  Z <- .C("VR_mds_fn",
	  as.double(y),
	  yf=as.double(y),
	  as.integer(nd),
	  ssq = double(1),
	  as.integer(order(ord)-1),
	  as.double(x),
	  as.integer(n),
	  as.integer(k),
	  g=double(n*k),
	  as.integer(1)
	  )
  ## x: sorted dissimilarities, y: distances, yf: isotonic fit.
  list(x = d[ord], y = y, yf = Z$yf)
}

# file MASS/kde2d.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
kde2d <- function(x, y, h, n = 25, lims=c(range(x), range(y)) )
{
   ## Two-dimensional kernel density estimate with an axis-aligned
   ## bivariate normal kernel, evaluated on an n x n grid over `lims`.
   ## Returns list(x = grid, y = grid, z = density matrix).
   npts <- length(x)
   if(length(y) != npts)
      stop("Data vectors must be the same length")
   gx <- seq(lims[1], lims[2], length = n)
   gy <- seq(lims[3], lims[4], length = n)
   if (missing(h))
      h <- c(bandwidth.nrd(x), bandwidth.nrd(y))
   h <- h/4 # for S's bandwidth scale
   ## Standardised grid-to-point distances in each coordinate.
   dx <- outer(gx, x, "-" )/h[1]
   dy <- outer(gy, y, "-" )/h[2]
   ## Sum of product kernels via a single matrix product.
   dens <- matrix(dnorm(dx), n, npts) %*%
           t(matrix(dnorm(dy), n, npts))/(npts * h[1] * h[2])
   list(x = gx, y = gy, z = dens)
}
# file MASS/lda.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
## Generic for linear discriminant analysis.  An implicit class is
## assigned first so old-S objects without one still dispatch.
## Fix: the original called UseMethod("lda", x, ...); in R, UseMethod
## accepts at most (generic, object) and forwards the caller's
## arguments automatically, so the extra `...` was an error.
lda <- function(x, ...) 
{
  if(is.null(class(x))) class(x) <- data.class(x)
  UseMethod("lda", x)
}

## lda method for formulas: construct the model frame, build the model
## matrix without its intercept column, and hand the data to
## lda.default.  The terms and call are recorded on the result.
lda.formula <- function(formula, data = NULL, ..., 
			subset, na.action = na.fail)
{
  m <- match.call(expand.dots = F)
  if(is.matrix(eval(m$data, sys.parent())))
    m$data <- as.data.frame(data)
  m$... <- NULL
  ## Re-evaluate the call as a model.frame() call in the caller.
  m[[1]] <- as.name("model.frame")
  m <- eval(m, sys.frame(sys.parent()))
  Terms <- attr(m, "terms")
  grouping <- model.extract(m, "response")
  x <- model.matrix(Terms, m)
  ## Drop the intercept column, if present.
  xint <- match("(Intercept)", dimnames(x)[[2]], nomatch=0)
  if(xint > 0) x <- x[, -xint, drop=F]
  res <- lda.default(x, grouping, ...)
  res$terms <- Terms
  res$call <- match.call()
  res
}

## lda method for data frames: convert to a numeric matrix and reuse
## the matrix method, recording this call on the result.
lda.data.frame <- function(x, ...)
{
  fit <- lda.matrix(data.matrix(x), ...)
  fit$call <- match.call()
  fit
}

## lda method for Matrix-class objects: coerce to a base matrix and
## reuse the matrix method, recording this call on the result.
lda.Matrix <- function(x, ...)
{
  fit <- lda.matrix(as.matrix(x), ...)
  fit$call <- match.call()
  fit
}

## lda method for matrices: apply `subset` and `na.action` to x and
## grouping together, then fall through to lda.default via NextMethod.
lda.matrix <- function(x, grouping, ..., 
		       subset, na.action = na.fail)
{
  if(!missing(subset)) {
    x <- x[subset, , drop = F]
    grouping <- grouping[subset]
  }
  if(!missing(na.action)) {
    ## Bundle grouping and x so rows are dropped consistently.
    dfr <- na.action(structure(list(g = grouping, x = x), 
			       class = "data.frame"))
    grouping <- dfr$g
    x <- dfr$x
  }
  res <- NextMethod("lda")
  res$call <- match.call()
  res 
}

lda.default <- function(x, grouping, prior = proportions, tol = 1.0e-4,
			method = c("moment", "mle", "mve", "t"), CV=F,
			nu = 5, ...)
{
  ## Linear discriminant analysis workhorse.
  ## x: data matrix; grouping: class labels; prior: class priors
  ## (default: observed proportions); method: within-class covariance
  ## estimator ("mve" needs cov.mve, "t" uses a robust t-based fit with
  ## nu df); CV = T returns leave-one-out predictions instead of a fit.
  ## Returns an "lda" object, or list(class, posterior) when CV = T.
  ## Fix: a leftover debugging print(summary(w)) in the "t" branch has
  ## been removed; no other code changes.

  ## Index of the maximum, breaking ties at random.
  which.is.max <- function(x) {
    d <- (1:length(x))[x == max(x)]
    if(length(d) > 1) d <- sample(d, 1)
    d
  }

  if(is.null(dim(x))) stop("x is not a matrix")
  n <- nrow(x)
  p <- ncol(x)
  if(n != length(grouping)) stop("nrow(x) and length(grouping) are different")
  g <- as.factor(grouping)
  lev <- levels(g)
  counts <- table(g)
  ## Drop empty groups (with a warning) before computing proportions.
  if(any(counts == 0)) {
     warning(paste("group(s)", paste(levels(g)[counts == 0], collapse=" "), 
		   "are empty"))
     g <- factor(g, levels=levels(g)[counts > 0])
     counts <- table(g)
  }
  proportions <- counts/n
  ng <- length(proportions)
  if(any(prior < 0) || round(sum(prior), 5) != 1) stop("invalid prior")
  if(length(prior) != ng) stop("prior is of incorrect length")
  dim(prior) <- dimnames(prior) <- NULL
  names(prior) <- names(counts)
  method <- match.arg(method)
  if(CV && !(method == "moment" || method == "mle"))
     stop(paste("Cannot use leave-one-out CV with method", method))
  group.means <- tapply(x, list(rep(g, p), col(x)), mean)
  f1 <- sqrt(diag(var(x - group.means[g,  ])))
  if(any(f1 < tol))
    stop(paste("variable(s)", 
	       paste(format((1:p)[f1 < tol]), collapse = " "), 
	       "appear to be constant within groups"))
# scale columns to unit variance before checking for collinearity
  scaling <- diag(1/f1,,p)
  if(method == "mve") {
# adjust to "unbiased" scaling of covariance matrix
    cov <- n/(n-ng) * cov.mve((x - group.means[g,  ]) %*% scaling, , F)$cov
    sX <- svd(cov, nu = 0)
    rank <- sum(sX$d > tol^2)
    if(rank < p) warning("variables are collinear")
    scaling <- scaling %*% sX$v[, 1:rank] %*%
                 diag(sqrt(1/sX$d[1:rank]),,rank)
  } else if(method == "t") {
    ## Robust fit assuming a t distribution on nu df: iteratively
    ## reweighted group means and covariance.
    if(nu <= 2) stop("nu must exceed 2")
    w <- rep(1, n)
    repeat {
         w0 <- w
         X <- x - group.means[g, ]
         sX <- svd(sqrt((1 + p/nu)*w/n) * X, nu=0)
         X <- X %*% sX$v %*% diag(1/sX$d,, p)
         w <- 1/(1 + drop(X^2 %*% rep(1, p))/nu)
         group.means <- tapply(w*x, list(rep(g, p), col(x)), sum)/
            rep(tapply(w, g, sum), p)
         if(all(abs(w - w0) < 1e-2)) break
    }
    X <-  sqrt(nu/(nu-2)*(1 + p/nu)/n * w) * (x - group.means[g,  ]) %*% scaling
    X.s <- svd(X, nu = 0)
    rank <- sum(X.s$d > tol)
    if(rank < p) warning("variables are collinear")
    scaling <- scaling %*% X.s$v[, 1:rank] %*% diag(1/X.s$d[1:rank],,rank)
  } else {
    ## "moment" (unbiased) or "mle" within-class covariance scaling.
    if(method == "moment") fac <- 1/(n-ng) else fac <- 1/n
    X <- sqrt(fac) * (x - group.means[g,  ]) %*% scaling
    X.s <- svd(X, nu = 0)
    rank <- sum(X.s$d > tol)
    if(rank < p) warning("variables are collinear")
    scaling <- scaling %*% X.s$v[, 1:rank] %*% diag(1/X.s$d[1:rank],,rank)
  }
# now have variables scaled so that W is the identity
  if(CV) {
    ## Leave-one-out class predictions via the standard updating
    ## formulae -- no refitting of the model n times.
    x <- x %*% scaling
    dm <- group.means %*% scaling
    K <- if(method == "moment") ng else 0
    dist <- matrix(0, n, ng)
    for(i in 1:ng) {
      dev <- x - matrix(dm[i,  ], n, p, byrow = T)
      dist[, i] <- apply(dev^2, 1, sum)
    }
    ind <- cbind(1:n, g)
    nc <- counts[g]
    cc <- nc/((nc-1)*(n-K))
    dist2 <- dist
    for(i in 1:ng) {
      dev <- x - matrix(dm[i,  ], n, p, byrow = T)
      dev2 <- x - dm[g, ]
      tmp <- apply(dev*dev2, 1, sum)
      dist[, i] <- (n-1-K)/(n-K) * (dist2[, i] +  cc*tmp^2/(1 - cc*dist2[ind]))
    }
    dist[ind] <- dist2[ind] * (n-1-K)/(n-K) * (nc/(nc-1))^2 /
      (1 - cc*dist2[ind])
    dist <- 0.5 * dist - matrix(log(prior), n, ng, byrow=T)
    dist <- exp(-(dist - min(dist, na.rm=T)))
    cl <- factor(apply(dist, 1, which.is.max), levels=seq(along=lev),
                 labels=lev)
    #  convert to posterior probabilities
    posterior <- dist/drop(dist %*% rep(1, length(prior)))
    dimnames(posterior) <- list(dimnames(x)[[1]], lev)
    return(list(class = cl, posterior = posterior))
  }
  xbar <- apply(prior %*% group.means, 2, sum)
  if(method == "mle") fac <-  1/ng else fac <- 1/(ng - 1)
  ## Discriminant directions: right singular vectors of the scaled
  ## between-groups matrix of centred group means.
  X <- sqrt((n * prior)*fac) * scale(group.means, xbar, F) %*% scaling
  X.s <- svd(X, nu = 0)
  rank <- sum(X.s$d > tol * X.s$d[1])
  scaling <- scaling %*% X.s$v[, 1:rank]
  if(is.null(dimnames(x)))
    dimnames(scaling) <- list(NULL, paste("LD", 1:rank, sep = ""))
  else {
    dimnames(scaling) <- list(dimnames(x)[[2]], paste("LD", 1:rank, sep = ""))
    dimnames(group.means)[[2]] <- dimnames(x)[[2]]
  }
  dimnames(group.means)[[1]] <- names(prior)
  structure(list(prior = prior, counts = counts, means = group.means, 
		 scaling = scaling, lev = lev, svd = X.s$d[1:rank],
		 N = n, call = match.call()),
	    class = "lda")
}

predict.lda <- function(object, newdata, prior = object$prior, dimen,
			method = c("plug-in", "predictive", "debiased"), ...)
{
  ## Classify observations with a fitted "lda" object.
  ## Returns a list with components:
  ##   class     - factor of predicted group labels
  ##   posterior - matrix of posterior group probabilities
  ##   x         - the discriminant-variable scores used
  ## `dimen` restricts the number of discriminant dimensions used;
  ## `method` selects plug-in, predictive or debiased posteriors.
  if(!inherits(object, "lda")) stop("object not of class lda")
  which.is.max <- function(x) {
    ## index of the maximum, breaking ties at random
    d <- (1:length(x))[x == max(x)]
    if(length(d) > 1) d <- sample(d, 1)
    d
  }
  model.frame.lda <- function(formula)
  {
    ## recover the model frame of the original formula fit
    oc <- formula$call
    oc$method <- "model.frame"
    oc[[1]] <- as.name("lm")
    eval(oc, sys.frame(sys.parent()))
  }

  if(!is.null(Terms <- object$terms)) { #
    # formula fit
    if(missing(newdata)) newdata <- model.frame.lda(object)
    else newdata <- model.frame(as.formula(delete.response(Terms)),
                                newdata, na.action=function(x) x)
    x <- model.matrix(delete.response(Terms), newdata)
    xint <- match("(Intercept)", dimnames(x)[[2]], nomatch=0)
    if(xint > 0) x <- x[, -xint, drop=F]
  } else { #
    # matrix or data-frame fit
    if(missing(newdata)) {
      newdata <- eval(object$call$x)
      ## BUG FIX: `subset` selects cases, i.e. ROWS of the data matrix;
      ## previously this indexed columns (newdata[, sub]).
      if (!is.null(sub <- object$call$subset))
        newdata <- newdata[sub, , drop=F]
      if(!is.null(nas <- object$call$na.action))
        newdata <- do.call(nas, list(newdata))
    }
    if(is.null(dim(newdata)))
      dim(newdata) <- c(1, length(newdata))  # a row vector
    x <- as.matrix(newdata)		# to cope with dataframes
  }

  if(dim(x)[2] != dim(object$means)[2]) stop("wrong number of variables")
  if(length(dimnames(x)[[2]]) > 0 &&
     any(dimnames(x)[[2]] != dimnames(object$means)[[2]]))
        warning("Variable names in newdata do not match those in object")
  ng <- length(prior)
#   remove overall means to keep distances small
  means <- apply(object$means, 2, mean)
  scaling <- object$scaling
  x <- scale(x, means, F) %*% scaling
  dm <- scale(object$means, means, F) %*% scaling
  method <- match.arg(method)
  if(missing(dimen)) dimen <- length(object$svd)
  else dimen <- min(dimen, length(object$svd))
  N <- object$N
  if(method == "plug-in") {
    ## -log posterior (up to a constant): 0.5||x - m_k||^2 - log(prior_k)
    dm <- dm[, 1:dimen, drop=F]
    dist <- matrix(0.5 * apply(dm^2, 1, sum) - log(prior), nrow(x), 
		 length(prior), byrow = T) - x[, 1:dimen, drop=F] %*% t(dm)
    dist <- exp( -(dist - min(dist, na.rm=T)))
  } else if (method == "debiased") {
    ## plug-in distances rescaled to correct first-order estimation bias
    dm <- dm[, 1:dimen, drop=F]
    dist <- matrix(0.5 * apply(dm^2, 1, sum), nrow(x), ng, byrow = T) - 
      x[, 1:dimen, drop=F] %*% t(dm)
    dist <- (N - ng - dimen - 1)/(N - ng) * dist -
      matrix(log(prior) - dimen/object$counts , nrow(x), ng, byrow=T)
    dist <- exp( -(dist - min(dist, na.rm=T)))
  } else { # predictive
    dist <- matrix(0, nrow = nrow(x), ncol = ng)
    p <- ncol(object$means)
# adjust to ML estimates of covariances
    X <- x * sqrt(N/(N-ng)) 
    for(i in 1:ng) {
      nk <- object$counts[i]
      dev <- scale(X, dm[i, ], F)
      dev <- 1 + apply(dev^2, 1, sum) * nk/(N*(nk+1))
      dist[, i] <- prior[i] * (nk/(nk+1))^(p/2) * dev^(-(N - ng + 1)/2)
   }
  }
  cl <- factor(apply(dist, 1, which.is.max), levels=seq(along=object$lev),
               labels=object$lev)
  ## normalize the (unnormalized) group weights to posterior probabilities
  posterior <- dist / drop(dist %*% rep(1, ng))
  dimnames(posterior) <- list(dimnames(x)[[1]], object$lev)
  list(class = cl, posterior = posterior, x = x[, 1:dimen, drop=F])
}

print.lda <- function(x, ...)
{
  ## Print method for "lda" fits: show the call, prior probabilities,
  ## group means, discriminant coefficients and - when there is more than
  ## one discriminant - the proportion of the trace each one explains.
  cl <- x$call
  if(!is.null(cl)) {
    names(cl)[2] <- ""          # suppress the label of the first argument
    cat("Call:\n")
    dput(cl)
  }
  cat("\nPrior probabilities of groups:\n")
  print(x$prior, ...)
  cat("\nGroup means:\n")
  print(x$means, ...)
  cat("\nCoefficients of linear discriminants:\n")
  print(x$scaling, ...)
  sv <- x$svd
  names(sv) <- dimnames(x$scaling)[[2]]
  if(length(sv) > 1) {
    cat("\nProportion of trace:\n")
    print(round(sv^2/sum(sv^2), 4),  ...)
  }
  invisible(x)
}

plot.lda <- function(x, panel=panel.lda, ..., cex=0.7,
                     dimen, abbrev=F, xlab="LD1", ylab="LD2")
{
  ## Plot cases of an "lda" fit in discriminant space: a pairs plot for
  ## more than two discriminants, a scatter plot for exactly two, or
  ## group-wise histograms (ldahist) for one.  Points are labelled by
  ## their (optionally abbreviated) group.
  panel.lda <- function(x, y, ...) {
    text(x, y, as.character(g.lda), cex=tcex)
  }
  model.frame.lda <- function(formula)
  {
    ## recover the model frame of the original formula fit
    oc <- formula$call
    oc$method <- "model.frame"
    oc[[1]] <- as.name("lm")
    eval(oc, sys.frame(sys.parent()))
  }
  if(!is.null(Terms <- x$terms)) { #
    # formula fit
    data <- model.frame.lda(x)
    X <- model.matrix(delete.response(Terms), data)
    g <- model.extract(data, "response")
    xint <- match("(Intercept)", dimnames(X)[[2]], nomatch=0)
    if(xint > 0) X <- X[, -xint, drop=F]
  } else { #
    # matrix or data-frame fit
    X <- eval(x$call$x)
    g <- eval(x$call[[3]])
    if (!is.null(sub <- x$call$subset)) {
      ## BUG FIX: `subset` selects cases, i.e. ROWS of X and elements of
      ## the grouping vector; previously both used column indexing
      ## (X[, sub], g[, sub]), and g[, sub] errors on a vector.
      X <- X[sub, , drop=F]
      g <- g[sub]
    }
    if(!is.null(nas <- x$call$na.action)) {
      df <- structure(list(g = g, X = X), class = "data.frame")
      df <- do.call(nas, list(df))
      g <- df$g
      X <- df$X
    }
  }
  if(abbrev) levels(g) <- abbreviate(levels(g), abbrev)
  assign("g.lda", g)     # seen by panel.lda via this function's frame
  assign("tcex", cex)
  means <- apply(x$means, 2, mean)
  X <- scale(X, means, F) %*% x$scaling
  if(!missing(dimen) && dimen < ncol(X)) X <- X[, 1:dimen, drop=F]
  if(ncol(X) > 2) {
    pairs(X, panel=panel, ...)
  } else if(ncol(X) == 2)  {
    eqscplot(X[, 1:2], xlab=xlab, ylab=ylab, type="n", ...)
    panel(X[, 1], X[, 2], ...)
  } else ldahist(X[,1], g, xlab=xlab, ...)
  invisible(NULL)
}

ldahist <- 
function(data, g, nbins = 25, h, x0 = -h/1000, breaks,
	 xlim = range(breaks), ymax = 0, width,
         type = c("histogram", "density", "both"), sep = (type != "density"),
         col = 5, 
	 xlab = deparse(substitute(data)), bty = "n", ...)
{
  ## Histograms and/or kernel density plots of `data`, one panel per
  ## group in `g` (or superimposed on a common panel when sep = FALSE),
  ## all drawn on a shared x/y scale.
  eval(xlab)   # force the default label before `data` is reassigned below
  type <- match.arg(type)
  ## BUG FIX: drop missing observations while keeping data and g aligned.
  ## Previously g was subset with a mask recomputed AFTER data had been
  ## filtered, so g kept its full length whenever data contained NAs.
  ok <- !is.na(data)
  data <- data[ok]
  g <- g[ok]
  counts <- table(g)
  groups <- names(counts)[counts > 0]
  if(missing(breaks)) {
    if(missing(h)) h <- diff(pretty(data, nbins))[1]
    first <- floor((min(data) - x0)/h)
    last <- ceiling((max(data) - x0)/h)
    breaks <- x0 + h * c(first:last)
  }
  if(type=="histogram" || type=="both") {
    if(any(diff(breaks) <= 0)) stop("breaks must be strictly increasing")
    if(min(data) < min(breaks) || max(data) > max(breaks))
      stop("breaks do not cover the data")
    ## BUG FIX: widen the lowest break ONCE (was inside the group loop,
    ## so it compounded for each group) so that cut()'s left-open first
    ## interval still captures points lying exactly on min(breaks).
    breaks[1] <- breaks[1] - 0.01*diff(breaks)[1]
    est <- vector("list", length(groups))
    for (i in seq(along=groups)){
      grp <- groups[i]
      bin <- cut(data[g==grp], breaks)
      est1 <- tabulate(bin, length(levels(bin)))
      est1 <- est1/(diff(breaks) * length(data[g==grp]))  # density scale
      ymax <- max(ymax, est1)
      est[[i]] <- est1
    }
  }
  if(type=="density" || type == "both"){
    xd <- vector("list", length(groups))
    for (i in seq(along=groups)){
      grp <- groups[i]
      if(missing(width)) width <- width.SJ(data[g==grp])
      xd1 <- density(data[g==grp], n=200, bw=width, from=xlim[1], to=xlim[2])
      ymax <- max(ymax, xd1$y)
      xd[[i]] <- xd1
    }
  }
  if(!sep) plot(xlim, c(0, ymax), type = "n", xlab = xlab, ylab = "",
                bty = bty)
  else {
    ## one panel per group, restoring the layout on exit
    oldpar <- par(mfrow=c(length(groups), 1))
    on.exit(par(oldpar))
  }
  for (i in seq(along=groups)){
    grp <- groups[i]
    if(sep) plot(xlim, c(0, ymax), type = "n",
                 xlab = paste("group", grp), ylab = "", bty = bty)
    if(type=="histogram" || type=="both") {
      n <- length(breaks)
      rect(breaks[-n], 0, breaks[-1], est[[i]], col = col, ...)
    }
    if(type=="density" || type == "both") lines(xd[[i]])
  }
  invisible()
}

pairs.lda <- function(x, labels = dimnames(x)[[2]], panel=panel.lda,
                      dimen, abbrev=F, ..., cex=0.7,
                      type=c("std", "trellis"))
{
  ## Pairs plot of the cases of an "lda" fit in discriminant space,
  ## labelled by (optionally abbreviated) group, either as a standard
  ## pairs plot or as a Trellis scatter-plot matrix.
  panel.lda <- function(x,y, ...) {
    text(x, y, as.character(g.lda), cex=tcex, ...)
  }
  model.frame.lda <- function(formula)
  {
    ## recover the model frame of the original formula fit
    oc <- formula$call
    oc$method <- "model.frame"
    oc[[1]] <- as.name("lm")
    eval(oc, sys.frame(sys.parent()))
  }
  type <- match.arg(type)
  if(!is.null(Terms <- x$terms)) { #
    # formula fit
    data <- model.frame.lda(x)
    X <- model.matrix(delete.response(Terms), data)
    g <- model.extract(data, "response")
    xint <- match("(Intercept)", dimnames(X)[[2]], nomatch=0)
    if(xint > 0) X <- X[, -xint, drop=F]
  } else { #
    # matrix or data-frame fit
    X <- eval(x$call$x)
    g <- eval(x$call[[3]])
    if (!is.null(sub <- x$call$subset)) {
      ## BUG FIX: `subset` selects cases, i.e. ROWS of X and elements of
      ## the grouping vector; previously both used column indexing
      ## (X[, sub], g[, sub]), and g[, sub] errors on a vector.
      X <- X[sub, , drop=F]
      g <- g[sub]
    }
    if(!is.null(nas <- x$call$na.action)) {
      df <- structure(list(g = g, X = X), class = "data.frame")
      df <- do.call(nas, list(df))
      g <- df$g
      X <- df$X
    }
  }
  g <- as.factor(g)
  if(abbrev) levels(g) <- abbreviate(levels(g), abbrev)
  assign("g.lda", g)     # seen by panel.lda via this function's frame
  assign("tcex", cex)
  means <- apply(x$means, 2, mean)
  X <- scale(X, means, F) %*% x$scaling
  if(!missing(dimen) && dimen < ncol(X)) X <- X[, 1:dimen]
  if(type=="std") pairs.default(X, panel=panel, ...)
  else {
    print(splom(~X, groups = g, panel=panel.superpose,
                key = list(
                  text=list(levels(g)),
                  points = Rows(trellis.par.get("superpose.symbol"),
                    seq(along=levels(g))),
                  columns = min(5, length(levels(g)))
                  )
                ))
  }
  invisible(NULL)
}
# file MASS/loglm.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
# Generic: rewrite numeric variable names in a loglm formula as syntactic
# names (".v<k>") so that terms() can process them.  Inverse: renumerate.
denumerate <- function(x) UseMethod("denumerate")

# Generic: undo the ".v<k>" renaming performed by denumerate, restoring
# the original (numeric) variable names for display.
renumerate <- function(x) UseMethod("renumerate")

denumerate.formula <- function(x) {
  ## Recursively replace numeric (or digit-leading) variable names in a
  ## formula by syntactic names of the form ".v<name>".
  if(length(x) == 1) {
    ## leaf: a single number or name; rename if it is numeric or starts
    ## with a digit (1-9)
    if(mode(x) == "numeric" || 
	 (mode(x) == "name" && 
	  any(substring(x, 1, 1) == as.character(1:9))))
	x <- as.name(paste(".v", x, sep = ""))
  }
  else {
    x[[2]] <- Recall(x[[2]])
    ## do not descend into the right operand of ^, which is a true power
    if(length(x) == 3 && x[[1]] != as.name("^"))
	x[[3]] <- Recall(x[[3]])
  }
  x
}

renumerate.formula <- function(x) {
  ## Inverse of denumerate.formula: strip the ".v" prefix from names it
  ## introduced, recursing through the formula in the same way.
  if(length(x) == 1) {
    ## leaf: only rewrite names of the form ".v<something>"
    if(mode(x) == "name" 
       && nchar(x) > 2 
       && substring(x, 1, 2) == ".v") 
      x <- as.name(substring(x, 3))
  }
  else {
    x[[2]] <- Recall(x[[2]])
    ## mirror denumerate.formula: skip the right operand of ^
    if(length(x) == 3 && x[[1]] != as.name("^"))
	x[[3]] <- Recall(x[[3]])
  }
  x
}

"loglm"<-
function(formula, data = sys.parent(), subset, na.action, ...)
{
  .NotYetImplemented()
  assign(".call", match.call(), frame = 1)
  if(missing(data) || inherits(data, "data.frame")) {
    m <- match.call(expand = F)
    m$... <- NULL
    m[[1]] <- as.name("model.frame")
    data <- eval(m, sys.parent())
    assign(".formula", attr(attr(data, "terms"), "formula"), frame = 1)
  } else {
    trms <- attr(data, "terms") <- terms(formula <- denumerate(formula))
    assign(".formula", renumerate(attr(trms, "formula")), frame = 1)
  }
  UseMethod("loglm", data)
}

"loglm.crosstabs"<-
function(formula, data, ...)
{
  attr(data, "marginals") <- attr(data, "call") <- class(data) <- NULL
  NextMethod("loglm")
}

"loglm.data.frame"<-
function(formula, data, ...)
{
  trms <- attr(data, "terms")
  if(is.null(trms)) stop("data has no trms attribute")
  if(attr(trms, "response") == 0) stop("Formula specifies no response")
  resp <- match(as.character(attr(trms, "variables")[attr(trms, "response")]), 
		names(data))
  fac <- data.frame(lapply(data[,  - resp], as.factor))
  rsp <- data[, resp]
  tab <- do.call("table", fac)
  if(max(tab) > 1) {
#
# and extra factor needed for repeated frequencies
#
    i <- do.call("order", rev(fac))
    fac <- fac[i,  ]
    rsp <- rsp[i]
    fac$.Within. <- factor(unlist(sapply(tab, 
					 function(x)
					 if(x > 0) seq(x) else NULL)))
  }
  dn <- lapply(fac, levels)
  dm <- sapply(dn, length)
  data <- structure(array(-1, dm, dn), terms = trms)
  data[do.call("cbind", lapply(fac, as.numeric))] <- rsp
  st <- array(as.numeric(data >= 0), dm, dn)
  data[data < 0] <- 0
  loglm.default(formula, data, ..., start = st)
}

loglm.default <-
function(formula, data, start = rep(1, length(data)), fitted = F,
	keep.frequencies = fitted, param = T, eps = 
	1/10, iter = 40, print = F, ...)
{
  ## Workhorse: fit a hierarchical log-linear model to a frequency array
  ## by iterative proportional scaling (via loglin).
  trms <- attr(data, "terms")
  if(is.null(trms)) stop("data has no trms attribute")
  ## incidence matrix: variables (rows) appearing in model terms (columns)
  factors <- attr(trms, "factors") > 0
  if((r <- attr(trms, "response"))) factors <- factors[ - r,  , drop = F]
  nt <- ncol(factors)
  ## order terms by size and drop any term contained in a later, larger
  ## one, keeping only the maximal margins of the hierarchical model
  fo <- order(apply(factors, 2, sum))
  factors <- factors[, fo, drop = F]
  ff <- crossprod(factors)
  keep <- rep(T, nt)
  j <- 0
  while((j <- j + 1) < nt) keep[j] <- ff[j, j] > max(ff[j, (j + 1):nt])
  factors <- factors[, keep, drop = F]
  ## map variable names (including denumerate-d ".v<k>" forms) onto the
  ## dimension numbers of the frequency array
  ldim <- length(dim(data))
  nnames <- paste(".v", 1:ldim, sep = "")
  which <- structure(1:ldim, names = nnames)
  if(!is.null(anames <- names(dimnames(data))))
    which <- c(which, structure(which, names = anames))
  margins <- apply(factors, 2, function(x, which, nam)
		   as.vector(which[nam[x]]), which, dimnames(factors)[[1]])
  if(is.matrix(margins))
    margins <- as.list(data.frame(margins))
  else margins <- structure(as.list(margins), names = names(margins))
  Fit <- loglin(data, margins, start = start, fit = fitted,
		param = param, eps = eps, iter = iter, print = print)
  ## .formula/.call are stashed in frame 1 by the loglm front end
  if(!exists(".formula", frame = 1)) {
    .formula <- NULL
    .call <- NULL
  }
  Fit <- structure(Fit, formula = .formula, class = "loglm", call = .call)
  if(keep.frequencies) Fit$frequencies <- structure(data, terms = NULL)
  if(fitted) {
    names(Fit)[match("fit", names(Fit))] <- "fitted"
    attr(Fit$fitted, "terms") <- NULL
  }
  Fit$deviance <- Fit$lrt
  ## each structural zero in `start` costs one residual degree of freedom
  Fit$df <- Fit$df - sum(start == 0)
  Fit
}

anova.loglm <- function(object, ..., test = c("Chisq", "chisq", "LR"))
{
  ## Likelihood-ratio comparison of a sequence of (assumed nested) loglm
  ## fits, ending at the saturated model.  A single fit is returned as-is.
  test <- match.arg(test)
  margs <- function(...) nargs()   # count the extra model objects
  if(!(k <- margs(...))) return(object)
  objs <- list(object, ...)
  dfs <- sapply(objs, "$", "df")
  o <- order( - dfs)               # smallest model (largest resid. df) first
  objs <- objs[o]
  dfs <- c(dfs[o], 0)              # append the saturated model: df = 0
  forms <- lapply(objs, formula)
  dev <- c(sapply(objs, "$", "lrt"), 0)  # saturated deviance = 0
  M <- array(0, c(k + 2, 5), 
	     list(c(paste("Model", 1:(k + 1)), "Saturated"), 
		  c("Deviance", "df", "Delta(Dev)", "Delta(df)", "P(> Delta(Dev)")))
  M[, 1] <- dev
  M[, 2] <- dfs
  ## deviance/df changes between successive models, and their chi-squared
  ## tail probabilities
  M[-1, 3] <- dev[1:(k + 1)] - dev[2:(k + 2)]
  M[-1, 4] <- dfs[1:(k + 1)] - dfs[2:(k + 2)]
  M[-1, 5] <- 1 - pchisq(M[-1, 3], M[-1, 4])
  structure(M, class = "anova.loglm", formulae = forms)
}

print.anova.loglm <- function(x, ...)
{
  ## Format and print the LR-test table produced by anova.loglm, with
  ## each model's formula listed above the table.
  rjustify <- function(str) {
    ## right-justify a character vector to the width of its longest entry
    m <- max(n <- nchar(str))
    blanks <- format(c("", str[n == m][1]))[1]
    paste(substring(blanks, 0, m - n), str, sep = "")
  }
  y <- x
  y[, 5] <- round(y[, 5], 5)
  R <- array("", dim(x), dimnames(x))
  for(j in 1:5) {
    ## justify each column together with its heading
    colj <- rjustify(c(dimnames(x)[[2]][j], format(y[, j])))
    R[, j] <- colj[-1]
    dimnames(R)[[2]][j] <- colj[1]
  }
  R[1, 3:5] <- ""   # no comparison columns for the first model
  ## print a two-sided formula in y ~ x order
  pform <- function(form)
    if(length(form) == 2) form else form[c(2, 1, 3)]
  forms <- attr(x, "formulae")
  cat("LR tests for hierarchical log-linear models\n\n")
  for(i in seq(along=forms))
    cat(paste("Model ", i, ":\n", sep = ""), pform(forms[[i]]), "\n")
  cat("\n")
  print(R, quote = F)
  invisible(x)
}

print.loglm <- function(x, ...)
{
  ## Print method for "loglm" fits: the call plus a two-row table of the
  ## likelihood-ratio and Pearson chi-squared goodness-of-fit tests.
  cat("Call:\n")
  print(attr(x, "call"))
  pval <- function(stat, df) if(df > 0) 1 - pchisq(stat, df) else 1
  tests <- rbind(c(x$lrt, x$df, pval(x$lrt, x$df)),
                 c(x$pearson, x$df, pval(x$pearson, x$df)))
  dimnames(tests) <- list(c("Likelihood Ratio", "Pearson"),
                          c("X^2", "df", "P(> X^2)"))
  cat("\nStatistics:\n")
  print(tests)
  invisible(x)
}

summary.loglm <- function(object, fitted = F, ...)
{
  ## Summary of a "loglm" fit: LR and Pearson chi-squared tests and,
  ## when fitted = T, a table of observed (expected) frequencies.
  ## If fitted values or frequencies are missing the model is re-fitted.
  ts.array <- rbind(c(object$lrt, object$df, 
		      if(object$df > 0) 1 - pchisq(object$lrt, object$df) 
		      else 1), c(object$pearson, object$df, 
				 if(object$df > 0) 
				 1 - pchisq(object$pearson, object$df) 
				 else 1))
  dimnames(ts.array) <- list(c("Likelihood Ratio", "Pearson"), 
			     c("X^2", "df", "P(> X^2)"))
  if(fitted) {
    ## BUG FIX: was `object$freqencies` (typo) which is always NULL -
    ## neither an exact nor a partial match for "frequencies" - so a
    ## needless re-fit happened even when the frequencies were present.
    if(is.null(object$fitted) || is.null(object$frequencies)) {
      cat("Re-fitting to find fitted values\n")
      object <- update(object, fitted = T, keep.frequencies = T)
    }
    fit <- format(round(object$fit, 1))
    OE <- array(paste(format(object$freq), " (", fit, ")", sep = ""),
		dim(fit), dimnames(object$freq))
  }  else OE <- NULL
  structure(list(formula = formula(object), tests = ts.array, oe = OE), 
	    class = "summary.loglm")
}

print.summary.loglm <- function(x, ...)
{
  ## Print the formula, the test-statistic table and, when present, the
  ## observed (expected) frequency table of a loglm summary.
  cat("Formula:\n")
  print(formula(x))
  cat("\nStatistics:\n")
  print(x$tests)
  oe <- x$oe
  if(!is.null(oe)) {
    cat("\nObserved (Expected):\n")
    print(oe, quote = F)
  }
  invisible(x)
}

update.loglm <- function(object, formula, ...)
{
  ## Update a loglm fit; formulae are passed through denumerate before,
  ## and renumerate after, the default update method so that numeric
  ## variable names survive the round trip.
  if(is.null(attr(object, "call")))
     stop("object has no call attribute.  Updating not possible")
  if(fix <- !missing(formula)) {
    attr(object, "formula") <- denumerate(attr(object, "formula"))
    formula <- denumerate(formula)
  }
  result <- NextMethod("update")
  ## copy any extra arguments of this call into the stored call
  extras <- setdiff(names(m <- match.call()), c("", "object", "formula"))
  if(length(extras)) 
    attr(result, "call")[extras] <- m[extras]
  if(fix) {
    ## translate back so the stored formula/call show the original names
    form <- renumerate(attr(result, "formula"))
    attr(result, "call")$formula <- unclass(attr(result, "formula") <- form)
  }
  result
}

fitted.loglm <- function(object, ...)
{
  ## Fitted frequencies of a loglm model; the model is re-fitted when
  ## fitted values were not kept at fit time.
  fit <- object$fit
  if(is.null(fit)) {
    cat("Re-fitting to get fitted values\n")
    fit <- update(object, fitted = T, keep.frequencies = F)$fitted
  }
  unclass(fit)
}

residuals.loglm <- 
function(object, type = c("deviance", "pearson", "response"))
{
  ## Deviance, Pearson or response residuals of a loglm fit.  Cells with
  ## zero fitted frequency keep the raw observed - fitted difference.
  type <- match.arg(type)
  if(is.null(object$fit) || is.null(object$freq)) {
    cat("Re-fitting to get frequencies and fitted values\n")
    object <- update(object, fitted = T, keep.frequencies = T)
  }
  obs <- object$freq
  fit <- object$fit
  res <- obs - fit
  pos <- fit > 0          # residual formulae need a positive fitted value
  obs <- obs[pos]
  fit <- fit[pos]
  res[pos] <- switch(type,
    deviance = sign(obs - fit) *
      sqrt(2 * abs(obs * log((obs + (obs == 0))/fit) - (obs - fit))),
    pearson = (obs - fit)/sqrt(fit),
    response = obs - fit)
  res
}

coef.loglm <- function(object, ...)
{
  ## Parameter estimates of a loglm fit; re-fit if they were not kept.
  cf <- object$param
  if(is.null(cf)) {
    cat("Re-fitting to calculate missing coefficients\n")
    cf <- update(object, param = T)$param
  }
  cf
}
# file MASS/logtrans.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
"logtrans"<-
function(object, ...)
UseMethod("logtrans")

"logtrans.default"<-
function(object, ..., alpha = seq(0.5, 6, by = 0.25) - min(y), 
	plotit = length(dev.list()) > 0, interp = (plotit && (m < 
	100)), xlab = "alpha", ylab = "log Likelihood")
{
  if(is.null(object$y) || is.null(object$qr))
    stop(paste(deparse(substitute(object)), 
	       "does not have both `qr' and `y' components"))
  y <- object$y
  n <- length(y)
  if(any(y + min(alpha) <= 0))
    stop("Response variable must be positive after additions")
  xqr <- object$qr
  xl <- loglik <- as.vector(alpha)
  m <- length(xl)
  for(i in 1:m) {
    rs <- qr.resid(xqr, yt <- log(y + alpha[i]))
    loglik[i] <-  - n/2 * log(sum(rs^2)) - sum(yt)
  }
  if(interp) {
    sp <- spline(alpha, loglik, n = 100)
    xl <- sp$x
    loglik <- sp$y
    m <- length(xl)
  }
  if(plotit) {
    mx <- (1:m)[loglik == max(loglik)][1]
    Lmax <- loglik[mx]
    lim <- Lmax - qchisq(19/20, 1)/2
    plot(xl, loglik, xlab = xlab, ylab = ylab, type
	 = "l", ylim = range(loglik, lim))
    plims <- par("usr")
    abline(h = lim, lty = 3)
    y0 <- plims[3]
    scal <- (1/10 * (plims[4] - y0))/par("pin")[2]
    scx <- (1/10 * (plims[2] - plims[1]))/par("pin")[1]
    text(xl[1] + scx, lim + scal, " 95%")
    la <- xl[mx]
    if(mx > 1 && mx < m)
      segments(la, y0, la, Lmax, lty = 3)
    ind <- range((1:m)[loglik > lim])
    if(loglik[1] < lim) {
      i <- ind[1]
      x <- xl[i - 1] + ((lim - loglik[i - 1]) * 
			(xl[i] - xl[i - 1]))/(loglik[i] - loglik[i - 1])
      segments(x, y0, x, lim, lty = 3)
    }
    if(loglik[m] < lim) {
      i <- ind[2] + 1
      x <- xl[i - 1] + ((lim - loglik[i - 1]) * 
			(xl[i] - xl[i - 1]))/(loglik[i] - loglik[i - 1])
      segments(x, y0, x, lim, lty = 3)
    }
  }
  invisible(list(x = xl, y = loglik))
}
"logtrans.formula"<-
function(object, data = NULL, ...)
{
  object <- aov(object, data = data, y = T, qr = T)
  invisible(NextMethod("logtrans"))
}
"logtrans.lm"<-
function(object, ...)
{
  if(is.null(object$y) || is.null(object$qr))
    object <- update(object, y = T, qr = T)
  invisible(NextMethod("logtrans"))
}
# file MASS/mca.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
mca <- function(df, nf = 2, abbrev = F)
{
  ## Multiple correspondence analysis of a data frame of factors.
  ## Returns row scores (rs), category scores (cs) and per-case factor
  ## scores (fs) on the first nf non-trivial dimensions.
  class.ind <- function(cl)
  {
    ## indicator (dummy-variable) matrix of a factor
    n <- length(cl); cl <- as.factor(cl)
    x <- matrix(0, n, length(levels(cl)))
    x[(1:n) + n * (unclass(cl) - 1)] <- 1
    dimnames(x) <- list(names(cl), levels(cl))
    x
  }
  if(!all(unlist(lapply(df, is.factor))))
    stop("All variables must be factors")
  n <- nrow(df); p <- length(df)
  G <- as.matrix(do.call("data.frame", c(lapply(df, class.ind),
                                         check.names=F)))
  Dc <- drop((rep(1, n)) %*% G)   # category (column) totals
  X <- t(t(G)/(sqrt(p*Dc)))
  X.svd <- svd(X)
  sec <- 1 + (1:nf)               # skip the trivial first dimension
  rs <- X %*% X.svd$v[, sec]/p
  cs <- diag(1/(sqrt(p*Dc))) %*% X.svd$v[, sec]
  ## BUG FIX/generalization: the scaling used rep(..., c(n, n)), which
  ## hard-coded nf == 2 (any other nf errored); repeat each singular value
  ## n times and keep the matrix shape (drop = F) so any nf >= 1 works.
  fs <- X.svd$u[, sec, drop = F]/rep(p*X.svd$d[sec], each = n)
  dimnames(rs) <- list(row.names(df), as.character(1:nf))
  dimnames(fs) <- dimnames(rs)
  varnames <- if(abbrev) unlist(lapply(df, levels))
              else dimnames(G)[[2]]
  dimnames(cs) <- list(varnames, as.character(1:nf))
  structure(list(rs=rs, cs=cs, fs=fs, d=X.svd$d[sec], p=p,
                 call=match.call()), class="mca")
}

print.mca <- function(x, ...)
{
  ## Print method for "mca" objects: call, problem size, correlations
  ## and the cumulative percentage of inertia explained.
  cl <- x$call
  if(!is.null(cl)) {
    cat("Call:\n")
    dput(cl)
  }
  cat("\nMultiple correspondence analysis of",
            nrow(x$rs), "cases of", x$p,
            "factors\n")
  cat("\nCorrelations", format(round(x$d,3), ...))
  pct <- 100 * cumsum(x$d)/(x$p - 1)
  cat("  cumulative % explained", format(round(pct,2), ...), "\n")
  invisible(x)
}

plot.mca <- function(x, rows=T, col, cex = par("cex"), ...)
{
  ## Plot an MCA solution on equal-scale axes: category labels always,
  ## row points optionally, in two (possibly different) colours/sizes.
  if(length(cex) == 1) cex <- rep(cex, 2)
  eqscplot(x$cs, type="n", xlab="", ...)
  if(missing(col)) {
    base <- par("col")
    if(!is.numeric(base)) base <- match(base, palette())
    col <- c(base, base + 1)   # rows in the current colour, labels in the next
  } else if(length(col) != 2) col <- rep(col, length = 2)
  if(rows) text(x$rs, cex=cex[1], col=col[1])
  text(x$cs, labels=dimnames(x$cs)[[1]], cex=cex[2], col=col[2])
  invisible(x)
}

predict.mca <- function(object, newdata, type=c("row", "factor"), ...)
{
  ## Predict scores for new rows (type = "row") or positions for new
  ## factors on the existing rows (type = "factor") from an "mca" fit.
  class.ind <- function(cl)
  {
    ## indicator (dummy-variable) matrix of a factor
    n <- length(cl); cl <- as.factor(cl)
    x <- matrix(0, n, length(levels(cl)))
    x[(1:n) + n * (unclass(cl) - 1)] <- 1
    dimnames(x) <- list(names(cl), levels(cl))
    x
  }

  type <- match.arg(type)
  abbrev <- object$call$abbrev
  if(is.null(abbrev)) abbrev <- F
  if(!all(unlist(lapply(newdata, is.factor))))
    stop("All variables must be factors")
  G <- as.matrix(do.call("data.frame", c(lapply(newdata, class.ind),
                                         check.names=F)))
  if(abbrev) dimnames(G)[[2]] <- unlist(lapply(newdata, levels))
  if(type == "row") {
    # predict new row(s): categories must match the fit exactly
    if(!all(dimnames(G)[[2]] == dimnames(object$cs)[[1]]))
       stop("factors in newdata do not match those for fit")
    G %*% object$cs/object$p
  } else {
    # predict positions of new factor(s) on the fitted rows
    n <- nrow(G)
    Dc <- drop((rep(1, n)) %*% G)
    if(n != nrow(object$fs))
      stop("newdata is not of the right length")
    (t(G)/Dc) %*% object$fs
  }
}
# file MASS/misc.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
Choleski <- function(x)
{
  ## Choleski factor of a symmetric positive-definite matrix via the
  ## S-PLUS Matrix library's LU decomposition.  Stubbed out in this
  ## port: the call below exits immediately, so the remaining code
  ## (which relies on as.Matrix/lu/expand) is unreachable here.
  .NotYetImplemented()
    x <- as.Matrix(x)
    class(x) <- Matrix.class(x)
    if(!inherits(x, "Hermitian")) stop("x must be symmetric")
    LU <- expand(lu(x))
    B <- LU$block.diagonal   # must be diagonal, positive, unpermuted
    if(!inherits(B, "Diagonal") || any(diag(B) <= 0) 
       || !inherits(LU$permutation, "Identity"))
           stop("x is not positive definite")
    x <- LU$triangular %*% sqrt(B)    
    class(x) <- Matrix.class(x)
    x
}

mat2tr <- function(z)
{
  ## Convert a matrix whose dimnames are of the form "<name>=<value>"
  ## (with named dimnames) into a trellis-style data frame with numeric
  ## x, y columns and the matrix entries as z.
  nms <- names(dimnames(z))
  xs <- as.numeric(substring(dimnames(z)[[1]], nchar(nms[1]) + 2))
  ys <- as.numeric(substring(dimnames(z)[[2]], nchar(nms[2]) + 2))
  cbind(expand.grid(x = xs, y = ys), z = as.vector(z))
}

con2tr <- function(obj)
{
  ## Convert a contour-style list (components x, y and z matrix) into a
  ## trellis-style data frame of (x, y, z) triples.
  grid <- expand.grid(x = obj$x, y = obj$y)
  data.frame(grid, z = as.vector(obj$z))
}

Null <- function(M)
{
  ## Orthonormal basis for the null space of t(M), taken from the
  ## trailing columns of the complete Q factor of a QR decomposition.
  QR <- qr(M)
  keep <- if(QR$rank == 0) 1:ncol(M) else -(1:QR$rank)
  qr.Q(QR, complete = T)[, keep, drop = F]
}

ginv <- function(X, tol = sqrt(.Machine$double.eps))
{
#
# based on suggestions of R M Heiberger, T M Hesterburg and WNV
#
  ## Moore-Penrose generalized inverse of a (possibly rank-deficient)
  ## numeric or complex matrix, computed from the SVD with singular
  ## values below tol * d[1] treated as zero.
  if(length(dim(X)) > 2 || !(is.numeric(X) || is.complex(X)))
    stop("X must be a numeric or complex matrix")
  if(!is.matrix(X)) X <- as.matrix(X)
  Xsvd <- svd(X)
  if(is.complex(X)) Xsvd$u <- Conj(Xsvd$u)
  Positive <- Xsvd$d > max(tol * Xsvd$d[1], 0)
  if(all(Positive))
    Xsvd$v %*% ((1/Xsvd$d) * t(Xsvd$u))
  else if(!any(Positive))
    array(0, dim(X)[2:1])
  else
    ## BUG FIX: drop=F keeps u and v as matrices when exactly one
    ## singular value is positive (rank-1 case); previously the column
    ## subscript dropped to a vector and the product failed.
    Xsvd$v[, Positive, drop=F] %*% ((1/Xsvd$d[Positive]) *
                                    t(Xsvd$u[, Positive, drop=F]))
}
# file MASS/mvrnorm.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
mvrnorm <- function(n = 1, mu, Sigma, tol=1e-6)
{
  ## Draw n samples from a multivariate normal N(mu, Sigma) via the
  ## eigendecomposition of Sigma.  Returns a vector when n == 1, else an
  ## n x p matrix; variable names are taken from mu or Sigma.
  p <- length(mu)
  if(!all(dim(Sigma) == c(p,p))) stop("incompatible arguments")
  eS <- eigen(Sigma, symmetric = T)
  ev <- eS$values
  ## eigenvalues may be slightly negative through rounding; reject only
  ## clearly non-PSD matrices and clamp small negatives to zero below
  if(!all(ev >= -tol*abs(ev[1]))) stop("Sigma is not positive definite")
  ## BUG FIX: diag(x) with a scalar builds an identity matrix of that
  ## size, so p == 1 broke (or silently worked only when sqrt(ev) == 1);
  ## the explicit nrow = p keeps a 1 x 1 matrix in that case.
  X <- mu + eS$vectors %*% diag(sqrt(pmax(ev, 0)), p) %*% matrix(rnorm(p*n),p)
  nm <- names(mu)
  if(is.null(nm) && !is.null(dn <- dimnames(Sigma))) nm <- dn[[1]]
  dimnames(X) <- list(nm, NULL)
  if(n == 1) drop(X) else t(X)
}
# file MASS/neg.bin.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
neg.bin <- function (theta = stop("theta must be given")) 
{
  ## Negative binomial family object with FIXED theta and log link, for
  ## use with glm(); the variance function is mu + mu^2/theta.
  .Theta <- theta    # captured by the closures below
  stats <- make.link("log")
  variance <- function(mu)
    mu + mu^2/.Theta
  validmu <- function(mu)
    all(mu > 0)
  dev.resids <- function(y, mu, wt)
    2 * wt * (y * log(pmax(1, y)/mu) - (y + .Theta) *
              log((y + .Theta)/ (mu + .Theta)))
  aic <- function(y, n, mu, wt, dev) {
    ## minus twice the log-likelihood (up to the usual constant)
    term <- (y + .Theta) * log((y + .Theta)/ (mu + .Theta)) - y * log(mu) +
      lgamma(y + 1) - .Theta * log(.Theta) + lgamma(.Theta) - lgamma(.Theta+y)
    2 * sum(term * wt)
  }
  initialize <- expression({
    ## BUG FIX: the old message wrongly referred to "the Poisson family"
    if (any(y < 0)) stop(paste("negative values not allowed for",
                               "the negative binomial family"))
    n <- rep(1, nobs)
    mustart <- y + (y == 0)/6    # keep starting means strictly positive
  })
  structure(list(family = "Negative Binomial", link = "log", linkfun = stats$linkfun, 
                 linkinv = stats$linkinv, variance = variance, dev.resids = dev.resids, 
                 aic = aic, mu.eta = stats$mu.eta, initialize = initialize, 
                 validmu = validmu, valideta = stats$valideta), class = "family")
}
# file MASS/negbin.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
"anova.negbin"<-
function(object, ..., test = "Chisq")
{
  dots <- list(...)
  if(length(dots) == 0) {
    warning("tests made without re-estimating theta")
    object$call[[1]] <- as.name("glm")
    if(is.null(object$link))
      object$link <- as.name("log")
    object$call$family <- call("negative.binomial", theta = object$
                               theta, link = object$link)
    anova.glm(object, test = test)
  } else {
    if(test != "Chisq")
      warning("only Chi-squared LR tests are implemented")
    mlist <- list(object, ...)
    nt <- length(mlist)
    dflis <- sapply(mlist, function(x) x$df.resid)
#    s <- sort.list( - dflis)
    s <- order( - dflis)
    mlist <- mlist[s]
    if(any(!sapply(mlist, inherits, "negbin")))
      stop("not all objects are of class `negbin'")
    rsp <- unique(sapply(mlist, function(x) paste(formula(x)[2])))
    mds <- sapply(mlist, function(x) paste(formula(x)[3]))
    ths <- sapply(mlist, function(x) x$theta)
    dfs <- dflis[s]
    lls <- sapply(mlist, function(x) x$twologlik)
    tss <- c("", paste(1:(nt - 1), 2:nt, sep = " vs "))
    df <- c(NA,  - diff(dfs))
    x2 <- c(NA, diff(lls))
    pr <- c(NA, 1 - pchisq(x2[-1], df[-1]))
    out <- data.frame(Model = mds, theta = ths, Resid.df = dfs, 
                      "2 x log-lik." = lls, Test = tss, df = df, LRtest = x2, 
                      Prob = pr)
    names(out) <- c("Model", "theta", "Resid. df", 
                    "   2 x log-lik.", "Test", "   df", "LR stat.", "Pr(Chi)")
    attr(out, "class") <- c("anova", "data.frame")
    attr(out, "heading") <-
      c("Likelihood ratio tests of Negative Binomial Models\n",
        paste("Response:", rsp))
    out
  }
}

"family.negbin"<-
function(object)
#eval(substitute(negative.binomial(theta = object$theta, link = lnk), list(lnk
#	 = object$call$link)))
  object$family

"glm.convert"<-
function(object)
{
  object$call[[1]] <- as.name("glm")
  if(is.null(object$link))
    object$link <- as.name("log")
  object$call$family <- call("negative.binomial", theta = object$theta, 
                             link = object$link)
  object$call$init.theta <- object$call$link <- NULL
  class(object) <- c("glm", "lm")
  object
}

glm.nb <- function(formula = formula(data), data = sys.parent(), weights, 
		   subset, na.action, start = eta, 
		   control = glm.control(...), method = "glm.fit", 
		   model = T, x = F, y = T, contrasts = NULL, 
		   init.theta, link = log, ...)
{
  ## Fit a negative binomial GLM with unknown theta by alternating:
  ## a glm.fit step for the regression coefficients with theta fixed,
  ## then a maximum-likelihood step (theta.ml) for theta with the fitted
  ## means fixed, until both the log-likelihood and theta stabilise.
  ##
  ## Twice the log-likelihood of the NB model, up to a constant in y only.
  ## (mu + (y == 0) guards log(0) when y, and hence the fitted mu, is 0.)
  loglik <- function(n, th, mu, y)
    {
      sum(lgamma(th + y) - lgamma(th) + th * log(th) + 
	  y * log(mu + (y == 0)) - (th + y) * log(th + mu))
    }
  link <- substitute(link)
  th <- as.name("th")
  ## Starting family: Poisson when no initial theta is given, otherwise a
  ## fixed-theta negative binomial.
  if(missing(init.theta)) {
    fam0 <- do.call("poisson", list(link = link))
  } else {
    fam0 <- do.call("negative.binomial", list(theta = init.theta, link = link))
  }
  Call <- match.call()
  ## Build and evaluate the model.frame call (old S idiom: strip the
  ## non-model.frame arguments, then evaluate in the caller's frame).
  m <- match.call(expand = F)
  m$method <- m$model <- m$x <- m$y <- m$control <- m$contrasts <-
    m$init.theta <- m$link <- m$... <- NULL
  m[[1]] <- as.name("model.frame")
  m <- eval(m, sys.frame(sys.parent()))
  Terms <- attr(m, "terms")
  if(method == "model.frame") return(m)
  a <- attributes(m)
  Y <- model.extract(m, response)
  X <- model.matrix(Terms, m, contrasts)
  w <- model.extract(m, weights)
  if(!length(w)) w <- rep(1, nrow(m))
  else if(any(w < 0)) stop("negative weights not allowed")
  start <- model.extract(m, start)
  offset <- model.extract(m, offset)
  n <- length(Y)
  if(!is.null(method)) {
    if(!exists(method, mode = "function"))
      stop(paste("unimplemented method:", method))
  }
  else method <- "glm.fit"
  glm.fitter <- get(method)
  if(control$trace > 1) cat("Initial fit:\n")
  ## Initial fit (theta fixed, or Poisson) just to obtain fitted means.
  fit <- glm.fitter(x = X, y = Y, w = w, etastart = start, 
		    offset = offset, family = fam0,
                    control = list(maxit=control$maxit,
                      epsilon = control$epsilon,
                      trace = control$trace > 1))
  attr(fit, "class") <- c("glm", "lm")
  mu <- fitted(fit)
  th <- as.vector(theta.ml(Y, mu, n, limit=control$maxit, trace = 
                           control$trace> 2))
  if(control$trace > 1)
    cat("Initial value for theta:", signif(th), "\n")
  fam <- do.call("negative.binomial", list(theta = th, link = link))
  iter <- 0
  ## d1, d2 scale the two convergence criteria (likelihood change and
  ## theta change) so they are comparable against control$epsilon.
  d1 <- sqrt(2 * max(1, fit$df.residual))
  d2 <- del <- 1
  g <- fam$linkfun
  Lm <- loglik(n, th, mu, Y)
  Lm0 <- Lm + 2 * d1
  ## Alternate coefficient and theta updates until joint convergence.
  while((iter <- iter + 1) <= control$maxit && 
	(abs(Lm0 - Lm)/d1 + abs(del)/d2) > control$epsilon) {
    eta <- g(mu)
    fit <- glm.fitter(x = X, y = Y, w = w, etastart = 
		      eta, offset = offset, family = fam,
                       control = list(maxit=control$maxit,
                         epsilon = control$epsilon,
                         trace = control$trace > 1))
    t0 <- th
    th <- theta.ml(Y, mu, n, limit=control$maxit, trace = control$trace > 2)
    fam <- do.call("negative.binomial", list(theta = th, link = link))
    attr(fit, "class") <- c("negbin", "glm", "lm")
    mu <- fitted(fit)
    del <- t0 - th
    Lm0 <- Lm
    Lm <- loglik(n, th, mu, Y)
    if(control$trace) {
      ## Ls is the saturated-model value; 2(Ls - Lm) is the deviance.
      Ls <- loglik(n, th, Y, Y)
      Dev <- 2 * (Ls - Lm)
      cat("Theta(", iter, ") =", signif(th), 
	  ", 2(Ls - Lm) =", signif(Dev), "\n")
    }
  }
  if(!is.null(attr(th, "warn"))) fit$th.warn <- attr(th, "warn")
  if(iter > control$maxit) {
    warning("alternation limit reached")
    fit$th.warn <- "alternation limit reached"
  }
  
  # If an offset and intercept are present, iterations are needed to
  # compute the Null deviance; these are done here, unless the model
  # is NULL, in which case the computations have been done already
  #
  if(any(offset) && attr(Terms, "intercept")) {
    null.deviance <-
      if(length(Terms))
        glm.fitter(X[, "(Intercept)", drop = F], Y, w,
                   offset = offset, family = fam,
                   control = list(maxit=control$maxit,
                     epsilon = control$epsilon, trace = control$trace > 1)
                   )
      else fit$deviance
    fit$null.deviance <- null.deviance
  }
  ## Record the estimates in the call so update() restarts from them.
  Call$init.theta <- as.vector(th)
  Call$link <- link
  if(model) fit$model <- m
  if(x) fit$x <- X
  if(!y) fit$y <- NULL
  fit$contrasts <- if (length(clv <- unlist(lapply(m, class)))) 
    options("contrasts")[[1]] else FALSE
  ## theta and its standard error are stored alongside the glm components;
  ## note the SE is kept as `SE.theta' (summary/print methods rely on it).
  fit <- c(fit, list(call = Call, formula = formula, terms = Terms,
                     data = data, offset = offset, control = control,
                     method = method, theta = as.vector(th),
                     SE.theta = attr(th, "SE"),
                     twologlik = as.vector(2 * Lm)))
  class(fit) <-  c("negbin", "glm", "lm")
  fit
}

"negative.binomial" <-
function(theta = stop("theta must be specified"), link = "log")
{
  linktemp <- substitute(link)
  if (!is.character(linktemp)) {
    linktemp <- deparse(linktemp)
    if (linktemp == "link") 
      linktemp <- eval(link)
  }
  if (any(linktemp == c("log", "identity", "sqrt"))) 
    stats <- make.link(linktemp)
  else stop(paste(linktemp, "link not available for negative binomial", 
                  "family; available links are", "\"identity\", \"log\" and \"sqrt\""))
  .Theta <- theta
  stats <- make.link("log")
  variance <- function(mu)
    mu + mu^2/.Theta
  validmu <- function(mu)
    all(mu > 0)
  dev.resids <- function(y, mu, wt)
    2 * wt * (y * log(pmax(1, y)/mu) - (y + .Theta) *
              log((y + .Theta)/ (mu + .Theta)))
  aic <- function(y, n, mu, wt, dev) {
    term <- (y + .Theta) * log((y + .Theta)/ (mu + .Theta)) - y * log(mu) +
      lgamma(y + 1) - .Theta * log(.Theta) + lgamma(.Theta) - lgamma(.Theta+y)
    2 * sum(term * wt)
  }
  initialize <- expression({
    if (any(y < 0)) stop(paste("Negative values not allowed for", 
                               "the Poisson family"))
    n <- rep(1, nobs)
    mustart <- y + (y == 0)/6
  })
  famname <- paste("Negative Binomial(", format(round(theta, 4)), ")",
                   sep = "")
  structure(list(family = famname, link = linktemp, linkfun = stats$linkfun, 
                 linkinv = stats$linkinv, variance = variance, dev.resids = dev.resids, 
                 aic = aic, mu.eta = stats$mu.eta, initialize = initialize, 
                 validmu = validmu, valideta = stats$valideta), class = "family")
}

"rnegbin"<-
function(n, mu = n, theta = stop("theta must be given"))
{
  k <- if(length(n) > 1) length(n) else n
  rpois(k, (mu * rgamma(k, theta))/theta)
}

"summary.negbin" <-
function(object, dispersion = 1, correlation = T, ...)
{
  if(is.null(dispersion)) dispersion <- 1
  summ <- c(summary.glm(object, dispersion = dispersion, 
                        correlation = correlation), 
            object[c("theta", "SE", "twologlik", "th.warn")])
  class(summ) <- c("summary.negbin", "summary.glm")
  summ
}

#"print.summary.negbin" <- function(x, ...)
#{
#  NextMethod()
#  cat("\n              Theta: ", round(x$theta, 5), 
#      "\n          Std. Err.: ", round(x$SE, 5), "\n") 
#  if(!is.null(x$th.warn))
#    cat("Warning while fitting theta:", x$th.warn,"\n")
#  cat("\n 2 x log-likelihood: ", round(x$twologlik, 5), "\n")
#  invisible(x)
#}

"print.summary.negbin" <- function(x, ...)
{
  NextMethod()
  dp <- 2 - floor(log10(x$SE))
  cat("\n              Theta: ", format(round(x$theta, dp), sci=F, nsmall=dp),
      "\n          Std. Err.: ", format(round(x$SE, dp), sci=F, nsmall=dp),
      "\n") 
  if(!is.null(x$th.warn))
    cat("Warning while fitting theta:", x$th.warn,"\n")
  cat("\n 2 x log-likelihood: ", format(round(x$twologlik, 3), sci=F, nsmall=3), "\n")
  invisible(x)
}

"theta.md" <-
function(y, u, dfr, limit = 20, eps = .Machine$double.eps^0.25)
{
  if(inherits(y, "lm")) {
    u <- fitted(y)
    dfr <- y$df.residual
    y <- if(is.null(y$y)) u + residuals(y) else y$y
  }
  n <- length(y)
  t0 <- n/sum((y/u - 1)^2)
  a <- 2 * sum(y * log(pmax(1, y)/u)) - dfr
  it <- 0
  del <- 1
  while((it <- it + 1) < limit && abs(del) > eps) {
    t0 <- abs(t0)
    top <- a - 2 * sum((y + t0) * log((y + t0)/(u + t0)))
    bot <- 2 * sum((y - u)/(u + t0) - log((y + t0)/(u + t0)))
    del <- top/bot
    t0 <- t0 - del
  }
  if(t0 < 0) {
    t0 <- 0
    warning("estimator truncated at zero")
    attr(t0, "warn") <- "estimate truncated at zero"
  }
  t0
}

"theta.ml" <-
function(y, mu, n = length(y), limit = 10, eps = .Machine$double.eps^0.25,
         trace=F)
{
  score <- function(n, th, mu, y)
    sum(digamma(th + y) - digamma(th) + log(th) + 
	1 - log(th + mu) - (y + th)/(mu + th))
  info <- function(n, th, mu, y)
    sum( - trigamma(th + y) + trigamma(th) - 1/th + 
	2/(mu + th) - (y + th)/(mu + th)^2)
  if(inherits(y, "lm")) {
    mu <- fitted(y)
    y <- if(is.null(y$y)) mu + residuals(y) else y$y
  }
  t0 <- n/sum((y/mu - 1)^2)
  it <- 0
  del <- 1
  if(trace) cat("theta.ml: initial theta =", signif(t0), "\n")
  while((it <- it + 1) < limit && abs(del) > eps) {
    t0 <- abs(t0)
    del <- score(n, t0, mu, y)/(i <- info(n, t0, mu, y))
    t0 <- t0 + del
  if(trace) cat("theta.ml: iter", it," theta =", signif(t0), "\n")
  }
  if(t0 < 0) {
    t0 <- 0
    warning("estimator truncated at zero")
    attr(t0, "warn") <- "estimate truncated at zero"
  }
  if(it == limit) {
    warning("iteration limit reached")
    attr(t0, "warn") <- "iteration limit reached"
  }
  attr(t0, "SE") <- sqrt(1/i)
  t0
}

"theta.mm" <-
function(y, u, dfr, limit = 10, eps = .Machine$double.eps^0.25)
{
  if(inherits(y, "lm")) {
    u <- fitted(y)
    dfr <- y$df.residual
    y <- if(is.null(y$y)) u + residuals(y) else y$y
  }
  n <- length(y)
  t0 <- n/sum((y/u - 1)^2)
  it <- 0
  del <- 1
  while((it <- it + 1) < limit && abs(del) > eps) {
    t0 <- abs(t0)
    del <- (sum((y - u)^2/(u + u^2/t0)) - dfr)/sum((y - u)^2/(u + t0)^2)
    t0 <- t0 - del
  }
  if(t0 < 0) {
    t0 <- 0
    warning("estimator truncated at zero")
    attr(t0, "warn") <- "estimate truncated at zero"
  }
  t0
}
# file MASS/negexp.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
negexp.ival <- function(x, b0, b1, th)
{
   ## Initial-value routine for the negative exponential model
   ## y ~ b0 + b1 * exp(-x/th): fits a quadratic to estimate slopes, gets
   ## th from the decay of successive slopes, then a linear fit for b0, b1.
   ## NOTE(review): get(..., frame = 1) and `.nls.initial.response' are
   ## S-PLUS nls-internals; this function will not run unchanged in R.
   pnames <- as.character(sys.call()[3:5])
   y <- get(".nls.initial.response", frame = 1)
   if(length(unique(x)) < 3)
       stop("at least 3 distinct x values are needed")
   mx <- mean(x)
   ## quadratic approximation y ~ b[1] + b[2](x-mx) - b[3](x-mx)^2/2
   b <- as.vector(lsfit(cbind(x - mx, -(x - mx)^2/2), y)$coef)
   rx <- range(x)
   xh <- mx + b[2]/b[3]	# stationary point of the fitted quadratic
   ## truncate the interval so it lies on one side of the stationary point
   if(prod(xh - rx) < 0)
      if(xh - rx[1] > rx[2] - xh) rx[2] <- xh  else rx[1] <- xh
   x0 <- c(rx[1], sum(rx)/2, rx[2])
   ## dy holds slopes over the two half-intervals; their ratio gives th
   dy <- diff(b[1] + b[2]*(x0 - mx) - (b[3]*(x0 - mx)^2)/2)
   th <- (x0[2] - x0[1])/log(dy[1]/dy[2])
   ## with th fixed the model is linear in (b0, b1)
   b <- as.vector(lsfit(exp( - x/th), y)$coef)
   pars <- list(b[1], b[2], th)
   names(pars) <- pnames
   print(unlist(pars))
   pars
}

# MASS/nls-extras.q copyright (C) 1996 D. M. Bates,
# modified (C) 1997 B. D. Ripley.
#
## S-PLUS 3.x compatibility block (version$major < 4): supplies
## predict.nls and anova.nls; on later versions nothing is defined.
## NOTE(review): both functions use S-PLUS nls internals (parameters(),
## new.frame(), deviance.lm, as.anova, stat.anova) and are not usable
## unchanged in R.
if(version$major < 4) 
{
  ## Predictions, and optionally delta-method standard errors, from an
  ## nls fit.
  "predict.nls"<-
    function(object, newdata = list(), se.fit = F, ...)
  {
    if(!length(newdata) && !length(list(...))) {
      if(!se.fit) {
# most common case
	return(object$fitted)
      }
      newdata <- object$data
      if(!length(newdata)) newdata <- eval(object$call$data)
    }
    ldotdot <- list(...)	
    ## extract the operative part of the formula
    formula <- object$formula
    formula <- formula[[length(formula)]]	
    ## massage newdata and make a frame
    cl <- class(newdata)
    class(newdata) <- NULL
    if(length(ldotdot)) newdata[names(ldotdot)] <- ldotdot
    asgn <- object$assign
    anames <- names(asgn)
    coeff <- object$parameters
    pars <- as.list(parameters(newdata))
    for(i in seq(along = asgn))
      pars[[anames[[i]]]] <- coeff[asgn[[i]]]
    pars$.parameters <- anames
    class(newdata) <- cl
    parameters(newdata) <- pars
    nl.frame <- new.frame(newdata)
# next six lines a replacement by BDR for
# pred <- as.vector(eval(formula, nl.frame))
# which needs later code for pframes than that in S-PLUS 3.x.
    for(i in seq(along = asgn))
      assign(f = nl.frame, anames[[i]], coeff[asgn[[i]]])
    pred <- eval(formula, nl.frame)
    grad <- attr(pred, "gradient")
    pred <- as.vector(pred)
    base <- pred
    nlinear <- 0
    ## partially linear models: multiply through by the linear coefs
    if(inherits(object, "nls.pl")) {
      npar <- max(unlist(asgn))
      lin.pars <- coeff[ - (1:npar)]
      nlinear <- length(lin.pars)
      if(!is.matrix(pred)) pred <- matrix(pred, nc = nlinear)
      pred <- drop(pred %*% lin.pars)
    }
# Addition by BDR of four lines.
    if(se.fit && !length(grad)) {
      warning("se.fit is not implemented without gradients in this version")
      se.fit <- F
    }
    if(se.fit) {
# omit finite-difference case which used calls to compiled code.
      pred <- list(fit = pred)
      fit.summary <- summary.nls(object)
      if(nlinear) {
	if(length(dim(grad)) < 3)
	  grad <- array(grad, c(nobs/nlinear, nlinear, npar))
	gdim <- dim(aperm(grad, c(1, 3, 2)))
	grad <- array(c(array(grad, c(gdim[1] * gdim[2], gdim[3])) %*% 
			lin.pars, base), c(gdim[1], length(coeff))
		      )
      }
      ## delta-method SEs from the gradient and the parameter covariance
      pred$se.fit <- drop((((grad %*% fit.summary$cov) *
			    fit.summary$sigma^2 * grad)
			   %*% rep.int(1, length(coeff)))^0.5)
      pred$residual.scale <- fit.summary$sigma
      pred$df <- fit.summary$df[2]
    }
    pred
  }
  ## Analysis-of-variance comparison of two or more nls fits sharing the
  ## same response.
  "anova.nls"<-
    function(object, ..., test = c("F", "none", "Chisq", "Cp"))
  {
    if(length(list(...)) == 0) {
      stop("Anova is only defined for sequences of nls objects")
    }
    test <- match.arg(test)
    object <- list(object, ...)
    forms <- sapply(object, function(x) as.character(formula(x)))
    ## keep only fits whose response matches that of the first model
    subs <- as.logical(match(forms[2,  ], forms[2, 1], F))
    if(!all(subs))
      warning(paste("Some fit objects deleted because response differs",
		    "from the first model"))
    if(sum(subs) == 1)
      stop("The first model has a different response from the rest")
    forms <- forms[, subs]
    object <- object[subs]
    dfres <- sapply(object, function(x)
		    length(x$residuals) - length(x$parameters))
    dev <- sapply(object, deviance.lm)
    tl <- lapply(object, labels)
    rt <- length(dev)
    effects <- as.character(1:length(dev))
    ddev <-  - diff(dev)
    ddf <-  - diff(dfres)
    heading <- c("Analysis of Variance Table", 
		 paste("\nResponse: ", forms[2, 1], "\n", sep = ""))
    aod <- data.frame(Terms = forms[3,  ], "Resid. Df" = 
		      dfres, RSS = dev, Test = effects, Df = c(NA, ddf), 
		      "Sum of Sq" = c(NA, ddev), check.names = F)
    aod <- as.anova(aod, heading)
    if(test != "none") {
      ## scale tests by the residual mean square of the largest-df model
      n <- length(object[[1]]$residuals)
      o <- order(dfres)
      return(stat.anova(aod, test, dev[o[1]]/dfres[o[1]], dfres[o[1]], n))
    }
    aod
  }
  invisible()
}
# based on code in survival4 and modifications in MASS library.
# based on code in survival4 and modifications in MASS library.
## Plot survival curves from a survfit object, one step function per
## stratum (and per column when surv$surv is a matrix).  Optionally marks
## censoring times, draws confidence limits, and supports log ("logplot")
## and complementary log-log ("cloglog") axes.  Invisibly returns the
## coordinates of each curve's final point (useful for placing labels).
plot.survfit <-
function(surv, conf.int, mark.time = T, mark = 3, col = 1, lty = 1, lwd = 1, 
	cex = 1, logplot = F, yscale = 1, xscale = 1, xlab = "", ylab = "", 
	xaxs = "i", add = F, cloglog=F, strata, ...)
{
   ## draw one step curve; runs of equal y are collapsed to single steps
   put.line <- function(xx, yy, cloglog, lty, col, lwd)
   {
     nn <- length(xx)
     if (nn <= 1) return()
     if (cloglog) yy <- -log(yy)[-1]
     whom <- c(match(unique(yy[-nn]), yy), nn)
     lines(xx[whom], yy[whom], type="s", lty=lty, col=col, lwd=lwd)
   }
   
   if(!inherits(surv, "survfit"))
      stop("First arg must be the result of survfit")

   stime <- surv$time / xscale
   ssurv <- surv$surv
   ## default: confidence limits only when there is a single curve
   if(missing(conf.int)) {
      if(is.null(surv$strata) && !is.matrix(ssurv)) conf.int <- T
      else conf.int <- F
   }
   ## stemp labels each time point with its stratum number
   if(is.null(surv$strata)) {
      nstrat <- 1
      stemp <- rep(1, length(surv$time))
   }
   else {
      nstrat <- length(surv$strata)
      stemp <- rep(1:nstrat, surv$strata)
   }
   if(is.null(surv$n.event)) mark.time <- F
# expected survival curve
# set default values for missing parameters
   if (is.matrix(ssurv)) ncurve <- nstrat * ncol(ssurv)
   else                  ncurve <- nstrat
   ## recycle the per-curve graphics parameters to one value per curve
   mark <- rep(mark, length = ncurve)
   col <- rep(col, length = ncurve)
   lty <- rep(lty, length = ncurve)
   lwd <- rep(lwd, length = ncurve)
   if(!is.logical(mark.time) && is.numeric(mark.time))
     mark.time <- sort(mark.time[mark.time > 0])
   if (missing(xaxs)) temp <- 1.04*max(stime)
   else               temp <- max(stime)

#
# for log plots we have to be tricky about the y axis scaling
#
   if(!add) {
       if(cloglog) {
          yy <- ssurv[ssurv > 0 & ssurv < 1]
# strategy is to force 10-20% on-scale
          ymin <- min(0.1, yy, na.rm=T)
          ymax <- max(0.2, yy, na.rm=T)
	  plot(c(min(stime), temp), 
	     yscale * c(-log(ymax), -log(ymin)), type = "n", 
	     log = "xy", xlab = xlab, ylab = ylab, xaxs = xaxs, ...)
       }
       else if(logplot) {
	  ## zero survival cannot be shown on a log axis: clamp to ymin
	  ymin <- min(0.1, ssurv[!is.na(ssurv) & ssurv > 0])
	  ssurv[!is.na(ssurv) & ssurv == 0] <- ymin
	  plot(c(0, temp), yscale * c(0.99, ymin), type = "n", 
	     log = "y", xlab = xlab, ylab = ylab, xaxs = xaxs, ...)
       }
       else plot(c(0, temp), yscale * c(0, 1), type = "n", 
		xlab = xlab, ylab = ylab, xaxs = xaxs, ...)
   }
   if(yscale != 1) par(usr = par("usr")/c(1, 1, yscale, yscale))
#
# put up the curves one by one
#
   i <- 0
   xend <- NULL
   yend <- NULL
   if(missing(strata)) strata <- unique(stemp)
   for(j in strata) {
	who <- (stemp == j)
	if (cloglog) xx <- stime[who] else xx <- c(0, stime[who])
	nn <- length(xx)
	## leading -1 ensures the prepended t = 0 point is never treated
	## as a censoring time (censorings have n.event == 0)
	deaths <- c(-1, surv$n.event[who])
	if (is.matrix(ssurv)) {
	    for (k in 1:ncol(ssurv)) {
		i <- i + 1
		yy <- c(1, ssurv[who,k])
		ind <- deaths == 0
                if (cloglog) {
                  yy <- -log(yy)[-1]
                  ind <- ind[-1]
                }
		put.line(xx, yy, F, lty[i], col[i], lwd[i])
		if (is.numeric(mark.time)) {
		    ## mark the curve at the user-specified times
		    indx <- mark.time
		    for (k in seq(along=mark.time))
			indx[k] <- sum(mark.time[k] > xx)
		    points(mark.time[indx < nn], yy[indx[indx < nn]],
			   pch=mark[i], col=col[i], cex=cex)
		} else if (mark.time == T && any(ind))
		    points(xx[ind], yy[ind], pch=mark[i], col=col[i], cex=cex)
		xend <- c(xend,max(xx))
		yend <- c(yend,min(yy))

		if (conf.int == T && !is.null(surv$upper)) {
		    ## a single curve gets its limits in the next line type
		    if (ncurve == 1) lty[i] <- lty[i] + 1
		    yy <- c(1,surv$upper[who,k])
		    put.line(xx, yy, cloglog, lty[i], col[i], lwd[i])
		    yy <- c(1,surv$lower[who,k])
		    put.line(xx, yy, cloglog, lty[i], col[i], lwd[i])
		}
	    }
	} else {
	  i <- i + 1
	  yy <- c(1, ssurv[who])
          ind <- deaths == 0
          if (cloglog) {
            yy <- -log(yy)[-1]
            ind <- ind[-1]
          }
	  put.line(xx, yy, F, lty[i], col[i], lwd[i])
	  if(is.numeric(mark.time)) {
	     nn <- length(xx)
	     indx <- mark.time
	     for(k in seq(along = mark.time))
		indx[k] <- sum(mark.time[k] > xx)
	     points(mark.time[indx < nn], yy[indx[indx < nn]], pch
		 = mark[i], col = col[i], cex = cex)
	  }
	  else if(mark.time == T && any(ind))
	     points(xx[ind], yy[ind], pch = mark[i], col = col[i], cex = cex)
	  xend <- c(xend, max(xx))
	  yend <- c(yend, min(yy))
	  if(conf.int == T && !is.null(surv$upper)) {
	     if(ncurve == 1) lty[i] <- lty[i] + 1
	     yy <- c(1, surv$upper[who])
	     put.line(xx, yy, cloglog, lty[i], col[i], lwd[i])
	     yy <- c(1, surv$lower[who])
	     put.line(xx, yy, cloglog, lty[i], col[i], lwd[i])
	  }
       }
   }
   invisible(list(x = xend, y = yend))
}
# file MASS/polynom.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
"polynom"<- function(x, b)
{
	if(!is.loaded(symbol.C("poly"))) dyn.load("horner.o")
	m <- as.integer(length(x))
	n <- as.integer(length(b) - 1)
	storage.mode(x) <- "double"
	p <- x
	storage.mode(b) <- "double"
	.C("poly",
		m,
		val = p,
		x,
		n,
		b)$val
}
# File MASS/profiles.q copyright (C) 1996 D. M. Bates and W. N. Venables.
#
profile.glm <- function(fitted, which = 1:p, alpha = 0.01, 
			maxsteps = 10, del = zmax/5, trace = F)
{
  ## Profile the deviance of a glm fit over each requested coefficient:
  ## each parameter is stepped away from its estimate in units of
  ## del * std.err and the remaining parameters are refitted, recording
  ## the signed square-root change in deviance.
  ## Currently DISABLED: the first statement raises "not yet implemented".
  .NotYetImplemented()
  Pnames <- names(B0 <- coefficients(fitted))
  pv0 <- t(as.matrix(B0))
  p <- length(Pnames)
  if(is.character(which)) which <- match(which, Pnames)
  summ <- summary(fitted)
  std.err <- summ$coefficients[, "Std. Error"]
  mf <- update(fitted, method = "model.frame")
  n <- length(Y <- model.extract(mf, response))
  O <- model.extract(mf, offset)
  if(length(O) == 1 && O == 0) O <- rep(0, n)
  W <- model.extract(mf, weights)
  if(length(W) == 0) W <- rep(1, n)
  OriginalDeviance <- deviance(fitted)
  DispersionParameter <- summ$dispersion
  X <- model.matrix(fitted)
  fam <- family(fitted)
  ## cutoff zmax and the statistic's label depend on the family:
  ## chi-squared based ("z") when the dispersion is known, F based
  ## ("tau") otherwise
  switch(fam$family["name"],
	 Binomial = {
	   ## two-column binomial response: convert to proportion + weights
	   if(!is.null(dim(Y))) {
	     n <- n/2
	     O <- O[1:n]
	     Y <- Y[,1]/(W <- drop(Y %*% c(1,1)))
	   } 
	   zmax <- sqrt(qchisq(1 - alpha/2, p))
	   profName <- "z"
	 },
	 Poisson = ,
	 "Negative Binomial" = {
	   zmax <- sqrt(qchisq(1 - alpha/2, p))
	   profName <- "z"
	 }
	 ,
	 Gaussian = ,
	 "Quasi-likelihood" = ,
	 "Inverse Gaussian" = ,
	 {
	   zmax <- sqrt(p * qf(1 - alpha/2, p, n - p))
	   profName <- "tau"
	 }
	 )
  prof <- list()
  for(i in which) {
    zi <- 0
    pvi <- pv0
    Xi <- X[,  - i, drop = F]
    Pi <- Pnames[ - i]
    pi <- Pnames[i]
    ## profile downwards then upwards from the estimate
    for(sgn in c(-1, 1)) {
      if(trace) cat("\nParameter:", pi, c("up", "down")[(sgn + 1)/2 + 1], "\n")
      step <- 0
      z <- 0
      LP <- fitted$linear.predictors
      ## step until the signed sqrt deviance change passes zmax, or the
      ## step budget is exhausted
      while((step <- step + 1) < maxsteps && abs(z) < zmax) {
	bi <- B0[i] + sgn * step * del * 
	  std.err[i]
	## fix coefficient i at bi by moving its column into the offset
	fm <- glm.fit(x = Xi, y = Y, w = W, start = LP, 
		      offset = o <- O + X[, i] * bi, family = fam, 
		      trace = trace)
	LP <- fm$linear.predictors + o
	ri <- pv0
	ri[, names(coef(fm))] <- coef(fm)
	ri[, pi] <- bi
	pvi <- rbind(pvi, ri)
	z <- sgn * sqrt((deviance(fm) - OriginalDeviance)/DispersionParameter)
	zi <- c(zi, z)
      }
    }
    ## store the trace sorted by the profile statistic
    si <- sort.list(zi)
    prof[[pi]] <- structure(data.frame(zi[si]), names = profName)
    prof[[pi]]$par.vals <- pvi[si, ]
  }
  structure(prof, original.fit = fitted, summary = summ, 
	    class = c("profile.glm", "profile"))
}


plot.profile <-
  ## Trellis-based replacement for plot.profile
  function(x, nseg, ...) 
{
  ## Plot the profile traces in a profile object, one panel per
  ## parameter, as a Trellis xyplot with free x scales.
  ## Currently DISABLED: the first statement raises "not yet implemented".
  .NotYetImplemented()
  nulls <- sapply(x, is.null)
  if (all(nulls)) return(NULL)
  x <- x[!nulls]
  ## unroll the per-parameter traces into parallel vectors for xyplot
  tau <- parval <- parname <- list()
  for(nm in names(x)) {
    tau[[nm]] <- x[[nm]][[1]]
    parval[[nm]] <- x[[nm]][[2]][, nm]
    parname[[nm]] <- rep(nm, length(tau[[nm]]))
  }
  xyplot(formula = tau ~ parval | parname,
	 data = data.frame(tau = unlist(tau),
	     parval = unlist(parval),
	     ## Use explicit levels in conversion of parname to prevent the
	     ## levels being sorted into alphabetical order
	     parname = factor(unlist(parname), levels = names(x))),
	 xlab = "", ylab = names(x[[1]])[1],
	 scales = list(x = list(relation = "free"),
	     y = list(alternating = FALSE)), 
	 strip = function(...) strip.default(..., style = 1),
	 panel = function(x, y)
	 {
	   ## mark the MLE (tau == 0) and draw a smooth interpolation
	   panel.xyplot(x[y == 0], 0, pch = 3)
	   splineVals <- spline(x, y)
	   panel.xyplot(splineVals$x, splineVals$y, type = "l")
	 },
	 aspect = "xy",
	 ...)
}

pairs.profile <-
  ## Another plot method for profile objects showing pairwise traces.
  ## Recommended only for diagnostic purposes.
function(x,
	 colours = if(is.trellis())
	 trellis.par.get("superpose.line")$col[2:3]
	 else 2:3)
{
  ## Draw an npar x npar matrix of panels: parameter names on the
  ## diagonal, and in each off-diagonal panel the two profile traces for
  ## that pair of parameters, with short tick segments along each trace.
  ## Currently DISABLED: the first statement raises "not yet implemented".
  .NotYetImplemented()
  parvals <- lapply(x, "[[", "par.vals")
  ## common axis ranges across all traces
  rng <- apply(do.call("rbind", parvals), 2, range, na.rm = T)
  Pnames <- dimnames(rng)[[2]]
  npar <- length(Pnames)
  coefs <- coef(attr(x, "original.fit"))
  form <- paste(as.character(attr(x, "original.fit")$formula)[c(2, 1, 3)], 
		collapse = "")
  oldpar <- par(mar = c(0, 0, 0, 0), mfrow = c(1, 1),
		oma = c(3, 3, 6, 3), las = 1)
  on.exit(par(oldpar))	
  ##
  ## The following dodge ensures that the plot region is square
  ##
  fin <- par("fin")
  dif <- (fin[2] - fin[1])/2
  if(dif > 0) adj <- c(dif, 0, dif, 0)
  else adj <- c(0,  - dif, 0,  - dif)
  par(omi = par("omi") + adj)	
  ## 
  ##
  cex <- 1 + 1/npar
  frame()
  mtext(form, side = 3, line = 3, cex = 1.5, outer = T)
  del <- 1/npar
  for(i in 1:npar) {
    ci <- npar - i
    pi <- Pnames[i]
    for(j in 1:npar) {
      pj <- Pnames[j]
      ## place panel (i, j) in figure coordinates
      par(fig = del * c(j - 1, j, ci, ci + 1))
      if(i == j) {
	## diagonal panel: just the parameter name
	plot(rng[, pj], rng[, pi], axes = F, xlab = "", ylab = "", type = "n")
	op <- par(usr = c(-1, 1, -1, 1))
	text(0, 0, pi, cex = cex, adj = 0.5)
	par(op)
      } else {
	## swap the two trace colours below/above the diagonal
	col <- colours
	if(i < j) col <- col[2:1]
	if(!is.null(parvals[[pj]])) {
	  ## smooth trace of parameter i as parameter j is profiled
	  plot(spline(x <- parvals[[pj]][, pj], 
		      y <- parvals[[pj]][, pi]), type = "l", xlim = rng[, pj], 
	       ylim = rng[, pi], axes = F, xlab = "", ylab = "", col = col[2])
	  pu <- par("usr")
	  smidge <- 2/100 * (pu[4] - pu[3])
	  segments(x, pmax(pu[3], y - smidge), x, pmin(pu[4], y + smidge),
		   lwd = 0)
	} else 
	  plot(rng[, pj], rng[, pi], axes = F, xlab = "", ylab = "",
	       type = "n")
	if(!is.null(parvals[[pi]])) {
	  ## trace of parameter j as parameter i is profiled
	  lines(x <- parvals[[pi]][, pj], y <- parvals[[pi]][, pi],
		type = "l", col = col[1])
	  pu <- par("usr")
	  smidge <- 2/100 * (pu[2] - pu[1])
	  segments(pmax(pu[1], x - smidge), y, pmin(pu[2], x + smidge), y,
		   lwd = 0)
	}
	## mark the joint MLE
	points(coefs[pj], coefs[pi], pch = 3, cex = 3)
      }
      ## axes only on the outer edges of the panel matrix
      if(i == npar) axis(1, outer = T)
      if(j == 1) axis(2, outer = T)
      if(i == 1) axis(3, outer = T)
      if(j == npar) axis(4, outer = T)
    }
  }
  par(fig = c(0, 1, 0, 1))
  invisible(x)
}
# file MASS/qda.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
## Generic for quadratic discriminant analysis; dispatches on x.
## NOTE(review): passing x and ... to UseMethod is the old S dispatch
## convention -- in modern R this would be UseMethod("qda") alone; confirm
## the target dialect before changing.
qda <- function(x, ...) 
{
  ## unclassed objects get their data class so dispatch can proceed
  if(is.null(class(x))) class(x) <- data.class(x)
  UseMethod("qda", x, ...)
}

qda.formula <- function(formula, data = NULL, ..., 
			subset, na.action = na.fail)
{
  ## Formula interface to qda: build the model frame, extract the
  ## grouping (response) and the design matrix without its intercept
  ## column, then delegate to qda.default.
  m <- match.call(expand.dots = F)
  if(is.matrix(eval(m$data, sys.parent())))
    m$data <- as.data.frame(data)
  m$... <- NULL
  m[[1]] <- as.name("model.frame")
  m <- eval(m, sys.frame(sys.parent()))
  Terms <- attr(m, "terms")
  grouping <- model.extract(m, "response")
  x <- model.matrix(Terms, m)
  ## the intercept column carries no discriminant information: drop it
  xint <- match("(Intercept)", dimnames(x)[[2]], nomatch=0)
  if(xint > 0) x <- x[, -xint, drop=F]
  res <- qda.default(x, grouping, ...)
  res$terms <- Terms
  res$call <- match.call()
  res
}

qda.data.frame <- function(x, ...)
{
  ## Data-frame method: coerce to a numeric matrix and delegate.
  fit <- qda.matrix(data.matrix(x), ...)
  fit$call <- match.call()
  fit
}

qda.Matrix <- function(x, ...)
{
  ## Method for Matrix-class objects: coerce to a base matrix and delegate.
  fit <- qda.matrix(as.matrix(x), ...)
  fit$call <- match.call()
  fit
}

qda.matrix <- function(x, grouping, ..., 
		       subset, na.action = na.fail)
{
  ## Matrix method: apply subset and na.action consistently to the data
  ## matrix and the grouping vector, then fall through to qda.default
  ## via NextMethod.
  if(!missing(subset)) {
    x <- x[subset, , drop = F]
    grouping <- grouping[subset]
  }
  if(!missing(na.action)) {
    ## wrap x and grouping in a data-frame-like structure so na.action
    ## drops the same rows from both
    dfr <- na.action(structure(list(g = grouping, x = x), 
			       class = "data.frame"))
    grouping <- dfr$g
    x <- dfr$x
  }
  res <- NextMethod("qda")
  res$call <- match.call()
  res 
}

qda.default <- function(x, grouping, prior = proportions,
			method = c("moment", "mle", "mve", "t"), CV=F,
                        nu = 5, ...)
{
  ## Quadratic discriminant analysis on a data matrix with known groups.
  ## For each group it stores a `scaling' matrix S (so that sphering
  ## z = (x - mean) %*% S gives unit within-group covariance) together
  ## with the log-determinant of the group covariance -- everything
  ## predict.qda needs.
  ##
  ## x        numeric matrix: observations in rows, variables in columns
  ## grouping class labels, one per row of x
  ## prior    prior class probabilities (default: sample proportions)
  ## method   covariance estimator: "moment" (n-1 divisor), "mle"
  ##          (n divisor), "mve" (robust) or "t" (multivariate t, df nu)
  ## CV       if true, return leave-one-out classes/posteriors instead
  ## nu       degrees of freedom for method = "t"
  which.is.max <- function(x) {
    ## index of the maximum of x, breaking ties at random
    d <- (1:length(x))[x == max(x)]
    if(length(d) > 1) d <- sample(d, 1)
    d
  }

  if(is.null(dim(x))) stop("x is not a matrix")
  n <- nrow(x)
  p <- ncol(x)
  if(n != length(grouping)) stop("nrow(x) and length(grouping) are different")
  g <- as.factor(grouping)
  lev <- levels(g)
  counts <- table(g)
  if(any(counts < p+1)) stop("some group is too small for qda")
  proportions <- counts/length(g)
  ng <- length(proportions)
# allow for supplied prior
  if(any(prior < 0) || round(sum(prior), 5) != 1) stop("invalid prior")
  if(length(prior) != ng) stop("prior is of incorrect length")
  dim(prior) <- dimnames(prior) <- NULL
# means by group (rows) and variable (columns)
  group.means <- tapply(x, list(rep(g, ncol(x)), col(x)), mean)
  scaling <- array(dim=c(p,p,ng))
  ldet <- numeric(ng)
  method <- match.arg(method)
  if(CV && !(method == "moment" || method == "mle"))
     stop(paste("Cannot use leave-one-out CV with method", method))
  for (i in 1:ng){
    if(method == "mve") {
      ## Robust estimate; cov.mve recomputes the centre, so replace the
      ## group-mean row with it.  BUG FIXES: the data matrix was misspelt
      ## `X' (undefined here), the row assignment `group.means[i] <- ...'
      ## dropped its comma and wrote a single element, and the log was
      ## taken of diag(sX$d) -- a matrix -- so the off-diagonal zeros
      ## made the log-determinant -Inf; sum(log(sX$d)) is intended.
      cX <- cov.mve(x[unclass(g) == i, ], , F)
      group.means[i, ] <- cX$center
      sX <- svd(cX$cov, nu=0)
      scaling[, , i] <- sX$v %*% diag(sqrt(1/sX$d),,p)
      ldet[i] <- sum(log(sX$d))
    } else if(method == "t") {
      ## iteratively reweighted estimate for a multivariate t with df nu
      if(nu <= 2) stop("nu must exceed 2")
      m <- counts[i]
      X <- x[unclass(g) == i, ]
      w <- rep(1, m)
      repeat {
         w0 <- w
         W <- scale(X, group.means[i, ], F)
         sX <- svd(sqrt((1 + p/nu) * w/m) * W, nu=0)
         W <- W %*% sX$v %*% diag(1/sX$d,, p)
         w <- 1/(1 + drop(W^2 %*% rep(1, p))/nu)
#         print(summary(w))
         group.means[i,] <- apply(w*X, 2, sum)/sum(w)
         if(all(abs(w - w0) < 1e-2)) break
      }
      qx <- qr(sqrt(w)*scale(X, group.means[i, ], F)) 
      if(qx$rank < p) stop(paste("Rank deficiency in group", lev[i]))
      qx <- qx$qr* sqrt((1 + p/nu)/m)
      scaling[, , i] <- backsolve(qx[1:p,  ], diag(p))
      ldet[i] <- 2*sum(log(abs(diag(qx))))
    } else {
      ## moment/mle covariance via the QR decomposition of the centred,
      ## scaled data; R's diagonal gives the log-determinant
      if(method == "moment") nk <- counts[i] - 1 else nk <- counts[i]
      X <- scale(x[unclass(g) == i, ], group.means[i, ], F)/sqrt(nk)
      qx <- qr(X)
      if(qx$rank < p) stop(paste("Rank deficiency in group", lev[i]))
      qx <- qx$qr
      scaling[, , i] <- backsolve(qx[1:p, ], diag(p))
      ldet[i] <- 2*sum(log(abs(diag(qx))))
    }
  }
  if(CV) {
    ## leave-one-out predictions via closed-form downdating corrections
    NG <- if(method == "mle") 0 else 1
    dist <- matrix(0, n, ng)
    Ldet <- matrix(0, n, ng)
    for(i in 1:ng) {
      dev <- ((x - matrix(group.means[i,  ], nrow(x), 
                          p, byrow = T)) %*% scaling[,,i])
      dist[, i] <- apply(dev^2, 1, sum)
      Ldet[, i] <- ldet[i]
    }
    nc <- counts[g]
    ind <- cbind(1:n, g)
    ## adjust each case's own-group distance and log-det for its removal
    Ldet[ind] <- log(1 - nc/(nc-1)/(nc-NG) * dist[ind]) +
      p * log((nc-NG)/(nc-1-NG)) + Ldet[ind]
    dist[ind] <- dist[ind] * (nc^2/(nc-1)^2) * (nc-1-NG)/(nc-NG) /
      (1 - nc/(nc-1)/(nc-NG) * dist[ind])
    dist <- 0.5 * dist + 0.5 * Ldet - matrix(log(prior), n, ng, byrow=T)
    dist <- exp(-(dist - min(dist, na.rm=T)))
    cl <- factor(apply(dist, 1, which.is.max), levels=seq(along=lev),
                 labels=lev)
    # convert to posterior probabilities
    posterior <- dist/drop(dist %*% rep(1, length(prior)))
    dimnames(posterior) <- list(dimnames(x)[[1]], lev)
    return(list(class = cl, posterior = posterior))
  }
  if(is.null(dimnames(x)))
    dimnames(scaling) <- list(NULL, as.character(1:p), lev)
  else {
    dimnames(scaling) <- list(dimnames(x)[[2]], as.character(1:p), lev)
    dimnames(group.means)[[2]] <- dimnames(x)[[2]]
  }
  res <- list(prior = prior, counts = counts, means = group.means, 
	      scaling = scaling, ldet = ldet, lev = lev, N = n,
	      call = match.call())
  class(res) <- "qda"
  res
}

predict.qda <- function(object, newdata, prior = object$prior,
			method = c("plug-in", "predictive", "debiased",
                          "looCV"), ...)
{
  ## Classify observations using a fitted "qda" object.
  ## method chooses plug-in, leave-one-out CV, debiased or predictive
  ## discriminant scores.  Returns list(class, posterior).
  ## NOTE(review): the prior argument is accepted but object$prior is
  ## used throughout the computations -- confirm intended.
  which.is.max <- function(x) {
    # index of the maximum, breaking ties at random
    d <- (1:length(x))[x == max(x)]
    if(length(d) > 1) d <- sample(d, 1)
    d
  }
  model.frame.lda <- function(formula)
  {
    # recover the model frame used in the original (formula) fit
    oc <- formula$call
    oc$method <- "model.frame"
    oc[[1]] <- as.name("lm")
    eval(oc, sys.frame(sys.parent()))
  }

  if(!inherits(object, "qda")) stop("object not of class qda")
  method <- match.arg(method)
  if(method == "looCV" && !missing(newdata))
    stop("Cannot have leave-one-out CV with newdata")
  if(is.null(mt <- object$call$method)) mt <- "moment"
  if(method == "looCV" && !(mt == "moment" || mt == "mle"))
     stop(paste("Cannot use leave-one-out CV with method", mt))
  if(!is.null(Terms <- object$terms)) {
    # formula fit
    if(missing(newdata)) newdata <- model.frame.lda(object)
    else newdata <- model.frame(as.formula(delete.response(Terms)),
                                newdata, na.action=function(x) x)
    x <- model.matrix(delete.response(Terms), newdata)
    xint <- match("(Intercept)", dimnames(x)[[2]], nomatch=0)
    if(xint > 0) x <- x[, -xint, drop=F]
    if(method == "looCV") g <- model.extract(newdata, "response")
  } else { #
    # matrix or data-frame fit
    if(missing(newdata)) { #
      # BUG FIX: this branch used to read x$call before x was defined,
      # and subsetted columns (newdata[, sub]) / used matrix indexing on
      # the grouping vector (g[, sub]).  Recover the training data from
      # the fitted object's call and subset observations (rows).
      newdata <- eval(object$call$x)
      g <- eval(object$call[[3]])
      if (!is.null(sub <- object$call$subset)) {
        newdata <- newdata[sub, , drop=F]
        g <- g[sub]
      }
      if(!is.null(nas <- object$call$na.action)) {
        df <- structure(list(g = g, X = newdata), class = "data.frame")
        df <- do.call(nas, list(df))
        g <- df$g
        newdata <- df$X
      }
      g <- as.factor(g)
    }
    if(is.null(dim(newdata)))
      dim(newdata) <- c(1, length(newdata)) # a row vector
    x <- as.matrix(newdata)          # to cope with dataframes
  }
  p <- dim(object$means)[2]
  if(dim(x)[2] != p) stop("wrong number of variables")
  if(length(dimnames(x)[[2]]) > 0 &&
     any(dimnames(x)[[2]] != dimnames(object$means)[[2]]))
        warning("Variable names in newdata do not match those in object")
  ngroup <- length(object$prior)
  dist <- matrix(0, nrow = nrow(x), ncol = ngroup)
  if(method == "plug-in") {
    # per-group score: 0.5*Mahalanobis^2 + 0.5*log|Sigma| - log prior,
    # shifted by the overall minimum and exponentiated
    for(i in 1:ngroup) {
      dev <- ((x - matrix(object$means[i,  ], nrow(x), 
                          ncol(x), byrow = T)) %*% object$scaling[,,i])
      dist[, i] <- 0.5 * apply(dev^2, 1, sum) + 0.5 * object$ldet[i] - log(object$prior[i])
    }
    dist <- exp(-(dist - min(dist, na.rm=T)))
  } else if(method == "looCV") {
    # leave-one-out adjustment of each case's own-group distance and
    # log-determinant (closed-form downdate of the covariance)
    n <- nrow(x)
    NG <- 1
    if(mt == "mle") NG <- 0
    ldet <- matrix(0, n, ngroup)
    for(i in 1:ngroup) {
       dev <- ((x - matrix(object$means[i,  ], nrow(x), 
                           p, byrow = T)) %*% object$scaling[,,i])
       dist[, i] <- apply(dev^2, 1, sum)
       ldet[, i] <- object$ldet[i]
     }
    nc <- object$counts[g]
    ind <- cbind(1:n, g)
    ldet[ind] <- log(1 - nc/(nc-1)/(nc-NG) * dist[ind]) +
      p * log((nc-NG)/(nc-1-NG)) + ldet[ind]
    dist[ind] <- dist[ind] * (nc^2/(nc-1)^2) * (nc-1-NG)/(nc-NG) /
      (1 - nc/(nc-1)/(nc-NG) * dist[ind])
    dist <- 0.5 * dist + 0.5 * ldet -
      matrix(log(object$prior), n, ngroup, byrow=T)
    dist <- exp(-(dist - min(dist, na.rm=T)))
  } else if(method == "debiased") {
    # debiased plug-in scores with a Bartlett-style correction Bm
    # NOTE(review): the digamma sum runs over 1:ngroup; the analogous
    # correction is usually over 1:p -- confirm against the reference.
    for(i in 1:ngroup) {
      nk <- object$counts[i]
      Bm <- p * log((nk-1)/2) - sum(digamma(0.5 * (nk - 1:ngroup)))
      dev <- ((x - matrix(object$means[i,  ], nrow = nrow(x), 
			  ncol = ncol(x), byrow = T)) %*% object$scaling[,,i])
      dist[, i] <- 0.5 * (1 - (p-1)/(nk-1)) * apply(dev^2, 1, sum) + 
		   0.5 * object$ldet[i] - log(object$prior[i]) + 0.5 * Bm - p/(2*nk)
    }
    dist <- exp(-(dist - min(dist, na.rm=T)))
  } else {
    # predictive: Student-t-like posterior predictive weight per group
    N <- object$N
    for(i in 1:ngroup) {
      nk <- object$counts[i]
      dev <- ((x - matrix(object$means[i,  ], nrow = nrow(x),
			  ncol = ncol(x), byrow = T)) %*% object$scaling[,,i])
      dev <- 1 + apply(dev^2, 1, sum)/(nk+1)
      dist[, i] <- object$prior[i] * exp(-object$ldet[i]/2) * dev^(-nk/2) *
	(1 + nk)^(-p/2)
   }
  }
  cl <- factor(apply(dist, 1, which.is.max), levels=seq(along=object$lev),
               labels=object$lev)
# convert to posterior probabilities
  posterior <- dist/drop(dist %*% rep(1, length(object$prior)))
  dimnames(posterior) <- list(dimnames(x)[[1]], object$lev)
  list(class = cl, posterior = posterior)
}

print.qda <- function(x, ...)
{
  ## Print method for "qda" fits: the call (with the name of its second
  ## element suppressed), the prior probabilities and the group means.
  cl <- x$call
  if(!is.null(cl)) {
    names(cl)[2] <- ""
    cat("Call:\n")
    dput(cl)
  }
  for(part in c("prior", "means")) {
    cat(switch(part,
               prior = "\nPrior probabilities of groups:\n",
               means = "\nGroup means:\n"))
    print(x[[part]], ...)
  }
  invisible(x)
}
# file MASS/rlm.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
rlm <-
function(formula, data, weights, subset, na.action, method = "qr", model = F,
        x = F, y = F, ...)
{
   # Robust linear regression via a formula interface: builds the model
   # frame/matrix and delegates the M-estimation fit to hsreg().
   call <- match.call()
   m <- match.call(expand = F)
   # strip arguments model.frame() does not know, then re-evaluate the
   # call as model.frame() in the caller's frame (S-PLUS-era idiom)
   m$method <- m$model <- m$x <- m$y <- m$... <- NULL
   m[[1]] <- as.name("model.frame")
   m <- eval(m, sys.frame(sys.parent()))
   if(method == "model.frame") return(m)
   Terms <- attr(m, "terms")
   weights <- model.extract(m, weights)
   Y <- model.extract(m, response)
   X <- model.matrix(Terms, m)
   if(length(weights)==0) weights <- rep(1, nrow(X))
   # prior weights are passed as wx so hsreg can separate them from the
   # robustness weights it computes
   fit <- hsreg(X, Y, wx = weights, ...)
   fit$terms <- Terms
   fit$call <- call
   if(model) fit$model <- m
   # NOTE(review): x and y are stored unconditionally, so the x/y
   # arguments have no effect; summary.rlm relies on fit$x being
   # present -- confirm before making these conditional.
   fit$x <- X
   fit$y <- Y
   fit
}
hsreg <-
function(x, y, w = rep(1, nrow(x)), k=1.345, wx, maxit = 20, sw=1000,
   acc = .Machine$double.eps^0.25, test.vec = "resid", ...)
{
   # Iteratively reweighted least squares for Huber's M-estimator with
   # tuning constant k.  test.vec names the local quantity ("resid",
   # "coef" or "w") whose relative change drives the convergence test.
   #
   # relative L2 change between successive iterates
   irls.delta <- function(old, new)
      sqrt(sum((old - new)^2)/max(1e-20, sum(old^2)))
   # fallback convergence measure used when test.vec is NULL
   irls.rrxwr <- function(x, w, r)
   {
      w <- sqrt(w)
      max(abs((matrix(r * w, 1, length(r)) %*% x)/sqrt(matrix(w,
         1, length(r)) %*% (x^2))))/sqrt(sum(w * r^2))
   }
   if(!(any(test.vec == c("resid", "coef", "w", "NULL")) || is.null(
      test.vec)))
      stop("invalid testvec")
   if(!missing(wx)) {
      # wx are fixed case weights applied on top of robustness weights
      if(length(wx) != nrow(x))
         stop("Length of wx must equal number of observations")
      if(any(wx < 0))
         stop("Negative wx value")
      w <- w * wx
   }
   # initial weighted least-squares fit
   temp <- lm.wfit(x, y, w, "qr", ...)
   coef <- temp$coef
   resid <- temp$resid
   # gamma: consistency constant for the Huber proposal-2 scale estimate
   th <- 2*pnorm(k)-1
   gamma <- th + k^2*(1-th) -2*k*dnorm(k)
      
   done <- FALSE
   conv <- NULL
   n1 <- nrow(x) - ncol(x)
   scale <- median(abs(resid))/0.6745
   for(iiter in 1:maxit) {
      # snapshot the convergence-test quantity (looked up by name)
      if(!is.null(test.vec)) testpv <- get(test.vec)
      ks <- k*scale
      # MAD-based scale for the first sw iterations, proposal 2 after
      if(iiter < sw) scale <- median(abs(resid))/0.6745
      else scale <- sqrt(sum(pmin(resid^2,ks^2))/(n1*gamma))
      if(scale == 0) {
         done <- T
         break
      }
      w <- as.vector(wt.huber(resid/scale, k))
      if(!missing(wx)) w <- w * wx    # adjust for wx weights
      temp <- lm.wfit(x, y, w, "qr")
      coef <- temp$coef
      resid <- temp$residuals
      if(!is.null(test.vec))
         convi <- irls.delta(testpv, get(test.vec))
      else convi <- irls.rrxwr(x, w, resid)
      conv <- c(conv, convi)
      done <- convi <= acc
      if(!done) next
      # NOTE(review): method.done is never defined in this function, so
      # !exists("method.done") decides; confirm whether a global hook
      # was intended here.
      if(!exists("method.done") || method.done) break
   }
   if(!done)
      warning(paste("hsreg failed to converge in", maxit, "steps"))
   if(!missing(wx)) {
      # report robustness weights with the fixed wx weights divided out
      tmp <- (wx != 0)
      w[tmp] <- w[tmp]/wx[tmp]
   }
   names(scale) <- NULL # since median assigns a name
   fit <- list(coefficients = coef, residuals = resid, 
     fitted.values = temp$fitted.values, rank = temp$rank, 
     assign =temp$assign,  w = w, k = k, s = scale, 
     conv = conv, converged = done)
   class(fit) <- c("rlm", "lm")
   fit
}
print.rlm <-
function(x, ...)
{
   ## Print method for "rlm" fits: call, convergence status,
   ## coefficients, degrees of freedom and the robust scale estimate.
   theCall <- x$call
   if(!is.null(theCall)) {
      cat("Call:\n")
      dput(theCall)
   }
   niter <- length(x$conv)
   if(x$converged) cat("Converged in", niter, "iterations\n")
   else cat("Ran", niter, "iterations without convergence\n")
   b <- x$coef
   cat("\nCoefficients:\n")
   print(b, ...)
   n.obs <- length(x$resid)
   resid.df <- n.obs - length(b)
   cat("\nDegrees of freedom:", n.obs, "total;", resid.df, "residual\n")
   cat("Scale estimate:", format(signif(x$s, 3)), "\n")
   invisible(x)
}
summary.rlm <- function(object, ...)
{
   # Summary for a Huber M-estimate fit: robust standard errors from the
   # winsorized residual variance with Huber's finite-sample correction,
   # using the QR factor of the design matrix stored in object$x.
   k <- object$k
   s <- object$s
   ks <- k*s
   coef <- object$coef
   cnames <- names(coef)
   ptotal <- length(coef)
   resid <- object$resid
   n <- length(resid)
   if(any(na <- is.na(coef))) coef <- coef[!na]
   p <- length(coef)
   rdf <- n - p
   rinv <- diag(p)
   dimnames(rinv) <- list(cnames, cnames)
   # winsorized residual variance S, proportion m of "clean" residuals,
   # and the resulting corrected standard deviation
   S <- sum(pmin(abs(resid), ks)^2)/rdf
   m <- sum(abs(resid) < ks)/n
   kappa <- 1 + p*(1-m)/(n*m)
   stddev <- sqrt(S)*(kappa/m)
   # upper-triangular R from the QR of the design; invert for (X'X)^-1
   R <- qr(object$x)$qr
   R <- R[1:p, 1:p, drop = F]
   # for(i in 2:p)for(j in 1:(i-1))R[i,j] <- 0
   R[lower.tri(R)] <- 0
   rinv <- solve(R, rinv)
   rowlen <- (rinv^2 %*% rep(1, p))^0.5
   names(rowlen) <- cnames
   correl <- rinv * array(1/rowlen, c(p, p))
   correl <- correl %*% t(correl)
   # assemble the (Value, Std. Error, t value) coefficient table
   coef <- array(coef, c(p, 3))
   dimnames(coef) <- list(cnames, c("Value", "Std. Error", "t value"))
   coef[, 2] <- rowlen %o% stddev
   coef[, 3] <- coef[, 1]/coef[, 2]
   # rebuild the object so it prints via the "summary.lm" method
   object <- object["call"]
   object$residuals <- resid
   object$coefficients <- coef
   object$sigma <- s
   object$stddev <- stddev
   object$df <- c(p, rdf, ptotal)
   object$r.squared <- NA
   object$cov.unscaled <- rinv %*% t(rinv)
   object$correlation <- correl
   object$terms <- NA
   class(object) <- "summary.lm"
   object
}
# file MASS/rms.curv.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
"rms.curv"<-
function(obj, fit.val = get.fit.val(obj, data), data = obj$call$data)
{
  # Relative curvature measures (parameter-effects and intrinsic) for a
  # nonlinear regression whose fitted values carry "gradient" and
  # "hessian" attributes.
  # NOTE(review): .NotYetImplemented() signals an error immediately, so
  # everything below is currently unreachable in this port.
  .NotYetImplemented()
       # evaluate the model function at the fitted coefficients, in an
       # environment built from the data plus the coefficient values
       get.fit.val <- function(obj, data)
       {
               if(is.null(data))
                       data <- sys.parent(2)
               if(is.numeric(data))
                       data <- sys.frame(data)
               if(is.name(data))
                       data <- get(data)
               class(data) <- NULL
               pp <- as.list(b <- coef(obj))
               np <- names(b)
               data[np] <- pp
               eval(as.expression(obj$formula[3]), local = data)
       }
       v <- attr(fit.val, "gradient")
       if(is.null(v))
               stop("gradient attribute missing")
       a <- attr(fit.val, "hessian")
       if(is.null(a))
               stop("hessian attribute missing")
       p <- ncol(v)
       n <- nrow(v)
       # residual standard error, scaled by sqrt(p)
       s <- sqrt(sum((obj$residuals)^2)/(n - p))
       sp <- s * sqrt(p)
       # D = [gradient | packed hessian columns]; its QR factorisation
       # separates the tangent-plane and normal components
       D <- v
       for(j in 1:p)
               D <- cbind(D, a[, 1:j, j])
       qrd <- qr(D)
       Q <- qr.Q(qrd)
       rnk <- qrd$rank
       if(rnk <= p)
               warning("regression apparently linear")
       Q1 <- Q[, 1:rnk]
       C <- array(0, c(rnk, p, p))
       for(j in 1:p)
               C[,  , j] <- crossprod(Q1, a[,  , j])
       C <- aperm(C, c(2, 3, 1))
       r11i <- solve(qr.R(qrd)[1:p, 1:p])
       # ct: parameter-effects curvature from the first p faces of C
       ct <- 0
       for(j in 1:p) {
               C[,  , j] <- crossprod(r11i, C[,  , j]) %*% r11i * sp
               ct <- ct + 2 * sum(C[,  , j]^2) + sum(diag(C[,  , j]))^2
       }
       # ci: intrinsic curvature from the remaining faces
       ci <- 0
       for(j in (p + 1):rnk) {
               C[,  , j] <- crossprod(r11i, C[,  , j]) %*% r11i * sp
               ci <- ci + 2 * sum(C[,  , j]^2) + sum(diag(C[,  , j]))^2
       }
       ct <- sqrt(ct/(p * (p + 2)))
       ci <- sqrt(ci/(p * (p + 2)))
       # scale by sqrt of the 95% F quantile for the usual comparison
       pe <- ct * sqrt(qf(19/20, p, n - p))
       ic <- ci * sqrt(qf(19/20, p, n - p))
       val <- list(pe = pe, ic = ic, ct = ct, ci = ci, C = C)
       class(val) <- "rms.curv"
       val
}
"print.rms.curv"<-
function(val, ...)
{
    ## Two-line display of the scaled rms curvature measures.
    pe <- round(val$pe, 4)
    ic <- round(val$ic, 4)
    cat("Parameter effects: c^theta x sqrt(F) =", pe, "\n",
        "       Intrinsic: c^iota  x sqrt(F) =", ic, "\n", ...)
    invisible(val)
}
# file MASS/sammon.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
sammon <- function(d, y, k=2, niter=100, trace=T, magic=0.2, tol=1e-4)
{
   # Sammon's nonlinear mapping of a dissimilarity structure into k
   # dimensions.  d may be a "dist" object or a square matrix; the
   # iterative optimisation runs in compiled C code (VR_sammon).
   call <- match.call()
   if(any(!is.finite(d)))
      stop("NAs/Infs not allowed in d")
   if(is.null(n <- attr(d, "Size"))) {
      # plain matrix supplied
      x <- as.matrix(d)
      if((n <- nrow(x)) != ncol(x))
        stop("Distances must be result of dist or a square matrix")
   }
   else {
      # "dist" object: expand lower triangle to a full symmetric matrix
      x <- matrix(0, n, n)
      x[row(x) > col(x)] <- d
      x <- x + t(x)
   }
   # Sammon stress divides by the distances, so a zero or negative
   # distance is fatal: report the first offending pair
   if (any(ab <- x[row(x) < col(x)]<=0)) {
	aa <- cbind(as.vector(row(x)), as.vector(col(x)))[row(x) < col(x),]
	aa <- aa[ab,,drop=F]
	stop(paste("zero or negative distance between objects", aa[1,1],
	 "and", aa[1,2]))
	}
   # default start: classical (metric) scaling configuration
   if(missing(y)) y <- cmdscale(d, k=k)
   if(any(dim(y) != c(n, k)) ) stop("invalid initial configuration")
   storage.mode(x) <- "double"
   storage.mode(y) <- "double"
   if(!is.loaded(symbol.C("VR_sammon")))
     stop("Compiled code has not been dynamically loaded")
   # argument order must match the C entry point exactly
   z <- .C("VR_sammon",
      x = x,
      as.integer(n),
      as.integer(k),
      y = y,
      as.integer(niter),
      e = double(1),
      as.integer(trace),
      as.double(magic),
      as.double(tol)
      )
   list(points=z$y, stress=z$e, call=call)
}

# file MASS/stdres.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
lmwork <- function(object)
{
	# Standardised and studentised residuals (plus the residual
	# standard deviation) for an lm-type fit.  Returns a list with
	# components stddev, stdres and studres; the historical misspelt
	# component "stdedv" is retained for backward compatibility.
	resid <- object$resid
	hat <- lm.influence(object)$hat
	ok <- !(is.na(resid))
	n.miss <- sum(!ok)
	# warn (with a count) when missing observations are dropped;
	# switch(0, ...) returns NULL, so no warning when none missing
	switch(ifelse(n.miss > 2, 2, n.miss),
		warning("1 missing observation deleted"),
		warning(paste(n.miss, "missing observations deleted")))
	resid <- resid[ok ]
	n <- length(resid)
	p <- object$rank
	rdf <- object$df.resid
	if(is.null(rdf))
		rdf <- n - p
	if(!is.null(object$weights)) {
		# weighted fit: scale residuals and drop zero-weight rows
		wt <- object$weights[ok]
		resid <- resid * wt^0.5
		excl <- wt == 0
		if(any(excl)){
			warning(paste(sum(excl), 
				"rows with zero weights not counted"))
			resid <- resid[!excl]
			if(is.null(object$df.resid))
				rdf <- rdf - sum(excl)
		}
	}
	# NOTE(review): hat is not subset by ok/excl, so it is assumed to
	# align with the retained residuals -- confirm for fits with NAs.
	stdres <- studres <- resid
	if(n > p) {
		stddev <- sqrt(sum(resid^2)/rdf)
		stdres <- resid/(sqrt(1 - hat) * stddev)
		# externally studentised (leave-one-out) residuals
		studres <- stdres/sqrt((n-p-stdres^2)/(n-p-1))
		}
	else stddev <- stdres[] <- studres[]<- NA
	list(stdedv=stddev, stddev=stddev, stdres=stdres, studres=studres)
}
stdres <- function(object)
{
    ## Standardised residuals, extracted from the lmwork() workhorse.
    lmwork(object)[["stdres"]]
}
studres <- function(object)
{
    ## Studentised (leave-one-out) residuals, via lmwork().
    lmwork(object)[["studres"]]
}
# file MASS/stepAIC.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
"stepAIC"<-
function(object, scope, scale = 0, direction = c("both", "backward", 
	"forward"), trace = 1, keep = NULL, steps = 1000, screen = F,
         use.start = F, k = 2, ...)
{
  # Stepwise model selection by an AIC-type criterion with penalty
  # multiplier k.  Fixes two stray-token bugs in the original: an empty
  # argument (",,") in the AIC.add1 call and a dangling "edf <-" that
  # assigned the per-step models list to edf.
  #
  # rebuild a formula with "." entries expanded to explicit terms
  fixFormulaObject <- function(object) {
    tmp <- attr(terms(object), "term.labels")
    formula(paste(deparse(formula(object)[[2]]), "~", paste(tmp, collapse=" + ")))
  }
  update.formula <-
    function (old, new) {
      tmp <-.Internal(update.formula(as.formula(old), as.formula(new)))
      class(tmp ) <- "formula"
      tmp
    }
  # prefix continuation lines of a deparsed formula with newlines
  cut.string <- function(string)
    {
      if(length(string) > 1)
        string[-1] <- paste("\n", string[-1], sep = "")
      string
    }
  # (df, AIC) table for dropping each term in Terms from fit
  AIC.drop1 <- function(fit, Terms, scale, trace, k = 2, ...)
    {
      n <- length(Terms)
      ans <- matrix(nrow = n + 1, ncol = 2)
      dimnames(ans) <- list(c("<none>", paste("-", Terms, sep = "")), 
			    c("df", "AIC"))
      ans[1,  ] <- extractAIC(fit, scale, k = k, ...)
      if(n == 0) return(ans)
      i <- 2
      for(tt in Terms) {
        if(trace > 1) cat("trying -", tt, "\n")
        nfit <- update(fit, as.formula(paste("~ . -", tt)))
        ans[i,  ] <- extractAIC(nfit, scale, k = k, ...)
        i <- i + 1
      }
      ans
    }

  # (df, AIC) table for adding each term; with screen=T, candidate terms
  # are pre-filtered by a cheap Cp screen using add1.lm
  AIC.add1 <- function(fit, Terms, scale, trace, screen, k = 2, ...)
    {
      if(screen) {
        lmscale <- switch(family(fit)$family,
                          poisson = 1,
                          binomial = 1,
                          if(scale > 0) scale else deviance(fit)/fit$df.resid)
        Ad <- add1.lm(fit, Terms, scale = lmscale)
        Ad$Df[1] <- 0
        Cp <- Ad$RSS/lmscale + k * Ad$Df
        Cp <- Cp - Cp[1]
        Cp <- Cp[-1]
        Terms <- Terms[Cp < 1 & Cp < min(Cp) + 4 & Ad$Df[-1] > 0]
      }
      n <- length(Terms)
      ans <- matrix(nrow = n + 1, ncol = 2)
      t2 <- if(length(Terms)) paste("+", Terms, sep = "") else NULL
      dimnames(ans) <- list(c("<none>", t2), c("df", "AIC"))
      ans[1,  ] <- extractAIC(fit, scale, k = k, ...)
      if(n == 0) return(ans)
      i <- 2
      for(tt in Terms) {
        if(trace > 1) cat("trying +", tt, "\n")
        nfit <- update(fit, as.formula(paste("~ . +", tt)))
        ans[i,  ] <- extractAIC(nfit, scale, k = k, ...)
        i <- i + 1
      }
      ans
    }

  # fast drop table for lm fits via drop1.lm (no refitting per term)
  AIClm.drop1 <- function(fit, Terms, scale, trace, k = 2, ...)
    {
      n <- length(Terms)
      ans <- matrix(nrow = n + 1, ncol = 2)
      dimnames(ans) <- list(c("<none>", paste("-", Terms, sep = "")), 
			    c("df", "AIC"))

      Ad <- drop1.lm(fit, Terms, scale = scale)
      nn <- length(fit$residuals)
      edf <- nn - fit$df.residual
      ans[, 1] <- edf - c(0, Ad$Df[-1])
      ans[, 2] <- if(scale > 0) Ad$RSS/scale + k * ans[, 1] - nn
        else nn * log(Ad$RSS/nn) + k * ans[, 1]
      ans
    }

  # fast add table for lm fits via add1.lm
  AIClm.add1 <- function(fit, Terms, scale, k = 2, ...)
    {
      n <- length(Terms)
      ans <- matrix(nrow = n + 1, ncol = 2)
      t2 <- if(length(Terms)) paste("+", Terms, sep = "") else NULL
      dimnames(ans) <- list(c("<none>", t2), c("df", "AIC"))
      if(n == 0) return(ans)
      Ad <- add1.lm(fit, Terms, scale = scale)
      nn <- length(fit$residuals)
      edf <- nn - fit$df.residual
      ans[, 1] <- edf + c(0, Ad$Df[-1])
      ans[, 2] <- if(scale > 0) Ad$RSS/scale + k * ans[, 1] - nn
        else nn * log(Ad$RSS/nn) + k * ans[, 1]
      ans
    }

  # turn the list of keep() results into a (component x step) array
  re.arrange <- function(keep)
    {
      namr <- names(k1 <- keep[[1]])
      namc <- names(keep)
      nc <- length(keep)
      nr <- length(k1)
      array(unlist(keep, recursive = F), c(nr, nc), list(namr, namc))
    }

  # attach the "anova" path table describing the sequence of steps
  make.step <- function(models, fit, object)
    {
      change <- sapply(models, "[[", "change")
      rd <- sapply(models, "[[", "deviance")
      dd <- c(NA, diff(rd))
      rdf <- sapply(models, "[[", "df.resid")
      ddf <- c(NA, diff(rdf))
      AIC <- sapply(models, "[[", "AIC")
      heading <- c("Stepwise Model Path \nAnalysis of Deviance Table",
		   "\nInitial Model:", deparse(as.vector(formula(object))),
		   "\nFinal Model:", deparse(as.vector(formula(fit))), 
		   "\n")
      aod <- data.frame(Step = change, Df = ddf, Deviance = dd, 
			"Resid. Df" = rdf, "Resid. Dev" = rd, AIC = AIC, 
			check.names = F)
      attr(aod, "heading") <- heading
      attr(aod, "class") <- c("anova", "data.frame")
      fit$anova <- aod
      fit
    }

  # need to fix up . in formulae in R
  object$formula <- fixFormulaObject(object)
  Terms <- object$formula
  object$call$formula <- object$formula
  attributes(Terms) <- attributes(object$terms)
  object$terms <- Terms
  cl <- class(object)[1]
  LM <- (cl == "lm" || cl == "aov" ||
         (cl == "glm" && object$family$family == "Gaussian" && object$family$link == "Identity: mu"))
  LM <- F # for now
  if(!LM && use.start) 
    if(is.null(object$linear.predictors)) {
      use.start <- F
      warning(paste("cannot use start with object of class", class(object)))
    } else {
      # side effect: .eta is assigned in the workspace for use as start
      assign(".eta", object$linear.predictors)
      object$call$start <- .eta
    }
  if(missing(direction)) direction <- "both"
  else direction <- match.arg(direction)
  backward <- direction == "both" | direction == "backward"
  forward <- direction == "both" | direction == "forward"
  # resolve the lower/upper scope into factor matrices
  if(missing(scope)) {
    fdrop <- numeric(0)
    fadd <- NULL
  } else {
    if(is.list(scope)) {
      fdrop <- if(!is.null(fdrop <- scope$lower))
	attr(terms(update.formula(object, fdrop)), "factors")
	else numeric(0)
      fadd <- if(!is.null(fadd <- scope$upper))
	attr(terms(update.formula(object, fadd)), "factors")
    } else {
      fadd <- if(!is.null(fadd <- scope))
	attr(terms(update.formula(object, scope)), "factors")
      fdrop <- numeric(0)
    }
  }
  if(is.null(fadd)) {
    backward <- T
    forward <- F
  }
  models <- vector("list", steps)
  if(!is.null(keep)) {
    keep.list <- vector("list", steps)
    nv <- 1
  }
  n <- length(object$residuals)
  fit <- object
  cf <- attributes(coef(object))	
  #check if any terms have zero df
  if(!is.null(cf$singular) && cf$singular > 0) {
    TT <- !match(TL <- attr(object$terms, "term.labels"), names(cf$assign), F)
    if(any(TT)) {
      upd <- eval(parse(text = paste(c(".~.", TL[TT]), collapse = "-")))
      fit <- update(fit, upd)
    }
  }
  bAIC <- extractAIC(fit, scale, k = k, ...)
  edf <- bAIC[1]
  bAIC <- bAIC[2]
  nm <- 1
  Terms <- fit$terms
  if(trace)
    cat("Start:  AIC=", format(round(bAIC, 2)), "\n",
        cut.string(deparse(as.vector(formula(fit)))), "\n\n")

  models[[nm]] <- list(deviance = deviance(fit), df.resid = n - edf, 
		       change = "", AIC = bAIC)
  if(!is.null(keep)) keep.list[[nm]] <- keep(fit, bAIC)
  AIC <- bAIC + 1
  # main loop: stop when no move lowers the criterion or steps run out
  while(bAIC < AIC & steps > 0) {
    steps <- steps - 1
    AIC <- bAIC
    bfit <- fit
    ffac <- attr(Terms, "factors")
    scope <- findScope(ffac, list(add = fadd, drop = fdrop))
    aod <- NULL
    change <- NULL
    if(backward && (ndrop <- length(scope$drop))) {
      aod <- if(LM) AIClm.drop1(fit, scope$drop, scale = scale,
                                trace = trace, k = k, ...)
      else AIC.drop1(fit, scope$drop, scale = scale, trace = trace, k = k, ...)
    }
    if(forward && (nadd <- length(scope$add))) {
      # BUG FIX: removed a stray empty argument ("k = k,,") that passed
      # a missing value into ... of AIC.add1
      aodf <- if(LM) AIClm.add1(fit, scope$add, scale = scale, k = k, ...)
      else AIC.add1(fit, scope$add, scale = scale, trace = trace,
                    screen = screen, k = k, ...)
      if(is.null(aod)) aod <- aodf
      else aod <- rbind(aod, aodf[-1, , drop=F])
    }
    if(is.null(aod)) break
    o <- order(aod[, "AIC"])
    if(trace) print(aod[o,  ])
    if(o[1] == 1) break
    change <- dimnames(aod)[[1]][o[1]]
    fit <- update(fit, eval(parse(text = paste("~ .", change))))
    fit$formula <- fixFormulaObject(fit)
    Terms <- fit$formula
    attributes(Terms) <- attributes(fit$terms)
    fit$terms <- Terms
    bAIC <- extractAIC(fit, scale, k = k, ...)
    edf <- bAIC[1]
    bAIC <- bAIC[2]
    if(trace)
      cat("\nStep:  AIC=", format(round(bAIC, 2)), "\n", 
          cut.string(deparse(as.vector(formula(fit)))), "\n\n")
    if(bAIC >= AIC) break
    nm <- nm + 1
    # BUG FIX: removed a dangling "edf <-" that overwrote edf with this
    # list on every accepted step
    models[[nm]] <- list(deviance = deviance(fit), df.resid = n - edf,
			 change = change, AIC = bAIC)
    if(!is.null(keep)) keep.list[[nm]] <- keep(fit, bAIC)
  if(use.start) assign(".eta", fit$linear.predictors)
  }
  if(!is.null(keep)) fit$keep <- re.arrange(keep.list[seq(nm)])
  if(!LM && use.start) fit$call$start <- NULL
  make.step(models = models[seq(nm)], fit, object)
}

"findScope"<-
function(factor, scope)
{
  # Given the factor matrix of the current model and optional lower
  # (drop) / upper (add) scope factor matrices, return the term labels
  # that may be dropped or added while respecting marginality (no
  # interaction is left in the model without its lower-order terms).
  drop <- scope$drop
  add <- scope$add

  if(length(factor) && !is.null(drop)) {# have base model
    if(length(drop)) {
      # droppable terms: in the model but not in the lower scope
      nmdrop <- dimnames(drop)[[2]]
      nmfac <- dimnames(factor)[[2]]
      where <- match(nmdrop, nmfac, 0)
      if(any(!where))
        stop("lower scope is not included in model")
      nmdrop <- nmfac[-where]
      facs <- factor[, -where, drop = F]
    } else {
      nmdrop <- dimnames(factor)[[2]]
      facs <- factor
    }
    if(ncol(facs) > 1) {
      # now check no interactions will be left without margins.
      keep <- rep(T, ncol(facs))
      f <- crossprod(facs > 0)
      for(i in seq(keep)) keep[i] <- max(f[i,  - i]) != f[i, i]
      nmdrop <- nmdrop[keep]
    }
  } else nmdrop <- character(0)

  facs <- dimnames(factor)[[2]]
  if(is.null(add)) nmadd <- character(0)
  else {
    # addable terms: in the upper scope but not in the model
    nmadd <- dimnames(add)[[2]]
    where <- match(dimnames(factor)[[2]], nmadd, 0)
    if(any(!where))
      stop("upper scope does not include model")
    nmadd <- nmadd[-where]
    facs <- add[, -where, drop = F]
    if(ncol(facs) > 1) {
      # now check marginality: 
      keep <- rep(T, ncol(facs))
      f <- crossprod(facs > 0)
      for(i in seq(keep)) keep[-i] <- keep[-i] & (f[i, -i] < f[i, i])
      nmadd <- nmadd[keep]
    }    
  }
  list(drop = nmdrop, add = nmadd)
}


# Generic: returns c(edf, criterion) where edf is the equivalent degrees
# of freedom and criterion is the AIC-like quantity with penalty k.
extractAIC <- function(fit, scale, k = 2, ...) UseMethod("extractAIC")

extractAIC.coxph <- function(fit, scale, k = 2, ...)
{
  ## For a Cox model: edf = number of estimated coefficients,
  ## criterion = -2 * (maximised partial log-likelihood) + k * edf.
  p <- length(fit$coef)
  ll <- fit$loglik[2]
  c(p, k * p - 2 * ll)
}
extractAIC.survreg <- function(fit, scale, k = 2, ...)
{
  ## For survreg fits: edf = n - residual df, criterion = -2 logLik + k*edf.
  nobs <- length(fit$residuals)
  p <- nobs - fit$df.residual
  c(p, k * p - 2 * fit$loglik[2])
}
extractAIC.glm <- function(fit, scale = 0, k = 2, ...)
{
  ## Deviance-based criterion for glms.  With a known positive scale the
  ## deviance is rescaled; a Gaussian family with scale = 0 uses
  ## n * log(deviance/n) instead.
  nobs <- length(fit$residuals)
  p <- nobs - fit$df.residual
  dev <- fit$deviance
  if(scale > 0) dev <- dev/scale
  else if(scale == 0 && fit$family$family == "Gaussian")
    dev <- nobs * log(dev/nobs)
  c(p, dev + k * p)
}
extractAIC.lm <- function(fit, scale = 0, k = 2, ...)
{
  ## AIC analogue for lm fits.  With a known scale the criterion is the
  ## Cp-like RSS/scale + k*edf - n; the "- n" term was missing here and
  ## is restored for consistency with extractAIC.aov and the AIClm.*
  ## helpers in stepAIC (and with stats::extractAIC.lm).  With scale = 0
  ## the log-likelihood form n*log(RSS/n) + k*edf is used.
  n <- length(fit$residuals)
  edf <- n  - fit$df.residual
  RSS <- deviance.lm(fit)
  dev <- if(scale > 0) RSS/scale - n else n * log(RSS/n)
  c(edf, dev + k * edf)
}
extractAIC.aov <- function(fit, scale = 0, k = 2, ...)
{
  ## Cp-like criterion for aov fits: RSS/scale - n + k*edf when the
  ## scale is known, n*log(RSS/n) + k*edf otherwise.
  nobs <- length(fit$residuals)
  p <- nobs - fit$df.residual
  rss <- deviance.lm(fit)
  crit <- if(scale > 0) rss/scale - nobs + k * p
          else nobs * log(rss/nobs) + k * p
  c(p, crit)
}
extractAIC.negbin <- function(fit, scale, k = 2, ...)
{
  ## Negative binomial fits carry twice the log-likelihood in $twologlik.
  nobs <- length(fit$residuals)
  p <- nobs - fit$df.residual
  c(p, k * p - fit$twologlik)
}

deviance.default <- function(x, ...)
{
    ## The deviance is recovered as the penalty-free (k = 0)
    ## criterion from extractAIC.
    val <- extractAIC(x, k = 0)
    val[2]
}
# file MASS/truehist.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
"truehist"<-
function(data, nbins = nclass.scott(data), h, x0 = -h/1000, breaks, prob = T,
	 xlim = range(breaks), ymax = max(est), 
	 col = 5, 
	 xlab = deparse(substitute(data)), bty = "n", ...)
{
  # Histogram drawn on a true probability-density scale (prob = T).
  # Force xlab now, while "data" is still the caller's unevaluated
  # expression, so deparse(substitute(data)) gives a sensible label.
  eval(xlab)
  data <- data[!is.na(data)]
  if(missing(breaks)) {
    # equally-spaced breaks of width h anchored at x0, covering the data
    if(missing(h)) h <- diff(pretty(data, nbins))[1]
    first <- floor((min(data) - x0)/h)
    last <- ceiling((max(data) - x0)/h)
    breaks <- x0 + h * c(first:last)
  }
  if(any(diff(breaks) <= 0)) stop("breaks must be strictly increasing")
  if(min(data) < min(breaks) || max(data) > max(breaks))
     stop("breaks do not cover the data")
  db <- diff(breaks)
  if(!prob && sqrt(var(db)) > mean(db)/1000)
    warning("Uneven breaks with prob = F will give a misleading plot")
  # nudge the first break down so cut()'s left-open intervals include
  # points exactly at min(breaks)
  breaks[1] <- breaks[1] - 0.01*db[1]
  bin <- cut(data, breaks)
  est <- tabulate(bin, length(levels(bin)))
  # density scale: counts / (bin width * number of observations)
  if(prob) est <- est/(diff(breaks) * length(data))
  n <- length(breaks)
  plot(xlim, c(0, ymax), type = "n", xlab = xlab, ylab = "", bty = bty)
  rect(breaks[-n], 0, breaks[-1], est, col = col, ...)
  invisible()
}
# file MASS/ucv.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#

width.SJ <- function(x, nb=1000, lower=0.1*hmax, upper=hmax,
		     method = c("ste", "dpi"))
{
  # Sheather-Jones bandwidth selection for kernel density estimation,
  # working on binned data through compiled C kernels.  "ste" solves a
  # fixed-point equation by root finding; "dpi" is direct plug-in.
  #
  # fixed-point function whose root is the ste bandwidth
  fSD <- function(h, x, alph2, c1, n, d) 
    (c1/SDh(x, alph2 * h^(5/7), n, d))^(1/5) - h
  # binned functional estimate via VR_phi4_bin
  SDh <- function(x, h, n, d)
    .C("VR_phi4_bin",
       as.integer(n),
       as.integer(length(x)),
       as.double(d),
       x,
       as.double(h),
       u = double(1))$u
  # binned functional estimate via VR_phi6_bin
  TDh <- function(x, h, n, d)
    .C("VR_phi6_bin",
       as.integer(n),
       as.integer(length(x)),
       as.double(d),
       x,
       as.double(h),
       u = double(1))$u

  method <- match.arg(method)
  n <- length(x)
  if(!is.loaded(symbol.C("VR_phi4_bin")))
    stop("Compiled code has not been dynamically loaded")
  storage.mode(x) <- "double"
  n <- length(x)
  # bin the data into nb counts with bin spacing d
  Z <- .C("VR_den_bin",
	  as.integer(n),
	  as.integer(nb),
	  d = double(1),
	  x,
	  cnt = integer(nb)
	  )
  d <- Z$d; cnt <- as.integer(Z$cnt)
  hmax <- 1.144 * sqrt(var(x)) * n^(-1/5)
  # pilot bandwidths built from a robust scale estimate
  scale <- min(sqrt(var(x)), IQR(x)/1.349)
  a <- 1.24 * scale * n^(-1/7)
  b <- 1.23 * scale * n^(-1/9)
  c1 <- 1/(2*sqrt(pi)*n)
  TD  <- -TDh(cnt, b, n, d)
  alph2 <- 1.357*(SDh(cnt, a, n, d)/TD)^(1/7)
  if(method == "dpi")
    res <- (c1/SDh(cnt,(2.394/(n * TD))^(1/7) , n, d))^(1/5)
  else {
    # require a sign change so uniroot is guaranteed a root
    if (fSD(lower, cnt, alph2, c1, n, d) * 
	fSD(upper, cnt, alph2, c1, n, d) > 0)
      stop("No solution in the specified range of bandwidths")
    res <- uniroot(fSD, c(lower, upper), tol=0.1*lower,
	      x=cnt, alph2=alph2, c1=c1, n=n, d=d)$root
  }
  # NOTE(review): the factor 4 converts to this package's width
  # convention -- confirm against the documented scaling.
  4 * res
}


ucv <- function(x, nb=1000, lower=0.1*hmax, upper=hmax,)
{
  # Unbiased (least-squares) cross-validation bandwidth for kernel
  # density estimation, on binned data via compiled code (VR_ucv_bin).
  #
  # UCV criterion evaluated at bandwidth h
  fucv <- function(h, x, n, d)
    .C("VR_ucv_bin",
       as.integer(n),
       as.integer(length(x)),
       as.double(d),
       x,
       as.double(h),
       u = double(1))$u

  n <- length(x)
  hmax <- 1.144 * sqrt(var(x)) * n^(-1/5) * 4
  if(!is.loaded(symbol.C("VR_den_bin")))
    stop("Compiled code has not been dynamically loaded")
  storage.mode(x) <- "double"
  # bin the data into nb counts with bin spacing d
  Z <- .C("VR_den_bin",
	  as.integer(n),
	  as.integer(nb),
	  d = double(1),
	  x,
	  cnt = integer(nb)
	  )
  d <- Z$d; cnt <- as.integer(Z$cnt)
  h <- optimize(fucv, c(lower, upper), tol=0.1*lower,
		x=cnt, n=n, d=d)$minimum
  # a minimum at either end suggests the search interval was too narrow
  if(h < 1.1*lower | h > upper-0.1*lower) 
    warning("minimum occurred at one end of the range")
  h
}


bcv <- function(x, nb=1000, lower=0.1*hmax, upper=hmax)
{
  # Biased cross-validation bandwidth for kernel density estimation,
  # on binned data via compiled code (VR_bcv_bin).
  #
  # BCV criterion evaluated at bandwidth h
  fbcv <- function(h, x, n, d)
    .C("VR_bcv_bin",
       as.integer(n),
       as.integer(length(x)),
       as.double(d),
       x,
       as.double(h),
       u = double(1))$u

  n <- length(x)
  hmax <- 1.144 * sqrt(var(x)) * n^(-1/5) * 4
  if(!is.loaded(symbol.C("VR_den_bin")))
    stop("Compiled code has not been dynamically loaded")
  storage.mode(x) <- "double"
  # bin the data into nb counts with bin spacing d
  Z <- .C("VR_den_bin",
	  as.integer(n),
	  as.integer(nb),
	  d = double(1),
	  x,
	  cnt = integer(nb)
	  )
  d <- Z$d; cnt <- as.integer(Z$cnt)
  h<- optimize(fbcv, c(lower, upper), tol=0.1*lower,
	       x=cnt, n=n, d=d)$minimum
  # a minimum at either end suggests the search interval was too narrow
  if(h < 1.1*lower | h > upper-0.1*lower) 
    warning("minimum occurred at one end of the range")
  h
}
# file MASS/vcov.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
# Generic: covariance matrix of the estimated coefficients of a fit.
vcov <- function(object, ...) UseMethod("vcov")
vcov.nls <- function(object)
{
  ## Coefficient covariance for an nls fit: sigma^2 times the unscaled
  ## covariance matrix from summary().
  sry <- summary(object)
  sry$sigma^2 * sry$cov.unscaled
}
vcov.glm <- function(object)
{
  ## Coefficient covariance for a glm: dispersion * unscaled covariance.
  sry <- summary(object, corr=F)
  sry$cov.unscaled * sry$dispersion
}
vcov.lm <- function(object)
{
  ## Coefficient covariance for an lm fit: sigma^2 * unscaled covariance.
  sry <- summary(object, corr=F)
  sry$cov.unscaled * sry$sigma^2
}
deviance.nls <- function(object)
{
    ## Residual sum of squares of an nls fit.
    r <- object$residuals
    sum(r * r)
}

vcov.nlregb <- function(object, method=c("Fisher", "observed", "Huber"),
   scale=object$scale, eps=1e-3, tol=1)
{
  # Approximate covariance matrix of the parameters of an nlregb fit.
  # With a Jacobian available: Fisher (J'J), observed information
  # (J'J + second-order term) or Huber sandwich forms.  Without one, a
  # finite-difference Fisher estimate is built from the residual fn.
  vcovscale <- function(devfn, par, scale, tol, ...)
    {
# find a suitable initial scaling
      p <- length(par)
      ind <- 1:p
      v0 <- sum(devfn(par, ...)^2)
      scale <- sqrt(tol)/scale
      for (i in ind) {
	eps <- scale[i]
	inc <- ind == i
	v <- sum(devfn(par + eps*inc, ...)^2)
	if(v - v0 > tol) {
	  # shrink the step until the deviance change drops below tol
	  repeat {
	    eps <- eps/3
	    v <- sum(devfn(par + eps*inc, ...)^2)
	    if(v - v0 < tol) break
	  }
	} else {
	  # grow the step until the change just exceeds tol, then back off
	  repeat {
	    eps <- eps*3
	    if(eps > 1000*scale[i]) 
	      stop(paste("scaling on var",i,"is too small"))
	    v <- sum(devfn(par + eps*inc, ...)^2)
	    if(v - v0 > tol) {eps <- eps/3; break}
	  }
	}
	scale[i] <- eps
      }
      scale
    }

  method <- match.arg(method)
  par <- object$param
  n <- length(object$resid)
  p <- length(object$param)
  if(length(scale) == 1) scale <- rep(scale, p)
  s2 <- sum(object$resid^2)/(n-p)
  if(!is.null(gr <- object$jacobian)) {
# gradient supplied
    K <- t(gr) %*% gr
    if(method == "Fisher") 
      H2 <- 0
      else {
	# second-order term: forward-difference the Jacobian function,
	# recovered from the original call in the parent frame
	r <- t(object$resid)
	grfn <- eval(substitute(object$call$jacobian))
	grfn <- eval(as.expression(grfn), sys.parent())
	if(!length(grfn)) stop("Jacobian fn not found")
	sc <- eps * pmin(1, abs(par)) * sign(par)
	argnames <- names(grfn)
	argnames <- argnames[2:(length(argnames) - 1)]
	addargs <- object$aux[argnames]
	g <- object$jacobian
	H2 <- matrix(0, p, p)
	for(i in 1:p) {     
	  p1 <- par
	  p1[i] <- p1[i] + sc[i]
	  g1 <- do.call("grfn", c(list(p1), addargs))
	  H2[i, ] <- r %*% (g1 - g)/sc[i]
	}
	# symmetrise the numerical Hessian contribution
	H2 <- 0.5*(H2 + t(H2))
      }
    Jinv <- solve(K + H2)
    if(method != "Huber") V <- s2 * Jinv
      else V <- s2 * Jinv %*% K %*% Jinv
  } else {
# no gradient supplied
    if(method != "Fisher") warning("Only Fisher information is available without gradient information")
    fn <- eval(substitute(object$call$residuals))
    fn <- eval(as.expression(fn), sys.parent())
    if(!length(fn)) stop("objective fn not found")
    argnames <- names(fn)
    argnames <- argnames[-c(1, length(argnames))]
    addargs <- object$aux[argnames]
    scale <- do.call("vcovscale", c(list(fn, par, scale=scale, tol=tol*s2), addargs))
    ind <- 1:p
    H <- matrix(, n, p)
    # central differences of the residual function, one column per
    # parameter
    for (j in 1:p) 
      H[,j] <- do.call("fn", c(list(par + scale[j]*(ind==j)), addargs)) - 
	do.call("fn", c(list(par - scale[j]*(ind==j)), addargs))
    J <- 0.25*crossprod(H)/outer(scale, scale)
    V <- s2 * solve(J)
  }
  # warn when a +-2 s.e. interval crosses a box constraint from the call
  v <- 2*sqrt(diag(V))
  upper <- eval(substitute(object$call$upper))
  if(is.null(upper)) upper <- Inf else upper <- eval(upper, sys.parent())
  lower <- eval(substitute(object$call$lower))
  if(is.null(lower)) lower <- -Inf else lower <- eval(lower, sys.parent())
  if(any(par - v <= lower) || any(par + v >= upper))
    warning("estimate is near the boundary: the estimated variance matrix may not be valid")
  V
}

# vcov method for "nlminb" fits: an approximate variance matrix for the
# estimated parameters, computed as the inverse of (half) the
# symmetrized Hessian of the objective at the optimum.  The Hessian is
# taken from a user-supplied hessian/gradient function when one was
# given in the original nlminb() call, and otherwise approximated by
# finite differences of the objective.
#
#   object  fitted object; its $call is re-examined for the objective,
#           gradient, hessian, lower and upper arguments.
#   tol     target change in the objective used to pick difference steps.
#   scale   initial per-parameter scaling (recycled to length(par)).
#   eps     relative step factor when differencing a supplied gradient.
#   eps0    step inflation factor for the no-derivative case.
#
# NOTE(review): this is S-PLUS-era code; object$param, object$aux and
# names(fn) as the argument names all follow S-PLUS conventions --
# verify against that engine, not R's nlminb.
vcov.nlminb <- function(object, tol=1, scale=object$scale, eps=1e-3, eps0=1)
{
  # Tune a per-parameter finite-difference step: grow or shrink each
  # step by factors of 3 until perturbing parameter i changes the
  # objective by roughly tol (settling just below it).
  vcovscale <- function(devfn, par, scale, tol, ...)
    {
					# find a suitable initial scaling
      p <- length(par)
      ind <- 1:p
      v0 <- devfn(par, ...)
      scale <- sqrt(tol)/scale
      for (i in ind) {
	eps <- scale[i]
	inc <- ind == i
	v <- devfn(par + eps*inc, ...)
	if(v - v0 > tol) {
	  # step too big: shrink until the objective change drops below tol
	  repeat {
	    eps <- eps/3
	    v <- devfn(par + eps*inc, ...)
	    if(v - v0 < tol) break
	  }
	} else {
	  # step too small: grow until the change exceeds tol, then back off
	  repeat {
	    eps <- eps*3
	    if(eps > 1000*scale[i]) 
	      stop(paste("scaling on var",i,"is too small"))
	    v <- devfn(par + eps*inc, ...)
	    if(v - v0 > tol) {eps <- eps/3; break}
	  }
	}
	scale[i] <- eps
      }
      scale
    }

  # Hessian of devfn at par by second (forward) differences; used when
  # neither a gradient nor a hessian function is available.
  vcov0 <- function(devfn, par, scale = rep(1, p), ...)
    {
      p <- length(par)
      ind <- 1:p
      v0 <- devfn(par, ...)
      h <- numeric(p)
      H <- matrix(, p, p)
      # h[i]: first forward difference along coordinate i
      for (i in 1:p)
	h[i] <- devfn(par + scale[i]*(ind==i), ...) - v0
      # off-diagonals from the mixed difference f(x + e_i + e_j) - f(x)
      if(p > 1) for (i in 2:p) {
	inc <- scale[i] * (ind == i)
	for(j in 1:(i-1))
	  H[i, j] <- H[j, i] <- 
	    devfn(par + inc + scale[j]*(ind==j), ...) - v0
      }
      H <- H - outer(h, h, "+")
      diag(H) <- 2*h
      H/outer(scale, scale)
    }

  par <- object$param
  p <- length(par)
  # Recover the objective function from the original call, evaluating
  # its name in the caller's frame.
  fn <- eval(substitute(object$call$objective))
  fn <- eval(as.expression(fn), sys.parent())
  if(!length(fn)) stop("objective fn not found")
  # Extra fixed arguments of the objective (everything between the
  # parameter vector and the last formal); their values come from
  # object$aux.  NOTE(review): presumably object$aux stores them by
  # name in matching order -- verify.
  argnames <- names(fn)
  argnames <- argnames[-c(1, length(argnames))]
  addargs <- object$aux[argnames]
  if(length(scale) == 1) scale <- rep(scale, p)
  hessian <- object$call$hessian
# object$hessian appears to lie, so we redo the calculation
  if(is.logical(hessian) && hessian) {
# hessian in gradient
    # hessian=TRUE: the gradient function returns a $hessian component
    # holding the packed upper triangle; unpack it symmetrically.
    grfn <- eval(substitute(object$call$gradient))
    grfn <- eval(as.expression(grfn), sys.parent())
    if(!length(grfn)) stop("gradient fn not found")
    hh <- do.call("grfn", c(list(par), addargs))$hessian
    H <- matrix(0, p, p)
    H[row(H) <= col(H)] <- hh   
    H[row(H) > col(H)] <- t(H)[row(H) > col(H)]
  } else if(!is.logical(hessian) && !is.null(hessian)) {
# separate Hessian function
    # hessian supplied as a function: call it and unpack the packed
    # upper triangle it returns.
    hfn <- eval(substitute(object$call$hessian))
    hfn <- eval(as.expression(hfn), sys.parent())
    if(!length(hfn)) stop("hessian fn not found")
    hh <- do.call("hfn", c(list(par), addargs))
    H <- matrix(0, p, p)
    H[row(H) <= col(H)] <- hh   
    H[row(H) > col(H)] <- t(H)[row(H) > col(H)]
  } else {
    # No analytic Hessian: first choose finite-difference step sizes.
    scale <- do.call("vcovscale", c(list(fn, par, scale=scale, tol=tol), addargs))
    if(!is.null(object$call$gradient)) {
# gradient but no Hessian
      # Central differences of the gradient, one coordinate at a time.
      # NOTE(review): g below is computed but never used in the loop.
      grfn <- eval(substitute(object$call$gradient))
      grfn <- eval(as.expression(grfn), sys.parent())
      if(!length(grfn)) stop("gradient fn not found")
      sc <- eps * scale
      g <- do.call("grfn", c(list(par), addargs))
      H <- matrix(0, p, p)
      for(i in 1:p) {
	g1 <- do.call("grfn", c(list(par - sc[i]*(1:p==i)), addargs))
	g2 <- do.call("grfn", c(list(par + sc[i]*(1:p==i)), addargs))
	H[i, ] <- 0.5*(g2 - g1)/sc[i]
      }
    } else {
# No derivatives available
      H <- do.call("vcov0", c(list(fn, par, scale=eps0*scale), addargs))
    }
  }
  # Symmetrize and invert half the Hessian.  NOTE(review): the factor
  # 0.5 assumes a deviance-like objective (about -2 * log-likelihood)
  # -- confirm for the fit at hand.
  V <- solve(0.5*(H + t(H)))
  if(any(diag(V) < 0)) stop("Estimated variances are negative")
  # Warn when a +/- 2 standard-error interval reaches a box constraint
  # from the original call: curvature-based variances are then suspect.
  v <- 2*sqrt(diag(V))
  upper <- eval(substitute(object$call$upper))
  if(is.null(upper)) upper <- Inf else upper <- eval(upper, sys.parent())
  lower <- eval(substitute(object$call$lower))
  if(is.null(lower)) lower <- -Inf else lower <- eval(lower, sys.parent())
  if(any(par - v <= lower) || any(par + v >= upper))
    warning("estimate is near the boundary: the estimated variance matrix may not be valid")
  V
}
# file MASS/write.matrix.q
# copyright (C) 1994-8 W. N. Venables and B. D. Ripley
#
# Write a matrix (or anything coercible to one) to `file` as plain
# text: an optional header line of column names, then one line per row
# of x, fields separated by `sep`.  format() is applied to the
# transposed matrix so all fields share a common width.
write.matrix <- function(x, file="", sep=" ")
{
   x <- as.matrix(x)
   ncols <- ncol(x)
   # cat() recycles `sep` across the gaps between elements: `sep`
   # between the first ncols - 1 fields of each line, then a newline.
   gap <- c(rep(sep, ncols - 1), "\n")
   cat(colnames(x), format(t(x)), file=file, sep=gap)
}

loglin <- function(...) .NotYetImplemented()

# Apply a split-cosine-bell taper to the first and last proportion p of
# each series in x.
#
#   x  a univariate or multivariate time series (coerced via as.ts);
#      the result keeps x's original attributes.
#   p  proportion tapered at EACH end of each series; either a single
#      value or one value per column, each in [0, 0.5].
#
# Fix: when floor(nr * p[i]) == 0 the taper has zero length and the
# column is left untouched; previously seq(1, 2*m - 1, by = 2) became
# seq(1, -1, by = 2) and raised "wrong sign in 'by'".
spec.taper <- function(x, p = 0.1) {
  if (any(p < 0) || any(p > 0.5))
    stop("p must be between 0 and 0.5")
  x <- as.ts(x)
  a <- attributes(x)   # saved so the result keeps its ts attributes
  x <- as.matrix(x)
  nc <- ncol(x)
  if (length(p) == 1)
    p <- rep(p, nc)
  else if (length(p) != nc)
    stop("length of p must be 1 or equal the number of columns of x")
  nr <- nrow(x)
  for (i in 1 : nc) {
    m <- floor(nr * p[i])
    if (m == 0) next   # taper length rounds to zero: leave column as-is
    # half-cosine weights rising from near 0 to near 1 over m points
    w <- 0.5 * (1 - cos(pi * seq(1, 2 * m - 1, by = 2) / (2 * m)))
    x[, i] <- c(w, rep(1, nr - 2 * m), rev(w)) * x[, i]
  }
  attributes(x) <- a
  x
}

# Huber weight function: weight 1 while |u| < c, and c/|u| beyond, so
# larger residuals are progressively down-weighted.  Vectorized in u.
wt.huber <- function(u, c = 1.345)
{
  au <- abs(u)
  ifelse(au < c, 1, c / au)
}

# Package load hook (pre-namespace R): load MASS's compiled shared
# library when the package is attached.
.First.lib <- function(lib, pkg) {
  library.dynam("MASS", pkg, lib)
}
