"cvkreg2d"<-
  function(h, pts, y, w = rep(1, length(y)))
{
  # Cross-validation criterion for 2-d kernel regression at bandwidth h.
  # Delegates the computation to the compiled Fortran routine "cvker2d".
  #
  # h   - smoothing parameter (bandwidth)
  # pts - n x 2 matrix of point locations
  # y   - response values, one per row of pts
  # w   - observation weights (default: all 1)
  #
  # Returns the scalar CV score for this bandwidth.
  npts <- length(y)
  out <- .Fortran("cvker2d",
                  h = as.single(h),
                  x1 = as.single(pts[, 1]),
                  x2 = as.single(pts[, 2]),
                  y = as.single(y),
                  w = as.single(w),
                  n = as.integer(npts),
                  cv = as.single(0))
  out$cv
}
"dynload"<-
  function(filename)
{
  # Load a compiled object file into S-Plus unless it is already loaded.
  # `filename` is given without the ".o" suffix; the object file is
  # looked up under the global path `sfunspath` in Forsub.dir.
  symbol <- paste("_", filename, "_", sep = "")
  if(!is.loaded(symbol)) {
    objfile <- paste(sfunspath, "/Forsub.dir/", filename, ".o", sep = "")
    dyn.load(objfile)
  }
}

 
################ FITTING A LOVELY GAM MODEL ############################

# So you want to fit a binary regression gam....
# with a spatially smooth component....

# Suppose there are n1 cases and n2 controls in the data frame gam.data

#You need to define vectors containing the following components:

# 	pts 	a (n1+n2) by 2 matrix showing the locations of 
# 		the cases and controls
#	why	the binary indicators corresponding to the rows of pts
#		1 if case, 0 if control
#	region	the polygon containing the data
#	h	bandwidth value; if zero, the program runs cross validation (default)
#	hvals 	vector of smoothing parameter values you want 
#		to consider (about 15 choices is alright) If you want
#		 to fix it then one value will do.  
#	form	this is the `formula' for the linear predictor part of the
#		model describing the linear predictor just use the usual 
#		formula conventions apart from spatial bit...
#		e.g. sex+age+age^2 (constant is there automatically)	
			
# 	also need other vectors of same length as why 
#	corresponding to other variables
#	of interest such as age, sex etc...as included in `form' above
#
#	So that should be all the data you have that needs using.

# Below is defined a function called gamfit which takes gam.data 
#	as its argument as well as a smoothing parameter value if you 
# want it fixed at a particular value. Also below is the function called
# waddmod which is called by gamfit.

# You can source this file to get the functions or just copy and 
# paste them in.

# Before you use these you will have to have loaded splancs 
#	library(splancs)	

#---------------------------------------------------------------------------

################## below here are functions ################################


"gamfit" <- function(form, gam.data, pts, region, h = 0, hvals = NA, ngrid = 4000)
{
	# Fit a binary regression GAM: a linear predictor given by `form` plus
	# a spatially smooth component over `pts`, by iterated calls to waddmod.
	#
	# form     model formula; the response (first variable) is the 0/1 case
	#          indicator
	# gam.data data frame holding all variables named in `form`
	# pts      (n x 2) matrix of spatial locations, one row per observation
	# region   polygon containing the data (passed through to waddmod)
	# h        bandwidth; 0 (default) means choose it by cross-validation
	#          over `hvals`, otherwise it is held fixed at the given value
	# hvals    candidate bandwidths for the cross-validation search
	# ngrid    approximate number of grid points for the final surface
	#
	# Returns the list produced by the last waddmod call (g1, g2, z, w,
	# beta, h and, since ngrid > 0 on that call, the g2est surface).
	#
	# NOTE: the original used the defunct `_` assignment operator and relied
	# on partial matching of `hopt=` to waddmod's `hopt0` argument; both are
	# spelled out here.
	formul <- formula(form)
	termos <- terms(formul)
	at <- as.character(attr(termos, "variables"))[-1]
	data <- as.data.frame(as.matrix(gam.data[, at]))
	why <- data[, at[1]]
	ny <- nrow(data)
	g2 <- rep(0, ny)
	# Initial GLM fit supplies starting values for the backfitting scheme.
	fit <- glm(formul, family = binomial, data = data)
	g1 <- predict(fit)
	fhat <- g1 + g2
	phat <- exp(fhat)/(1 + exp(fhat))
	w <- phat * (1 - phat)        # binomial IRLS weights
	z <- fhat + (why - phat)/w    # adjusted dependent variable

	if(h != 0) {
		# Fixed bandwidth: six waddmod passes at h, the last one also
		# evaluating the surface on the grid.
		new <- waddmod(formul, data, pts, g2 = g2, z = z, w = w,
			hopt0 = h, hvals = hvals, region = region, sameh = T)
		for(it in 1:4) {
			old <- new
			new <- waddmod(formul, data, pts, g2 = old$g2, z = old$z,
				w = old$w, hopt0 = h, hvals = hvals,
				region = region, sameh = T)
		}
		old <- new
		new <- waddmod(formul, data, pts, g2 = old$g2, z = old$z,
			w = old$w, hopt0 = h, hvals = hvals, region = region,
			ngrid = ngrid, sameh = T)
	}
	else {
		# Bandwidth chosen by cross-validation on the first four passes,
		# then held fixed (sameh = T) for the last two.
		new <- waddmod(formul, data, pts, g2 = g2, z = z, w = w,
			hvals = hvals, region = region)
		for(it in 1:3) {
			old <- new
			new <- waddmod(formul, data, pts, g2 = old$g2, z = old$z,
				w = old$w, hopt0 = old$h, hvals = hvals,
				region = region)
		}
		old <- new
		new <- waddmod(formul, data, pts, g2 = old$g2, z = old$z,
			w = old$w, hopt0 = old$h, hvals = hvals,
			region = region, sameh = T)
		old <- new
		new <- waddmod(formul, data, pts, g2 = old$g2, z = old$z,
			w = old$w, hopt0 = old$h, hvals = hvals,
			region = region, ngrid = ngrid, sameh = T)
	}
	new
}

 

# Now need to do the following ....

# Let  gam.data    be original data used for doing gamfit
# Let  gam.fit     be the result of doing a gamfit 

#start<-gam.sig.start(form,jua,pts=cbind(jua$E,jua$N),region=poly,h=300,ngrid=2500)
 
#sims_gam.sim(form,jua,pts=cbind(jua$E,jua$N),region=poly,start,ngrid=2500)
#sims_gam.sim(form,jua,pts=cbind(jua$E,jua$N),region=poly,start,ngrid=2500,sims)
#sims_gam.sim(form,jua,pts=cbind(jua$E,jua$N),region=poly,start,ngrid=2500,sims)
#sims_gam.sim(form,jua,pts=cbind(jua$E,jua$N),region=poly,start,ngrid=2500,sims)
#.
#. do 100 or 200 or whatever of these: can keep track of number by looking
#.	at sims$m
#.
#sims_gam.sim(form,jua,pts=cbind(jua$E,jua$N),region=poly,start,ngrid=2500,sims)



# Now lets create something that the plotrisk function can use...

"gamfit.sig" <- function(gam.fit, region, sims)
{
	# Package a fitted GAM (from gamfit) and its Monte-Carlo simulations
	# (from repeated gam.sim calls) into the list layout used by the
	# plotrisk functions.
	#
	# gam.fit  result of gamfit (uses $g2est, $h, $beta)
	# region   the study polygon
	# sims     simulation accumulator (uses $x, $y, $sigmat, $m, $pval,
	#          $tval, $tvalobs)
	#
	# Returns a list with the estimated surface (est), the polygon, the
	# grid, the pointwise proportion-below matrix sigmat/m, and the test
	# statistics.  (Original used the defunct `_` assignment operator.)
	list(est = gam.fit$g2est, poly = region, x = sims$x, y = sims$y,
		z = sims$sigmat/sims$m, h = gam.fit$h, pval = sims$pval,
		tval = sims$tval, tvalobs = sims$tvalobs, beta = gam.fit$beta,
		m = sims$m)
}



"gam.sig.start" <- function(form, gam.data, pts, region, h, ngrid)
{
	# Set-up step for the Monte-Carlo significance test of the spatial
	# term in the GAM (tolerance intervals).
	#
	# form     model formula (response = 0/1 case indicator)
	# gam.data original data frame used for gamfit
	# pts      (n x 2) matrix of locations
	# region   study polygon
	# h        chosen smoothing parameter
	# ngrid    approximate number of surface grid points
	#
	# Returns a list with the null-model fitted probabilities, the observed
	# smooth surface and its grid, h, and the observed test statistic
	# mean(g2^2).  (Original used the defunct `_` assignment operator.)
	formul <- formula(form)
	termos <- terms(formul)
	at <- as.character(attr(termos, "variables"))[-1]
	data <- as.data.frame(as.matrix(gam.data[, at]))
	# Null model: GLM without the spatial component; its fitted
	# probabilities drive the case relabelling in gam.sim.
	fit <- glm(formul, family = binomial, data = data)
	prob <- predict(fit, type = 'response')
	# Full fit including the spatial smooth, for the observed surface.
	fit <- gamfit(form, gam.data, pts = pts, region = region, h = h,
		ngrid = ngrid)
	surf.est <- fit$g2est$z
	list(prob = prob, surf.est = surf.est, h = h, x = fit$g2est$x,
		y = fit$g2est$y, tvalobs = mean(fit$g2^2))
}

"gam.sim" <- function(form, gam.data, pts, region, start, ngrid = NULL,
	prev.sims = NULL)
{
	# One Monte-Carlo iteration of the significance test: relabel the cases
	# according to the null-model probabilities in `start`, refit the GAM,
	# and fold the result into the accumulator `prev.sims`.
	#
	# start      output of gam.sig.start
	# prev.sims  accumulator from earlier calls (NULL on the first call)
	#
	# Returns the updated accumulator: m (simulations done so far), tval
	# (simulated test statistics), sigmat (per-grid-point count of
	# simulated surfaces below the observed one), and the Monte-Carlo
	# p-value.  (Original used the defunct `_` assignment operator.)
	if(is.null(prev.sims)) {
		prev.sims <- list(m = 0, tval = NULL, tvalobs = start$tvalobs,
			pval = NULL,
			sigmat = matrix(0, nrow = length(start$surf.est[, 1]),
				ncol = length(start$surf.est[1, ])),
			x = start$x, y = start$y)
	}
	null.prob <- start$prob
	surface.est <- start$surf.est
	h <- start$h
	formul <- formula(form)
	termos <- terms(formul)
	at <- as.character(attr(termos, "variables"))[-1]
	data <- as.data.frame(as.matrix(gam.data[, at]))
	why <- data[, at[1]]
	# Simulate case labels under the null: same number of cases, sampled
	# without replacement with probability given by the null-model fit.
	simcase <- sample(seq_along(why), size = sum(why), replace = F,
		prob = null.prob)
	simwhy <- rep(0, length(why))
	simwhy[simcase] <- 1
	data[, at[1]] <- simwhy
	sim <- gamfit(form, data, pts = pts, region = region, h = h,
		ngrid = ngrid)
	tval <- mean(sim$g2^2)
	below <- as.numeric(sim$g2est$z < surface.est)
	prev.sims$m <- prev.sims$m + 1
	prev.sims$tval <- c(prev.sims$tval, tval)
	prev.sims$sigmat <- prev.sims$sigmat + below
	# Monte-Carlo p-value with the usual +1 continuity correction.
	prev.sims$pval <- (sum(prev.sims$tval >= start$tvalobs) + 1)/
		(prev.sims$m + 1)
	cat('\n\n ********* DONE', prev.sims$m, 'GAM FITS *********\n\n')
	prev.sims
}
"hch2d"<-
  function(hvals, pts, y, w = rep(1, length(y)))
{
  # Choose the bandwidth from `hvals` that minimises the cross-validation
  # criterion of cvkreg2d (2-d kernel regression over pts).
  #
  # hvals  candidate bandwidths (any positive number of them; the old,
  #        fully unrolled implementation only allowed 5, 10, 15 or 20)
  # pts    n x 2 matrix of locations
  # y, w   response values and weights, passed through to cvkreg2d
  #
  # Prints one "index h cv" line per candidate, warns if any CV value
  # came back NA, and returns the optimal h (the largest one on ties).
  nh <- length(hvals)
  if(nh < 1) {
    stop("hvals must contain at least one bandwidth")
  }
  cv <- rep(NA, nh)
  for(i in seq_len(nh)) {
    cv[i] <- cvkreg2d(hvals[i], pts, y, w)
    cat(i, hvals[i], cv[i], "\n")
  }
  if(any(is.na(cv)))
    warning("One of CV values is an NA")
  opt <- (1:nh)[cv == min(cv, na.rm = T)]
  ans <- hvals[sort(opt)]
  if(length(ans) > 1) {
    ans <- (max(ans))
  }
  ans
}
"kdebigw"<-
function(h, pts, t, poly = NA, kernel = "normal", grid = T, x = NA, y = NA, nx
	 = NA, ny = NA, edge = T, scale = T, inpoly = NA, rings = 10, npt = 10 * 
	rings,w=rep(1,length(t)))
{
# Silvia Shimakura
# Weighted 2-d kernel smoothing of the values t observed at locations pts.
# Produces similar results to kdebig, but applies an individual weight
# w[i] to each t[i].
#
# h       bandwidth
# pts     (n x 2) matrix of data locations
# t       values to smooth, one per row of pts
# poly    optional polygon; required for edge correction and scaling
# kernel  "normal" or "quartic" (selects which Fortran routine is called)
# grid    T: evaluate over a regular grid; F: evaluate at the points (x, y)
# x, y, nx, ny  evaluation grid / points (constructed automatically if NA)
# edge    divide by the edge-correction surface from kdebig2 (needs poly)
# scale   rescale so the estimate integrates to 1 over the polygon
# inpoly, rings, npt  passed through to kdebig2 for the edge correction
# w       per-observation weights (default all 1)
#
# Returns a list (x, y, z[, scale]); grid cells outside the polygon are
# set to NA when no edge correction is applied.

	rx <- pts[, 1]
	ry <- pts[, 2]
	nr <- length(rx)	# no of data points
	pol <- !is.na(poly[1])	# is polygon specified?
	if(pol) {
		if(sum(inout(pts, poly)) != nr)
			stop("Observations outside polygon")
	}
	if(!pol) {
		edge <- F	# can't do edge corrections if no polygon
		scale <- F	# can't scale either
		if((is.na(x[1])) & (is.na(nx))) {
			# No polygon and no grid given: cover the data plus a 2h
			# margin, with grid spacing of about h/2 (at least 20 cells).
			grid <- T
			xrange <- (max(rx) - min(rx) + 4 * h)
			xmin <- min(rx) - 2 * h
			nx <- max(20, ceiling(xrange/(h/2)))
			yrange <- (max(ry) - min(ry) + 4 * h)
			ymin <- min(ry) - 2 * h
			ny <- max(20, ceiling(yrange/(h/2)))
			x <- ((1:nx) - 1/2)/nx * xrange + xmin
			y <- ((1:ny) - 1/2)/ny * yrange + ymin
		}
	}
	if((pol) & (is.na(x[1])) & (is.na(nx))) {
		# Polygon given but no grid: build the grid over the polygon's
		# bounding box (cell centres), again with spacing about h/2.
		edge <- T
		scale <- T
		xmin <- min(poly[, 1])
		xmax <- max(poly[, 1])
		ymin <- min(poly[, 2])
		ymax <- max(poly[, 2])
		xrange <- xmax - xmin
		yrange <- ymax - ymin
		nx <- max(20, ceiling(xrange/(h/2)))
		ny <- max(20, ceiling(yrange/(h/2)))
		x <- ((1:nx) - 1/2)/nx * xrange + xmin
		y <- ((1:ny) - 1/2)/ny * yrange + ymin
	}
	if(grid == T) {
		if(is.na(x[1])) {
			# nx/ny supplied but not the coordinates: derive them.
			xmin <- min(poly[, 1])
			xmax <- max(poly[, 1])
			ymin <- min(poly[, 2])
			ymax <- max(poly[, 2])
			xrange <- xmax - xmin
			yrange <- ymax - ymin
			x <- ((1:nx) - 1/2)/nx * xrange + xmin
			y <- ((1:ny) - 1/2)/ny * yrange + ymin
		}
		if(is.na(nx)) {
			nx <- length(x)
			ny <- length(y)
		}
		ans <- list(0, 0, matrix(nrow = nx, ncol = ny), 0)
		names(ans) <- c("x", "y", "z", "scale")
		ans$x <- x
		ans$y <- y
		ans$scale <- 1
		if(pol & (h == Inf)) {
			# Infinite bandwidth: flat density 1/area inside the polygon.
			ans$z <- matrix(1/area(poly), nrow = nx, ncol = ny)
			xyvec <- cbind(rep(x, ny), rep(y, rep(nx, ny)))
			lpmat1 <- matrix(inout(xyvec, poly), nrow = nx, ncol = 
				ny, byrow = F)
			ans$z[!lpmat1] <- NA
		}
		else {
			if(edge == T) {
				# Edge-correction surface on the same grid.
				edgexy <- kdebig2(h, x, y, poly, kernel, grid, 
				  inpoly, rings, npt)
			}
			zmat <- matrix(0, nrow = nx, ncol = ny)
			storage.mode(zmat) <- "single"
			if(kernel == "normal") {
				if(!is.loaded("_kdesrw_")) {
#				  unix("Splus COMPILE  /home/shimakur/morrisse/kdesrw.f")
				  dyn.load("/home/shimakur/morrisse/kdesrw.o")
				}
				result <- .Fortran("kdesrw",
				  h = as.single(h),
				  rx = as.single(rx),
				  ry = as.single(ry),
				  # NOTE(review): `nr <-` (not `nr =`) makes this a
				  # positional, unnamed argument; the call still works
				  # but result$nr is not available.
				  nr <- as.integer(nr),
				  x = as.single(x),
				  nx = as.integer(nx),
				  y = as.single(y),
				  ny = as.integer(ny),
				  t = as.single(t),
				  w= as.single(w),
				  zmat = zmat)
			}
#			  result<-kdesr.sil(h,rx,ry,nr,x,nx,y,ny,t,zmat)
#			}
			if(kernel == "quartic") {
				if(!is.loaded("_kdesr5_")) {
				  dyn.load(
				    "/home/morrisse/libjek/Binreg/Bazza/kdesr5.o"
				    )
				}
				result <- .Fortran("kdesr5",
				  h = as.single(h),
				  rx = as.single(rx),
				  ry = as.single(ry),
				  nr <- as.integer(nr),
				  x = as.single(x),
				  nx = as.integer(nx),
				  y = as.single(y),
				  ny = as.integer(ny),
				  zmat = zmat)
			}
			fhat <- result$zmat	#			fhat<-result
			if(edge == T) {
				fhat <- fhat/edgexy
			}
			if(scale == F) {
				ans$z <- fhat
				ans$scale <- NULL
			}
			else {
				# Rescale so the surface integrates to 1 over poly.
				scal <- mean(fhat, na.rm = T) * area(poly)
				ans$z <- fhat/scal
				ans$scale <- scal
			}
			if(pol & (edge == F)) {
				# No edge correction: blank out cells outside poly.
				xyvec <- cbind(rep(x, ny), rep(y, rep(nx, ny)))
				lpmat1 <- matrix(inout(xyvec, poly), nrow = nx, 
				  ncol = ny, byrow = F)
				ans$z[!lpmat1] <- NA
			}
		}
	}
	if(grid == F) {
		# Pointwise evaluation at the coordinate pairs (x[i], y[i]).
		nx <- length(x)
		if(!(nx == length(y)))
			stop("x and y are different lengths")
		ans <- list(0, 0, 0, 0)
		names(ans) <- c("x", "y", "z", "scale")
		ans$x <- x
		ans$y <- y
		ans$scale <- 1
		if(pol & (h == Inf)) {
			ans$z <- rep(1/area(poly), nx)
			lpin1 <- inout(cbind(x, y), poly)
			ans$z[!lpin1] <- NA
		}
		else {
			if(edge == T) {
				edgexy <- kdebig2(h, x, y, poly, kernel, grid, 
				  inpoly, rings, npt)
			}
			if(kernel == "normal") {
				if(!is.loaded("_kdesr3_")) {
				  dyn.load(
				    "/home/morrisse/libjek/Binreg/Bazza/kdesr3.o"
				    )
				}
				# NOTE(review): kdesr3/kdesr6 are called without t/w;
				# the estimate is read back from the overwritten `x`
				# component below -- presumably unweighted here.
				result <- .Fortran("kdesr3",
				  h = as.single(h),
				  rx = as.single(rx),
				  ry = as.single(ry),
				  nr <- as.integer(nr),
				  x = as.single(x),
				  y = as.single(y),
				  nx = as.integer(nx))
			}
			if(kernel == "quartic") {
				if(!is.loaded("_kdesr6_")) {
				  dyn.load(
				    "/home/morrisse/libjek/Binreg/Bazza/kdesr6.o"
				    )
				}
				result <- .Fortran("kdesr6",
				  h = as.single(h),
				  rx = as.single(rx),
				  ry = as.single(ry),
				  nr <- as.integer(nr),
				  x = as.single(x),
				  y = as.single(y),
				  nx = as.integer(nx))
			}
			fhat <- result$x
			if(edge == T) {
				fhat <- fhat/edgexy
			}
			ans$z <- fhat
			if(pol & (edge == F)) {
				lpin1 <- inout(cbind(x, y), poly)
				ans$z[!lpin1] <- NA
			}
		}
	}
	ans
}
"kernelw.sil"<-
function(data, t, poly, h, ngrid = 1000, kernel = "normal", weights= rep(1,length(t)), obsize = 5000000)
{
# Weighted kernel smoothing of t over `poly`, evaluated on a grid of
# roughly `ngrid` points inside the polygon.  Produces similar results
# to kernel.sil, but applies an individual weight to each t (via kdebigw).
#
# data    (n x 2) matrix of point locations
# t       values to smooth, one per row of data
# poly    study polygon (the grid is built from its bounding box)
# h       bandwidth
# weights per-observation weights passed on to kdebigw
# obsize  S-Plus object.size limit set while computing
#
# NOTE(review): this function communicates with kdebig/kdebigw through
# global assignments (`<<-` to .nx, .ny, .data, ...) which it removes
# afterwards -- S-Plus legacy style; not safe to restructure without the
# callees in view.

	.h <- h
	.poly <- poly
	.obsize <- obsize
	.kernel <- kernel
	options(object.size = .obsize)
	xrange <- range(.poly[, 1])
	yrange <- range(.poly[, 2])
	xmin <- xrange[1]
	xmax <- xrange[2]
	ymin <- yrange[1]
	ymax <- yrange[2]
	xlength <- diff(xrange)
	ylength <- diff(yrange)
	# Inflate ngrid so that about ngrid of the bounding-box cells fall
	# inside the polygon, then split it into nx x ny in proportion to the
	# box's side lengths.
	actngrid <- (ngrid * xlength * ylength)/(area(.poly))
	.nx <- ceiling(sqrt((actngrid * xlength)/ylength))
	.ny <- ceiling(sqrt((actngrid * ylength)/xlength))
	ans <- list(0, 0, 0, 0)
	names(ans) <- c("x", "y", "z", "poly")
	ans$poly <- .poly
	.nx <<- .nx
	.ny <<- .ny
	.n <<- npts(data)
	.data <<- data
	.h <<- h
	.t <<- t
	# First (unweighted) call is only used to obtain the evaluation grid.
	ker <- kdebig(.h, data, .t, poly = .poly, kernel = .kernel, nx = .nx, 
		ny = .ny, edge = F, scale = F)
	.x <<- ker$x
	.y <<- ker$y
	ans$x <- .x
	ans$y <- .y
	.poly <<- .poly
	.kernel <<- .kernel
	.obsize <<- .obsize
	# Weighted smooth evaluated on that same grid.
	ker <- kdebigw(.h, data, .t, poly = .poly, kernel = .kernel, x = .x, y
		 = .y, w=weights, edge = F, scale = F)
	rm(.nx, .ny, .x, .y, .obsize, .n, .poly, .kernel, .h)
	ans$z <- ker$z
	cat("\n")
	ans
}
 "kerreg2d"<-function (h, pts, y, w = rep(1, length(y)), x = pts, grid = F, 
    poly = NA, ngrid = 1000) 
{
    # Weighted 2-d kernel regression of y on the locations pts, computed
    # in Fortran.  With grid = F the fit is evaluated at the points in
    # `x` (default: the data points themselves) via routine "ker2d"; with
    # grid = T it is evaluated over a rectangular grid of about `ngrid`
    # cells clipped to `poly` via routine "ker2dg".
    #
    # h      bandwidth
    # pts    (n x 2) matrix of data locations
    # y, w   responses and observation weights
    # x      (m x 2) evaluation points (grid = F only)
    # poly   polygon bounding the grid (grid = T only)
    # ngrid  approximate number of grid cells inside the polygon
    #
    # Returns list(vals, df) when grid = F (fitted values plus the kernel
    # regression degrees of freedom computed by the Fortran code), or
    # list(x, y, z) when grid = T.
    if (!grid) {
        n <- length(y)
        m <- length(x[, 1])
        yans <- rep(0.3, m)   # output buffer; overwritten by the Fortran
        datx1 <- pts[, 1]
        datx2 <- pts[, 2]
        x1 <- x[, 1]
        x2 <- x[, 2]
        result <- .Fortran("ker2d", h = as.single(h), datx1 = as.single(datx1), 
            datx2 = as.single(datx2), y = as.single(y), w = as.single(w), 
            n = as.integer(n), x1 = as.single(x1), x2 = as.single(x2), 
            yans = as.single(yans), m = as.integer(m), df = as.single(0))
        ans <- list(vals = result$yans, df = result$df)
    }
    if (grid) {
        n <- length(y)
        datx1 <- pts[, 1]
        datx2 <- pts[, 2]
        x1range <- range(poly[, 1])
        x2range <- range(poly[, 2])
        x1min <- x1range[1]
        x1max <- x1range[2]
        x2min <- x2range[1]
        x2max <- x2range[2]
        x1length <- diff(x1range)
        x2length <- diff(x2range)
        # Inflate ngrid to the bounding box so about ngrid cells land
        # inside the polygon; split m1 x m2 in proportion to side lengths.
        actngrid <- (ngrid * x1length * x2length)/(areapl(poly))
        m1 <- ceiling(sqrt((actngrid * x1length)/x2length))
        m2 <- ceiling(sqrt((actngrid * x2length)/x1length))
        zmat <- matrix(0, m1, m2)
        storage.mode(zmat) <- "single"
        # Cell-centre coordinates of the evaluation grid.
        x1 <- seq(x1min + x1length/(2 * m1), x1max - x1length/(2 * 
            m1), length = m1)
        x2 <- seq(x2min + x2length/(2 * m2), x2max - x2length/(2 * 
            m2), length = m2)
        xyvec1 <- rep(x1, m2)
        xyvec2 <- rep(x2, rep(m1, m2))
        xyvec <- cbind(xyvec1, xyvec2)
        # Logical mask of grid cells inside the polygon, passed to Fortran.
        lpmat <- matrix(inout(xyvec, poly), nrow = m1, ncol = m2, 
            byrow = F)
        storage.mode(lpmat) <- "logical"
        result <- .Fortran("ker2dg", h = as.single(h), datx1 = as.single(datx1), 
            datx2 = as.single(datx2), y = as.single(y), w = as.single(w), 
            n = as.integer(n), x1 = as.single(x1), x2 = as.single(x2), 
            zmat = zmat, lpmat = lpmat, m1 = as.integer(m1), 
            m2 = as.integer(m2))
        z <- result$zmat
        # NOTE(review): exact zeros are treated as outside-polygon cells;
        # a genuine fitted value of exactly 0 would also be dropped.
        z[z == 0] <- NA
        ans <- list(x = x1, y = x2, z = z)
    }
    ans
}

"lookfor" <- function(x,y)
{
	# Detect pairs of points (x[i], y[i]) that coincide exactly and break
	# the ties by adding standard-normal jitter to the later point of each
	# duplicate pair.  Later comparisons see the already-jittered values.
	#
	# x, y  coordinate vectors of equal length
	#
	# Returns as.matrix(cbind(x, y)) with duplicates jittered; prints a
	# message and the jitter used for each duplicate pair found.
	#
	# Fixes: defunct `_` assignment operator; the original crashed for
	# inputs of length 0 or 1 because 1:(n-1) then runs backwards.
	n <- length(x)
	if(n > 1) {
		for(i in 1:(n - 1)) {
			for(j in (i + 1):n) {
				if(x[i] == x[j] & y[i] == y[j]) {
					print(paste('The pairs', i, 'and', j, 'are equal'))
					u <- rnorm(2, 0, 1)
					print(u)
					x[j] <- x[j] + u[1]
					y[j] <- y[j] + u[2]
				}
			}
		}
	}
	return(as.matrix(cbind(x, y)))
}
"my.plotrisk" <- 
  function(risk, tp = "range", zlim, clevels = default, tolvals = c(0.025,0.975), toptitle = NA, pval = F)
{
#********************************
# inputs:
# risk is a list of (x, y, z, est, poly) where x,y are the spatial
#      locations (probably in a regular grid). z is the matrix of the
#      proportion of simulated data sets below the observed g(x). est
#      is the x,y,z values of the observed rel risk surface from the
#      function relrisk. 
#
# The whole thing can be the output from the significance or
#      tolerance function.
# 
# contours of risk$z are plotted on the map of risk$est
#
# NOTE: `clevels = default` relies on lazy evaluation -- `default` is
# defined in the body below before clevels is first used.
#********************************
  default <- c(-20, -15, -10, -8, -6, -5, -4, seq(-3, 3, 0.5), 4, 5,
               6, 8, 10, 15, 20)
  # Colour limits: either a quantile-based range or the full data range.
  if(missing(zlim)) {
    if(tp == "quantile") {
      zl <- quantile(as.vector(risk$est$z), c(0.25, 0.75),
                     na.rm = T)
      zlim <- c(2 * zl[1], 10 * zl[2])
    }
    else if(tp == "range")
      zlim <- range(as.vector(risk$est$z), na.rm = T)
  }
  # Clamp the surface into zlim so image() uses the full colour range.
  risk$est$z[(risk$est$z) > (zlim[2])] <- zlim[2]
  risk$est$z[(risk$est$z) < (zlim[1])] <- zlim[1]
#  print(range(risk$est$z[risk$est$z != "NA"]))
  par(pty = "s", font = 3)
  my.polymap(risk$poly, add.poly=F, cex = 0.8)
  image(risk$est, add = T, zlim = zlim)
  image.legend(risk$est, zlim = zlim)
  my.polymap(risk$poly, add = T,add.poly=T)
  # Dashed contours of the estimated surface at clevels.
  contour(risk$est, lwd = 0.7, cex = 0.5, levels = 
          clevels, add = T, lty = 2)
  # Tolerance contours of risk$z: solid upper / dotted lower at the
  # default 2.5%/97.5% pair, otherwise dotted at the supplied levels.
  if((tolvals[1] == 0.025) & (tolvals[2] == 
               0.975)) {
    contour(risk, levels = 0.975, cex = 0.001, lwd = 
            3, add = T, lty = 1)
    contour(risk, levels = 0.025, cex = 0.001, lwd
            = 3, add = T, lty = 3)
  }
  else {
    contour(risk, levels = tolvals, cex = 0.7,
            lwd = 3, add = T, lty = 3)
  }
  if(is.na(toptitle)) {
    toptitle <- " "
  }
  # Optionally append the Monte-Carlo p-value to the title.
  if(pval) {
    toptitle <- paste(toptitle, " (p =", as.character(round(risk$
                                          pval, digits = 3)), ")")
  }
  title(toptitle, cex = 0.8)
}

"my.polymap" <- 
function(poly, add = F, xlab = "", ylab = "", axes = T, cex = 1.2, add.poly = T,
	...)
{
	# Draw (or add to) a square map of polygon `poly`.  When add = F a new
	# square plot region is set up whose limits are the polygon's bounding
	# box expanded symmetrically so width equals height; when add.poly = T
	# the polygon outline is drawn.  Restores par("pty") before returning.
	prev.pty <- par("pty")
	par(pty = "s")
	if(!add) {
		xr <- range(poly[, 1], na.rm = T)
		yr <- range(poly[, 2], na.rm = T)
		xspan <- xr[2] - xr[1]
		yspan <- yr[2] - yr[1]
		# Expand the narrower axis about its midpoint to the wider span.
		half <- max(xspan, yspan)/2
		if(xspan > yspan) {
			xplot <- xr
			yplot <- c(mean(yr) - half, mean(yr) + half)
		}
		else {
			yplot <- yr
			xplot <- c(mean(xr) - half, mean(xr) + half)
		}
		plot(poly, xlim = xplot, ylim = yplot, type = "n", axes = axes,
			xlab = xlab, ylab = ylab, mgp = c(2.6000000000000001,
			2, 0), cex = cex)
	}
	if(add.poly) {
		polygon(poly, density = 0, ...)
	}
	par(pty = prev.pty)
	invisible(0)
}

"plotrisk.sil"<-
function(risk, tp='range', zlim, clevels = default, tolvals = c(0.025, 
	0.975), toptitle = NA, pval = F)
{
# Julia Kelsall
#********************************
# Sara Morris
# November 15th 1995
# inputs:
# risk is a list of (x, y, z, est, poly) where x,y are the spatial
#      locations (probably in a regular grid). z is the matrix of the
#      proportion of simulated data sets below the observed g(x). est
#      is the x,y,z values of the observed rel risk surface from the
#      function relrisk. 
#
# The whole thing can be the output from the significance or
#      tolerance function.
# 
# contours of risk$z are plotted on the map of risk$est
#
# NOTE: `clevels = default` relies on lazy evaluation -- `default` is
# defined in the body below before clevels is first used.
#********************************
	default <- c(-20, -15, -10, -8, -6, -5, -4, seq(-3, 3, 0.5), 4, 5, 6, 8,
		10, 15, 20)
	# Colour limits: quantile-based range or the full data range.
	if(missing(zlim)) {
	  if (tp=="quantile"){
		zl <- quantile(as.vector(risk$est$z), c(0.25, 0.75), na.rm = T)
		zlim <- c(2 * zl[1], 10 * zl[2])
	      }
	  else if(tp=="range")
		zlim <- range(as.vector(risk$est$z), na.rm = T)
	}
	# Clamp the surface into zlim so image() uses the full colour range.
	risk$est$z[(risk$est$z) > (zlim[2])] <- zlim[2]
	risk$est$z[(risk$est$z) < (zlim[1])] <- zlim[1]
	# NOTE(review): comparing against the string 'NA' coerces z to
	# character here; presumably intended as !is.na(risk$est$z) -- verify.
	print(range(risk$est$z[risk$est$z!='NA']))
	par(pty = "s", font = 3)
	pointmap(risk$poly, type = "n", cex = 0.69999999999999996)
	image(risk$est, add = T, zlim = zlim)
	image.legend(risk$est$z, zlim = zlim)
	polymap(risk$poly, add = T)
	# Dashed contours of the estimated surface at clevels.
	contour(risk$est, lwd = 0.7, cex = 0.5, 
		levels = clevels, add = T, lty = 2)
	# Tolerance contours of risk$z: solid upper / dotted lower at the
	# default 2.5%/97.5% pair, otherwise dotted at the supplied levels.
	if((tolvals[1] == 0.025000000000000001) & (tolvals[2] == 
		0.97499999999999998)) {
		contour(risk, levels = 0.97499999999999998, cex = 0.001, lwd = 
			3, add = T, lty = 1)
		contour(risk, levels = 0.025000000000000001, cex = 0.001, lwd
			 = 3, add = T, lty = 3)
	}
	else {
		contour(risk, levels = tolvals, cex = 0.69999999999999996, lwd
			 = 3, add = T, lty = 3)
	}
	if(is.na(toptitle)) {
		toptitle <- " "
	}
	# Optionally append the Monte-Carlo p-value to the title.
	if(pval) {
		toptitle <- paste(toptitle, " (p =", as.character(round(risk$
			pval, digits = 3)), ")")
	}
	title(toptitle, cex = 0.80000000000000004)
}
"polymap.sil"<-
function(poly, add = F, xlab = "", ylab = "", axes = T, cex=1.2,...)
{
	# Set up a square plot region covering the bounding box of `poly`
	# (unless add = T); unlike my.polymap this never draws the polygon
	# outline itself.  Warns when the current plot type is not square.
	# Restores par("pty") before returning.
	prev.pty <- par("pty")
	if(prev.pty != "s")
		warning("polymap: plot type not square.")
	if(!add) {
		xr <- range(poly[, 1], na.rm = T)
		yr <- range(poly[, 2], na.rm = T)
		xspan <- xr[2] - xr[1]
		yspan <- yr[2] - yr[1]
		# Expand the narrower axis about its midpoint to the wider span.
		half <- max(xspan, yspan)/2
		if(xspan > yspan) {
			xplot <- xr
			yplot <- c(mean(yr) - half, mean(yr) + half)
		}
		else {
			yplot <- yr
			xplot <- c(mean(xr) - half, mean(xr) + half)
		}
		par(pty = "s")
		plot(poly, xlim = xplot, ylim = yplot, type = "n", axes = axes, 
			xlab = xlab, ylab = ylab,mgp=c(2.6,2,0),cex=cex)
	}
	par(pty = prev.pty)
	invisible(0)
}
"saveplot"<-
function(filename, horiz = F)
{
	# Save the current graph to `filename` using the S-Plus printgraph()
	# device (not available in R); `horiz` selects landscape orientation.
	printgraph(file = filename, horiz = horiz)
}
# Base directory holding the compiled Fortran subroutines used by dynload().
"sfunspath"<-
  "/home/shimakur/plots/julia/Sfuns.dir"
"waddmod" <- function(formul, data, pts, g2, z, w, hopt0 = NA, hvals = NA,
	ngrid = 0, region = NA, sameh = F)
{
# FITTING WEIGHTED ADDITIVE MODEL:

# g1 is linear part, g2 is spatial nonparam part, z is adj dep var, w weights.
# sameh=T means we want to use the value in hopt0 as the smoothing parameter.
# sameh=F means we want to choose a new value of smoothing parameter, 
# 	using hopt0 as a starting point to help choose.
# 	if hopt0=NA, then a smoothing parameter is chosen from scratch from 
#  	the ones specified in hvals.
# ngrid is the approximate number of grid points in the polygon at which
#	 the surface is calculated. It takes longer the more grid points used. 
# 	get surface with log base 2 so comparable with non gam method.
# 	If specify ngrid=0, then don't get estimate over grid,
#	 just at data points.
#
# FIXES over the original: the defunct `_` assignment operator is replaced
# by `<-`, and the formula was rebuilt from an undefined name `form`
# (the argument is called `formul`).

	i <- 0                        # backfitting iteration counter
	brk <- 0                      # set to 1 when the bandwidth converges
	if(is.na(hopt0)) {
		hopt <- 0             # 0 = "no bandwidth chosen yet"
	}
	else {
		hopt <- hopt0
	}
	k <- 1                        # iteration label used in progress output
	formul <- formula(formul)     # was formula(form): `form` is undefined here
	termos <- terms(formul)
	at <- as.character(attr(termos, "variables"))[-1]
	why <- data[, at[1]]
	if(sameh) {
		# One pass with the bandwidth held fixed at hopt.
		cat('\n\n\n WEIGHTED ADDITIVE MODEL ITERATION', k, '\n')
		k <- k + 1
		cat('\n Keeping fixed h... \n')
		yvar <- z - g2
		data[, at[1]] <- yvar
		data$w <- w
		data <- as.data.frame(as.matrix(data))
		temp <- lm(formul, weights = w, data = data)
		g1 <- fitted(temp)
		beta <- summary(temp)$coeff
		cat('\n Beta is ', beta[, 1], '\n')
		yvar <- z - g1
		smooth <- kerreg2d(hopt, pts, yvar, w)
		g2 <- smooth$vals - mean(smooth$vals)
	}
	else {
		# Backfit, re-choosing the bandwidth each pass until it repeats.
		repeat {
			if(brk == 1) break
			if(i == 10) stop('\n Too many iterations (done 10) \n')
			i <- i + 1
			cat('\n\n\n WEIGHTED ADDITIVE MODEL ITERATION', k, '\n')
			k <- k + 1
			yvar <- z - g2
			data[, at[1]] <- yvar
			data$w <- w
			data <- as.data.frame(as.matrix(data))
			temp <- lm(formul, weights = w, data = data)
			g1 <- fitted(temp)
			beta <- summary(temp)$coeff
			cat('\n Beta is ', beta[, 1], '\n')
			yvar <- z - g1
			cat('\n Choosing h value...\n')
			if(hopt == 0) {
				# No starting value: full CV search over hvals.
				hopt.new <- hch2d(hvals, pts, yvar, w)
			}
			else {
				# Search a window of 5 candidates around the
				# current bandwidth's position in hvals.
				pos <- (1:length(hvals))[hvals == hopt]
				if(pos <= 2) hs <- hvals[1:5]
				if((pos > 2) & (pos < (length(hvals) - 1)))
					hs <- hvals[(pos - 2):(pos + 2)]
				if(pos >= (length(hvals) - 1))
					hs <- hvals[(length(hvals) - 4):(length(hvals))]
				hopt.new <- hch2d(hs, pts, yvar, w)
			}
			cat('\n chosen hopt =', hopt.new, '\n')
			hopt.old <- hopt
			smooth <- kerreg2d(hopt.new, pts, yvar, w)
			g2 <- smooth$vals - mean(smooth$vals)
			# Converged when the same bandwidth is chosen twice.
			if((hopt.new == hopt.old) & (!is.na(hopt.new == hopt.old)))
				brk <- 1
			hopt <- hopt.new
		}
	}
	# Two further fixed-h backfitting passes to polish g1 and g2.
	cat('\n WEIGHTED ADDITIVE MODEL ITERATION', k, '\n')
	k <- k + 1
	yvar <- z - g2
	data[, at[1]] <- yvar
	data$w <- w
	data <- as.data.frame(as.matrix(data))
	temp <- lm(formul, weights = w, data = data)
	g1 <- fitted(temp)
	beta <- summary(temp)$coeff
	cat('\n', beta[, 1], '\n')
	yvar <- z - g1
	smooth <- kerreg2d(hopt, pts, yvar, w)
	g2 <- smooth$vals - mean(smooth$vals)

	cat('\n WEIGHTED ADDITIVE MODEL ITERATION', k, '\n')
	k <- k + 1
	yvar <- z - g2
	data[, at[1]] <- yvar
	data$w <- w
	data <- as.data.frame(as.matrix(data))
	temp <- lm(formul, weights = w, data = data)
	g1 <- fitted(temp)
	beta <- summary(temp)$coeff
	cat('\n', beta[, 1], '\n')
	yvar <- z - g1
	smooth <- kerreg2d(hopt, pts, yvar, w)
	g2 <- smooth$vals - mean(smooth$vals)

#	[More iterations can be added from above if you want...]

	# Update the IRLS quantities from the final fit.
	fhat <- g1 + g2
	phat <- exp(fhat)/(1 + exp(fhat))
	w <- phat * (1 - phat)
	z <- fhat + (why - phat)/w

	if(ngrid > 0) {
		# Evaluate the smooth over a grid in the polygon; centre it and
		# convert to log base 2 for comparability with other methods.
		gsmooth <- kerreg2d(hopt, pts, yvar, w, poly = region,
			grid = T, ngrid = ngrid)
		ans <- list(g1 = g1, g2 = g2, z = z, w = w, beta = beta,
			h = hopt,
			g2est = list(x = gsmooth$x, y = gsmooth$y,
			z = (gsmooth$z - mean(gsmooth$z, na.rm = T))/log(2)))
	}
	else {
		ans <- list(g1 = g1, g2 = g2, z = z, w = w, beta = beta,
			h = hopt)
	}
	ans
}


".First.lib" <-
  function(lib, pkg)
{
  # Package load hook: attach the compiled code for "spatgam" and print a
  # banner showing the package title and version.
  library.dynam("spatgam", package = pkg, lib.loc = lib)
  rule <- "------------------------------------------------\n"
  cat("\n", rule, sep = "")
  cat(package.description("spatgam", lib = lib, field = "Title"))
  cat("\n")
  ver <- package.description("spatgam", lib = lib, field = "Version")
  cat(paste("spatgam version", ver, "is now loaded\n"))
  cat(rule)
  cat("\n")
  return(invisible(0))
}

