#############################################
# This function plots a histogram for
# objects in class samp
#
############################################
hist.samp = function(vals, varname="Variable", adj=2)
{
  # Plot a density-scaled histogram of a "samp" object, overlaid with a
  # kernel density estimate and a rug of the raw observations.
  #
  # vals:    object of class "samp" (a numeric vector underneath)
  # varname: label for the window title, main title, and x axis
  # adj:     bandwidth 'adjust' factor passed to density()
  x = unclass(vals)
  # dev.new() opens a device on any platform; the original quartz()
  # call is macOS-only and errors elsewhere.
  dev.new(title = varname)
  hist(x, breaks = 50, freq = FALSE,
       main = paste("Histogram of", varname), xlab = varname)
  lines(density(x, adjust = adj))
  rug(x)
}
#############################################
# This function plots the time history
# of a sample in class samp
#
############################################
plot.samp = function(vals,varname="x")
{
  # Plot the time history (trace) of a "samp" object: value vs. draw index.
  #
  # vals:    object of class "samp" (a numeric vector underneath)
  # varname: label for the window title and y axis
  x = unclass(vals)
  # dev.new() is cross-platform; the original quartz() is macOS-only.
  dev.new(title = varname)
  plot(x, xlab = "Sample Number", ylab = varname)
}
###################################################
#
# You can "plug in" any of the following 3 proposals.
# Both rq and logdq are needed, and the rq and logdq must
# match each other for each model (compare the code).
#
###################################################
############## Exact (Gibbs) Proposal #############
rqE = function(model) { # Gibbs
  # Exact (Gibbs) proposal for p.
  # Model 1 pins p at 1/2; model 2 draws from Beta(H + 1, T + 1), the
  # full conditional of p under a flat prior given H heads and T tails.
  if (model == 1) 0.5 else rbeta(1, H + 1, T + 1)
}
logdqE = function(model, param) { # Gibbs
  # Log density of the exact (Gibbs) proposal at 'param'.
  # Model 1 is a point mass at 1/2 (log density taken as 0);
  # model 2 is Beta(H + 1, T + 1), evaluated on the log scale.
  if (model == 1) 0 else dbeta(param, H + 1, T + 1, log = TRUE)
}
############## Approximate (Gaussian) Proposal ####
rqA = function(model) { # Gaussian
  # Approximate (Gaussian) proposal for p.
  # Model 1 pins p at 1/2; model 2 draws from a normal matched to the
  # posterior mean and variance, rejecting draws outside (0, 1).
  if (model == 1) {
    return(0.5)
  }
  mu = H / (H + T)
  sg = sqrt(H * T / (H + T)^3)
  draw = rnorm(1, mu, sg)
  while (draw <= 0 || draw >= 1) {
    draw = rnorm(1, mu, sg)
  }
  draw
}
logdqA = function(model, param) { # Gaussian
  # Log density of the truncated-normal proposal at 'param'.
  # Model 1 is a point mass at 1/2 (log density taken as 0).
  if (model == 1) {
    return(0)
  }
  mu = H/(H+T)
  sg = sqrt(H*T/(H+T)^3)
  # The proposal is a normal truncated to (0, 1), so its density must be
  # renormalized by the normal mass on (0, 1) or the probabilities will
  # be wrong. (Here that mass is almost exactly 1, which is why the
  # original unnormalized version went unnoticed.)
  mass = pnorm(1.0, mean = mu, sd = sg) - pnorm(0.0, mean = mu, sd = sg)
  dnorm(param, mu, sg, log = TRUE) - log(mass)
}
############## Uniform proposal ##################
rqU = function(model) { # Uniform
  # Uniform proposal for p: fixed at 1/2 under model 1, U(0,1) under model 2.
  if (model == 1) 0.5 else runif(1)
}
logdqU = function(model, param) { # Uniform
  # Log density of the uniform proposal at 'param'.
  # Model 1 is a point mass at 1/2 (log density taken as 0);
  # model 2 proposes p ~ U(0,1).
  if (model == 1) {
    return(0)
  }
  # dunif(..., log = TRUE) rather than log(dunif(...)): same value,
  # but the direct and idiomatic form.
  dunif(param, 0, 1, log = TRUE)
}
#
# Comment: We could have simplified this code to
# just return the value 0 from this function, but
# that only works in this particular case, where
# the proposal is U(0,1). If the proposal had been
# U(0,2), for example, the log would not evaluate
# to 0 and we would get different proposals under
# the two models. The proposal flat(0,1) is not
# equal to flat(a,b) for arbitrary a and b!!!
#
# Always check that you can make such simplifications
# before you make them!
#
############### Log prior ##########################
logdprior = function(model, param) {
  # Log prior density of the parameter given the model.
  # Model 1 has no free parameter, so its contribution is 0.
  if (model == 1) {
    return(0)
  }
  # Model 2 puts a U(0,1) prior on p. It may seem that a flat prior can
  # be ignored, but it is specifically flat(0,1): the log evaluates to 0
  # only because the U(0,1) density is 1. If it were flat(0,2), this term
  # would be log(0.5) and dropping it would give incorrect results.
  dunif(param, 0, 1, log = TRUE)
}
############### Log beta ###########################
#
# Note that on a uniform proposal, you can delete the
# calls to BOTH logdprior AND logdq (if you delete BOTH
# of them) because IN THIS CASE the prior equals the
# proposal ON BOTH MODELS and they cancel. Usually this
# cannot be done.
#
# It is important to be sure that every component
# of beta_{m|n} is represented, either by actual
# code or by checking the results of deleting code,
# or the sampler will not work properly. I have shown
# all components; delete code at your peril!
#
# Calculations are done in logs to protect against
# underflow
#
####################################################
lbeta = function(model, param, logdq) {
  # Log of the M-H weight beta for the state (model, param):
  #   log[ p(M)/q(M) ] + log p(data|param,M) + log p(param|M) - log q(param|M)
  # Everything is kept on the log scale to protect against underflow.
  # logdq: log proposal density function matching the proposal in use.
  # NOTE(review): this name masks base::lbeta (log of the beta function);
  # renaming it would avoid confusion, but all callers here use this one.
  log.model.odds = log(pmodels[model]/qmodels[model]) # p(M)/q(M)
  log.lik = dbinom(H, H+T, param, log=TRUE)           # p(data|param,M)
  log.prior = logdprior(model,param)                  # p(param|M)
  log.prop = logdq(model, param)                      # q(param|M)
  log.model.odds + log.lik + log.prior - log.prop
}
####################################################
#
# This function samples M and r in a M-H step
#
####################################################
sample.M.r = function(M,p,proposal) {
# One Metropolis-Hastings update of the pair (M, p).
#
# M:        current model index (1 or 2)
# p:        current value of the heads probability
# proposal: list with components rq (proposal sampler) and logdq
#           (matching log proposal density)
#
# Returns list(M, p, accept), accept = 1 iff the move was taken.
#
# Independence sampler: proposal doesn't depend on (M,p)
Mstar = sample(c(1,2),1,replace=TRUE,qmodels) # propose a model from qmodels
pstar = proposal$rq(Mstar)                    # propose a parameter for it
accept = 0
# Log acceptance ratio is lbeta(proposed state) - lbeta(current state)
lbeta0 = lbeta(M,p,proposal$logdq)
lbeta1 = lbeta(Mstar,pstar,proposal$logdq)
u = log(runif(1,0,1))
if(u < (lbeta1-lbeta0)) {
M = Mstar
p = pstar
accept = 1
}
list(M=M,p=p,accept=accept)
}
####################################################
#
# This function samples all variables in turn. It
# first samples M and r in a single M-H step
# Then we can sample other parameters a, b, ... if
# they are part of the problem.
#
####################################################
samplen = function(N=1000,proposal) {
# Run N Metropolis-Hastings sweeps over (M, p), starting from
# model 1 with p = 0.5, and print the sampled odds on model 1.
#
# N:        number of MCMC iterations
# proposal: list with components rq and logdq (see the proposals above)
#
# Returns a list with the model trace, the full p trace (class "samp"),
# and the p draws recorded while in model 2 (class "samp").
M = 1
p = 0.5
M1 = 0
models = rep(NaN,N)
ps = rep(NaN,N)
for(i in 1:N) {
sam = sample.M.r(M,p,proposal)
models[i] = M = sam$M
ps[i] = p = sam$p
if(M==1) {
M1 = M1+1 # Iterations spent in model 1 (visits, not M-H acceptances)
}
}
class(ps)="samp"
p2=ps[models==2]
class(p2)="samp"
# NOTE(review): if the chain never leaves model 1 (M1 == N), this
# divides by zero and reports Inf odds -- acceptable for a demo.
cat("Odds on M1 = ", M1/(N-M1),"\n")
list(model=models,p=ps,p2=p2)
}
################ Initializations ####################
# Data: observed coin-flip counts used by every proposal and by lbeta.
H = 60  # heads
T = 40  # tails
# NOTE(review): the global name T masks R's built-in alias for TRUE;
# renaming it (e.g. to Tails) everywhere in this file would be safer.
# Prior on models...
pmodels = c(0.5,0.5)
# Objects naming the different proposals; each pairs a sampler rq
# with its matching log density logdq.
UNIFORM = list(rq=rqU,logdq=logdqU)
APPROX = list(rq=rqA, logdq=logdqA)
EXACT = list(rq=rqE, logdq=logdqE)
# Ideally, adjust the qmodels vector to agree with the
# actual posterior probabilities as sampled. This will
# give the most efficient sampling. You can do this by
# running a small sample, observing the acceptances, and
# then adjusting qmodels appropriately.
qmodels = c(0.5,0.5)
#######################################
# run program and display results
#######################################
run = function(n=1000,proposal=EXACT){
  # Draw n MCMC samples with the given proposal, then show the trace and
  # histogram of p under model 2 and print its posterior mean.
  draws = samplen(n,proposal)
  plot(draws$p2)
  hist(draws$p2)
  cat("pbar = ",mean(draws$p2),"\n")
}
run()