Zhiguang Huo (Caleb)
Wednesday November 15, 2017
Similarly, we can trace the values of \(I\), \(G\), and \(S\) across iterations of the Gibbs sampler:
Iteration | I | G | S |
---|---|---|---|
init | \(1\) | \(1\) | \(1\) |
1 | | | |
2 | | | |
3 | | | |
… | … | … | … |
K | | | |
# Starting values for the three binary variables
I <- 1; G <- 1; S <- 1
# Conditional probabilities:
# pG[I+1] = P(G = 1 | I), pS[I+1] = P(S = 1 | I),
# pI[2*G + S + 1] = P(I = 1 | G, S)
pG <- c(0.5, 0.8)
pS <- c(0.5, 0.5)
pI <- c(0.19, 0.36, 0.49, 0.69)
i <- 1
# Empty canvas: one column per variable, one row per iteration
plot(1:3, ylim = c(0, 10), type = "n", xaxt = "n", ylab = "iteration")
axis(1, at = 1:3, labels = c("I", "G", "S"), col.axis = "red")
text(x = 1:3, y = i, label = c(I, G, S))
set.seed(32611)
while(i < 10){
  # Update each variable in turn from its full conditional
  I <- rbinom(1, 1, pI[2*G + S + 1])
  G <- rbinom(1, 1, pG[I + 1])
  S <- rbinom(1, 1, pS[I + 1])
  i <- i + 1
  text(x = 1:3, y = i, label = c(I, G, S))
}
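As a quick extension (a sketch, not part of the original demo), one can run the same chain for many more iterations and use the empirical frequencies of the draws to approximate the marginal probabilities:

# Sketch: reuses pI, pG, pS from above; run B iterations and tabulate
set.seed(32611)
B <- 10000
trace <- matrix(0, B, 3, dimnames = list(NULL, c("I", "G", "S")))
I <- 1; G <- 1; S <- 1
for(b in 1:B){
  I <- rbinom(1, 1, pI[2*G + S + 1])
  G <- rbinom(1, 1, pG[I + 1])
  S <- rbinom(1, 1, pS[I + 1])
  trace[b, ] <- c(I, G, S)
}
colMeans(trace)  # Monte Carlo estimates of P(I = 1), P(G = 1), P(S = 1)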
The full conditional posteriors can be derived using Bayes' rule:
\(\text{var}_\alpha \doteq 1/(1/\tau_a^2 + n/\sigma^2)\), \(\text{var}_\beta \doteq 1/(1/\tau_b^2 + \sum_{i=1}^n x_i^2/\sigma^2)\)
\(\alpha | x_1^n, y_1^n, \beta, \sigma^2 \sim N(\text{var}_\alpha(\sum_{i=1}^n (y_i - \beta x_i)/\sigma^2 + \alpha_0 / \tau_a^2), \text{var}_\alpha)\)
\(\beta | x_1^n, y_1^n, \alpha, \sigma^2 \sim N(\text{var}_\beta(\sum_{i=1}^n \big( (y_i - \alpha) x_i \big) /\sigma^2 + \beta_0 / \tau_b^2), \text{var}_\beta)\)
\(\sigma^2 | x_1^n, y_1^n, \alpha, \beta \sim IG(\nu_0 + n/2, \nu_0 s_0^2 + \sum_{i=1}^n (y_i - \alpha - \beta x_i)^2/2)\)
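To see where, for example, the update of \(\alpha\) comes from (a sketch of the standard complete-the-square step): multiplying the \(N(\alpha_0, \tau_a^2)\) prior by the likelihood and collecting the terms involving \(\alpha\) gives

\[\log p(\alpha | x_1^n, y_1^n, \beta, \sigma^2) = -\frac{1}{2}\Big(\frac{1}{\tau_a^2} + \frac{n}{\sigma^2}\Big)\alpha^2 + \Big(\frac{\alpha_0}{\tau_a^2} + \frac{\sum_{i=1}^n(y_i - \beta x_i)}{\sigma^2}\Big)\alpha + \text{const},\]

which is exactly the normal density above, with variance \(\text{var}_\alpha\) and mean \(\text{var}_\alpha(\sum_{i=1}^n (y_i - \beta x_i)/\sigma^2 + \alpha_0/\tau_a^2)\).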
set.seed(32611)
# Simulate data from the true model
n = 100; alpha = 0; beta = 2; sig2 = 0.5; true = c(alpha, beta, sig2)
x = rnorm(n)
y = rnorm(n, alpha + beta*x, sqrt(sig2))
# Prior hyperparameters
alpha0 = 0; tau2a = 10; beta0 = 0; tau2b = 10; nu0 = 3; s02 = 1; nu0s02 = nu0*s02
# Starting values
alpha = 0; beta = 0; sig2 = 1
# Gibbs sampler: cycle through the three full conditionals
M = 1000
draws = matrix(0, M, 3)
draws[1,] <- c(alpha, beta, sig2)
for(i in 2:M){
  # (1) alpha | beta, sig2 ~ Normal
  var_alpha = 1/(1/tau2a + n/sig2)
  mean_alpha = var_alpha*(sum(y - beta*x)/sig2 + alpha0/tau2a)
  alpha = rnorm(1, mean_alpha, sqrt(var_alpha))
  # (2) beta | alpha, sig2 ~ Normal
  var_beta = 1/(1/tau2b + sum(x^2)/sig2)
  mean_beta = var_beta*(sum((y - alpha)*x)/sig2 + beta0/tau2b)
  beta = rnorm(1, mean_beta, sqrt(var_beta))
  # (3) sig2 | alpha, beta ~ Inverse-Gamma (drawn as 1/Gamma)
  sig2 = 1/rgamma(1, nu0 + n/2, nu0s02 + sum((y - alpha - beta*x)^2)/2)
  draws[i,] = c(alpha, beta, sig2)
}
# Markov chain traces + autocorrelation + marginal posterior histograms
parNames = c('alpha', 'beta', 'sig2')
colnames(draws) <- parNames
ind = 101:M  # discard the first 100 draws as burn-in
par(mfrow = c(3, 3))
for(i in 1:3){
  ts.plot(draws[,i], xlab = 'iterations', ylab = "", main = parNames[i])
  abline(v = ind[1], col = 4)         # burn-in cutoff
  abline(h = true[i], col = 2, lwd = 2)  # true parameter value
  acf(draws[ind, i], main = "")
  hist(draws[ind, i], prob = TRUE, main = "", xlab = "")
  abline(v = true[i], col = 2, lwd = 2)
}
colMeans(draws[101:M,])
## alpha beta sig2
## -0.06394806 1.93993716 0.54674891
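As an additional summary (a sketch, not in the original notes), equal-tailed 95% posterior credible intervals can be read off the retained draws:

# Sketch: 95% credible intervals from the post-burn-in draws
apply(draws[ind, ], 2, quantile, probs = c(0.025, 0.975))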
\[R_x(k) = \frac {\sum_{t=1}^{n-k} (x_t - \bar{x})(x_{t+k} - \bar{x})} {\sum_{t=1}^{n} (x_t - \bar{x})^2} \]
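A minimal check (the helper acf_k below is hypothetical, not from the notes): computing the lag-\(k\) autocorrelation by hand and comparing with R's acf():

# Sketch: lag-k sample autocorrelation of a chain, matching acf()'s definition
acf_k <- function(x, k){
  n <- length(x); xbar <- mean(x)
  sum((x[1:(n - k)] - xbar) * (x[(k + 1):n] - xbar)) / sum((x - xbar)^2)
}
acf_k(draws[ind, "beta"], 1)                   # manual lag-1 autocorrelation
acf(draws[ind, "beta"], plot = FALSE)$acf[2]   # R's estimate, for comparison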
High autocorrelation leads to smaller effective sample size.
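For example, assuming the coda package is installed, the effective sample size of the post-burn-in draws can be estimated directly:

# Sketch: effective sample size per parameter (coda package assumed available)
library(coda)
effectiveSize(as.mcmc(draws[ind, ]))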
knitr::purl("Gibbs.rmd", output = "Gibbs.R", documentation = 2)
## processing file: Gibbs.rmd
## output file: Gibbs.R
## [1] "Gibbs.R"