#########################################
# Section 1.3
#---------------------------------------------------------------------
# Example 1
# Simulate N repeated experiments and count how often the t-based
# 95% confidence interval covers the true mean theta.
N <- 100
theta <- 1
xbar.vec <- rnorm(N, theta, sd = 1)
s.vec <- sqrt(rchisq(N, df = 20) / 20)   # estimated sd: 20 * s^2 ~ chi-square(20)
ci.lngth <- qt(1 - 0.05/2, df = 20) * s.vec
ind.cov <- xbar.vec - ci.lngth < theta & theta < xbar.vec + ci.lngth
plot(1:N, xbar.vec, ylim = c(-3, 6), xlab = "Experiment", ylab = "Theta", pch = 3)
abline(h = theta, col = 4, lwd = 2)
# Intervals covering theta in green, intervals missing it in red
segments((1:N)[ind.cov], (xbar.vec - ci.lngth)[ind.cov],
         (1:N)[ind.cov], (xbar.vec + ci.lngth)[ind.cov], col = 3)
segments((1:N)[!ind.cov], (xbar.vec - ci.lngth)[!ind.cov],
         (1:N)[!ind.cov], (xbar.vec + ci.lngth)[!ind.cov], col = 2)
sum(ind.cov)   # number of the N intervals that cover theta

#---------------------------------------------------------------------
# Example 2
# The same counts give different p-values under binomial and
# negative-binomial sampling: the stopping rule matters.
1 - pbinom(8, 12, 0.5)    # binomial significance
1 - pnbinom(8, 3, 0.50)   # negative-binomial significance

#########################################
# Section 1.4
library(LearnBayes)

# 1.4.1 Analyze data using a discrete prior
p <- seq(0.05, 0.95, by = 0.1)
prior <- c(2, 4, 8, 8, 4, 2, 1, 1, 1, 1)
prior <- prior / sum(prior)
plot(p, prior, type = "h", ylab = "Prior Probability")

# Producing the posterior with LearnBayes (11 successes, 16 failures)
data <- c(11, 16)
post <- pdisc(p, prior, data)
round(cbind(p, prior, post), 3)

# Producing the posterior directly in R
post.1 <- prior * p^11 * (1 - p)^16
post.1 <- post.1 / sum(post.1)
round(cbind(p, prior, post, post.1), 4)

library(lattice)
PRIOR <- data.frame("prior", p, prior)
POST <- data.frame("posterior", p, post)
names(PRIOR) <- c("Type", "P", "Probability")
names(POST) <- c("Type", "P", "Probability")
data <- rbind(PRIOR, POST)
xyplot(Probability ~ P | Type, data = data, layout = c(1, 2),
       type = "h", lwd = 3, col = "black")

# 1.4.2 Analyze data using a beta prior
p.seq <- seq(0, 1, length = 1000)
par(mfcol = c(1, 2))
plot(p.seq, dbeta(p.seq, 1, 1), type = "l", col = 4, ylim = c(0, 3),
     xlim = c(0, 1), xlab = "p", ylab = "Density")
lines(p.seq, dbeta(p.seq, 2, 2), col = 2)
lines(p.seq, dbeta(p.seq, 3, 7), col = 3)
legend(.4, 3, c("Beta(1,1)", "Beta(2,2)", "Beta(3,7)"),
       col = c(4, 2, 3), lty = 1, lwd = 3)
plot(p.seq, pbeta(p.seq, 1, 1), col = 4, type = "l", ylim = c(0, 1),
     xlim = c(0, 1), xlab = "p", ylab = "Probability")
lines(p.seq, pbeta(p.seq, 2, 2), col = 2)
lines(p.seq, pbeta(p.seq, 3, 7), col = 3)

# Medians and 90th percentiles of the candidate priors
qbeta(c(.5, .9), 1, 1)
qbeta(c(.5, .9), 2, 2)
qbeta(c(.5, .9), 3, 7)
qbeta(c(.5, .9), 3.4, 7.4)

# Likelihood, prior, and posterior on the grid, each normalized to sum to 1
like <- p.seq^11 * (1 - p.seq)^16
like <- like / sum(like)
prior <- dbeta(p.seq, 3.4, 7.4) / sum(dbeta(p.seq, 3.4, 7.4))
post <- dbeta(p.seq, 3.4, 7.4) * p.seq^11 * (1 - p.seq)^16
post <- post / sum(post)
par(mfcol = c(1, 1))
plot(p.seq, like, type = "l", lty = 1, ylim = c(0, 0.005), xlim = c(0, 1),
     xlab = "p", ylab = "Density")
lines(p.seq, prior, lty = 2)
lines(p.seq, post, lty = 3)
legend(.7, 0.005, c("Likelihood", "Prior", "Posterior"), lty = 1:3, lwd = 3)

# Exact summaries from the conjugate posterior Beta(3.4 + 11, 7.4 + 16)
1 - pbeta(0.5, 3.4 + 11, 7.4 + 16)
qbeta(c(0.05, 0.95), 3.4 + 11, 7.4 + 16)

# 1.4.3 Analyze data using a histogram prior
midpt <- seq(0.05, 0.95, by = 0.1)
prior <- c(2, 4, 8, 8, 4, 2, 1, 1, 1, 1)
prior <- prior / sum(prior)
par(mfcol = c(1, 3))
plot(p.seq, histprior(p.seq, midpt, prior), ylab = "Prior density", type = "l")
plot(p.seq, histprior(p.seq, midpt, prior) * p.seq^11 * (1 - p.seq)^16,
     ylab = "Posterior density", type = "l")
post <- histprior(p.seq, midpt, prior) * p.seq^11 * (1 - p.seq)^16
post <- post / sum(post)
post.samp <- sample(p.seq, replace = TRUE, prob = post)
hist(post.samp, xlab = "p", main = "")

# 1.4.4 Prediction
p <- seq(0.05, 0.95, by = .1)
prior <- c(2, 4, 8, 8, 4, 2, 1, 1, 1, 1)
prior <- prior / sum(prior)
m <- 20
ys <- 0:20
pred <- pdiscp(p, prior, m, ys)
round(cbind(0:20, pred), 3)

# Predictive simulation: draw p, then y | p ~ Binomial(20, p)
prior.samp <- rbeta(10000, 3.4, 7.4)
post.samp <- rbeta(10000, 3.4 + 11, 7.4 + 16)
y.prior.samp <- rbinom(10000, 20, prior.samp)
y.post.samp <- rbinom(10000, 20, post.samp)
# Plug-in binomial: fixes p at the posterior mean instead of integrating over it
y.binm.samp <- rbinom(10000, 20, (3.4 + 11) / (3.4 + 11 + 7.4 + 16))
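
# A quick check (not in the original script): the posterior predictive under
# the Beta(3.4 + 11, 7.4 + 16) posterior is beta-binomial, so the simulated
# frequencies in y.post.samp can be compared against the closed form
# C(n,y) * B(a+y, b+n-y) / B(a,b).  bb.pred is a helper name introduced
# here for illustration only.
bb.pred <- function(y, n, a, b) choose(n, y) * beta(a + y, b + n - y) / beta(a, b)
round(cbind(y         = 0:20,
            exact     = bb.pred(0:20, 20, 3.4 + 11, 7.4 + 16),
            simulated = as.numeric(table(factor(y.post.samp, levels = 0:20)) / 10000)), 3)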
summary(y.prior.samp)
summary(y.post.samp)
summary(y.binm.samp)

par(mfcol = c(1, 1))
# Use the observed values (the table names) as x-coordinates; indexing by
# 0:(length(table(...)) - 1) misplaces the bars whenever a count is absent.
plot(as.numeric(names(table(y.binm.samp))), table(y.binm.samp) / 10000,
     type = "h", xlim = c(0, 20),
     xlab = "Number of students with 8 hours of sleep", ylab = "Probability")
points(as.numeric(names(table(y.post.samp))) + .2, table(y.post.samp) / 10000,
       lty = 2, type = "h")
# points(as.numeric(names(table(y.prior.samp))) + .4, table(y.prior.samp) / 10000,
#        col = 3, type = "h")
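
# A minimal follow-up sketch, assuming the same Beta(3.4 + 11, 7.4 + 16)
# posterior as above: contrast the variance of the plug-in Binomial(20, p.hat)
# predictive with the full beta-binomial predictive.  The plug-in ignores the
# uncertainty in p, so its spread comes out noticeably smaller, which is what
# the overlaid bar plots show.  All object names below are introduced here.
a <- 3.4 + 11
b <- 7.4 + 16
n <- 20
p.hat <- a / (a + b)                        # posterior mean of p
var.plugin <- n * p.hat * (1 - p.hat)       # Binomial(n, p.hat) variance
var.bb <- n * p.hat * (1 - p.hat) * (a + b + n) / (a + b + 1)  # beta-binomial variance
c(plug.in = var.plugin, beta.binomial = var.bb)
var(y.binm.samp)   # Monte Carlo counterpart of var.plugin
var(y.post.samp)   # Monte Carlo counterpart of var.bb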