Lab5
isa3 committed Mar 6, 2020
1 parent 3887712 commit 156eec2
Showing 3 changed files with 185 additions and 0 deletions.
54 changes: 54 additions & 0 deletions lab05/lab_1.py
@@ -0,0 +1,54 @@
'''
This module implements the Bayesian network shown in the text, Figure 14.2.
It's taken from the AIMA Python code.
Additions made to original source code from kvlinden by Ian Adams for
Exercises 5.1 and 5.4 for lab 5 in CS300 at Calvin University.
@author: isa3
@version March 6, 2020
'''

from probability import BayesNet, enumeration_ask, elimination_ask, gibbs_ask, rejection_sampling

# Utility variables
T, F = True, False

# From AIMA code (probability.py) - Fig. 14.2 - burglary example
burglary = BayesNet([
    ('Burglary', '', 0.001),
    ('Earthquake', '', 0.002),
    ('Alarm', 'Burglary Earthquake', {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
    ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
    ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
])

# Compute P(Burglary | John and Mary both call).
print(enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx())
# elimination_ask() is a dynamic programming version of enumeration_ask().
print(elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx())
# rejection_sampling() is an approximation algorithm that rejects inconsistent samples.
print(rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx())
# gibbs_ask() is an approximation algorithm that helps Bayesian networks scale up.
print(gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx())
# See the explanation of the algorithms in AIMA Section 14.4.

# Exercise 5.1
print("a.")
print("i. P(Alarm | Burglary ^ !Earthquake)")
print(enumeration_ask('Alarm', dict(Burglary=T, Earthquake=F), burglary).show_approx())
print("ii. P(John | Burglary ^ !Earthquake)")
print(enumeration_ask('JohnCalls', dict(Burglary=T, Earthquake=F), burglary).show_approx())
print("iii. P(Burglary | Alarm)")
print(enumeration_ask('Burglary', dict(Alarm=T), burglary).show_approx())
print("iv. P(Burglary | John ^ Mary)")
print(enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx())

# Exercise 5.4
# The original source code we were given used elimination_ask, not rejection_sampling.
# elimination_ask exactly matches the results of enumeration_ask, which is expected: it is
# just a dynamic-programming version of enumeration_ask, so both compute the exact posterior.
# The rejection sampling and Gibbs sampling results do not match the exact inference results.
# That is because, while the exact algorithms calculate the posterior precisely,
# rejection_sampling and gibbs_ask only estimate it from random samples, so their answers
# approximate the true posterior and only converge toward it as the number of samples grows.
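
# A quick convergence sketch of the point above: the AIMA versions of rejection_sampling()
# and gibbs_ask() take a sample-count argument N, so if your copy of probability.py exposes
# that parameter (an assumption here), increasing N should pull the sampled estimates toward
# the exact enumeration_ask() answer.
exact = enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx()
for n in (1000, 10000, 100000):
    sampled = rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary, N=n)
    print('N = {:6d}: {}  (exact: {})'.format(n, sampled.show_approx(), exact))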
47 changes: 47 additions & 0 deletions lab05/lab_2.py
@@ -0,0 +1,47 @@
'''
This module implements the Bayesian network for Thrun's
2-test cancer example. Exercise 5.2 for lab 5 in CS300
at Calvin University.
@author: isa3
@version March 6, 2020
'''

from probability import BayesNet, enumeration_ask

# Utility variables
T, F = True, False

cancer = BayesNet([
    ('Cancer', '', 0.01),
    ('Test1', 'Cancer', {T: 0.90, F: 0.2}),
    ('Test2', 'Cancer', {T: 0.90, F: 0.2})
])

print("a. P(Cancer | Test1 ^ Test2")
print(enumeration_ask('Cancer', dict(Test1=T, Test2=T), cancer).show_approx())
print("a. P(Cancer | Test1 ^ !Test2")
print(enumeration_ask('Cancer', dict(Test1=T, Test2=F), cancer).show_approx())

# The probability of the patient having cancer given that both tests come back positive is
# much lower than I had expected. I knew the prior of 0.01 would affect the posterior, but I
# did not expect it to dominate so strongly: even with two positive tests, the patient has
# only about a 17% chance of having cancer (roughly a 5-in-6 chance of not having it).
# Given that, I'm not surprised by how low the probability of cancer is when the first test
# is positive and the second test is negative. The prior shapes the posterior strongly, and
# a single negative test, which agrees with the already high prior probability of not having
# cancer, pushes the posterior back toward that prior.
# P(Cancer | Test1, Test2) = alpha P(Cancer, Test1, Test2)
# = alpha P(Cancer) * P(Test1 | Cancer) * P(Test2 | Cancer)
# = alpha < P(Cancer) * P(Test1 | Cancer) * P(Test2 | Cancer) ,
# P(!Cancer) * P(Test1 | !Cancer) * P(Test2 | !Cancer) >
# = alpha < 0.01 * 0.9 * 0.9, 0.99 * 0.2 * 0.2 >
# = alpha < .0081, .0396 >
# = < .170, .830 >
# P(Cancer | Test1, !Test2) = alpha P(Cancer, Test1, !Test2)
# = alpha P(Cancer) * P(Test1 | Cancer) * P(!Test2 | Cancer)
# = alpha < P(Cancer) * P(Test1 | Cancer) * P(!Test2 | Cancer) ,
# P(!Cancer) * P(Test1 | !Cancer) * P(!Test2 | !Cancer) >
# = alpha < 0.01 * 0.9 * 0.1, 0.99 * 0.2 * 0.8 >
# = alpha < .0009, .1584 >
# = < .00565, .994 >
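
# A quick numeric check of the hand calculations above (a sketch, using the CPT values
# hard-coded from this file rather than any probability.py API): normalize each
# unnormalized <cancer, !cancer> pair and compare with the enumeration_ask() output
# printed earlier.
def normalize(pair):
    """Scale a <True, False> pair so its entries sum to 1."""
    total = sum(pair)
    return tuple(p / total for p in pair)

print(normalize((0.01 * 0.9 * 0.9, 0.99 * 0.2 * 0.2)))  # ~ (0.170, 0.830)
print(normalize((0.01 * 0.9 * 0.1, 0.99 * 0.2 * 0.8)))  # ~ (0.006, 0.994)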
84 changes: 84 additions & 0 deletions lab05/lab_3.py
@@ -0,0 +1,84 @@
'''
This module implements the Bayesian network for Thrun's
confounding clause example. Exercise 5.3 for lab 5 in CS300
at Calvin University.
@author: isa3
@version March 6, 2020
'''

from probability import BayesNet, enumeration_ask

# Utility variables
T, F = True, False

happiness = BayesNet([
    ('Sunny', '', 0.7),
    ('Raise', '', 0.01),
    ('Happy', 'Sunny Raise', {(T, T): 1.0, (T, F): 0.7, (F, T): 0.9, (F, F): 0.1})
])

print("a.")
print("i. P(Raise | Sunny)")
print(enumeration_ask('Raise', dict(Sunny=T), happiness).show_approx())
print("ii. P(Raise | Happy ^ Sunny)")
print(enumeration_ask('Raise', dict(Sunny=T, Happy=T), happiness).show_approx())

# The first answer makes sense because getting a raise is independent of it being
# sunny, at least according to the structure of this Bayesian network. So, regardless
# of whether it is sunny, the agent has a 0.01 chance of a raise.
# The second answer makes sense because 1) we know the agent is happy, which raises
# the chance they got a raise, but also 2) the sun is shining, which could already
# explain the agent's happiness and therefore pulls the chance of a raise back down.
# The net effect is only a slight increase in the probability of a raise (0.01 -> ~0.014).
# a.i.
# P(Raise | Sunny) = P(Raise) = < 0.01, 0.99 >
# a.ii.
# P(Raise | Happy ^ Sunny) = alpha P(Raise) * ( [ P(Sunny) * P(Happy | Raise, Sunny) ] )
#
# = alpha < P(Raise) * ( [ P(Sunny) * P(Happy | Raise, Sunny) ] ) ,
# P(!Raise) * ( [ P(Sunny) * P(Happy | !Raise, Sunny) ]) >
#
# = alpha < 0.01 * ( [ 0.7 * 1.0 ]) ,
# 0.99 * ( [ 0.7 * 0.7 ]) >
# = alpha < .007, .4851 >
# = < .0142, 0.986 >
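
# A quick numeric check of a.ii (a sketch): normalize the unnormalized <raise, !raise> pair
# built from the CPT values above. The same helper is reused for the part-b checks at the
# bottom of the file.
def normalize(pair):
    """Scale a <True, False> pair so its entries sum to 1."""
    total = sum(pair)
    return tuple(p / total for p in pair)

print(normalize((0.01 * 0.7 * 1.0, 0.99 * 0.7 * 0.7)))  # ~ (0.014, 0.986)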

print("b.")
print("i. P(Raise | Happy)")
print(enumeration_ask('Raise', dict(Happy=T), happiness).show_approx())
print("ii. P(Raise | Happy ^ !Sunny)")
print(enumeration_ask('Raise', dict(Sunny=F, Happy=T), happiness).show_approx())

# The first answer makes sense because we know that the agent is happy, increasing
# the chances they got a raise. Since we don't know whether it's sunny, the
# chances of the agent getting a raise are slightly higher than in part a.ii.
# The second answer makes sense as well. We know that the agent is happy,
# increasing the chances they got a raise. But we also know that the agent's
# happiness cannot be due to it being sunny, since it's not sunny, so
# their odds of having gotten a raise are much better than in part b.i.
# b.i.
# P(Raise | Happy) = alpha P(Raise) * ( [ P(Sunny) * P(Happy | Raise, Sunny) ]
# + [ P(!Sunny) * P(Happy | Raise, !Sunny) ] )
#
# = alpha < P(Raise) * ( [ P(Sunny) * P(Happy | Raise, Sunny) ]
# + [ P(!Sunny) * P(Happy | Raise, !Sunny) ] ) ,
# P(!Raise) * ( [ P(Sunny) * P(Happy | !Raise, Sunny) ]
# + [ P(!Sunny) * P(Happy | !Raise, !Sunny) ] ) >
#
# = alpha < 0.01 * ( [ 0.7 * 1.0 ] + [ 0.3 * 0.9 ] ) ,
# 0.99 * ( [ 0.7 * 0.7 ] + [ 0.3 * 0.1 ] ) >
# = alpha < .0097, .5148 >
# = < .0185, .9815 >
#
# b.ii.
# P(Raise | Happy ^ !Sunny) = alpha P(Raise) * ( [ P(!Sunny) * P(Happy | Raise, !Sunny) ] )
#
# = alpha < P(Raise) * ( [ P(!Sunny) * P(Happy | Raise, !Sunny) ] ) ,
# P(!Raise) * ( [ P(!Sunny) * P(Happy | !Raise, !Sunny) ]) >
#
# = alpha < 0.01 * ( [ 0.3 * 0.9 ]) ,
# 0.99 * ( [ 0.3 * 0.1 ]) >
# = alpha < .0027, .0297 >
# = < .0833, .9167 >
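
# Numeric checks of b.i and b.ii (a sketch), using the normalize() helper defined after the
# part-a calculation above; the factors are copied from the hand calculations.
print(normalize((0.01 * (0.7 * 1.0 + 0.3 * 0.9), 0.99 * (0.7 * 0.7 + 0.3 * 0.1))))  # ~ (0.018, 0.982)
print(normalize((0.01 * 0.3 * 0.9, 0.99 * 0.3 * 0.1)))                              # ~ (0.083, 0.917)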
