#30 Crash with partially observed mixed bayesian nets

open
nobody
None
5
2006-08-02
2006-08-02
Marco
No

I created a network with 3 nodes: A and B are discrete and
C is multivariate Gaussian (the same crash happens with a
univariate node).

The structure is A -> B -> C.

Using EM learning and Gibbs sampling for inference. It's
written using a Python wrapper of the high-level
interface (which I promise to upload soon).

When I test it by generating data using an appropriate
distribution, it crashes if node B is unobserved. This
happens both in the learning phase, if I don't supply
any data for B, and when getting the JPD, if I don't
supply evidence.

The crash happens in pnlgaussiancpd.cpp line 384:

CCondGaussianDistribFun* withDiscrEv =

(static_cast<CCondGaussianDistribFun*>(m_CorrespDistribFun))->

EnterDiscreteEvidence(obsDiscreteIndex.size(),
&obsDiscreteIndex.front(),
&obsDiscrVals.front(),
pMD->GetObsTabVarType() );

below is my code.

I'll submit this to the forums as well in case it's a
mistake of mine and not a bug.

thanks a lot

Marco

import sys

sys.path.append("../../PyPNL/PyPNL")
#sys.path.append("../../PyPNL/PyPNL/Debug")

import string

import random
import scipy
#from scipy import random

import PyPNL

net = PyPNL.BayesNet()
#net.SetProperty("Inference", "jtree")
net.SetProperty("Inference", "gibbs")
net.SetProperty("Learning", "em")
#net.SetProperty("GibbsThresholdIteration", "50");
#net.SetProperty("GibbsNumberOfIterations", "1000");

net.AddNode("discrete^d1", "true false maybe")
net.AddNode("discrete^d2", "a b c")

net.AddNode("continuous^c1", "dim1 dim2 dim3")

net.AddArc("d1", "d2")
net.AddArc("d2", "c1")

print "set d1 tabular"
net.SetPTabular("d1^true d1^false d1^maybe", "0.25 0.5
0.25")
print "set d2 tabular"
net.SetPTabular("d2^a d2^b d2^c", "0.25 0.5 0.25",
"d1^true")
net.SetPTabular("d2^a d2^b d2^c", "0.25 0.5 0.25",
"d1^false")
net.SetPTabular("d2^a d2^b d2^c", "0.25 0.5 0.25",
"d1^maybe")
print "set continuous"
net.SetPGaussian("c1", "0.0 1.0 0.0", "1.0 0.0 0.0 0.0
1.0 0.0 0.0 0.0 2.0", " ", "d2^a")
net.SetPGaussian("c1", "0.0 0.0 1.0", "1.0 0.0 0.0 0.0
1.0 0.0 0.0 0.0 2.0", " ", "d2^b")
net.SetPGaussian("c1", "1.0 0.0 0.0", "1.0 0.0 0.0 0.0
1.0 0.0 0.0 0.0 1.0", " ", "d2^c")

loopcount = 0
mean1 = [1.0, 2.0, 0.0]
var1 = [ [1.0, 1.0, 1.0], [0.0, 1.0, 0.0], [0.0, 0.0,
2.0] ]
mean2 = [0.0, 0.0, 1.0]
var2 = [ [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0,
1.0] ]
mean3 = [0.0, 0.0, 0.0]
var3 = [ [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0,
1.0] ]
for i in range(700):
dval1 = random.choice(["true", "true", "true",
"false", "maybe", "maybe"])
if dval1 == "true" :
dval2 = random.choice(["a", "b", "b", "b", "b", "c"])
if dval1 == "false" :
dval2 = random.choice(["a", "b", "c", "c", "c", "c"])
if dval1 == "maybe" :
dval2 = random.choice(["a", "b", "c"])
dval3 = random.choice(["a", "a", "b", "b", "b"])

if dval2 == "a" :
cvals = scipy.random.multivariate_normal(mean1, var1)
if dval2 == "b" :
cvals = scipy.random.multivariate_normal(mean2, var2)
if dval2 == "c" :
cvals = scipy.random.multivariate_normal(mean3, var3)

#print vals
# uncomment second bit of this line for fully observed
learning
# crashes if the line is not uncommented
evidence = "d1^"+dval1#+" d2^"+dval2
evidence = evidence + " c1^dim1^" + str(cvals[0])
evidence = evidence + " c1^dim2^" + str(cvals[1])
evidence = evidence + " c1^dim3^" + str(cvals[2])
#print evidence
net.EditEvidence(evidence)
#print "finished adding pcs"
net.CurEvidToBuf()
net.ClearEvid()
loopcount += 1
if loopcount > 10:
print "#",
loopcount = 0

net.LearnParameters()
net.ClearEvidBuf()

net.EditEvidence("d1^false")
net.EditEvidence("d2^a")

print "about to get jpd"
jpd = net.GetJPD("c1")
print jpd

# The next part gets the jpd if d2 is unobserved, this
crashes

net.ClearEvid()
net.EditEvidence("d1^true")

print "about to get jpd"
jpd = net.GetJPD("d2")
print jpd
jpd = net.GetJPD("c1")
print jpd

net.ClearEvid()
net.EditEvidence("d1^maybe")

print "about to get jpd"
jpd = net.GetJPD("d2")
print jpd
jpd = net.GetJPD("c1")
print jpd

Discussion

Get latest updates about Open Source Projects, Conferences and News.

Sign up for the SourceForge newsletter:





No, thanks