☰  klForBns
In :
import os

%matplotlib inline

from pylab import *
import matplotlib.pyplot as plt


# Initialisation¶

• importing pyAgrum
• importing pyAgrum.lib tools
In :
import pyAgrum as gum
import pyAgrum.lib.notebook as gnb


## Create a first BN : bn¶

In :
# Load the classical "asia" network from the resource folder.
asia_path = os.path.join("res", "asia.bif")
bn = gum.loadBN(asia_path)

# Randomly re-generate the parameters of every Conditional Probability Table.
bn.generateCPTs()

# Last expression of the cell: rich (HTML) display of the network.
bn

Out:

## Create a second BN : bn2¶

In :
# Load the same "asia" structure a second time, then draw independent random
# CPTs, so that bn2 encodes a different distribution over the same variables.
source_file = os.path.join("res", "asia.bif")
bn2 = gum.loadBN(source_file)
bn2.generateCPTs()
bn2

Out:

## bn vs bn2 : different parameters¶

In :
# Display the CPT of the same node in both networks side by side:
# identical structure, different (randomly generated) parameters.
node_id = 3
gnb.sideBySide(
    bn.cpt(node_id),
    bn2.cpt(node_id),
    captions=["a CPT in bn", "same CPT in bn2"],
)

positive_XraY?
tuberculos_or_cancer?
0
1
0
0.56770.4323
1
0.46830.5317
positive_XraY?
tuberculos_or_cancer?
0
1
0
0.37710.6229
1
0.55460.4454
a CPT in bn
same CPT in bn2

## Exact and (Gibbs) approximated KL-divergence¶

In order to compute the KL-divergence, we just need to be sure that the two distributions are defined on the same domain (same variables, etc.).

### Exact KL

In :
# Exact (brute-force) computation of several divergence measures between bn and bn2.
g1 = gum.ExactBNdistance(bn, bn2)
exact_measures = g1.compute()
print(exact_measures)

{'klPQ': 3.214142228862081, 'errorPQ': 0, 'klQP': 3.400505514007759, 'errorQP': 0, 'hellinger': 0.9423646873914335, 'bhattacharya': 0.5870330325924192, 'jensen-shannon': 0.5434931919782907}


If the models are not defined on the same domain:

In :
# Load a network defined on a completely different domain (other variables than asia).
bn_different_domain=gum.loadBN(os.path.join("res","alarm.dsl"))

# g=gum.BruteForceKL(bn,bn_different_domain) # a KL-divergence between asia and alarm ... :(
# NOTE(review): BruteForceKL appears to be the former name of gum.ExactBNdistance
# used above — confirm against the pyAgrum changelog.
#
# would cause
#---------------------------------------------------------------------------
#OperationNotAllowed                       Traceback (most recent call last)
#
#OperationNotAllowed: this operation is not allowed : KL : the 2 BNs are not compatible (not the same vars : visit_to_Asia?)


### Gibbs-approximated KL

In :
# Gibbs-sampling approximation of the distance measures between bn and bn2.
g=gum.GibbsBNdistance(bn,bn2)
g.setVerbosity(True)   # record the scheme's history (plotted below via g.history())
g.setMaxTime(120)      # hard time limit — presumably seconds (currentTime() is printed "in s" below)
g.setBurnIn(5000)      # samples discarded before the estimate starts — TODO confirm unit (iterations)
g.setEpsilon(1e-7)     # convergence threshold (the run below reports "stopped with epsilon=1e-07")
g.setPeriodSize(500)   # samples between two checks of the stopping criteria — TODO confirm

In :
# Run the Gibbs approximation and display the estimated measures,
# then report how long the computation took.
approx_measures = g.compute()
print(approx_measures)

elapsed = g.currentTime()
print("Computed in {0} s".format(elapsed))

{'klPQ': 3.2013969048174893, 'errorPQ': 0, 'klQP': 3.486488841603239, 'errorQP': 0, 'hellinger': 0.9466122397143021, 'bhattacharya': 0.584055412409202, 'jensen-shannon': 0.5479129764518125}
Computed in 2.692543788 s

In :
# Report how the approximation scheme stopped, then plot its convergence history.
print("--")

# Human-readable stopping message (e.g. "stopped with epsilon=1e-07").
print(g.messageApproximationScheme())
print("--")

# French labels: "Temps de calcul" = computation time,
# "Nombre d'itérations" = number of iterations.
print("Temps de calcul : {0}".format(g.currentTime()))
print("Nombre d'itérations : {0}".format(g.nbrIterations()))

# `plot` comes from the `pylab` star-import at the top of the notebook;
# assigning to `p` suppresses the cell's list-of-Line2D repr output.
p=plot(g.history(), 'g')

--
stopped with epsilon=1e-07
--
Temps de calcul : 2.692543788
Nombre d'itérations : 240500


### Animation of Gibbs KL¶

Since it may be difficult to know what happens during an approximation algorithm, pyAgrum allows you to follow the iterations using an animated matplotlib figure.

In :
# Fresh Gibbs scheme for the animated run: shorter burn-in and a larger
# period size than the previous configuration, so the figure updates less often.
g=gum.GibbsBNdistance(bn,bn2)
g.setMaxTime(60)       # hard time limit — presumably seconds, as above
g.setBurnIn(500)       # samples discarded before the estimate starts
g.setEpsilon(1e-7)     # same convergence threshold as the non-animated run
g.setPeriodSize(5000)  # samples between two checks / animation refreshes — TODO confirm

In :
# Hook an animated matplotlib figure onto the scheme, then run it:
# the figure updates at each period while compute() iterates.
gnb.animApproximationScheme(g) # logarithmic scale for Y
g.compute()

Out:
{'klPQ': 3.2220373632846218,
'errorPQ': 0,
'klQP': 3.4095265663868957,
'errorQP': 0,
'hellinger': 0.9443942379696034,
'bhattacharya': 0.5861736998780925,
'jensen-shannon': 0.5461182417485572}
In [ ]: