import os
%matplotlib inline
from pylab import *
import matplotlib.pyplot as plt
import pyAgrum as gum
import pyAgrum.lib.notebook as gnb
bn=gum.loadBN("res/asia.bif")
# randomly re-generate parameters for every Conditional Probability Table
bn.generateCPTs()
bn
bn2=gum.loadBN("res/asia.bif")
bn2.generateCPTs()
bn2
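Both networks are loaded from the same file, so they share the exact same graph and only their parameters differ. A quick sketch to check this (not part of the original notebook, assuming the usual `arcs()` and `size()` accessors of `pyAgrum.BayesNet`):

# sketch: generateCPTs() redraws the parameters but leaves the graph untouched
print(bn.arcs() == bn2.arcs())   # True: same set of arcs
print(bn.size() == bn2.size())   # True: same number of nodes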
gnb.flow.row(bn.cpt(3),bn2.cpt(3),
captions=["a CPT in bn","same CPT in bn2 (with different parameters)"])
a CPT in bn (rows indexed by the states of the parent variable):

| 0.6516 | 0.3484 |
| 0.4449 | 0.5551 |

same CPT in bn2 (with different parameters):

| 0.6965 | 0.3035 |
| 0.1083 | 0.8917 |
In order to compute a KL-divergence, we just need to make sure that the two distributions are defined on the same domain (same variables, same labels, etc.).
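For instance, the domains can be checked beforehand (a small sketch, assuming the `names()` accessor of `pyAgrum.BayesNet`):

# sketch: verify both networks are defined over the same set of variables
print(set(bn.names()) == set(bn2.names()))   # True: same domain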
g1=gum.ExactBNdistance(bn,bn2)
print(g1.compute())
{'klPQ': 1.9191657665041133, 'errorPQ': 0, 'klQP': 1.942616105384583, 'errorQP': 0, 'hellinger': 0.7605316099375611, 'bhattacharya': 0.3413700421126023, 'jensen-shannon': 0.37371212141901083}
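For reference, these quantities follow the usual definitions (with natural logarithms), writing $P$ and $Q$ for the joint distributions encoded by `bn` and `bn2`; `errorPQ` and `errorQP` count the configurations where a zero probability makes the corresponding ratio undefined:

$$\mathrm{KL}(P\|Q)=\sum_x P(x)\ln\frac{P(x)}{Q(x)},\qquad \mathrm{KL}(Q\|P)=\sum_x Q(x)\ln\frac{Q(x)}{P(x)}$$

$$\mathrm{Hellinger}(P,Q)=\sqrt{\sum_x\left(\sqrt{P(x)}-\sqrt{Q(x)}\right)^2},\qquad \mathrm{Bhattacharya}(P,Q)=-\ln\sum_x\sqrt{P(x)\,Q(x)}$$

$$\mathrm{JS}(P,Q)=\tfrac{1}{2}\,\mathrm{KL}(P\|M)+\tfrac{1}{2}\,\mathrm{KL}(Q\|M),\qquad M=\tfrac{1}{2}(P+Q)$$

(Here the Hellinger distance is taken without the $1/\sqrt{2}$ normalization factor, which is consistent with the values above: $\mathrm{Hellinger}^2 = 2-2e^{-\mathrm{Bhattacharya}}$.)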
If the models are not defined on the same domain:
bn_different_domain=gum.loadBN("res/alarm.dsl")
# g=gum.ExactBNdistance(bn,bn_different_domain) # a KL-divergence between asia and alarm ... :(
#
# would cause
#---------------------------------------------------------------------------
#OperationNotAllowed Traceback (most recent call last)
#
#OperationNotAllowed: this operation is not allowed : KL : the 2 BNs are not compatible (not the same vars : visit_to_Asia?)
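This failure can be caught like any other pyAgrum exception (a sketch, using the exception class shown in the traceback above):

# sketch: catch the incompatibility instead of letting the traceback escape
try:
    g=gum.ExactBNdistance(bn,bn_different_domain)
except gum.OperationNotAllowed as e:
    print("cannot compare these networks:",e)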
g=gum.GibbsBNdistance(bn,bn2)
g.setVerbosity(True)   # record the history of epsilon values
g.setMaxTime(120)      # hard limit: stop after 120 seconds
g.setBurnIn(5000)      # discard the first 5000 samples
g.setEpsilon(1e-7)     # stop when the criterion goes below 1e-7
g.setPeriodSize(500)   # check the stopping criteria every 500 samples
print(g.compute())
print("Computed in {0} s".format(g.currentTime()))
{'klPQ': 1.888582675328128, 'errorPQ': 0, 'klQP': 2.409520503291685, 'errorQP': 0, 'hellinger': 0.79919830491918, 'bhattacharya': 0.3147642841353423, 'jensen-shannon': 0.4108069121688592}
Computed in 0.31129116599999995 s
print("--")
print(g.messageApproximationScheme())
print("--")
print("Temps de calcul : {0}".format(g.currentTime()))
print("Nombre d'itérations : {0}".format(g.nbrIterations()))
--
stopped with epsilon=1e-07
--
Computation time : 0.31129116599999995
Number of iterations : 52500
p=plot(g.history(), 'g')   # history of the epsilon values (pylab)
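`history()` returns the epsilon values recorded at the end of each period, so a log-scale plot makes the convergence easier to read (a sketch using plain matplotlib rather than pylab):

# sketch: the recorded epsilon values on a logarithmic scale
fig, ax = plt.subplots()
ax.plot(g.history(), 'g')
ax.set_yscale('log')
ax.set_xlabel('period')
ax.set_ylabel('epsilon')
plt.show()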
Since it may be difficult to know what happens during an approximation algorithm, pyAgrum allows you to follow the iterations using an animated matplotlib figure.
g=gum.GibbsBNdistance(bn,bn2)
g.setMaxTime(60)
g.setBurnIn(500)
g.setEpsilon(1e-7)
g.setPeriodSize(5000)
gnb.animApproximationScheme(g) # logarithmic scale for Y
g.compute()
{'klPQ': 1.9088695207054012, 'errorPQ': 0, 'klQP': 1.8891423220301469, 'errorQP': 0, 'hellinger': 0.7553029868459951, 'bhattacharya': 0.3409495029750985, 'jensen-shannon': 0.3691557649394705}
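As a final sanity check (a sketch, not part of the original notebook), the Gibbs estimate can be compared with the exact value, which remains cheap to compute on a network as small as asia:

# sketch: measure how far the Gibbs estimate is from the exact value
exact=gum.ExactBNdistance(bn,bn2).compute()
approx=g.compute()   # re-runs the Gibbs scheme configured above
print("exact klPQ :",exact['klPQ'])
print("Gibbs klPQ :",approx['klPQ'])
print("abs. diff  :",abs(exact['klPQ']-approx['klPQ']))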