[Scipy-svn] r2283 - in trunk/Lib/sandbox/pyem: . pyem pyem/profile_data

scipy-svn at scipy.org
Thu Oct 12 09:48:46 EDT 2006


Author: cdavid
Date: 2006-10-12 08:48:34 -0500 (Thu, 12 Oct 2006)
New Revision: 2283

Added:
   trunk/Lib/sandbox/pyem/pyem/online_em.py
Modified:
   trunk/Lib/sandbox/pyem/.bzrignore
   trunk/Lib/sandbox/pyem/TODO
   trunk/Lib/sandbox/pyem/count.sh
   trunk/Lib/sandbox/pyem/pyem/densities.py
   trunk/Lib/sandbox/pyem/pyem/gauss_mix.py
   trunk/Lib/sandbox/pyem/pyem/gmm_em.py
   trunk/Lib/sandbox/pyem/pyem/profile_data/profile_densities.py
   trunk/Lib/sandbox/pyem/pyem/profile_data/profile_gmm.py
Log:
[pyem @ david at ar.media.kyoto-u.ac.jp-20061006110359-7a70da70de1dd0bf]
Add preliminary online functions (do not work yet)
David Cournapeau <david at ar.media.kyoto-u.ac.jp> | 2006-10-06 20:03:59 +0900 (Fri, 06 Oct 2006)

Modified: trunk/Lib/sandbox/pyem/.bzrignore
===================================================================
--- trunk/Lib/sandbox/pyem/.bzrignore	2006-10-12 13:48:27 UTC (rev 2282)
+++ trunk/Lib/sandbox/pyem/.bzrignore	2006-10-12 13:48:34 UTC (rev 2283)
@@ -26,3 +26,4 @@
 blop
 *.prog
 *.prof
+test_storage.py

Modified: trunk/Lib/sandbox/pyem/TODO
===================================================================
--- trunk/Lib/sandbox/pyem/TODO	2006-10-12 13:48:27 UTC (rev 2282)
+++ trunk/Lib/sandbox/pyem/TODO	2006-10-12 13:48:34 UTC (rev 2283)
@@ -1,4 +1,4 @@
-# Last Change: Mon Aug 28 10:00 PM 2006 J
+# Last Change: Thu Oct 05 03:00 PM 2006 J
 
 Things which must be implemented for a 1.0 version (in order of importance)
     - tests for various lengths and model sizes 
@@ -10,6 +10,8 @@
     - C implementation of Gaussian densities at least (partially done,
     but need integration into distutils + portable way of detecting/
     loading shared library with ctypes)
+    - numpy computations make a huge difference between C and F storage:
+    should review the code to check how the storage order is handled
+    (see the sketch after this file's diff)
 
 Things which would be nice:
     - Integrate libem (libem should be modified so

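The C vs. F storage point added to the TODO above is easy to reproduce
with a few lines of numpy. A minimal sketch, with arbitrary array sizes
and a hypothetical rowsum helper (not part of pyem):

    import numpy as N
    import time

    xc = N.random.randn(1000, 50)   # C-contiguous (row major), numpy default
    xf = N.asfortranarray(xc)       # same values, Fortran (column major) layout

    def rowsum(x, nloop = 100):
        """Time a row-wise reduction, the access pattern gauss_den uses."""
        t = time.clock()
        for i in range(nloop):
            N.sum(x ** 2, 1)
        return time.clock() - t

    print "C order:", rowsum(xc)
    print "F order:", rowsum(xf)

Row-wise reductions walk contiguous memory only in the C layout, which
is why the same computation can differ so much between the two orders.
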
Modified: trunk/Lib/sandbox/pyem/count.sh
===================================================================
--- trunk/Lib/sandbox/pyem/count.sh	2006-10-12 13:48:27 UTC (rev 2282)
+++ trunk/Lib/sandbox/pyem/count.sh	2006-10-12 13:48:34 UTC (rev 2283)
@@ -1,5 +1,5 @@
 #! /bin/sh
-# Last Change: Mon Sep 11 08:00 PM 2006 J
+# Last Change: Fri Oct 06 08:00 PM 2006 J
 
 n=0
 np=0
@@ -19,5 +19,5 @@
     let nc="$nc + $tp"
 done
 
-echo $nc
-echo $np
+echo "$nc lines of C code"
+echo "$np lines of python code"

Modified: trunk/Lib/sandbox/pyem/pyem/densities.py
===================================================================
--- trunk/Lib/sandbox/pyem/pyem/densities.py	2006-10-12 13:48:27 UTC (rev 2282)
+++ trunk/Lib/sandbox/pyem/pyem/densities.py	2006-10-12 13:48:34 UTC (rev 2283)
@@ -1,7 +1,7 @@
 #! /usr/bin/python
 #
 # Copyright David Cournapeau
-# Last Change: Fri Sep 29 06:00 PM 2006 J
+# Last Change: Thu Oct 05 07:00 PM 2006 J
 
 import numpy as N
 import numpy.linalg as lin
@@ -114,26 +114,16 @@
     Call gauss_den instead"""
     # Diagonal matrix case
     d   = mu.size
+    n   = x.shape[0]
     if not log:
         inva    = 1/va[0,0]
         fac     = (2*N.pi) ** (-d/2.0) * N.sqrt(inva)
         y       =  (x[:,0] - mu[0,0]) ** 2 * inva * -0.5
         for i in range(1, d):
             inva    = 1/va[0,i]
-            #fac     *= (2*N.pi) ** (-d/2.0) * N.sqrt(inva)
             fac     *= N.sqrt(inva)
             y       += (x[:,i] - mu[0,i]) ** 2 * inva * -0.5
         y   = fac * N.exp(y)
-    #d   = mu.size
-    #n   = x.shape[0]
-    #if not log:
-    #    y   = N.zeros(n)
-    #    inva= 1/va
-    #    _gden.gden_diag(x.ctypes.data_as(POINTER(c_double)),
-    #        n, d,
-    #        mu.ctypes.data_as(POINTER(c_double)),
-    #        (inva).ctypes.data_as(POINTER(c_double)),
-    #        y.ctypes.data_as(POINTER(c_double)))
     else:
         y   = _scalar_gauss_den(x[:,0], mu[0,0], va[0,0], log)
         for i in range(1, d):

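The loop kept in _diag_gauss_den above evaluates, one dimension at a
time, the diagonal-covariance density p(x) = (2*pi)^(-d/2) *
prod_i va_i^(-1/2) * exp(-0.5 * sum_i (x_i - mu_i)^2 / va_i). The same
computation can be written vectorized over dimensions; a standalone
sketch, assuming mu and va are plain length-d arrays rather than the
(1, d) rows the package uses:

    import numpy as N

    def diag_gauss_den_sketch(x, mu, va):
        """Diagonal-covariance Gaussian density at each row of x.

        x is (n, d); mu and va are length-d 1d arrays."""
        d    = mu.size
        inva = 1. / va
        # normalization constant: (2*pi)^(-d/2) * prod(va)^(-1/2)
        fac  = (2 * N.pi) ** (-d / 2.0) * N.prod(N.sqrt(inva))
        # quadratic form summed over dimensions, one value per frame
        y    = N.sum((x - mu) ** 2 * inva, 1) * -0.5
        return fac * N.exp(y)
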
Modified: trunk/Lib/sandbox/pyem/pyem/gauss_mix.py
===================================================================
--- trunk/Lib/sandbox/pyem/pyem/gauss_mix.py	2006-10-12 13:48:27 UTC (rev 2282)
+++ trunk/Lib/sandbox/pyem/pyem/gauss_mix.py	2006-10-12 13:48:34 UTC (rev 2283)
@@ -1,5 +1,5 @@
 # /usr/bin/python
-# Last Change: Mon Sep 11 05:00 PM 2006 J
+# Last Change: Tue Oct 03 06:00 PM 2006 J
 
 # Module to implement GaussianMixture class.
 

Modified: trunk/Lib/sandbox/pyem/pyem/gmm_em.py
===================================================================
--- trunk/Lib/sandbox/pyem/pyem/gmm_em.py	2006-10-12 13:48:27 UTC (rev 2282)
+++ trunk/Lib/sandbox/pyem/pyem/gmm_em.py	2006-10-12 13:48:34 UTC (rev 2283)
@@ -1,5 +1,5 @@
 # /usr/bin/python
-# Last Change: Mon Sep 11 05:00 PM 2006 J
+# Last Change: Fri Oct 06 05:00 PM 2006 J
 
 # TODO:
 #   - which methods to avoid va shrinking to 0 ? There are several options, 
@@ -83,9 +83,6 @@
             raise GmmParamError("mode " + str(mode) + " not recognized")
 
         self.gm.set_param(w, mu, va)
-        #self.gm.w   = w
-        #self.gm.mu  = mu
-        #self.gm.va  = va
 
     def init_random(self, data):
         """ Init the model at random."""
@@ -152,9 +149,9 @@
         mGamma  = N.sum(gamma, axis = 0)
 
         if self.gm.mode == 'diag':
-            mu  = N.zeros((k, d))
-            va  = N.zeros((k, d))
-            gamma   = gamma.transpose()
+            mu      = N.zeros((k, d))
+            va      = N.zeros((k, d))
+            gamma   = gamma.T
             for c in range(k):
                 x   = N.dot(gamma[c:c+1,:], data)[0,:]
                 xx  = N.dot(gamma[c:c+1,:], data ** 2)[0,:]
@@ -198,16 +195,18 @@
     Not really useful yet"""
     def __init__(self):
         pass
-
-    def train(self, data, model, niter = 10):
+    
+    def train(self, data, model, maxiter = 10, thresh = 1e-5):
         """
-        Train a model using data, with niter iterations.
+        Train a model using data, stopping when the relative change in
+        likelihood falls below a threshold or when the number of
+        iterations exceeds maxiter, whichever comes first.
 
         Args:
             - data:     contains the observed features, one row is one frame, i.e. one 
             observation of dimension d
             - model:    object of class Mixture
-            - niter:    number of iterations
+            - maxiter:  maximum number of iterations
+            - thresh:   convergence threshold on the relative likelihood change
 
         The model is trained, and its parameters updated accordingly.
 
@@ -219,17 +218,33 @@
         model.init(data)
 
         # Likelihood is kept
-        like    = N.zeros(niter)
+        like    = N.zeros(maxiter)
 
         # Em computation, with computation of the likelihood
-        for i in range(niter):
+        g, tgd      = model.sufficient_statistics(data)
+        like[0]     = N.sum(N.log(N.sum(tgd, 1)), axis = 0)
+        model.update_em(data, g)
+        for i in range(1, maxiter):
             g, tgd      = model.sufficient_statistics(data)
             like[i]     = N.sum(N.log(N.sum(tgd, 1)), axis = 0)
             model.update_em(data, g)
+            if has_em_converged(like[i], like[i-1], thresh):
+                return like[0:i+1]
 
         return like
     
 # Misc functions
+def has_em_converged(like, plike, thresh):
+    """ Given the likelihood of the current iteration (like) and of the
+    previous one (plike), returns True if converged, based on comparing
+    the relative change of the likelihood with thresh."""
+    diff    = N.abs(like - plike)
+    avg     = 0.5 * (N.abs(like) + N.abs(plike))
+    return diff / avg < thresh
+
 def multiple_gauss_den(data, mu, va):
     """Helper function to generate several Gaussian
     pdf (different parameters) from the same data"""
@@ -240,10 +255,11 @@
     n   = data.shape[0]
     d   = data.shape[1]
     
-    y   = N.zeros((n, K))
     if mu.size == va.size:
+        # fill (K, n) row by row so each gauss_den result lands in a
+        # contiguous row, then hand back the expected (n, K) layout
+        y   = N.zeros((K, n))
         for i in range(K):
-            y[:, i] = densities.gauss_den(data, mu[i, :], va[i, :])
+            y[i] = densities.gauss_den(data, mu[i, :], va[i, :])
+        return y.T
     else:
+        y   = N.zeros((n, K))
         for i in range(K):
             y[:, i] = densities.gauss_den(data, mu[i, :], 

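The stopping rule added to EM.train above declares convergence when the
relative change of the log-likelihood, |like - plike| / (0.5 * (|like| +
|plike|)), drops below thresh. A quick numerical check of
has_em_converged as defined above (the likelihood values are made up
for illustration):

    # relative change is about 1e-2, well above thresh = 1e-5: keep going
    print has_em_converged(-1000.0, -1010.0, 1e-5)      # False
    # relative change is about 5e-6, below thresh: converged
    print has_em_converged(-1000.0, -1000.005, 1e-5)    # True
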
Added: trunk/Lib/sandbox/pyem/pyem/online_em.py
===================================================================
--- trunk/Lib/sandbox/pyem/pyem/online_em.py	2006-10-12 13:48:27 UTC (rev 2282)
+++ trunk/Lib/sandbox/pyem/pyem/online_em.py	2006-10-12 13:48:34 UTC (rev 2283)
@@ -0,0 +1,340 @@
+# /usr/bin/python
+# Last Change: Fri Oct 06 08:00 PM 2006 J
+
+#---------------------------------------------
+# This is not meant to be used yet !!!! I am 
+# not sure how to integrate this stuff inside
+# the package yet. The cases are:
+#   - we have a set of data, and we want to test online EM 
+#   compared to normal EM 
+#   - we do not have all the data before putting them into online EM:
+#   e.g. the current frame depends on the previous frame in some way.
+
+import numpy as N
+from numpy import repmat, mean
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+
+from gmm_em import ExpMixtureModel, GMM, EM, multiple_gauss_den
+from gauss_mix import GM
+from kmean import kmean
+
+print "======================================================"
+import traceback
+f = traceback.extract_stack()
+print f[0][0] + " This is beta code, don't use it !        "
+print "======================================================"
+
+# Error classes
+class OnGmmError(Exception):
+    """Base class for exceptions in this module."""
+    pass
+
+class OnGmmParamError(OnGmmError):
+    """Exception raised for errors in gmm params
+
+    Attributes:
+        message -- explanation of the error
+    """
+    def __init__(self, message):
+        self.message    = message
+    
+    def __str__(self):
+        return self.message
+
+class OnGMM(ExpMixtureModel):
+    def init_kmean(self, init_data, niter = 5):
+        """ Init the model at random."""
+        k   = self.gm.k
+        d   = self.gm.d
+        if self.gm.mode == 'diag':
+            w           = N.ones(k) / k
+
+            # Init the internal state of EM
+            self.cx     = N.outer(w, mean(init_data, 0))
+            self.cxx    = N.outer(w, mean(init_data ** 2, 0))
+
+            # w, mu and va init is the same as in the standard case
+            (code, label)   = kmean(init_data, init_data[0:k, :], niter)
+            mu          = code.copy()
+            va          = N.zeros((k, d))
+            for i in range(k):
+                for j in range(d):
+                    va[i, j] = N.cov(init_data[N.where(label==i), j], rowvar = 0)
+        else:
+            raise OnGmmParamError("""init_online not implemented for
+                    mode %s yet""", mode)
+
+        self.gm.set_param(w, mu, va)
+        self.cw     = w
+        self.cmu    = mu
+        self.cva    = va
+
+        self.pw     = self.cw.copy()
+        self.pmu    = self.cmu.copy()
+        self.pva    = self.cva.copy()
+
+    def __init__(self, gm, init = 'kmean'):
+        self.gm = gm
+        
+        # Possible init methods
+        init_methods = {'kmean' : self.init_kmean}
+
+        self.init   = init_methods[init]
+
+    def sufficient_statistics(self, frame, nu):
+        """ sufficient_statistics(frame, nu)
+        
+        frame has to be rank 2 !"""
+        gamma   = multiple_gauss_den(frame, self.pmu, self.pva)[0]
+        gamma   *= self.pw
+        gamma   /= N.sum(gamma)
+        # <1>(t) = cw(t), each column is one component cw = (cw1, ..., cwK);
+        self.cw	= (1 - nu) * self.cw + nu * gamma
+
+        return gamma
+
+    def update_em(self, frame, gamma, nu):
+        cw  = self.cw
+        cx  = self.cx
+        cxx = self.cxx
+        cmu = self.cmu
+        cva = self.cva
+        for k in range(self.gm.k):
+            cx[k]   = (1 - nu) * cx[k] + nu * frame * gamma[k]
+            cxx[k]  = (1 - nu) * cxx[k] + nu * frame ** 2 * gamma[k]
+
+            cmu[k]  = cx[k] / cw[k]
+            cva[k]  = cxx[k] / cw[k] - cmu[k] ** 2
+    
+if __name__ == "__main__":
+    import copy
+    #=============================
+    # Simple GMM with 2 components
+    #=============================
+
+    #+++++++++++++++++++++++++++++
+    # Meta parameters of the model
+    #   - k: Number of components
+    #   - d: dimension of each Gaussian
+    #   - mode: Mode of covariance matrix: full or diag
+    #   - nframes: number of frames (frame = one data point = one
+    #   row of d elements)
+    k       = 2 
+    d       = 1
+    mode    = 'diag'
+    nframes = int(1e3)
+    emiter  = 2
+
+    #+++++++++++++++++++++++++++++++++++++++++++
+    # Create an artificial GMM model and sample it
+    #+++++++++++++++++++++++++++++++++++++++++++
+    print "Generating the mixture"
+    # Generate a model with k components, d dimensions
+    w, mu, va   = GM.gen_param(d, k, mode, spread = 1.5)
+    gm          = GM.fromvalues(w, mu, va)
+
+    # Sample nframes frames from the model
+    data    = gm.sample(nframes)
+
+    #++++++++++++++++++++++++
+    # Learn the model with EM
+    #++++++++++++++++++++++++
+
+    # Init the model
+    print "Init a model for learning, with kmean for initialization"
+    lgm = GM(d, k, mode)
+    gmm = GMM(lgm, 'kmean')
+    gmm.init(data)
+
+    # Keep the initialized model for drawing
+    gm0 = copy.copy(lgm)
+
+    # The actual EM, with likelihood computation
+    like    = N.zeros(emiter)
+    print "computing..."
+    for i in range(emiter):
+        g, tgd  = gmm.sufficient_statistics(data)
+        like[i] = N.sum(N.log(N.sum(tgd, 1)), axis = 0)
+        gmm.update_em(data, g)
+
+    #+++++++++++++++++++++++++++++++
+    # Learn the model with Online EM
+    #+++++++++++++++++++++++++++++++
+    ogm     = GM(d, k, mode)
+    ogmm    = OnGMM(ogm, 'kmean')
+    #init_data   = data[0:nframes / 20, :]
+    init_data   = data
+    ogmm.init(init_data)
+
+    ogm2    = GM(d, k, mode)
+    ogmm2   = OnGMM(ogm2, 'kmean')
+    #init_data   = data[0:nframes / 20, :]
+    init_data   = data
+    ogmm2.init(init_data)
+
+    ogm0    = copy.copy(ogm)
+    assert_array_equal(ogm0.w, gm0.w)
+    assert_array_equal(ogm0.mu, gm0.mu)
+    assert_array_equal(ogm0.va, gm0.va)
+
+    ogm02   = copy.copy(ogm2)
+    assert_array_equal(ogm02.w, gm0.w)
+    assert_array_equal(ogm02.mu, gm0.mu)
+    assert_array_equal(ogm02.va, gm0.va)
+
+    w0  = gm0.w.copy()
+    mu0 = gm0.mu.copy()
+    va0 = gm0.va.copy()
+
+    cx  = ogmm.cx
+    cxx = ogmm.cxx
+    
+    cw  = w0.copy()
+    cmu = mu0.copy()
+    cva = va0.copy()
+    
+    pw  = cw.copy()
+    pmu = cmu.copy()
+    pva = cva.copy()
+
+    # Forgetting param
+    ku		= 0.005
+    t0		= 200
+    lamb	= N.ones((nframes, 1))
+    lamb[0] = 0
+    nu0		= 1.0
+    #lamb	= 1 - 1/(N.arange(-1, nframes-1) * ku + t0)
+    #nu0		= 0.2
+    nu		= N.zeros((len(lamb), 1))
+    nu[0]	= nu0
+    for i in range(1, len(lamb)):
+        nu[i]	= 1./(1 + lamb[i] / nu[i-1])
+
+    gamma   = N.zeros((nframes, k))
+    for e in range(emiter):
+        print "online manual:"
+        print pw
+        print pmu
+        print pva
+        for t in range(nframes):
+            gamma[t]    = multiple_gauss_den(data[t:t+1, :], pmu, pva)[0]
+            gamma[t]    *= pw
+            gamma[t]    /= N.sum(gamma[t])
+            # <1>(t) = cw(t), each column is one component cw = (cw1, ..., cwK);
+            # <x>(t) = cx(t)
+            cw	= (1 - nu[t]) * cw + nu[t] * gamma[t]
+            # loop through each component
+            for i in range(k):
+                cx[i]   = (1 - nu[t]) * cx[i] + nu[t] * data[t, :] * gamma[t, i]
+                cxx[i]  = (1 - nu[t]) * cxx[i] + nu[t] * data[t, :] ** 2 * gamma[t, i]
+
+                cmu[i]  = cx[i] / cw[i]
+                cva[i]  = cxx[i] / cw[i] - cmu[i] ** 2
+
+            #pw  = cw.copy()
+            #pmu = cmu.copy()
+            #pva = cva.copy()
+        print gamma[-1]
+        pw  = cw.copy()
+        pmu = cmu.copy()
+        pva = cva.copy()
+
+    print "online manual:"
+    print pw
+    print pmu
+    print pva
+    for e in range(emiter):
+        print "online automatic:"
+        print ogmm2.pw
+        print ogmm2.pmu
+        print ogmm2.pva
+        for t in range(nframes):
+            gamma   = ogmm2.sufficient_statistics(data[t:t+1, :], nu[t])
+            ogmm2.update_em(data[t, :], gamma, nu[t])
+        print gamma
+        ogmm2.pw  = ogmm2.cw.copy()
+        ogmm2.pmu = ogmm2.cmu.copy()
+        ogmm2.pva = ogmm2.cva.copy()
+
+    print "online automatic:"
+    print ogmm2.pw
+    print ogmm2.pmu
+    print ogmm2.pva
+    # #ogm.set_param(pw, pmu, pva)
+    # print "weights off vs on: \n" + str(lgm.w) + "\n " + str(cw)
+    # print "mean off vs on: \n" + str(lgm.mu) + "\n " + str(cmu)
+    # print "variances off vs on: \n" + str(lgm.va) + "\n " + str(cva)
+    # print "weights off vs on2: \n" + str(lgm.w) + "\n " + str(ogmm2.cw)
+    # print "mean off vs on2: \n" + str(lgm.mu) + "\n " + str(ogmm2.cmu)
+    # print "variances off vs on2: \n" + str(lgm.va) + "\n " + str(ogmm2.cva)
+    #assert_array_almost_equal(cw, lgm.w)
+    #assert_array_almost_equal(cmu, lgm.mu)
+    #assert_array_almost_equal(cva, lgm.va)
+    assert_array_almost_equal(ogmm2.cw, lgm.w)
+    assert_array_almost_equal(ogmm2.cmu, lgm.mu)
+    assert_array_almost_equal(ogmm2.cva, lgm.va)
+    # #+++++++++++++++
+    # # Draw the model
+    # #+++++++++++++++
+    # print "drawing..."
+    # import pylab as P
+    # P.subplot(2, 1, 1)
+
+    # if d == 1:
+    #     # Real confidence ellipses
+    #     h   = gm.plot1d()
+    #     [i.set_color('g') for i in h['pdf']]
+    #     h['pdf'][0].set_label('true pdf')
+
+    #     # Initial confidence ellipses as found by kmean
+    #     h0  = gm0.plot1d()
+    #     [i.set_color('k') for i in h0['pdf']]
+    #     h0['pdf'][0].set_label('initial pdf')
+
+    #     # Values found by EM
+    #     hl  = lgm.plot1d(fill = 1, level = 0.66)
+    #     [i.set_color('r') for i in hl['pdf']]
+    #     hl['pdf'][0].set_label('pdf found by EM')
+
+    #     # Values found by Online EM
+    #     ho  = ogm.plot1d(fill = 1, level = 0.66)
+    #     [i.set_color('b') for i in ho['pdf']]
+    #     ho['pdf'][0].set_label('pdf found by online EM')
+
+    #     P.legend(loc = 0)
+    # else:
+    #     # Draw what is happening
+    #     P.plot(data[:, 0], data[:, 1], '.', label = '_nolegend_')
+
+    #     # Real confidence ellipses
+    #     Xre, Yre  = gm.conf_ellipses()
+    #     P.plot(Xre[0], Yre[0], 'g', label = 'true confidence ellipsoides')
+    #     for i in range(1,k):
+    #         P.plot(Xre[i], Yre[i], 'g', label = '_nolegend_')
+
+    #     # Initial confidence ellipses as found by kmean
+    #     X0e, Y0e  = gm0.conf_ellipses()
+    #     P.plot(X0e[0], Y0e[0], 'k', label = 'initial confidence ellipsoides')
+    #     for i in range(1,k):
+    #         P.plot(X0e[i], Y0e[i], 'k', label = '_nolegend_')
+
+    #     # Values found by EM
+    #     Xe, Ye  = lgm.conf_ellipses()
+    #     P.plot(Xe[0], Ye[0], 'r', label = 'confidence ellipsoides found by EM')
+    #     for i in range(1,k):
+    #         P.plot(Xe[i], Ye[i], 'r', label = '_nolegend_')
+    #     P.legend(loc = 0)
+
+    #     # Values found by Online EM
+    #     Xe, Ye  = ogm.conf_ellipses()
+    #     P.plot(Xe[0], Ye[0], 'm', label = 'confidence ellipsoides found by Online EM')
+    #     for i in range(1,k):
+    #         P.plot(Xe[i], Ye[i], 'm', label = '_nolegend_')
+    #     P.legend(loc = 0)
+
+
+    # P.subplot(2, 1, 2)
+    # P.plot(like)
+    # P.title('log likelihood')
+
+    # P.show()

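The cw/cx/cxx recursions in OnGMM.update_em above are exponentially
weighted running estimates of the sufficient statistics <1>, <x> and
<x**2>. Stripped down to a single 1-d component (so gamma is always 1),
the scheme reduces to the sketch below; the constant forgetting factor
and the true mean and variance are arbitrary test values:

    import numpy as N

    nu  = 0.01                              # constant forgetting factor
    cw, cx, cxx = 1.0, 0.0, 1.0             # running <1>, <x>, <x**2>
    data = 2. * N.random.randn(10000) + 3.  # true mu = 3, va = 4
    for xt in data:
        cw  = (1 - nu) * cw  + nu           # gamma == 1: single component
        cx  = (1 - nu) * cx  + nu * xt
        cxx = (1 - nu) * cxx + nu * xt ** 2
    mu = cx / cw
    va = cxx / cw - mu ** 2
    print "online estimates:", mu, va       # should settle near 3 and 4
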
Modified: trunk/Lib/sandbox/pyem/pyem/profile_data/profile_densities.py
===================================================================
--- trunk/Lib/sandbox/pyem/pyem/profile_data/profile_densities.py	2006-10-12 13:48:27 UTC (rev 2282)
+++ trunk/Lib/sandbox/pyem/pyem/profile_data/profile_densities.py	2006-10-12 13:48:34 UTC (rev 2283)
@@ -2,7 +2,7 @@
 from numpy.random import randn
 from pyem import densities as D
 from pyem import _c_densities as DC
-import tables
+#import tables
 
 def bench(func, mode = 'diag'):
     #===========================================

Modified: trunk/Lib/sandbox/pyem/pyem/profile_data/profile_gmm.py
===================================================================
--- trunk/Lib/sandbox/pyem/pyem/profile_data/profile_gmm.py	2006-10-12 13:48:27 UTC (rev 2282)
+++ trunk/Lib/sandbox/pyem/pyem/profile_data/profile_gmm.py	2006-10-12 13:48:34 UTC (rev 2283)
@@ -2,16 +2,22 @@
 from pyem import GM, GMM
 import copy
 
+from pyem._c_densities import gauss_den
+
 def bench1(mode = 'diag'):
     #===========================================
     # GMM of 30 comp, 15 dimensions, 1e4 frames
     #===========================================
     d       = 15
     k       = 30
-    nframes = 1e5
+    nframes = 1e4
     niter   = 10
     mode    = 'diag'
 
+    print "============================================================="
+    print "(%d dim, %d components) GMM with %d iterations, for %d frames" \
+            % (d, k, niter, nframes)
+
     #+++++++++++++++++++++++++++++++++++++++++++
     # Create an artificial GMM model and sample it
     #+++++++++++++++++++++++++++++++++++++++++++
@@ -49,9 +55,16 @@
         gmm.update_em(data, g)
 
 if __name__ == "__main__":
-    import profile
-    profile.run('bench1()', 'gmmprof')
-    import pstats
-    p = pstats.Stats('gmmprof')
+    import hotshot, hotshot.stats
+    profile_file    = 'gmm.prof'
+    prof    = hotshot.Profile(profile_file, lineevents=1)
+    prof.runcall(bench1)
+    prof.close()
+    p = hotshot.stats.load(profile_file)
     print p.sort_stats('cumulative').print_stats(20)
+    # import profile
+    # profile.run('bench1()', 'gmmprof')
+    # import pstats
+    # p = pstats.Stats('gmmprof')
+    # print p.sort_stats('cumulative').print_stats(20)
 



