[Scipy-svn] r4725 - in trunk/scipy/maxentropy: . examples tests

scipy-svn at scipy.org
Thu Sep 18 15:24:02 EDT 2008


Author: alan.mcintyre
Date: 2008-09-18 14:23:58 -0500 (Thu, 18 Sep 2008)
New Revision: 4725

Modified:
   trunk/scipy/maxentropy/examples/bergerexample.py
   trunk/scipy/maxentropy/examples/conditionalexample2.py
   trunk/scipy/maxentropy/maxentropy.py
   trunk/scipy/maxentropy/maxentutils.py
   trunk/scipy/maxentropy/tests/test_maxentropy.py
Log:
Removed unused imports.
Standardized the NumPy import to "import numpy as np".
PEP8 conformance (one import per line).
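
For readers skimming the log rather than the full diff below, a minimal sketch of the
import style this commit applies (illustrative only; these are not lines taken from the
scipy sources):

    # old style: several modules imported on one line (some of them unused),
    # and the full "numpy" name spelled out at every call site
    import random, math, bisect, cmath
    import numpy
    params = numpy.zeros(5, float)

    # new style: one import per line, unused imports dropped,
    # and NumPy consistently aliased as "np"
    import random
    import math
    import cmath
    import numpy as np
    params = np.zeros(5, float)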


Modified: trunk/scipy/maxentropy/examples/bergerexample.py
===================================================================
--- trunk/scipy/maxentropy/examples/bergerexample.py	2008-09-18 19:15:47 UTC (rev 4724)
+++ trunk/scipy/maxentropy/examples/bergerexample.py	2008-09-18 19:23:58 UTC (rev 4725)
@@ -20,8 +20,6 @@
 __author__ =  'Ed Schofield'
 __version__=  '2.1'
 
-
-import math
 from scipy import maxentropy
 
 a_grave = u'\u00e0'

Modified: trunk/scipy/maxentropy/examples/conditionalexample2.py
===================================================================
--- trunk/scipy/maxentropy/examples/conditionalexample2.py	2008-09-18 19:15:47 UTC (rev 4724)
+++ trunk/scipy/maxentropy/examples/conditionalexample2.py	2008-09-18 19:23:58 UTC (rev 4725)
@@ -18,9 +18,7 @@
 
 __author__ =  'Ed Schofield'
 
-import math
 from scipy import maxentropy, sparse
-import numpy
 
 samplespace = ['dans', 'en', 'à', 'au cours de', 'pendant']
 # Occurrences of French words, and their 'next English word' contexts, in

Modified: trunk/scipy/maxentropy/maxentropy.py
===================================================================
--- trunk/scipy/maxentropy/maxentropy.py	2008-09-18 19:15:47 UTC (rev 4724)
+++ trunk/scipy/maxentropy/maxentropy.py	2008-09-18 19:23:58 UTC (rev 4725)
@@ -70,8 +70,8 @@
 
 
 import math, types, cPickle
-import numpy
-from scipy import optimize, sparse
+import numpy as np
+from scipy import optimize
 from scipy.linalg import norm
 from scipy.maxentropy.maxentutils import *
 
@@ -194,7 +194,7 @@
                                       " using setfeaturesandsamplespace()"
 
         # First convert K to a numpy array if necessary
-        K = numpy.asarray(K, float)
+        K = np.asarray(K, float)
 
         # Store the desired feature expectations as a member variable
         self.K = K
@@ -212,7 +212,7 @@
         # self.gradevals = 0
 
         # Make a copy of the parameters
-        oldparams = numpy.array(self.params)
+        oldparams = np.array(self.params)
 
         callback = self.log
 
@@ -272,7 +272,7 @@
                     + "' is unsupported.  Options are 'CG', 'LBFGSB', " \
                     "'Nelder-Mead', 'Powell', and 'BFGS'"
 
-        if numpy.any(self.params != newparams):
+        if np.any(self.params != newparams):
             self.setparams(newparams)
         self.func_calls = func_calls
 
@@ -322,7 +322,7 @@
             self.setparams(params)
 
         # Subsumes both small and large cases:
-        L = self.lognormconst() - numpy.dot(self.params, self.K)
+        L = self.lognormconst() - np.dot(self.params, self.K)
 
         if self.verbose and self.external is None:
             print "  dual is ", L
@@ -332,7 +332,7 @@
         # Define 0 / 0 = 0 here; this allows a variance term of
         # sigma_i^2==0 to indicate that feature i should be ignored.
         if self.sigma2 is not None and ignorepenalty==False:
-            ratios = numpy.nan_to_num(self.params**2 / self.sigma2)
+            ratios = np.nan_to_num(self.params**2 / self.sigma2)
             # Why does the above convert inf to 1.79769e+308?
 
             L += 0.5 * ratios.sum()
@@ -396,7 +396,7 @@
                 self.test()
 
         if not self.callingback and self.external is None:
-            if self.mindual > -numpy.inf and self.dual() < self.mindual:
+            if self.mindual > -np.inf and self.dual() < self.mindual:
                 raise DivergenceError, "dual is below the threshold 'mindual'" \
                         " and may be diverging to -inf.  Fix the constraints" \
                         " or lower the threshold!"
@@ -428,7 +428,7 @@
         if self.sigma2 is not None and ignorepenalty==False:
             penalty = self.params / self.sigma2
             G += penalty
-            features_to_kill = numpy.where(numpy.isnan(penalty))[0]
+            features_to_kill = np.where(np.isnan(penalty))[0]
             G[features_to_kill] = 0.0
             if self.verbose and self.external is None:
                 normG = norm(G)
@@ -449,7 +449,7 @@
         return G
 
 
-    def crossentropy(self, fx, log_prior_x=None, base=numpy.e):
+    def crossentropy(self, fx, log_prior_x=None, base=np.e):
         """Returns the cross entropy H(q, p) of the empirical
         distribution q of the data (with the given feature matrix fx)
         with respect to the model p.  For discrete distributions this is
@@ -466,9 +466,9 @@
         For continuous distributions this makes no sense!
         """
         H = -self.logpdf(fx, log_prior_x).mean()
-        if base != numpy.e:
+        if base != np.e:
             # H' = H * log_{base} (e)
-            return H / numpy.log(base)
+            return H / np.log(base)
         else:
             return H
 
@@ -483,7 +483,7 @@
         Z = E_aux_dist [{exp (params.f(X))} / aux_dist(X)] using a sample
         from aux_dist.
         """
-        return numpy.exp(self.lognormconst())
+        return np.exp(self.lognormconst())
 
 
     def setsmooth(sigma):
@@ -507,7 +507,7 @@
         length as the model's feature vector f.
         """
 
-        self.params = numpy.array(params, float)        # make a copy
+        self.params = np.array(params, float)        # make a copy
 
         # Log the new params to disk
         self.logparams()
@@ -546,7 +546,7 @@
                 raise ValueError, "specify the number of features / parameters"
 
         # Set parameters, clearing cache variables
-        self.setparams(numpy.zeros(m, float))
+        self.setparams(np.zeros(m, float))
 
         # These bounds on the param values are only effective for the
         # L-BFGS-B optimizer:
@@ -595,7 +595,7 @@
             return
 
         # Check whether the params are NaN
-        if not numpy.all(self.params == self.params):
+        if not np.all(self.params == self.params):
             raise FloatingPointError, "some of the parameters are NaN"
 
         if self.verbose:
@@ -775,15 +775,15 @@
                 raise AttributeError, "prior probability mass function not set"
 
         def p(x):
-            f_x = numpy.array([f[i](x) for i in range(len(f))], float)
+            f_x = np.array([f[i](x) for i in range(len(f))], float)
 
             # Do we have a prior distribution p_0?
             if priorlogpmf is not None:
                 priorlogprob_x = priorlogpmf(x)
-                return math.exp(numpy.dot(self.params, f_x) + priorlogprob_x \
+                return math.exp(np.dot(self.params, f_x) + priorlogprob_x \
                                 - logZ)
             else:
-                return math.exp(numpy.dot(self.params, f_x) - logZ)
+                return math.exp(np.dot(self.params, f_x) - logZ)
         return p
 
 
@@ -893,7 +893,7 @@
         # As an optimization, p_tilde need not be copied or stored at all, since
         # it is only used by this function.
 
-        self.p_tilde_context = numpy.empty(numcontexts, float)
+        self.p_tilde_context = np.empty(numcontexts, float)
         for w in xrange(numcontexts):
             self.p_tilde_context[w] = self.p_tilde[0, w*S : (w+1)*S].sum()
 
@@ -932,7 +932,7 @@
         if self.priorlogprobs is not None:
             log_p_dot += self.priorlogprobs
 
-        self.logZ = numpy.zeros(numcontexts, float)
+        self.logZ = np.zeros(numcontexts, float)
         for w in xrange(numcontexts):
             self.logZ[w] = logsumexp(log_p_dot[w*S: (w+1)*S])
         return self.logZ
@@ -972,8 +972,7 @@
 
         logZs = self.lognormconst()
 
-        L = numpy.dot(self.p_tilde_context, logZs) - numpy.dot(self.params, \
-                                                               self.K)
+        L = np.dot(self.p_tilde_context, logZs) - np.dot(self.params, self.K)
 
         if self.verbose and self.external is None:
             print "  dual is ", L
@@ -1069,7 +1068,7 @@
             log_p_dot += self.priorlogprobs
         if not hasattr(self, 'logZ'):
             # Compute the norm constant (quickly!)
-            self.logZ = numpy.zeros(numcontexts, float)
+            self.logZ = np.zeros(numcontexts, float)
             for w in xrange(numcontexts):
                 self.logZ[w] = logsumexp(log_p_dot[w*S : (w+1)*S])
         # Renormalize
@@ -1366,8 +1365,8 @@
             #     -log(n-1) + logsumexp(2*log|Z_k - meanZ|)
 
             self.logZapprox = logsumexp(logZs) - math.log(ttrials)
-            stdevlogZ = numpy.array(logZs).std()
-            mus = numpy.array(mus)
+            stdevlogZ = np.array(logZs).std()
+            mus = np.array(mus)
             self.varE = columnvariances(mus)
             self.mu = columnmeans(mus)
             return
@@ -1459,7 +1458,7 @@
         log_Z_est = self.lognormconst()
 
         def p(fx):
-            return numpy.exp(innerprodtranspose(fx, self.params) - log_Z_est)
+            return np.exp(innerprodtranspose(fx, self.params) - log_Z_est)
         return p
 
 
@@ -1486,7 +1485,7 @@
         """
         log_Z_est = self.lognormconst()
         if len(fx.shape) == 1:
-            logpdf = numpy.dot(self.params, fx) - log_Z_est
+            logpdf = np.dot(self.params, fx) - log_Z_est
         else:
             logpdf = innerprodtranspose(fx, self.params) - log_Z_est
         if log_prior_x is not None:
@@ -1536,8 +1535,8 @@
                     # Use Kersten-Deylon accelerated SA, based on the rate of
                     # changes of sign of the gradient.  (If frequent swaps, the
                     # stepsize is too large.)
-                    #n += (numpy.dot(y_k, y_kminus1) < 0)   # an indicator fn
-                    if numpy.dot(y_k, y_kminus1) < 0:
+                    #n += (np.dot(y_k, y_kminus1) < 0)   # an indicator fn
+                    if np.dot(y_k, y_kminus1) < 0:
                         n += 1
                     else:
                         # Store iterations of sign switches (for plotting
@@ -1590,7 +1589,7 @@
             if self.verbose:
                 print "SA: after iteration " + str(k)
                 print "  approx dual fn is: " + str(self.logZapprox \
-                            - numpy.dot(self.params, K))
+                            - np.dot(self.params, K))
                 print "  norm(mu_est - k) = " + str(norm_y_k)
 
             # Update params (after the convergence tests too ... don't waste the
@@ -1682,7 +1681,7 @@
         self.external = None
         self.clearcache()
 
-        meandual = numpy.average(dualapprox,axis=0)
+        meandual = np.average(dualapprox,axis=0)
         self.external_duals[self.iters] = dualapprox
         self.external_gradnorms[self.iters] = gradnorms
 
@@ -1692,7 +1691,7 @@
                  (len(self.externalFs), meandual)
             print "** Mean mean square error of the (unregularized) feature" \
                     " expectation estimates from the external samples =" \
-                    " mean(|| \hat{\mu_e} - k ||,axis=0) =", numpy.average(gradnorms,axis=0)
+                    " mean(|| \hat{\mu_e} - k ||,axis=0) =", np.average(gradnorms,axis=0)
         # Track the parameter vector params with the lowest mean dual estimate
         # so far:
         if meandual < self.bestdual:

Modified: trunk/scipy/maxentropy/maxentutils.py
===================================================================
--- trunk/scipy/maxentropy/maxentutils.py	2008-09-18 19:15:47 UTC (rev 4724)
+++ trunk/scipy/maxentropy/maxentutils.py	2008-09-18 19:23:58 UTC (rev 4725)
@@ -17,7 +17,9 @@
 __author__ = "Ed Schofield"
 __version__ = '2.0'
 
-import random, math, bisect, cmath
+import random
+import math
+import cmath
 import numpy
 from numpy import log, exp, asarray, ndarray
 from scipy import sparse

Modified: trunk/scipy/maxentropy/tests/test_maxentropy.py
===================================================================
--- trunk/scipy/maxentropy/tests/test_maxentropy.py	2008-09-18 19:15:47 UTC (rev 4724)
+++ trunk/scipy/maxentropy/tests/test_maxentropy.py	2008-09-18 19:23:58 UTC (rev 4725)
@@ -6,9 +6,8 @@
 Copyright: Ed Schofield, 2003-2005
 """
 
-import sys
 from numpy.testing import *
-from numpy import arange, add, array, dot, zeros, identity, log, exp, ones
+from numpy import arange, log, exp, ones
 from scipy.maxentropy.maxentropy import *
 
 class TestMaxentropy(TestCase):



