[Scipy-svn] r4638 - in trunk/scipy/ndimage: . src tests

scipy-svn at scipy.org scipy-svn at scipy.org
Mon Aug 11 19:09:06 EDT 2008


Author: jarrod.millman
Date: 2008-08-11 18:08:56 -0500 (Mon, 11 Aug 2008)
New Revision: 4638

Removed:
   trunk/scipy/ndimage/_registration.py
   trunk/scipy/ndimage/_segmenter.py
   trunk/scipy/ndimage/src/register/
   trunk/scipy/ndimage/src/segment/
   trunk/scipy/ndimage/tests/test_registration.py
   trunk/scipy/ndimage/tests/test_segment.py
Modified:
   trunk/scipy/ndimage/setup.py
Log:
removing segmentation and registration code since it isn't ready for release.
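
For reference, the removed _registration.py exposed a rigid-body registration workflow.
A minimal sketch assembled from its docstrings (the volume shapes and identity MATs below
are placeholders, not working test data):

    import numpy as np
    import scipy.ndimage._registration as reg

    # register() requires 8-bit volumes and 4x4 voxel-to-physical MATs
    image1 = np.zeros((64, 64, 64), dtype=np.uint8)   # source volume (placeholder)
    image2 = np.zeros((64, 64, 64), dtype=np.uint8)   # reference volume (placeholder)
    mat1 = np.eye(4)
    mat2 = np.eye(4)

    # two-pass multiresolution search over 3 angles and 3 translations
    parm_vector = reg.register(image1, mat1, image2, mat2,
                               multires=[4, 2], method='nmi', opt_method='hybrid')

    # build the inverse 4x4 mapping and resample the source into alignment
    M_inverse = reg.get_inverse_mappings(parm_vector)
    aligned = reg.remap_image(image1, M_inverse, resample='cubic')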


Deleted: trunk/scipy/ndimage/_registration.py
===================================================================
--- trunk/scipy/ndimage/_registration.py	2008-08-11 21:39:17 UTC (rev 4637)
+++ trunk/scipy/ndimage/_registration.py	2008-08-11 23:08:56 UTC (rev 4638)
@@ -1,1044 +0,0 @@
-#
-# written by Tom Waite
-# rigid body 3D registration
-#
-
-
-import math
-import numpy as np
-from scipy.special import erf
-from scipy.ndimage import correlate1d
-from scipy.optimize import fmin_powell, fmin_cg
-
-import scipy.ndimage._register as reg
-
-import time
-import glob
-
-# Issue warning regarding heavy development status of this module
-import warnings
-_msg = "The registration code is under heavy development and therefore the \
-public API will change in the future.  The NIPY group is actively working on \
-this code, and has every intention of generalizing this for the Scipy \
-community.  Use this module minimally, if at all, until this warning is \
-removed."
-warnings.warn(_msg, UserWarning)
-
-# TODO:  Add docstrings for public functions in extension code.
-# Add docstrings to extension code.
-#from numpy.lib import add_newdoc
-#add_newdoc('scipy.ndimage._register', 'register_histogram',
-#    """A joint histogram used for registration module.
-#    """)
-
-
-#
-#  ---- co-registration and IO  ---- 
-#
-
-def resize_image(imageS, imageS_mat, imageR_mat):
-    """
-    zoom_image, M = resize_image(imageS, imageS_mat, imageR_mat)
-
-    Fractionally resamples the source image imageS to the reference image size. The
-    resample is implemented with a 3D cubic spline. The source
-    imageS_mat is the 4x4 voxel-to-physical conversion matrix.
-    
-    Parameters 
-    ----------
-
-    imageS: {ndarray} 
-        imageS is the source image to be resized.
-
-    imageS_mat : {ndarray} 
-        the 4x4 transform of the source image that maps voxel to physical.
-
-    imageR_mat : {ndarray}
-        the 4x4 transform of the destination image that maps voxel to physical.
-
-    Returns 
-    -------
-    zoom_image : {ndarray}
-        the resampled source image
-
-    M : {ndarray}
-        the 4x4 voxel-to-physical matrix of the resampled image
-
-    Examples
-    --------
-
-    >>> import _registration as reg
-    >>> measures, image_anat, image_anat_mat, image_fmri_mat, fmri_series = reg.demo_MRI_coregistration()
-
-    >>> resampled_fmri, resampled_mat = reg.resize_image(fmri_series[10], image_fmri_mat, image_anat_mat)
-
-    image 10 in the fmri_series is resampled from image_fmri_mat to image_anat coordinates
-
-    """
-
-    # get the zoom
-    Z = imageS_mat.diagonal() / imageR_mat.diagonal()
-
-    # new volume dimensions (rounded). D, imageS and Z are 3D and this is a vector element product
-    D = (imageS.shape * Z + 0.5).astype(np.int16)
-
-    # for the test data, set the xyz voxel sizes for fMRI volume. M is a 4x4 matrix.
-    M = np.diag(imageS_mat.diagonal() / Z)    
-
-    image = np.zeros((D[2],D[1],D[0]),np.uint8)
-    
-    mode  = 2
-    scale = 0
-    reg.register_volume_resample(imageS, image, Z, scale, mode)
-
-    return image, M
-
-def remap_image(image, M_inverse, resample='linear'):
-    """
-    remaped_image = remap_image(image, M_inverse, resample='linear')
-
-    rotates and translates the image using the 4x4 M_inverse matrix obtained from the
-    get_inverse_mappings() method, which encodes the 3 angles and 3 translations of the
-    6-dim parm_vector.
-
-    Parameters 
-    ----------
-    image : {ndarray} 
-        image is the source image to be remapped. 
-
-    M_inverse : {ndarray}
-        M_inverse is the 4x4 inverse affine matrix 
-
-    resample : {'linear', 'cubic'}, optional
-
-
-    Returns 
-    -------
-    remaped_image : {ndarray}
-
-    Examples
-    --------
-        image = fmri_series[i]
-        x[0:6] = measures[i]['align_rotate'][0:6]
-        M = get_inverse_mappings(x)
-        # overwrite the fMRI volume with the aligned volume
-        fmri_series[i] = remap_image(image, M, resample='cubic')
-
-    """
-
-    # allocate the zero image
-    remaped_image = np.zeros(image.shape, dtype=np.uint8)
-
-    step = np.array([1, 1, 1], dtype=np.int32)
-
-    if resample == 'linear':
-        # trilinear interpolation mapping.
-        reg.register_linear_resample(image, remaped_image, M_inverse, step)
-    elif resample == 'cubic':
-        # tricubic convolve interpolation mapping. 
-        reg.register_cubic_resample(image, remaped_image, M_inverse, step)
-
-    return remaped_image
-
-
-def get_inverse_mappings(parm_vector):
-    """
-    M_inverse = get_inverse_mappings(parm_vector)
-
-    takes the 6-dimensional rotation/translation vector and builds the inverse
-    4x4 mapping matrix M_inverse that will map imageG to imageF orientation
-
-    Parameters 
-    ----------
-    parm_vector : {nd_array} 
-
-    Returns 
-    -------
-    M_inverse : {nd_array}
-
-    Examples
-    --------
-
-    >>> import numpy as np
-    >>> import _registration as reg
-    >>> array = np.zeros(6, dtype=float)
-    >>> M = reg.get_inverse_mappings(array)
-    >>> M
-    array([[ 1.,  0.,  0.,  0.],
-           [ 0.,  1.,  0.,  0.],
-           [ 0.,  0.,  1.,  0.],
-           [ 0.,  0.,  0.,  1.]])
-
-    """
-    # get the inverse mapping to rotate the G matrix to F space following registration
-    # -parm_vector is the inverse angles and translations
-    M_inverse = build_rotate_matrix(-parm_vector)
-    return M_inverse
-
-def register(image1, image1_mat, image2, image2_mat, multires=[4, 2], histo_fwhm=3, 
-             ftype=1, lite=0, smhist=0, method='nmi', opt_method='hybrid',
-             optimize_function=None):
-
-    """
-    parm_vector = register(image1, image1_mat, image2, image2_mat, multires=[4, 2], histo_fwhm=3,
-                             ftype=1, lite=0, smhist=0, method='nmi', opt_method='hybrid')
-
-    aligns the two images (alignment measured by mutual information or cross correlation)
-    by an optimization search over 3 angle and 3 translation parameters. The optimization
-    uses either the Powell or Conjugate Gradient methods in the scipy optimization
-    package. The optimal rigid body parameter vector is returned.
-
-    Parameters 
-    ----------
-    image1 : {nd_array} 
-        image1 is the source image to be remapped during the registration. 
-    image1_mat : {nd_array} 
-        image1_mat is the source image MAT 
-    image2 : {nd_array} 
-        image2 is the reference image that image1 gets mapped to. 
-    image2_mat : {nd_array} 
-        image2_mat is the reference image MAT 
-    multires: {list}, optional
-        the volume subsample values for each pass of the registration.
-        the default is 2 passes with subsample 4 in pass 1 and subsample 2 in pass 2
-    histo_fwhm : {int}, optional
-        used for the filter kernel in the low pass filter of the joint histogram 
-    ftype : {0, 1}, optional
-        flag for type of low pass filter. 0 is Gauss-Spline
-        1 is pure Gauss. Sigma determined from volume sampling info.
-    lite : {0, 1}, optional
-        lite of 1 is to jitter both images during resampling. 0
-        is to not jitter. jittering is for non-aliased volumes.
-    smhist: {0, 1}, optional
-        flag for joint histogram low pass filtering. 0 for no filter,
-        1 for do filter.
-    method: {'nmi', 'mi', 'ncc', 'ecc', 'mse'}, optional
-        flag for type of registration metric. nmi is normalized mutual
-        information; mi is mutual information; ecc is entropy cross
-        correlation; ncc is normalized cross correlation. mse is mean
-        squared error.
-    opt_method: {'powell', 'cg', 'hybrid'}, optional
-        registration is two pass. Pass 1 is low res to get close to alignment
-        and pass 2 starts at the pass 1 optimal alignment. In powell pass 1 and
-        2 are powell, in hybrid pass 2 is conjugate gradient.
-
-
-    Returns 
-    -------
-    parm_vector : {nd_array}
-        this is the optimal alignment (6-dim) array with 3 angles and
-        3 translations.
-
-    Examples
-    --------
-
-    >>> import numpy as np
-    >>> import _registration as reg
-
-    >>> image1, mat1, image2, mat2 = reg.demo_build_dual_volumes()
-    >>> parm_vector = reg.register(image1, mat1, image2, mat2)
-
-    """
-
-    # do the parameter validity checking. this is specific to this 3D registration.
-    # make sure the image is 3D and the mats are 4x4 with nonzero diagonal
-
-    if image1.ndim != 3:
-        raise ValueError, "Image 1 is not 3 dimensional"
-
-    if image2.ndim != 3:
-        raise ValueError, "Image 2 is not 3 dimensional"
-
-    if image1.dtype != np.uint8:
-        raise ValueError, "Image 1 is not 8 bit (required for joint histogram)"
-
-    if image2.dtype != np.uint8:
-        raise ValueError, "Image 2 is not 8 bit (required for joint histogram)"
-
-    if image1_mat.shape != (4,4):
-        raise ValueError, "Image1 MAT is not 4x4"
-
-    if image2_mat.shape != (4,4):
-        raise ValueError, "Image2 MAT is not 4x4"
-
-    if (np.diag(image1_mat)).prod() == 0:
-        raise ValueError, "Image1 MAT has a 0 on the diagonal"
-
-    if (np.diag(image2_mat)).prod() == 0:
-        raise ValueError, "Image2 MAT has a 0 on the diagonal"
-
-    if opt_method=='hybrid' and np.size(multires) != 2:
-        raise ValueError, "hybrid method must be 2 pass registration"
-
-    if ftype != 0 and ftype != 1: 
-        raise ValueError, "choose filter type 0 or 1 only"
-
-    if lite != 0 and lite != 1: 
-        raise ValueError, "choose histogram generation type 0 or 1 only"
-
-    if smhist != 0 and smhist != 1: 
-        raise ValueError, "choose histogram smoothing type 0 or 1 only"
-
-    if method != 'nmi' and method != 'mi'  and method != 'ncc'\
-                       and method != 'ecc' and method != 'mse':
-        raise ValueError, "choose cost method nmi, mi, ecc, mse, ncc"
-
-    if opt_method != 'powell' and opt_method != 'cg'  and opt_method != 'hybrid':
-        raise ValueError, "only optimize methods powell, cg or hybrid are supported"
-
-    # default is to use the cost_function I provided.
-    # this shows you can override this but the parameters will have to
-    # be changed for the new cost function if it is different
-
-    if optimize_function is None:
-        optimize_function = cost_function
-
-    parm_vector = multires_registration(optimize_function, image1, image1_mat, image2, image2_mat,
-                                        multires, histo_fwhm, lite, smhist, method, opt_method)
-
-    return parm_vector
-
-def multires_registration(optimize_function, image1, image1_mat, image2, image2_mat,
-                          multires, histo_fwhm, lite, smhist, method, opt_method):
-
-    """
-
-    to be called by register() which does parameter validation 
-
-    Parameters 
-    ----------
-    image1 : {nd_array} 
-        image1 is the source image to be remapped during the registration. 
-    image1_mat : {nd_array} 
-        image1_mat is the source image MAT 
-    image2 : {nd_array} 
-        image2 is the reference image that image1 gets mapped to. 
-    image2_mat : {nd_array} 
-        image2_mat is the reference image MAT 
-    multires: {list}, optional
-        the volume subsample values for each pass of the registration.
-        the default is 2 passes with subsample 4 in pass 1 and subsample 2 in pass 2
-    histo_fwhm : {int}, optional
-        used for the filter kernel in the low pass filter of the joint histogram 
-    ftype : {0, 1}, optional
-        flag for type of low pass filter. 0 is Gauss-Spline
-        1 is pure Gauss. Sigma determined from volume sampling info.
-    lite : {0, 1}, optional
-        lite of 1 is to jitter both images during resampling. 0
-        is to not jitter. jittering is for non-aliased volumes.
-    smhist: {0, 1}, optional
-        flag for joint histogram low pass filtering. 0 for no filter,
-        1 for do filter.
-    method: {'nmi', 'mi', 'ncc', 'ecc', 'mse'}, optional
-        flag for type of registration metric. nmi is normalized mutual
-        information; mi is mutual information; ecc is entropy cross
-        correlation; ncc is normalized cross correlation. mse is mean
-        squared error.
-    opt_method: {'powell', 'cg', 'hybrid'}, optional
-        registration is two pass. Pass 1 is low res to get close to alignment
-        and pass 2 starts at the pass 1 optimal alignment. In powell pass 1 and
-        2 are powell, in hybrid pass 2 is conjugate gradient.
-
-
-    Returns 
-    -------
-    parm_vector : {nd_array}
-        this is the optimal alignment (6-dim) array with 3 angles and
-        3 translations.
-
-    Examples
-    --------
-
-    (calling this from register which optionally filters image2)
-    >>> import numpy as np
-    >>> import _registration as reg
-    >>> image1, mat1, image2, mat2 = reg.demo_build_dual_volumes()
-    >>> parm_vector = reg.register(image1, mat1, image2, mat2)
-
-    """
-    ret_histo=0
-    step = np.array([1, 1, 1], dtype=np.int32)
-    fwhm = np.zeros(2, dtype=np.int32)
-    # make the step a scalar so it can be set inside the multi-res loop
-    loop = range(np.size(multires))
-    # 6-D zero vector
-    x = np.zeros(6, dtype=np.float64);
-    # the kernel fwhm value for the x and y joint histogram filter
-    fwhm[:] = histo_fwhm
-    for i in loop:
-        # this is the volume subsample
-        step[:] = multires[i]
-        # optfunc_args is specific to the cost_function in this file
-        # this will need to change if you use another optimize_function.
-        optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm,
-                        lite, smhist, method, ret_histo)
-        p_args = (optfunc_args,)
-        if opt_method=='powell':
-            print 'POWELL multi-res registration step size ', step
-            print 'vector ', x
-            x = fmin_powell(optimize_function, x, args=p_args, callback=callback_powell)
-        elif opt_method=='cg':
-            print 'CG multi-res registration step size ', step
-            print 'vector ', x
-            x = fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) 
-        elif opt_method=='hybrid':
-            if i==0:
-                print 'Hybrid POWELL multi-res registration step size ', step
-                print 'vector ', x
-                lite = 0
-                optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm,
-                                lite, smhist, method, ret_histo)
-                p_args = (optfunc_args,)
-                x = fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) 
-            elif i==1:
-                print 'Hybrid CG multi-res registration step size ', step
-                print 'vector ', x
-                lite = 1
-                optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm,
-                                lite, smhist, method, ret_histo)
-                p_args = (optfunc_args,)
-                x = fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) 
-
-    return x
-
-
-def callback_powell(x):
-    """
-    called by optimize.powell only. prints current parameter vector.
-    """
-    print 'Parameter Vector from Powell: - '
-    print x
-    return
-
-def callback_cg(x):
-    """
-    called by optimize.cg only. prints current parameter vector.
-    """
-    print 'Parameter Vector from Conjugate Gradient: - '
-    print x
-    return
-
-def smooth_kernel(fwhm, x, pixel_scale=8.0, ktype=1):
-    """
-    kernel = smooth_kernel(fwhm, x, ktype=1)
-
-    smooth_kernel creates filter kernel based on image sampling parameters.
-    provide domain of kernel and sampling parameters. 
-
-    Parameters 
-    ----------
-    fwhm : {int}
-        used for kernel width
-    x : {nd_array}
-        domain of kernel
-    ktype: {1, 2}, optional
-        kernel type. 1 is Gauss convolved with spline, 2 is Gauss
-
-
-    Returns 
-    -------
-    kernel : {nd_array}
-
-    Examples
-    --------
-
-    >>> import numpy as np
-    >>> import _registration as reg
-    >>> fwhm = 3
-    >>> ftype = 2
-    >>> p = np.ceil(2*fwhm).astype(int)
-    >>> x = np.array(range(-p, p+1))
-    >>> kernel = reg.smooth_kernel(fwhm, x, ktype=ftype)
-    >>> kernel
-
-    array([
-     4.77853772e-06,   1.41575516e-04,   2.26516955e-03,
-     1.95718875e-02,   9.13238336e-02,   2.30120330e-01,
-     3.13144850e-01,   2.30120330e-01,   9.13238336e-02,
-     1.95718875e-02,   2.26516955e-03,   1.41575516e-04,
-     4.77853772e-06])
-
-    """
-    eps = 0.00001
-    s   = np.square((fwhm/math.sqrt(pixel_scale*math.log(2.0)))) + eps
-    if ktype==1:
-        # from SPM: Gauss kernel convolved with 1st degree B spline
-        w1 = 0.5 * math.sqrt(2.0/s)
-        w2 = -0.5 / s
-        w3 = math.sqrt((s*math.pi) /2.0)
-        kernel = 0.5*(erf(w1*(x+1))*(x+1) + erf(w1*(x-1))*(x-1)
-                      - 2.0*erf(w1*(x))*(x) + w3*(np.exp(w2*np.square(x+1))) 
-                      + np.exp(w2*(np.square(x-1)))
-                      - 2.0*np.exp(w2*np.square(x)))
-        kernel[kernel<0] = 0
-        kernel = kernel / kernel.sum()  
-    else:
-        # Gauss kernel 
-        kernel = (1.0/math.sqrt(2.0*math.pi*s)) * np.exp(-np.square(x)/(2.0*s)) 
-        kernel = kernel / kernel.sum()  
-
-    return kernel
-
-def filter_image_3D(imageRaw, fwhm, ftype=2, give_2D=0):
-    """
-    image_F_xyz = filter_image_3D(imageRaw, fwhm, ftype=2)
-    does 3D separable digital filtering using scipy.ndimage.correlate1d
-
-    Parameters 
-    ----------
-    imageRaw : {nd_array}
-        the unfiltered 3D volume image
-    fwhm : {nd_array}
-        used for kernel width. this is 3 elements (one for each dimension)
-    ftype: {1, 2}, optional
-        kernel type. 1 is Gauss convolved with spline (SPM), 2 is Gauss
-    give_2D: {0, 1}, optional
-        if 1, also return the intermediate xy-filtered image
-
-    Returns 
-    -------
-    image_F_xyz : {nd_array}
-        3D filtered volume image
-
-    Examples
-    --------
-
-    >>> import _registration as reg
-    >>> image1, image2, imdata = reg.demo_build_dual_volumes()
-    >>> ftype = 1
-    >>> image_Filter_xyz = filter_image_3D(image1['data'], image1['fwhm'], ftype)
-    >>> image1['data'] = image_Filter_xyz
-    """
-
-    p = np.ceil(2*fwhm).astype(int)
-    x = np.array(range(-p[0], p[0]+1))
-    kernel_x = smooth_kernel(fwhm[0], x, ktype=ftype)
-
-    x = np.array(range(-p[1], p[1]+1))
-    kernel_y = smooth_kernel(fwhm[1], x, ktype=ftype)
-
-    x = np.array(range(-p[2], p[2]+1))
-    kernel_z = smooth_kernel(fwhm[2], x, ktype=ftype)
-
-    output=None
-    # 3D filter in 3 1D separable stages. keep the image
-    # names at each stage separate in case you need them
-    # for example may need an image that is 2D slice filtered only
-    axis = 0
-    image_F_x   = correlate1d(imageRaw,   kernel_x, axis, output)
-    axis = 1
-    image_F_xy  = correlate1d(image_F_x,  kernel_y, axis, output)
-    axis = 2
-    image_F_xyz = correlate1d(image_F_xy, kernel_z, axis, output)
-
-    if give_2D==0:
-        return image_F_xyz  
-    else:
-        return image_F_xyz, image_F_xy
-
-
-def build_fwhm(M, S):
-    """
-    fwhm = build_fwhm(M, S)
-
-    builds the low pass filter kernel sigma value from the image pixel sampling
-
-    Parameters 
-    ----------
-    M : {nd_array}
-        input 4x4 voxel to physical map matrix (called 'MAT')
-
-    S : {nd_array}
-        1x3 sample increment array. should be = (1, 1, 1)
-
-    Returns 
-    -------
-    fwhm : {nd_array}
-        the 3D Gaussian kernel width
-
-    Examples
-    --------
-
-    >>> import numpy as np
-    >>> import _registration as reg
-    >>> anat_desc = reg.load_anatMRI_desc()
-    >>> image1 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img')
-    >>> image1['fwhm'] = reg.build_fwhm(image1['mat'], imdata['step'])
-
-    """
-    # M contains the voxel to physical mapping
-    view_3x3 = np.square(M[0:3, 0:3])
-    # sum the elements in the first row
-    vxg = np.sqrt(view_3x3.sum(axis=1))
-    # assumes that voxel sampling is the same for xyz as S is the step
-    size = np.array([1,1,1])*S[0]
-    x = np.square(size) - np.square(vxg)
-    # clip
-    x[x<0] = 0
-    fwhm = np.sqrt(x) / vxg
-    # pathology when stepsize = 1 for MAT equal to the identity matrix
-    fwhm[fwhm==0] = 1
-    # return the 3D Gaussian kernel width (xyz)
-    return fwhm 
-
-def cost_function(x, optfunc_args):
-    """
-    cost = cost_function(x, optfunc_args)    --- OR ---
-    cost, joint_histogram = cost_function(x, optfunc_args)   
-
-    computes the alignment between 2 volumes using cross correlation or mutual
-    information metrics. In both cases the 8 bit joint histogram of the 2 images is
-    computed. The 8 bit scaling is done using an integrated histogram method (see
-    scale_image) and must be applied before calling this function.
-
-    Parameters 
-    ----------
-    x : {nd_array}
-        this is the current (6-dim) array with 3 angles and 3 translations.
-
-    optfunc_args : {tuple}
-        this is a tuple of 10 elements that is passed through the scipy.optimize powell
-        and cg (conjugate gradient) functions. this is why the elements need to be
-        a tuple. The elements of optfunc_args are: 
-
-        image_F       : {nd_array} 
-            image_F is the source image to be remapped during the registration. 
-        image_F_mat   : {nd_array} 
-            the 4x4 voxel-to-physical MAT of image_F. 
-        image_G       : {nd_array} 
-            image_G is the reference image that image_F gets mapped to. 
-        image_G_mat   : {nd_array} 
-            the 4x4 voxel-to-physical MAT of image_G. 
-        sample_vector : {nd_array} 
-            sample in x,y,z. should be (1,1,1)
-        fwhm          : {nd_array} 
-            Gaussian sigma
-        do_lite       : {0, 1} 
-            lite of 1 is to jitter both images during resampling. 
-            0 is to not jitter. jittering is for non-aliased volumes.
-        smooth        : {0, 1} 
-            flag for joint histogram low pass filtering. 0 for no filter,
-            1 for do filter.
-        method        : {'nmi', 'mi', 'ncc', 'ecc', 'mse'}
-            flag for type of registration metric. nmi is normalized mutual
-            information; mi is mutual information; ecc is entropy cross
-            correlation; ncc is normalized cross correlation. mse is mean
-            square error. with mse there is no joint histogram.
-        ret_histo     : {0, 1} 
-            if 0 return is: cost 
-            if 1 return is: cost, joint_histogram  
-
-    Returns 
-    -------
-        cost : {float}
-            the negative of one of the mutual information metrics
-            or negative cross correlation. use negative as the optimization
-            is minimization.
-
-        --- OR --- (if ret_histo = 1)
-
-        cost : {float}
-            the negative of one of the mutual information metrics
-            or negative cross correlation. use negative as the optimization
-            is minimization.
-
-        joint_histogram : {nd_array}
-            the 2D (256x256) joint histogram of the two volumes
-
-
-    Examples
-    --------
-
-    >>> import numpy as np
-    >>> import _registration as reg
-    >>> anat_desc = reg.load_anatMRI_desc()
-    >>> image1 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img')
-    >>> image2 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img')
-    >>> image1['fwhm'] = reg.build_fwhm(image1['mat'], imdata['step'])
-    >>> image2['fwhm'] = reg.build_fwhm(image2['mat'], imdata['step'])
-    >>> method = 'ncc'
-    >>> lite = 1
-    >>> smhist = 0
-    >>> ret_histo = 1
-    >>> optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo)
-    >>> x = np.zeros(6, dtype=np.float64)
-    >>> cost, joint_histogram = reg.cost_function(x, optfunc_args)
-
-
-    """
-
-    image_F       = optfunc_args[0]
-    image_F_mat   = optfunc_args[1]
-    image_G       = optfunc_args[2]
-    image_G_mat   = optfunc_args[3]
-    sample_vector = optfunc_args[4]
-    fwhm          = optfunc_args[5]
-    do_lite       = optfunc_args[6]
-    smooth        = optfunc_args[7]
-    method        = optfunc_args[8]
-    ret_histo     = optfunc_args[9]
-
-    rot_matrix = build_rotate_matrix(x)
-    cost = 0.0
-    epsilon = 2.2e-16 
-    # image_G is base image
-    # image_F is the to-be-rotated image
-    # rot_matrix is the 4x4 constructed (rigid body) transform matrix
-    # sample_vector is the subsample vector for x-y-z
-
-    F_inv = np.linalg.inv(image_F_mat)
-    composite = np.dot(F_inv, image_G_mat)
-    composite = np.dot(composite, rot_matrix)
-
-    if method == 'mse':
-        #
-        # mean squared error method
-        #
-
-        # allocate the zero image
-        #(layers, rows, cols) = image_F.shape
-        remap_image_F = np.zeros(image_F.shape, dtype=np.uint8)
-        # trilinear interpolation mapping.
-        reg.register_linear_resample(image_F, remap_image_F, composite, sample_vector)
-        cost = (np.square(image_G-remap_image_F)).mean()
-        # cost is min when G and F are aligned so keep cost positive
-
-        return cost
-
-    else:
-        #
-        # histogram-based methods (nmi, ncc, mi, ecc)
-        #
-
-        # allocate memory for 2D histogram
-        joint_histogram = np.zeros([256, 256], dtype=np.float64)
-
-        if do_lite: 
-            reg.register_histogram_lite(image_F, image_G, composite, sample_vector, joint_histogram)
-        else:
-            reg.register_histogram(image_F, image_G, composite, sample_vector, joint_histogram)
-
-        # smooth the histogram
-        if smooth: 
-            p = np.ceil(2*fwhm).astype(int)
-            x = np.array(range(-p, p+1))
-            hkernel = smooth_kernel(fwhm, x)
-            output=None
-            # 2D filter in 1D separable stages using the same kernel. SPM
-            # has options for a 2D fwhm kernel yet only uses 1 element
-            axis = 0
-            joint_histogram = correlate1d(joint_histogram, hkernel, axis, output)
-            axis = 1
-            joint_histogram = correlate1d(joint_histogram, hkernel, axis, output)
-
-        joint_histogram += epsilon # prevent log(0) 
-        # normalize the joint histogram
-        joint_histogram /= joint_histogram.sum() 
-        # get the marginals
-        marginal_col = joint_histogram.sum(axis=0)
-        marginal_row = joint_histogram.sum(axis=1)
-
-        if method == 'mi':
-            # mutual information
-            marginal_outer = np.outer(marginal_col, marginal_row)
-            H = joint_histogram * np.log(joint_histogram / marginal_outer)  
-            mutual_information = H.sum()
-            cost = -mutual_information
-
-        elif method == 'ecc':
-            # entropy correlation coefficient 
-            marginal_outer = np.outer(marginal_col, marginal_row)
-            H = joint_histogram * np.log(joint_histogram / marginal_outer)  
-            mutual_information = H.sum()
-            row_entropy = marginal_row * np.log(marginal_row)
-            col_entropy = marginal_col * np.log(marginal_col)
-            ecc  = -2.0*mutual_information/(row_entropy.sum() + col_entropy.sum())
-            cost = -ecc
-
-        elif method == 'nmi':
-            # normalized mutual information
-            row_entropy = marginal_row * np.log(marginal_row)
-            col_entropy = marginal_col * np.log(marginal_col)
-            H = joint_histogram * np.log(joint_histogram)  
-            nmi  = (row_entropy.sum() + col_entropy.sum()) / (H.sum())
-            cost = -nmi
-
-        elif method == 'ncc':
-            # cross correlation from the joint histogram 
-            r, c = joint_histogram.shape
-            i = np.array(range(1,c+1))
-            j = np.array(range(1,r+1))
-            m1 = (marginal_row * i).sum()
-            m2 = (marginal_col * j).sum()
-            sig1 = np.sqrt((marginal_row*(np.square(i-m1))).sum())
-            sig2 = np.sqrt((marginal_col*(np.square(j-m2))).sum())
-            [a, b] = np.mgrid[1:c+1, 1:r+1]
-            a = a - m1
-            b = b - m2
-            # element multiplies in the joint histogram and grids
-            H = ((joint_histogram * a) * b).sum()
-            ncc  = H / (np.dot(sig1, sig2)) 
-            cost = -ncc
-
-        if ret_histo:
-            return cost, joint_histogram 
-        else:
-            return cost
-
-
-def build_rotate_matrix(img_data_parms):
-    """
-    rot_matrix = reg.build_rotate_matrix(img_data_parms)
-
-    takes the 6 element vector (3 angles, 3 translations) and build the 4x4 mapping matrix
-
-    Parameters 
-    ----------
-    img_data_parms : {nd_array}
-        this is the current (6-dim) array with 3 angles and 3 translations.
-
-    Returns 
-    -------
-    rot_matrix: {nd_array}
-        the 4x4 mapping matrix
-
-    Examples
-    --------
-
-    >>> import numpy as np
-    >>> import _registration as reg
-    >>> x = np.zeros(6, dtype=np.float64)
-    >>> M = reg.build_rotate_matrix(x)
-    >>> M 
-    array([[ 1.,  0.,  0.,  0.],
-           [ 0.,  1.,  0.,  0.],
-           [ 0.,  0.,  1.,  0.],
-           [ 0.,  0.,  0.,  1.]])
-
-
-    """
-
-    R1 = np.zeros([4,4], dtype=np.float64);
-    R2 = np.zeros([4,4], dtype=np.float64);
-    R3 = np.zeros([4,4], dtype=np.float64);
-    T  = np.eye(4, dtype=np.float64);
-
-    alpha = math.radians(img_data_parms[0])
-    beta  = math.radians(img_data_parms[1])
-    gamma = math.radians(img_data_parms[2])
-
-    R1[0][0] = 1.0
-    R1[1][1] = math.cos(alpha)
-    R1[1][2] = math.sin(alpha)
-    R1[2][1] = -math.sin(alpha)
-    R1[2][2] = math.cos(alpha)
-    R1[3][3] = 1.0
-
-    R2[0][0] = math.cos(beta)
-    R2[0][2] = math.sin(beta)
-    R2[1][1] = 1.0
-    R2[2][0] = -math.sin(beta)
-    R2[2][2] = math.cos(beta)
-    R2[3][3] = 1.0
-
-    R3[0][0] = math.cos(gamma)
-    R3[0][1] = math.sin(gamma)
-    R3[1][0] = -math.sin(gamma)
-    R3[1][1] = math.cos(gamma)
-    R3[2][2] = 1.0
-    R3[3][3] = 1.0
-
-    T[0][0] = 1.0
-    T[1][1] = 1.0
-    T[2][2] = 1.0
-    T[3][3] = 1.0
-    T[0][3] = img_data_parms[3]
-    T[1][3] = img_data_parms[4]
-    T[2][3] = img_data_parms[5]
-
-    rot_matrix = np.dot(T, R1);
-    rot_matrix = np.dot(rot_matrix, R2);
-    rot_matrix = np.dot(rot_matrix, R3);
-
-    return rot_matrix
-
-
-def build_gauss_volume(imagedesc, S=[1500.0, 2500.0, 1000.0]):
-
-    """
-    build a 3D Gaussian volume. user passes in image dims in imagedesc.
-    the sigma for each axis is the 3-element S, where index 0=z, 1=y, 2=x
-
-    volume3D = build_gauss_volume(imagedesc, S)
-
-    Parameters 
-    ----------
-    imagedesc : {dictionary}
-        volume dimensions and sampling
-
-    S : {tuple}
-        the Gaussian sigma for Z, Y and X
-
-    Returns 
-    -------
-
-    volume3D : {nd_array}
-        the 3D volume for testing
-
-    """
-    layers = imagedesc['layers']
-    rows   = imagedesc['rows']
-    cols   = imagedesc['cols']
-
-    L = layers/2
-    R = rows/2
-    C = cols/2
-
-    # build coordinates for 3D Gaussian volume
-    # coordinates are centered at (0, 0, 0)
-    [a, b, c] = np.mgrid[-L:L, -R:R, -C:C]
-
-    sigma    = np.array([S[0], S[1], S[2]])
-    aa       = (np.square(a))/sigma[0]
-    bb       = (np.square(b))/sigma[1]
-    cc       = (np.square(c))/sigma[2]
-    volume3D = (255.0*np.exp(-(aa + bb + cc))).astype(np.uint8)
-
-    return volume3D
-
-
-def scale_image(image, max_amp=255, image_type=np.uint8, threshold=0.999, fetch_ih=0):
-
-    """
-    scale and threshold clip the volume using the integrated histogram
-    to set the high threshold
-
-    Parameters 
-    ----------
-    image : {nd_array}
-        raw unscaled volume
-
-    max_amp : int (default 255)
-        the maximum value of the scaled image
-
-    image_type : nd_array dtype (default uint8)
-        the type of the volume to return.
-
-    threshold : float (default 0.999)
-        the value of the normalized integrated histogram
-        that when reached sets the high threshold index
-
-    fetch_ih : {0, 1} (default 0)
-        if 1, also return the integrated histogram ih
-
-    Returns 
-    -------
-    image : {nd_array}
-        the scaled volume
-    ih : {nd_array}
-        the integrated histogram (returned only when fetch_ih == 1). can be used for
-        image display purposes (histogram equalization)
-
-    """
-
-    max = image.max()
-    min = image.min()
-    if max == 0 and min == 0:
-        raise ValueError, "Zero image. cannot be scaled"
-
-    # need range of pixels for the number of bins
-    h, edges = np.histogram(image, bins=(max-min))
-    ih = (np.cumsum(h)).astype(np.float64)
-    # normalize the integrated histogram
-    ih = ih / ih.max()
-    indices = np.where(ih >= threshold)
-    # wind up getting all the indices where the ih >= threshold
-    # and only need the first index. tuple has one nd_array and
-    # get the 0 element from it ([0][0])
-    index   = indices[0][0]
-    scale   = float(max_amp) / (index-min)
-    image   = (scale*(image.astype(np.float)-min))
-    image[image>max_amp] = max_amp
-    # down type. usually will go from float to 8 bit (needed for the 8 bit joint histogram)
-    image = image.astype(image_type)
-
-    if fetch_ih == 1:
-        return image, ih
-    else:
-        return image
-
-
-def check_alignment(image1, image1_mat, image2, image2_mat, histo_fwhm=3, method='ncc', lite=0,
-                    smhist=0, alpha=0.0, beta=0.0, gamma=0.0, Tx=0, Ty=0, Tz=0, ret_histo=0):
-                    
-    """
-    test the cost function and (optionally) return the joint histogram. can be used
-    during intra-modal registration to measure the current alignment (returns
-    the cost, e.g. the cross correlation). typically measured before and after registration.
-
-
-
-    """
-
-    # do the parameter validity checking. this is specific to this 3D registration.
-    # make sure the image is 3D and the mats are 4x4 with nonzero diagonal
-
-    if image1.ndim != 3:
-        raise ValueError, "Image 1 is not 3 dimensional"
-
-    if image2.ndim != 3:
-        raise ValueError, "Image 2 is not 3 dimensional"
-
-    if image1.dtype != np.uint8:
-        raise ValueError, "Image 1 is not 8 bit (required for joint histogram)"
-
-    if image2.dtype != np.uint8:
-        raise ValueError, "Image 2 is not 8 bit (required for joint histogram)"
-
-    if image1_mat.shape != (4,4):
-        raise ValueError, "Image1 MAT is not 4x4"
-
-    if image2_mat.shape != (4,4):
-        raise ValueError, "Image2 MAT is not 4x4"
-
-    if (np.diag(image1_mat)).prod() == 0:
-        raise ValueError, "Image1 MAT has a 0 on the diagonal"
-
-    if (np.diag(image2_mat)).prod() == 0:
-        raise ValueError, "Image2 MAT has a 0 on the diagonal"
-
-    if method != 'nmi' and method != 'mi'  and method != 'ncc'\
-                       and method != 'ecc' and method != 'mse':
-        raise ValueError, "choose cost method nmi, mi, ecc, mse, ncc"
-
-    P    = np.zeros(6, dtype=np.float64);
-    P[0] = alpha
-    P[1] = beta
-    P[2] = gamma
-    P[3] = Tx
-    P[4] = Ty
-    P[5] = Tz
-
-    step = np.array([1, 1, 1], dtype=np.int32)
-    optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm, lite,
-                    smhist, method, ret_histo)
-                        
-    if ret_histo:
-        cost, joint_histogram = cost_function(P, optfunc_args)
-        return cost, joint_histogram 
-    else:
-        cost = cost_function(P, optfunc_args)
-        return cost
-
-
-
-def build_scale_volume(image, mat, scale):
-    #
-    # rescale the 'mat' (voxel to physical mapping matrix) 
-    #
-    M = mat * scale
-    (layers, rows, cols) = image.shape
-    # dimensions 
-    D = np.zeros(3, dtype=np.int32);
-    Z = np.zeros(3, dtype=np.float64);
-    D[0] = rows/scale
-    D[1] = cols/scale
-    D[2] = layers/scale
-    image2 = np.zeros([D[2], D[0], D[1]], dtype=np.uint8)
-    mode = 1;
-    reg.register_volume_resample(image, image2, Z, scale, mode)
-    return image2, M
-
-
-
-
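
The second removed module, _segmenter.py (deleted below), documented a staged Canny edge
detector among other routines. A minimal usage sketch based on its docstrings follows;
build_d_gauss_kernel() is referenced in those docstrings but its definition is not quoted
in this diff, and the input image here is a placeholder:

    import numpy as np
    import scipy.ndimage._segmenter as seg

    slice_2d = np.random.rand(256, 256)       # placeholder 2D image slice
    dg_kernel = seg.build_d_gauss_kernel()    # derivative-of-Gaussian kernel (see docstrings)

    # Canny pipeline: directional filtering, non-max suppression, hysteresis
    horz, vert, img_means = seg.canny_filter(slice_2d, dg_kernel)
    magnitude, canny_stats = seg.canny_nonmax_supress(horz, vert, img_means)
    edge_image = seg.canny_hysteresis(magnitude, canny_stats)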

Deleted: trunk/scipy/ndimage/_segmenter.py
===================================================================
--- trunk/scipy/ndimage/_segmenter.py	2008-08-11 21:39:17 UTC (rev 4637)
+++ trunk/scipy/ndimage/_segmenter.py	2008-08-11 23:08:56 UTC (rev 4638)
@@ -1,1718 +0,0 @@
-import math
-import numpy as NP
-import scipy.ndimage._segment as S
-
-_objstruct = NP.dtype([('Left', 'i'),
-                       ('Right', 'i'),
-                       ('Top', 'i'),
-                       ('Bottom', 'i'),
-                       ('Front', 'i'),
-                       ('Back', 'i'),
-                       ('Label', 'i'),
-                       ('Mass', 'i'),
-                       ('cX', 'f'),
-                       ('cY', 'f'),
-                       ('cZ', 'f'),
-                       ('voxelMean', 'f'),
-                       ('voxelVar', 'f'),
-                       ('COM', 'f', 6),
-                       ('TEM', 'f', 21)]
-                      )
-
-
-# Issue warning regarding heavy development status of this module
-import warnings
-_msg = "The segmentation code is under heavy development and therefore the \
-public API will change in the future.  The NIPY group is actively working on \
-this code, and has every intention of generalizing this for the Scipy \
-community.  Use this module minimally, if at all, until this warning is \
-removed."
-warnings.warn(_msg, UserWarning)
-
-def canny_hysteresis(magnitude, canny_stats):
-    """
-    edge_image = canny_hysteresis(magnitude, canny_stats)
-
-    hysteresis stage of the Canny filter
-
-    Parameters 
-    ----------
-
-    magnitude : {nd_array}
-        the output from the canny_nonmax_supress() method
-
-    canny_stats : {dictionary}
-        contains the low and high thresholds determined from canny_nonmax_supress()
-
-    Returns 
-    ----------
-    edge_image : {nd_array}
-        the labeled edge image that can be displayed and used for later processing
-
-    """
-    [rows, cols] = magnitude.shape
-    edge_image = NP.zeros(rows*cols, dtype=NP.int16).reshape(rows, cols)
-    S.canny_hysteresis(magnitude, edge_image, canny_stats['low'], canny_stats['high'])
-
-    return edge_image
-
-def canny_nonmax_supress(horz_DGFilter, vert_DGFilter, img_means, thres=0.5, 
-                         mode=1, canny_l=0.5, canny_h=0.8):
-    """
-    magnitude, canny_stats = canny_nonmax_supress(horz_DGFilter, vert_DGFilter, img_means,
-                                          thres=0.5, mode=1, canny_l=0.5, canny_h=0.8)
-
-    non-max suppression stage of the Canny filter
-
-    Parameters 
-    ----------
-
-    horz_DGFilter : {nd_array}
-        the horizontally filtered image using the derivative of Gaussian kernel filter.
-        this is the output of the canny_filter method
-
-    vert_DGFilter : {nd_array}
-        the vertical filtered image using the derivative of Gaussian kernel filter.
-        this is the output of the canny_filter method
-
-    img_means : {dictionary}
-        mean X and Y values of edge signals determined from canny_filter
-
-    thres : {float}, optional
-        edge threshold applied to x and y filtered means
-
-    mode : {0, 1}, optional
-        threshold based on histogram mean(0) or mode(1)
-
-    canny_l : {float}, optional
-        low threshold applied to the filtered magnitude 
-    
-    canny_h : {float}, optional
-        high threshold applied to the filtered magnitude 
-
-    Returns 
-    ----------
-
-    magnitude : {nd_array}
-        magnitude of X and Y filtered for critical samples
-
-    canny_stats : {dictionary}
-        mean, low and high to be used for hysteresis
-
-    """
-    [rows, cols] = horz_DGFilter.shape
-    magnitude = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-    aveMag, canny_low, canny_high = S.canny_nonmax_supress(horz_DGFilter, vert_DGFilter,
-                                        magnitude, mode, img_means['x-dg']*thres,
-                                        img_means['y-dg']*thres, canny_l, canny_h)
-
-    canny_stats = {'mean' : aveMag, 'low' : canny_low, 'high' : canny_high} 
-
-    return magnitude, canny_stats
-
-def canny_filter(slice, dg_kernel):
-    """
-    horz_DGFilter, vert_DGFilter, img_means = canny_filter(slice, dg_kernel)
-
-    Canny derivative of Gaussian filter 
-    returns the X and Y filtered images
-
-    Parameters 
-    ----------
-
-    slice : {nd_array}
-        2D image array
-
-    dg_kernel : {dictionary}
-        derivative of Gaussian kernel from build_d_gauss_kernel()
-
-    Returns 
-    ----------
-
-    horz_DGFilter : {nd_array}
-        X filtered image 
-
-    vert_DGFilter : {nd_array}
-        Y filtered image 
-
-    img_means : {dictionary}
-
-
-    """
-    slice = slice.astype(NP.float64)
-    [rows, cols] = slice.shape
-    horz_DGFilter = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-    vert_DGFilter = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-    aveX, aveY = S.canny_filter(slice, horz_DGFilter, vert_DGFilter,
-                   dg_kernel['coefficients'], dg_kernel['kernelSize'])
-
-    img_means = {'x-dg' : aveX, 'y-dg' : aveY} 
-
-    return horz_DGFilter, vert_DGFilter, img_means
-
-
-def binary_edge(label_image, ROI):
-    """
-    binary_edge_image = binary_edge(label_image, ROI)
-
-    takes the ROI dictionary with the blob bounding boxes and generates 
-    the binary edge for each blob. The ROI mask is used to isolate
-    the binary edges for later use (e.g. contour tracing).
-
-    Parameters 
-    ----------
-
-    label_image : {nd_array}
-        an image with labeled regions from get_blobs() method
-
-    ROI : {dictionary}
-        Region of Interest structure that has blob bounding boxes
-
-    Returns 
-    ----------
-
-    binary_edge_image : {nd_array}
-        edge image for each ROI combined into a single image
-
-    """
-
-    [rows, cols]      = label_image.shape
-    binary_edge_image = NP.zeros(rows*cols, dtype=NP.uint16).reshape(rows, cols)
-    number_regions    = ROI.size
-    indices           = range(0, number_regions)
-    for i in indices:
-        left   = ROI[i]['Left']-2
-        right  = ROI[i]['Right']+2
-        bottom = ROI[i]['Bottom']-2
-        top    = ROI[i]['Top']+2
-        Label  = ROI[i]['Label']
-        if left < 0: 
-            left = 0
-        if bottom < 0: 
-            bottom = 0
-        if right > cols-1: 
-            right = cols-1
-        if top > rows-1: 
-            top = rows-1
-
-        roi_rows = top-bottom
-        roi_cols = right-left
-        label_region  = NP.zeros(roi_rows*roi_cols, dtype=NP.uint16).reshape(roi_rows, roi_cols)
-        input = NP.zeros(roi_rows*roi_cols, dtype=NP.uint16).reshape(roi_rows, roi_cols)
-        # load the labeled region 
-        label_region[0:roi_rows, 0:roi_cols][label_image[bottom:top, left:right]==Label] = 1 
-        S.binary_edge(label_region, input)
-        input[0:roi_rows,0:roi_cols][input[0:roi_rows,0:roi_cols]==1] = Label
-        binary_edge_image[bottom:top,left:right] = binary_edge_image[bottom:top,left:right] + \
-                                                               input[0:roi_rows,0:roi_cols] 
-
-    return binary_edge_image
-
-
-def roi_co_occurence(label_image, raw_image, ROI, distance=2, orientation=90, verbose=0):
-    """
-    roi_co_occurence(label_image, raw_image, ROI, distance=2, orientation=90, verbose=0)
-
-    - OR -
-
-    texture_arrays = roi_co_occurence(label_image, raw_image, ROI, distance=2, verbose=1)
-
-    (N-S, E-W, NW-SE, NE-SW) computes one of 4 directional co-occurence matrices and features.
-    With verbose=1 the joint histogram for each ROI is also returned. This is used to compute
-    texture features for a pre-segmented region. The seg_co_occurence() method will be used for
-    texture-based segmentation.
-
-    Parameters 
-    ----------
-
-    label_image : {nd_array}
-        an image with labeled regions from get_blobs() method
-
-    raw_image : {nd_array}
-        raw image from which texture features get extracted 
-
-    distance : {int}
-        integer value of pixel offset in forming joint histogram. default value 2
-
-    orientation : {45, 90, 135, 180}
-        direction for pixel offset.
-
-    ROI : {dictionary}
-        Region of Interest structure that has blob bounding boxes. The largest
-        2D target bounding box is extracted.
-
-
-    Returns 
-    ----------
-
-    co_occurence_images : {dictionary}
-        contains the joint histogram image for each ROI 
-        returned only if verbose=1
-
-    """
-
-    if orientation != 45 and orientation != 90 and orientation != 135 and orientation != 180: 
-        orientation = 90
-
-    epsilon  = 2.2e-16 
-    num_bits = 256
-    copy_image = raw_image.copy()
-    number_regions = ROI.size
-    indices = range(0, number_regions)
-    co_occurence_image_list = {}
-    for i in indices:
-        left   = ROI[i]['Left']
-        right  = ROI[i]['Right']
-        bottom = ROI[i]['Bottom']
-        top    = ROI[i]['Top']
-        Label  = ROI[i]['Label']
-        rows   = top-bottom
-        cols   = right-left
-        # copy the mask to section image
-        section = NP.zeros(rows*cols, dtype=label_image.dtype).reshape(rows, cols)
-        section[0:rows, 0:cols][label_image[bottom:top, left:right]==Label] = 1
-        source_region = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-        cocm_block = NP.zeros(num_bits*num_bits, dtype=NP.int32).reshape(num_bits, num_bits)
-        source_region[0:rows, 0:cols] = copy_image[bottom:top, left:right] 
-        # scale segment to 8 bits. this needs to be smarter (e.g. use integrated histogram method)
-        max_value = source_region.max()
-        min_value = source_region.min()
-        scale = 255.0 / (max_value-min_value)
-        image_roi = (scale*(source_region-min_value)).astype(NP.int16)
-        # image_roi is short type
-        S.roi_co_occurence(section, image_roi, cocm_block, distance, orientation)
-        co_occurence_image_list[i] = cocm_block
-        # normalize the joint histogram prior to feature extraction
-        joint_histogram  = cocm_block.astype(NP.float64) 
-        joint_histogram  = joint_histogram / joint_histogram.sum()
-        # to prevent log(0)
-        joint_histogram += epsilon
-        # compute the com features
-        energy = joint_histogram.std()
-        H = joint_histogram * NP.log(joint_histogram)
-        entropy = H.sum()
-        r, c = joint_histogram.shape
-        [a, b] = NP.mgrid[1:c+1, 1:r+1]
-        contrast = ((NP.square(a-b))*joint_histogram).sum()
-        d = 1.0 + NP.abs(a-b)
-        homogeneity = (joint_histogram / d).sum()
-        ROI[i]['COM'][0] = distance
-        ROI[i]['COM'][1] = orientation 
-        ROI[i]['COM'][2] = energy
-        ROI[i]['COM'][3] = entropy
-        ROI[i]['COM'][4] = contrast
-        ROI[i]['COM'][5] = homogeneity
-
-    if verbose == 1:
-        return co_occurence_image_list
-    else:
-        return
-
-
-
-def region_grow(label_image, raw_image, ROI, roi_index, roi_inflate,
-                low_thresh=0.5, high_thresh=1.5, N_connectivity=3, debug=0):
-    """
-    region_grow(label_image, raw_image, ROI, roi_index, roi_inflate, low_thresh, high_thresh, N_connectivity)
-
-    starting from a seed (masks or regions in the label_image) grow the regions based
-    on (1) connectivity (2D or 3D) and (2) raw voxel values > stop threshold criterion.
-
-    Parameters 
-    ----------
-
-    label_image : {nd_array}
-        an image with labeled regions from get_blobs() method
-
-    raw_image : {nd_array}
-        raw image from which texture features get extracted 
-
-    ROI : {dictionary}
-        Region of Interest structure that has blob bounding boxes. The largest
-        2D target bounding box is extracted.
-
-    roi_index : {int}
-        the single ROI element to apply region growing to.
-
-    roi_inflate : {list}
-        the maximum increase in the ROI bounding box. For 3D the tuple is [layers, rows, cols]
-        and for 2D it is [rows, cols].
-
-    low_thresh : {float}
-        this is the fraction of the voxel mean that the growing region must be GREATER than.
-        region growing terminates when the raw_image is BELOW this value.
-
-    high_thresh : {float}
-        this is the fraction of the voxel mean that the growing region must be LESS than.
-        region growing terminates when the raw_image is ABOVE this value.
-
-    N_connectivity : {int}
-        for growing this indicates how connected in a 3x3 or 3x3x3 window the un-labeled
-        sample is. Make less than full connected for growing boundaries
-
-    Returns 
-    ----------
-
-    label : {nd_array}
-        the label image with the selected ROI after region growing. only returned
-        in debug mode.
-
-    """
-
-    _c_ext_struct = NP.dtype([('Left', 'i'),
-                              ('Right', 'i'),
-                              ('Top', 'i'),
-                              ('Bottom', 'i'),
-                              ('Front', 'i'),
-                              ('Back', 'i'),
-                              ('Label', 'i'),
-                              ('Mass', 'i'),
-                              ('cX', 'f'),
-                              ('cY', 'f'),
-                              ('cZ', 'f')]
-                             )
-
-    expanded_ROI = NP.zeros(1, dtype=_c_ext_struct)
-
-    dimensions  = label_image.ndim
-
-    if dimensions == 3:
-        z_ext = roi_inflate[0]
-        y_ext = roi_inflate[1]
-        x_ext = roi_inflate[2]
-        [layers, rows, cols]  = label_image.shape
-    else:
-        y_ext = roi_inflate[0]
-        x_ext = roi_inflate[1]
-        [rows, cols]  = label_image.shape
-
-    if dimensions == 2:  
-        left    = ROI[roi_index]['Left']-x_ext
-        right   = ROI[roi_index]['Right']+x_ext
-        bottom  = ROI[roi_index]['Bottom']-y_ext
-        top     = ROI[roi_index]['Top']+y_ext
-        Label   = ROI[roi_index]['Label']
-        lcutoff = low_thresh  * ROI[roi_index]['voxelMean']
-        hcutoff = high_thresh * ROI[roi_index]['voxelMean']
-        if left < 0: 
-            left = 0
-        if bottom < 0: 
-            bottom = 0
-        if right > cols-1: 
-            right = cols-1
-        if top > rows-1: 
-            top = rows-1
-        expanded_ROI['Left']   = left 
-        expanded_ROI['Right']  = right 
-        expanded_ROI['Top']    = top 
-        expanded_ROI['Bottom'] = bottom 
-        expanded_ROI['Label']  = Label 
-        rows    = top-bottom
-        cols    = right-left
-        label   = NP.zeros(rows*cols, dtype=NP.int16).reshape(rows, cols)
-        section = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-        label   = label_image[bottom:top, left:right].copy()
-        section = (raw_image[bottom:top, left:right].astype(NP.float64)).copy()
-    elif dimensions == 3:  
-        left    = ROI[roi_index]['Left']-x_ext
-        right   = ROI[roi_index]['Right']+x_ext
-        bottom  = ROI[roi_index]['Bottom']-y_ext
-        top     = ROI[roi_index]['Top']+y_ext
-        front   = ROI[roi_index]['Front']-z_ext
-        back    = ROI[roi_index]['Back']+z_ext
-        Label   = ROI[roi_index]['Label']
-        lcutoff = low_thresh  * ROI[roi_index]['voxelMean']
-        hcutoff = high_thresh * ROI[roi_index]['voxelMean']
-        if left < 0: 
-            left = 0
-        if bottom < 0: 
-            bottom = 0
-        if right > cols-1: 
-            right = cols-1
-        if top > rows-1: 
-            top = rows-1
-        if front < 0: 
-            front = 0
-        if back > layers-1: 
-            back = layers-1
-        expanded_ROI['Left']   = left 
-        expanded_ROI['Right']  = right 
-        expanded_ROI['Top']    = top 
-        expanded_ROI['Bottom'] = bottom 
-        expanded_ROI['Back']   = back 
-        expanded_ROI['Front']  = front 
-        expanded_ROI['Label']  = Label 
-        rows    = top-bottom
-        cols    = right-left
-        layers  = back-front
-        label   = NP.zeros(layers*rows*cols, dtype=NP.int16).reshape(layers, rows, cols)
-        label   = label_image[front:back, bottom:top, left:right].copy()
-        section = NP.zeros(layers*rows*cols, dtype=NP.float64).reshape(layers, rows, cols)
-        section = (raw_image[front:back, bottom:top, left:right].astype(NP.float64)).copy()
-
-    #
-    # this newgrow_ROI gets filled in and the label image is grown
-    #
-
-    newgrow_ROI = NP.zeros(1, dtype=_c_ext_struct)
-    S.region_grow(section, label, expanded_ROI, newgrow_ROI, lcutoff, hcutoff, Label, N_connectivity)
-
-    if debug==1:  
-        #
-        # do not update ROI for index and the label_image 
-        #
-        return label
-
-    else:
-        #
-        # update (overwrite) ROI for index and the label_image 
-        #
-        if dimensions == 2:  
-            ROI[roi_index]['Left']   = newgrow_ROI['Left']
-            ROI[roi_index]['Right']  = newgrow_ROI['Right']
-            ROI[roi_index]['Top']    = newgrow_ROI['Top']
-            ROI[roi_index]['Bottom'] = newgrow_ROI['Bottom']
-            left   = ROI[roi_index]['Left']
-            right  = ROI[roi_index]['Right']
-            top    = ROI[roi_index]['Top']
-            bottom = ROI[roi_index]['Bottom']
-            rows   = top-bottom
-            cols   = right-left
-            label_image[bottom:top,left:right] = label[0:rows,0:cols]
-        elif dimensions == 3:  
-            ROI[roi_index]['Left']   = newgrow_ROI['Left']
-            ROI[roi_index]['Right']  = newgrow_ROI['Right']
-            ROI[roi_index]['Top']    = newgrow_ROI['Top']
-            ROI[roi_index]['Bottom'] = newgrow_ROI['Bottom']
-            ROI[roi_index]['Front']  = newgrow_ROI['Front']
-            ROI[roi_index]['Back']   = newgrow_ROI['Back']
-            left   = expanded_ROI['Left']
-            right  = expanded_ROI['Right']
-            top    = expanded_ROI['Top']
-            bottom = expanded_ROI['Bottom']
-            front  = expanded_ROI['Front']
-            back   = expanded_ROI['Back']
-            rows   = top-bottom
-            cols   = right-left
-            layers = back-front
-            label_image[front:back,bottom:top,left:right] = label[0:layers,0:rows,0:cols]
-                             
-        return 
-
-
-
-def seg_co_occurence(raw_image, window=16, distance=2, orientation=90):
-    """
-    cocm_images = seg_co_occurence(raw_image, window=16, distance=2, orientation=90)
-
-    (N-S, E-W, NW-SE, NE-SW) computes one of 4 directional co-occurence matrices and the
-    associated texture features for each window position.
-
-    The seg_co_occurence() method is used for texture-based segmentation. Feature images are
-    returned from which segmentation can later be performed.
-
-    ****
-    NOTE: This is very slow; a fast method using Unser's histogram approximation will be 
-    added in the future.
-    ****
-
-
-    Parameters 
-    ----------
-
-    raw_image : {nd_array}
-        raw image from which texture features get extracted 
-
-    window : {int}
-        integer half-width of the moving 2D window. The window slides in 2D over the image
-        and is the region-of-interest from which co-occurence texture features are extracted.
-        The window is square so only a single value is entered; the default window=16 gives
-        a 32x32 region. 
-
-    distance : {int}
-        integer value of pixel offset in forming joint histogram. default value 2
-
-    orientation : {45, 90, 135, 180}
-        direction for pixel offset.
-
-    Returns 
-    ----------
-
-    cocm_images : {dictionary}
-        
-        co_occurence_feature_images. contains 4 normalized feature
-        windows with keys: energy, entropy, contrast and homogeneity.
-
-    """
-
-    if orientation != 45 and orientation != 90 and orientation != 135 and orientation != 180: 
-        orientation = 90
-
-    epsilon      = 2.2e-16 
-    num_bits     = 256
-    copy_image   = raw_image.copy()
-    [rows, cols] = copy_image.shape
-    row_indices  = range(window, rows-window)
-    col_indices  = range(window, cols-window)
-
-    # create a fixed mask and scratch window for raw source
-    section       = NP.ones(2*window*2*window, dtype=NP.int16).reshape(2*window, 2*window)
-    source_region = NP.zeros(2*window*2*window, dtype=NP.float64).reshape(2*window, 2*window)
-
-    # output images
-    energy_image      = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-    entropy_image     = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-    homogeneity_image = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-    contrast_image    = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-    cocm_block        = NP.zeros(num_bits*num_bits, dtype=NP.int32).reshape(num_bits, num_bits)
-    
-    for i in row_indices:
-        bottom = i - window
-        top    = i + window
-        for j in col_indices:
-            left  = j - window
-            right = j + window 
-            source_region[0:2*window, 0:2*window] = copy_image[bottom:top, left:right] 
-            # scale segment to 8 bits. this needs to be smarter (e.g. use integrated histogram method)
-            max_value = source_region.max()
-            min_value = source_region.min()
-            scale     = 255.0 / (max_value-min_value)
-            image_roi = (scale*(source_region-min_value)).astype(NP.int16)
-            # image_roi is short type
-            cocm_block[:] = 0.0
-            S.roi_co_occurence(section, image_roi, cocm_block, distance, orientation)
-            # normalize the joint histogram prior to feature extraction
-            joint_histogram = cocm_block.astype(NP.float64) 
-            joint_histogram = joint_histogram / joint_histogram.sum()
-            # to prevent log(0)
-            joint_histogram += epsilon
-            # compute the com features
-            energy      = joint_histogram.std()
-            H           = joint_histogram * NP.log(joint_histogram)
-            entropy     = H.sum()
-            r, c        = joint_histogram.shape
-            [a, b]      = NP.mgrid[1:c+1, 1:r+1]
-            contrast    = ((NP.square(a-b))*joint_histogram).sum()
-            d           = 1.0 + NP.abs(a-b)
-            homogeneity = (joint_histogram / d).sum()
-            # store the feature pixel for the 4 images
-            energy_image[i, j]      = energy
-            entropy_image[i, j]     = entropy
-            contrast_image[i, j]    = contrast
-            homogeneity_image[i, j] = homogeneity
-
-    scale_energy      = 1.0 / max(energy_image.max(), abs(energy_image.min()))
-    scale_entropy     = 1.0 / max(entropy_image.max(), abs(entropy_image.min()))
-    scale_contrast    = 1.0 / max(contrast_image.max(), abs(contrast_image.min()))
-    scale_homogeneity = 1.0 / max(homogeneity_image.max(), abs(homogeneity_image.min()))
-
-    energy_image      = scale_energy      * energy_image
-    entropy_image     = scale_entropy     * entropy_image
-    homogeneity_image = scale_homogeneity * homogeneity_image
-    contrast_image    = scale_contrast    * contrast_image
-
-    cocm_images = {'energy_image' : energy_image,  'entropy_image' : entropy_image, 
-                   'homogeneity_image' : homogeneity_image,  'contrast_image' : contrast_image} 
-
-    return cocm_images 
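
For readers who want the per-window feature math without the removed C extension, here is a
minimal numpy-only sketch of the same computation on a single window. The helper name, the
fixed E-W offset and the random test patch are illustrative assumptions, not part of the
module above.

    import numpy as np

    def cooccurrence_features(patch, distance=2, num_bits=256, eps=2.2e-16):
        # joint histogram of pixel pairs offset horizontally by `distance` (E-W direction only)
        left  = patch[:, :-distance].ravel()
        right = patch[:, distance:].ravel()
        hist, _, _ = np.histogram2d(left, right, bins=num_bits,
                                    range=[[0, num_bits], [0, num_bits]])
        p = hist / hist.sum() + eps                    # normalize and guard against log(0)
        a, b = np.mgrid[1:num_bits + 1, 1:num_bits + 1]
        return {'energy'      : p.std(),               # std of the histogram, as in the loop above
                'entropy'     : (p * np.log(p)).sum(),
                'contrast'    : (np.square(a - b) * p).sum(),
                'homogeneity' : (p / (1.0 + np.abs(a - b))).sum()}

    patch = (255 * np.random.rand(32, 32)).astype(np.int16)   # stand-in for one 32x32 window
    print(cooccurrence_features(patch))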
-
-
-def roi_mat_filter(label_image, thin_kernel, ROI):
-    """
-    thin_edge_image = roi_mat_filter(label_image, thin_kernel, ROI)
-
-    gets the largest object in the ROI list and returns the medial axis for
-    that object. The idea is that the largest object serves as a reference,
-    e.g. the skull in an anatomical MRI.
-
-    Parameters 
-    ----------
-
-    label_image : {nd_array}
-        an image with labeled regions from get_blobs() method
-
-    thin_kernel : {dictionary}
-        set of 8 'J' and 'K' 3x3 masks from build_morpho_thin_masks() method
-
-    ROI : {dictionary}
-        Region of Interest structure that has blob bounding boxes. The largest
-        2D target bounding box is extracted.
-
-    Returns 
-    ----------
-
-    thin_edge_image : {nd_array}
-        thinned edge image for the largest object.
-
-    """
-
-
-    [rows, cols] = label_image.shape
-    # destination image
-    thin_edge_image = NP.zeros(rows*cols, dtype=NP.uint16).reshape(rows, cols)
-    # scratch memory for thin 
-    input           = NP.zeros(rows*cols, dtype=NP.uint8).reshape(rows, cols)
-    cinput          = NP.zeros(rows*cols, dtype=NP.uint8).reshape(rows, cols)
-    erosion         = NP.zeros(rows*cols, dtype=NP.uint8).reshape(rows, cols)
-    dialation       = NP.zeros(rows*cols, dtype=NP.uint8).reshape(rows, cols)
-    hmt             = NP.zeros(rows*cols, dtype=NP.uint8).reshape(rows, cols)
-    copy            = NP.zeros(rows*cols, dtype=NP.uint8).reshape(rows, cols)
-
-    bbox = get_max_bounding_box(ROI)
-
-    left     = bbox['Left']-1
-    right    = bbox['Right']+1
-    bottom   = bbox['Bottom']-1
-    top      = bbox['Top']+1
-    Label    = bbox['Label']
-
-    if left < 0: 
-        left = 0
-    if bottom < 0: 
-        bottom = 0
-    if right > cols-1: 
-        right = cols-1
-    if top > rows-1: 
-        top = rows-1
-
-    inflate  = 1
-    roi_rows = top-bottom+2*inflate
-    roi_cols = right-left+2*inflate
-    rgrows   = top-bottom
-    rgcols   = right-left
-    # clear the memory
-    input[0:roi_rows, 0:roi_cols] = 0
-    # load the labeled region 
-    input[inflate:inflate+rgrows, inflate:inflate+rgcols] \
-         [label_image[bottom:top, left:right]==Label] = 1 
-    # thin this region
-    S.thin_filter(thin_kernel['jmask'], thin_kernel['kmask'], thin_kernel['number3x3Masks'],
-                  roi_rows, roi_cols, cols, input, cinput, erosion, dialation, hmt, copy)
-
-    # accumulate the images (do not over-write). for overlapping regions
-    input[inflate:rgrows+inflate,inflate:rgcols+inflate] \
-         [input[inflate:rgrows+inflate,inflate:rgcols+inflate]==1] = Label 
-    thin_edge_image[bottom:top,left:right] = input[inflate:rgrows+inflate,inflate:rgcols+inflate] 
-
-    return thin_edge_image
-
-def mat_filter(label_image, thin_kernel, ROI=None):
-    """
-    mat_image = mat_filter(label_image, thin_kernel, ROI=None)
-
-    takes the ROI dictionary with the blob bounding boxes and thins each blob
-    giving the medial axis. if ROI is null will create a single ROI with the
-    bounding box set equal to the full image
-
-    Parameters 
-    ----------
-
-    label_image : {nd_array}
-        an image with labeled regions from get_blobs() method
-
-    thin_kernel : {dictionary}
-        set of 8 'J' and 'K' 3x3 masks from build_morpho_thin_masks() method
-
-    ROI : {dictionary}
-        Region of Interest structure that has blob bounding boxes
-
-    Returns 
-    ----------
-
-    mat_image : {nd_array}
-        thinned edge image
-
-    """
-    if ROI==None:
-        ROI = NP.zeros(1, dtype=_objstruct)
-        [rows, cols] = label_image.shape
-        ROI['Left']   = 2
-        ROI['Right']  = cols-3
-        ROI['Bottom'] = 2
-        ROI['Top']    = rows-3
-
-    [rows, cols] = label_image.shape
-    # destination image
-    thin_edge_image = NP.zeros(rows*cols, dtype=NP.uint16).reshape(rows, cols)
-    mat_image       = NP.zeros(rows*cols, dtype=NP.uint16).reshape(rows, cols)
-    # scratch memory for thin 
-    input           = NP.zeros(rows*cols, dtype=NP.uint8).reshape(rows, cols)
-    cinput          = NP.zeros(rows*cols, dtype=NP.uint8).reshape(rows, cols)
-    erosion         = NP.zeros(rows*cols, dtype=NP.uint8).reshape(rows, cols)
-    dialation       = NP.zeros(rows*cols, dtype=NP.uint8).reshape(rows, cols)
-    hmt             = NP.zeros(rows*cols, dtype=NP.uint8).reshape(rows, cols)
-    copy            = NP.zeros(rows*cols, dtype=NP.uint8).reshape(rows, cols)
-
-    number_regions = ROI.size
-    indices = range(0, number_regions)
-    inflate = 1
-    for i in indices:
-        left     = ROI[i]['Left']-1
-        right    = ROI[i]['Right']+1
-        bottom   = ROI[i]['Bottom']-1
-        top      = ROI[i]['Top']+1
-        Label    = ROI[i]['Label']
-        if left < 0: 
-            left = 0
-        if bottom < 0: 
-            bottom = 0
-        if right > cols-1: 
-            right = cols-1
-        if top > rows-1: 
-            top = rows-1
-
-        roi_rows = top-bottom+2*inflate
-        roi_cols = right-left+2*inflate
-        rgrows   = top-bottom
-        rgcols   = right-left
-        # clear the memory
-        input[0:roi_rows, 0:roi_cols] = 0
-        # load the labeled region 
-        input[inflate:inflate+rgrows, inflate:inflate+rgcols] \
-             [label_image[bottom:top, left:right]==Label] = 1 
-        # thin this region
-        S.thin_filter(thin_kernel['jmask'], thin_kernel['kmask'], thin_kernel['number3x3Masks'],
-                      roi_rows, roi_cols, cols, input, cinput, erosion, dialation, hmt, copy)
-
-        # accumulate the images (do not over-write). for overlapping regions
-        input[inflate:rgrows+inflate,inflate:rgcols+inflate] \
-             [input[inflate:rgrows+inflate,inflate:rgcols+inflate]==1] = Label 
-        thin_edge_image[bottom:top,left:right] = thin_edge_image[bottom:top,left:right] + \
-                                                 input[inflate:rgrows+inflate,inflate:rgcols+inflate] 
-            
-
-    # accumulate overlaps set back to binary at later date
-    mat_image[:, :] = thin_edge_image[:, :]
-
-    return mat_image
-
-
-def laws_texture_filter(raw_image, label_image, laws_kernel, ROI=None, dc_thres=1.0,
-                        mean_feature=1, verbose=0):
-    """
-    texture_images = laws_texture_filter(raw_image, label_image, laws_kernel, ROI=None, verbose=1)
-
-        OR
-
-    laws_texture_filter(raw_image, label_image, laws_kernel, ROI=None, verbose=0)
-
-    Parameters 
-    ----------
-
-    raw_image : {nd_array}
-        raw double image 
-
-    label_image : {nd_array}
-        an image with labeled regions from get_blobs() method
-
-    laws_kernel : {dictionary}
-        set of 6 length-7 Law's texture feature kernels 
-
-    ROI : {dictionary}
-        Region of Interest structure that has blob bounding boxes
-
-    dc_thres : {float}
-        used as a filter. Sets texture feature to 0.0 when the 
-        mean level is above this. Removes the low frequency, high amplitude
-        image regions from the feature list
-
-    mean_feature : {0, 1}, optional
-        when set to 1, the feature is the mean value of the
-        selected Law's texture filter. When 0 the feature is
-        the standard deviation.
-
-    verbose : {0, 1}, optional
-        determines if return is to include Law's filter images
-
-    Returns 
-    ----------
-
-    laws_image_list : {dictionary}
-        contains the stack of 21 Laws filtered regions for each ROI.
-        returned only if verbose=1
-
-
-    """
-    if ROI==None:
-        ROI= NP.zeros(1, dtype=_objstruct)
-        [rows, cols] = label_image.shape
-        ROI['Left']   = 2
-        ROI['Right']  = cols-3
-        ROI['Bottom'] = 2
-        ROI['Top']    = rows-3
-
-    laws_image_list = {}
-    number_regions  = ROI.size
-    layers          = laws_kernel['filters']
-    indices         = range(0, number_regions)
-    filters         = range(0, layers)
-    for i in indices:
-        left   = ROI[i]['Left']
-        right  = ROI[i]['Right']
-        bottom = ROI[i]['Bottom']
-        top    = ROI[i]['Top']
-        Label  = ROI[i]['Label']
-        rows   = top-bottom
-        cols   = right-left
-        label_region  = NP.zeros(rows*cols, dtype=NP.uint16).reshape(rows, cols)
-        source_region = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-        laws_block    = NP.zeros(layers*rows*cols, dtype=NP.float32).reshape(layers, rows, cols)
-        # load the labeled region 
-        label_region[0:rows,  0:cols][label_image[bottom:top, left:right]==Label] = 1 
-        source_region[0:rows, 0:cols] = raw_image[bottom:top, left:right] 
-
-        S.laws_texture_metric(label_region, source_region, laws_block, laws_kernel['numKernels'],
-                              laws_kernel['kernelSize'], laws_kernel['filters'],
-                              laws_kernel['coefficients'][0], laws_kernel['coefficients'][1],
-                              laws_kernel['coefficients'][2], laws_kernel['coefficients'][3],
-                              laws_kernel['coefficients'][4], laws_kernel['coefficients'][5])
-
-        for j in filters:
-            # compute the energy measure for each filter in the ROI
-            mask_image = laws_block[j, :, :][label_region[:, :]>0]
-            mean = abs(mask_image.mean())
-            std  = mask_image.std()
-            if mean > dc_thres:
-                mean = 0.0
-                std = 0.0
-            if mean_feature == 1:
-                ROI[i]['TEM'][j] = mean 
-            else:
-                ROI[i]['TEM'][j] = std 
-
-        ROI[i]['TEM'][:] = ROI[i]['TEM'][:] / ROI[i]['TEM'][:].max() 
-        # accumulate the 21 Law's filtered ROI's and optional
-        # return as image (3D)
-        laws_image_list[i] = laws_block
-
-    if verbose == 1:
-        return laws_image_list
-    else:
-        return 
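
The texture-energy-measure idea above (mean or standard deviation of a Laws filter response
inside a labeled region) can be approximated with public numpy/scipy calls. This is a rough
sketch, not a reproduction of the C extension; the toy arrays and the single E x E mask are
illustrative.

    import numpy as np
    from scipy import ndimage

    raw   = np.random.rand(64, 64)                 # toy raw image
    label = np.zeros((64, 64), dtype=np.uint16)
    label[16:48, 16:48] = 1                        # one labeled region

    E = np.array([-1.0, -4.0, -5.0, 0.0, 5.0, 4.0, 1.0])   # 'E' coefficients from build_laws_kernel()
    mask7 = 2.0 * np.outer(E, E)                   # one 7x7 Laws mask

    response = ndimage.convolve(raw, mask7, mode='constant')
    inside   = response[label > 0]
    print(abs(inside.mean()), inside.std())        # the two feature choices selected by mean_feature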
-
-
-
-def get_voxel_measures(label_image, raw_image, ROI=None):
-    """
-    get_voxel_measures(label_image, raw_image, ROI=None)
-
-    takes the ROI dictionary with the blob bounding boxes and computes the voxel
-    measures (mean and standard deviation) for each ROI from the raw data. the results
-    are stored in the 'voxelMean' and 'voxelVar' fields of each ROI.
-
-    Parameters 
-    ----------
-
-    label_image : {nd_array}
-        an image with labeled regions from get_blobs() method
-
-    raw_image : {nd_array}
-        the original double image (raw voxels) from which voxel measures are made
-
-    ROI : {dictionary}
-        Region of Interest structure that has blob bounding boxes
-
-
-    Returns 
-    ----------
-
-    none
-
-    """
-
-    dimensions = label_image.ndim
-
-    if ROI==None:
-        ROI = NP.zeros(1, dtype=_objstruct)
-        if dimensions == 2:
-            [rows, cols] = label_image.shape
-            ROI['Left']   = 1
-            ROI['Right']  = cols-1
-            ROI['Bottom'] = 1
-            ROI['Top']    = rows-1
-        elif dimensions == 3:
-            [layers, rows, cols] = label_image.shape
-            ROI['Left']   = 1
-            ROI['Right']  = cols-1
-            ROI['Bottom'] = 1
-            ROI['Top']    = rows-1
-            ROI['Front']  = 1
-            ROI['Back']   = layers-1
-
-    number_regions = ROI.size
-    indices = range(0, number_regions)
-    inflate = 1
-    for i in indices:
-        if dimensions == 2:  
-            left   = ROI[i]['Left']
-            right  = ROI[i]['Right']
-            bottom = ROI[i]['Bottom']
-            top    = ROI[i]['Top']
-            Label  = ROI[i]['Label']
-            rows   = top-bottom-1
-            cols   = right-left-1
-            section= NP.zeros(rows*cols, dtype=raw_image.dtype).reshape(rows, cols)
-            section = raw_image[bottom:top, left:right] \
-                               [label_image[bottom:top, left:right]==Label]
-        elif dimensions == 3:  
-            left   = ROI[i]['Left']
-            right  = ROI[i]['Right']
-            bottom = ROI[i]['Bottom']
-            top    = ROI[i]['Top']
-            front  = ROI[i]['Front']
-            back   = ROI[i]['Back']
-            Label  = ROI[i]['Label']
-            rows   = top-bottom-1
-            cols   = right-left-1
-            layers = back-front-1
-            section= NP.zeros(layers*rows*cols, dtype=raw_image.dtype).reshape(layers, rows, cols)
-            section = raw_image[front:back, bottom:top, left:right] \
-                               [label_image[front:back, bottom:top, left:right]==Label]
-
-        mask = section[section>0]
-        ROI[i]['voxelMean'] = mask.mean()
-        ROI[i]['voxelVar']  = mask.std()
-
-    return 
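
Equivalent per-blob statistics are available from the public scipy.ndimage API; a small
sketch on a toy labeled image (the toy arrays are illustrative):

    import numpy as np
    from scipy import ndimage

    raw    = np.zeros((64, 64), dtype=np.float64)
    labels = np.zeros((64, 64), dtype=np.int32)
    raw[5:20, 5:20],   labels[5:20, 5:20]   = 80.0, 1    # blob 1
    raw[30:50, 30:50], labels[30:50, 30:50] = 110.0, 2   # blob 2

    index = [1, 2]
    means = ndimage.mean(raw, labels=labels, index=index)
    stds  = ndimage.standard_deviation(raw, labels=labels, index=index)
    print(means, stds)    # flat toy blobs: means (80, 110), standard deviations (0, 0)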
-
-
-def get_blob_regions(labeled_image, groups, dust=16):
-    """
-    ROIList = get_blob_regions(labeled_image, groups, dust=16)
-
-    get the bounding box for each labelled blob in the image and allocate the
-    dictionary structure that gets filled in by later stage processing to add
-    blob features.
-
-    Parameters 
-    ----------
-
-    label_image : {nd_array}
-        a 2D or 3D image with labeled regions from get_blobs() method
-
-    groups : {int}
-        number of blobs in image determined by get_blobs() method
-
-    dust : {int}, optional
-        minimum blob 'Mass'. blobs with Mass less than or equal to dust are
-        discarded. default value 16
-
-    Returns 
-    ----------
-
-    ROIList : {dictionary}
-        structure that has the bounding box and area of each blob
-
-
-
-    """
-
-    _c_ext_struct = NP.dtype([('Left', 'i'),
-                              ('Right', 'i'),
-                              ('Top', 'i'),
-                              ('Bottom', 'i'),
-                              ('Front', 'i'),
-                              ('Back', 'i'),
-                              ('Label', 'i'),
-                              ('Mass', 'i'),
-                              ('cX', 'f'),
-                              ('cY', 'f'),
-                              ('cZ', 'f')]
-                             )
-
-    c_ext_ROI = NP.zeros(groups, dtype=_c_ext_struct)
-    ROIList = NP.zeros(groups, dtype=_objstruct)
-    # return the bounding box for each connected edge
-    S.get_blob_regions(labeled_image, c_ext_ROI)
-
-    indices = range(0, groups)
-    for i in indices:
-        ROIList[i]['Left']   = c_ext_ROI[i]['Left']
-        ROIList[i]['Right']  = c_ext_ROI[i]['Right']
-        ROIList[i]['Bottom'] = c_ext_ROI[i]['Bottom']
-        ROIList[i]['Top']    = c_ext_ROI[i]['Top']
-        ROIList[i]['Front']  = c_ext_ROI[i]['Front']
-        ROIList[i]['Back']   = c_ext_ROI[i]['Back']
-        ROIList[i]['Label']  = c_ext_ROI[i]['Label']
-        ROIList[i]['Mass']   = c_ext_ROI[i]['Mass']
-        ROIList[i]['cX']     = c_ext_ROI[i]['cX']
-        ROIList[i]['cY']     = c_ext_ROI[i]['cY']
-        ROIList[i]['cZ']     = c_ext_ROI[i]['cZ']
-
-    return ROIList[ROIList['Mass']>dust]
-
-
-def get_blobs(binary_edge_image, mask=1):
-    """
-
-    labeled_edge_image, groups = get_blobs(binary_edge_image)
-
-    get the total number of blobs in a 2D or 3D image and convert the 
-    binary image (or volume) to labelled regions
-
-    Parameters 
-    ----------
-
-    binary_edge_image : {nd_array}
-        a binary image/volume
-
-    mask : {int}
-        the size of the 2D or 3D connectivity mask. For 2D this is 1, 4 or 8.
-        For 3D this is 1, 6, 14 or 28. Mask = 1 is ANY connection in 3x3
-        or 3x3x3 mask for 2D or 3D, respectively.
-
-    Returns 
-    ----------
-
-    label_image : {nd_array}
-        an image/volume with labeled regions from get_blobs() method
-
-    groups : {int}
-        number of blobs in image determined by get_blobs() method
-
-    """
-
-    dimensions = binary_edge_image.ndim
-    if dimensions == 2:  
-        if mask != 1 and mask != 4 and mask != 8:
-            mask = 1 
-        [rows, cols] = binary_edge_image.shape
-        labeled_edge_image_or_vol = NP.zeros(rows*cols, dtype=NP.uint16).reshape(rows, cols)
-    elif dimensions == 3:
-        if mask != 1 and mask != 6 and mask != 14 and mask != 28:
-            mask = 1 
-        [layers, rows, cols] = binary_edge_image.shape
-        labeled_edge_image_or_vol = NP.zeros(layers*rows*cols, dtype=NP.uint16).reshape(layers, rows, cols)
-    else:
-        labeled_edge_image_or_vol = None
-        groups = 0
-        return labeled_edge_image_or_vol, groups
-
-    groups = S.get_blobs(binary_edge_image, labeled_edge_image_or_vol, mask)
-
-    return labeled_edge_image_or_vol, groups
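
The removed C extension performs its own connected-component labeling; roughly the same
result is available from the public scipy.ndimage API, which also returns the bounding boxes
that get_blob_regions() extracts above. A minimal sketch (the toy edge image is illustrative):

    import numpy as np
    from scipy import ndimage

    binary_edges = np.zeros((64, 64), dtype=np.uint8)
    binary_edges[10:20, 10:20] = 1                      # blob 1
    binary_edges[40:55, 30:45] = 1                      # blob 2

    # 8-connected labeling; a 3x3 structure of ones corresponds to mask=1 above
    structure = np.ones((3, 3), dtype=np.int32)
    labeled, groups = ndimage.label(binary_edges, structure=structure)
    boxes = ndimage.find_objects(labeled)               # one (slice, slice) pair per label
    print(groups, boxes)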
-
-
-def sobel_edges(sobel_edge_image, sobel_stats, mode=1, sobel_threshold=0.3):
-    """
-    sobel_edge = sobel_edges(sobel_edge_image, sobel_stats, mode=1, sobel_threshold=0.3)
-    take a sobel-filtered image and return binary edges
-
-    Parameters 
-    ----------
-
-    sobel_edge_image : {nd_array}
-        edge-filtered image from sobel_image() method
-
-    sobel_stats : {dictionary}
-        mean and nonzero min, max of sobel filtering
-
-    mode : {0, 1}, optional
-        threshold based on histogram mean(0) or mode(1)
-
-    sobel_threshold : {float}, optional
-        low threshold applied to edge filtered image for edge generation
-
-    Returns 
-    ----------
-
-    sobel_edge : {nd_array}
-        binary edge-image
-
-    """
-    [rows, cols] = sobel_edge_image.shape
-    sobel_edge = NP.zeros(rows*cols, dtype=NP.uint16).reshape(rows, cols)
-    S.sobel_edges(sobel_edge_image, sobel_edge, sobel_stats['ave_gt0'], sobel_stats['min_gt0'],
-                  sobel_stats['max_gt0'], mode, sobel_threshold)
-
-    return sobel_edge
-
-
-def sobel_image(filtered_slice):
-    """
-    sobel_edge_image, sobel_stats = sobel_image(filtered_slice)
-
-    take a 2D raw or filtered image slice and return the sobel-filtered image
-
-    Parameters 
-    ----------
-
-    filtered_slice : {nd_array}
-        raw or pre-processed (filtered and thresholded) 2D image 
-
-    Returns 
-    ----------
-
-    sobel_edge_image : {nd_array}
-        edge-filtered image from sobel_image() method
-
-    sobel_stats : {dictionary}
-        mean and nonzero min, max of sobel filtering
-
-    """
-    filtered_slice = filtered_slice.astype(NP.float64)
-    [rows, cols] = filtered_slice.shape
-    sobel_edge_image = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-    pAve, min_value, max_value = S.sobel_image(filtered_slice, sobel_edge_image)
-
-    # can replace this with numpy calls for the image stats. but C ext is faster
-    #   S.sobel_image(filtered_slice, sobel_edge_image)
-    #   sobel_mask = sobel_edge_image[sobel_edge_image>0]
-    #   pAve = sobel_mask.mean()
-    #   min_value = sobel_mask.min()
-    #   max_value = sobel_mask.max()
-
-    sobel_stats= {'ave_gt0' : pAve, 'min_gt0': min_value, 'max_gt0': max_value} 
-
-    return sobel_edge_image, sobel_stats
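
The commented-out block above sketches the numpy route for the statistics; the edge magnitude
itself can also be computed with the public scipy.ndimage.sobel call. A rough sketch (the
absolute scaling may differ from the C implementation, and the function name is illustrative):

    import numpy as np
    from scipy import ndimage

    def sobel_image_np(filtered_slice):
        filtered_slice = filtered_slice.astype(np.float64)
        dx = ndimage.sobel(filtered_slice, axis=1)
        dy = ndimage.sobel(filtered_slice, axis=0)
        edge = np.hypot(dx, dy)
        mask = edge[edge > 0]
        stats = {'ave_gt0': mask.mean(), 'min_gt0': mask.min(), 'max_gt0': mask.max()}
        return edge, stats

    edge, stats = sobel_image_np(np.random.rand(64, 64))
    print(stats)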
-
-def pre_filter(slice, filter, low_threshold=0, high_threshold=0, conv_binary=0):
-    """
-    edge_image = pre_filter(slice, filter, low_threshold=0, high_threshold=0, conv_binary=0)
-
-    take a 2D image slice, pre-filter it with the supplied kernel and threshold it prior
-    to segmentation. a high_threshold of 0 (the default) is replaced by the maximum value
-    of the image.
-
-    Parameters 
-    ----------
-    slice : {nd_array}
-        input 2D image. gets cast to int16
-
-    filter : {dictionary}
-        2D filter kernel set from build_2d_kernel()
-
-    low_threshold : {int}, optional
-        default 0
-
-    high_threshold : {int}, optional
-        default 0, which is replaced by the maximum value of the source image
-
-    conv_binary : {0, 1}, optional
-        flag to convert edge_filter image to binary valued. default 
-        is binary conversion off
-
-    Returns 
-    ----------
-    edge_image : {nd_array}
-        filtered and thresholded image. binary valued when conv_binary is 1.
-
-    """
-
-    # make sure the input is 16 bits. this is input to edge machine
-    # so can handle raw and 8 bit scaled inputs
-    if high_threshold==0:
-        # default to the maximum value of the image
-        high_threshold = slice.max()
-
-    slice = slice.astype(NP.int16)
-    [rows, cols] = slice.shape
-    edge_image = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-    S.edge_prefilter(low_threshold, high_threshold, filter['kernelSize'], filter['kernel'],
-                     slice, edge_image)
-
-    if conv_binary == 1:
-        edge_image[edge_image>0] = 1
-        edge_image = edge_image.astype(NP.uint16)
-
-    return edge_image
-
-def get_max_bounding_box(ROI):
-    """
-    bounding_box = get_max_bounding_box(ROI)
-
-    take an ROI structure and find the bounding box of the blob with the largest mass
-
-    Parameters 
-    ----------
-
-    ROI : {dictionary}
-        the ROI is the automatically extracted blob regions of interest
-        and contains the rectangular bounding box of each blob.
-
-    Returns 
-    ----------
-
-    bounding_box : {dictionary}
-        the Left, Right, Top, Bottom and Label of the LARGEST bounding box in the ROI
-
-    """
-    max_index = ROI[:]['Mass'].argmax()
-    bounding_box = {'Left' : ROI[max_index]['Left'], 'Right' : ROI[max_index]['Right'],
-                    'Top' : ROI[max_index]['Top'], 'Bottom' : ROI[max_index]['Bottom'],
-                    'Label' : ROI[max_index]['Label']} 
-
-    return bounding_box 
-
-def get_all_bounding_boxes(ROI):
-    """
-    measures = get_all_bounding_boxes(ROI)
-
-    get all bounding boxes in the ROI (feature) dictionary
-
-    Parameters 
-    ----------
-
-    ROI : {dictionary}
-        the ROI is the automatically extracted blob regions of interest
-        and contains the rectangular bounding box of each blob.
-
-    Returns 
-    ----------
-
-    measures : {dictionary}
-        the Left, Right, Top and Bottom of all bounding boxes in the ROI
-
-    """
-
-    number = ROI.size
-    indices = range(0, ROI.size)
-    _shortstruct = NP.dtype([('left', 'i'),
-                             ('right', 'i'),
-                             ('top', 'i'),
-                             ('bottom', 'i')])
-    measures = NP.zeros(number, dtype=_shortstruct)
-    for i in indices:
-        measures[i]['left']   = ROI[i]['Left']
-        measures[i]['right']  = ROI[i]['Right']
-        measures[i]['top']    = ROI[i]['Top']
-        measures[i]['bottom'] = ROI[i]['Bottom']
-
-    return measures
-
-
-def build_2d_kernel(aperature=21, hiFilterCutoff=10.0):
-    """
-
-    FIRFilter = build_2d_kernel(aperature, hiFilterCutoff)
-
-    build hamming-windowed FIR filter with sinc kernel
-
-    Parameters 
-    ----------
-
-    aperature : {int}, optional
-        the number of coefficients in the filter. default is 21. needs to be ODD
-
-    hiFilterCutoff : {float}
-        the upper cutoff in digital frequency units
-
-    Returns 
-    ----------
-
-    FIRFilter : {dictionary}
-        filter kernel
-
-    """
-
-    rad = math.pi / 180.0
-    HalfFilterTaps = (aperature-1) / 2
-    kernel = NP.zeros((aperature), dtype=NP.float64)
-    LC = 0.0
-    HC = hiFilterCutoff * rad 
-    t2 = 2.0 * math.pi
-    t1 = 2.0 * HalfFilterTaps + 1.0
-    indices = range(-HalfFilterTaps, HalfFilterTaps+1, 1)  
-    j = 0
-    for i in indices:
-        if i == 0:
-            tLOW  = LC
-            tHIGH = HC
-        else:
-            tLOW  = math.sin(i*LC)/i
-            tHIGH = math.sin(i*HC)/i
-        # Hamming window
-        t3 = 0.54 + 0.46*(math.cos(i*t2/t1))
-        t4 = t3*(tHIGH-tLOW)
-        kernel[j] = t4
-        j += 1
-
-    # normalize the kernel
-    sum = kernel.sum()
-    kernel /= sum
-
-    FIRFilter= {'kernelSize' : HalfFilterTaps, 'kernel': kernel} 
-
-    return FIRFilter
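
A vectorized numpy sketch of the same kernel construction, for reference; the function and
argument names are illustrative and the low cutoff is fixed at 0 as in the loop above.

    import numpy as np

    def hamming_sinc_kernel(aperture=21, hi_filter_cutoff=10.0):
        M  = (aperture - 1) // 2
        hc = np.deg2rad(hi_filter_cutoff)
        i  = np.arange(-M, M + 1, dtype=np.float64)
        # sinc term: sin(i*HC)/i, with the i == 0 limit equal to HC (low cutoff is 0)
        sinc = np.where(i == 0, hc, np.sin(i * hc) / np.where(i == 0, 1.0, i))
        # Hamming window: 0.54 + 0.46*cos(2*pi*i / (2*M + 1))
        window = 0.54 + 0.46 * np.cos(2.0 * np.pi * i / (2.0 * M + 1.0))
        kernel = window * sinc
        return kernel / kernel.sum()

    print(hamming_sinc_kernel().sum())   # ~1.0 after normalization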
-
-def build_d_gauss_kernel(gWidth=20, sigma=1.0):
-
-    """
-    DGFilter = build_d_gauss_kernel(gWidth, sigma)
-
-    build the derivative of Gaussian kernel for Canny edge filter
-
-    Parameters 
-    ----------
-    gWidth : {int}, optional
-        width of derivative of Gaussian kernel.
-        default value is 20
-
-    sigma : {float}, optional
-        sigma term of derivative of Gaussian kernel
-         default value is 1.0
-
-    Returns 
-    ----------
-
-    DGFilter : {dictionary}
-        filter kernel
-
-    """
-
-    kernel  = NP.zeros((1+gWidth), dtype=NP.float64)
-    indices = range(0, gWidth)  
-
-    for i in indices:
-        kernel[i]  = math.exp(((-i*i)/(2.0 * sigma * sigma)))
-        kernel[i] *= -(i / (sigma * sigma))
-
-    DGFilter= {'kernelSize' : gWidth, 'coefficients': kernel} 
-
-    return DGFilter
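
The same derivative-of-Gaussian kernel can be written in vectorized numpy; a short sketch
(the function name is illustrative; note that the loop above allocates 1+gWidth elements but
only fills the first gWidth, leaving the last at zero):

    import numpy as np

    def d_gauss_kernel(g_width=20, sigma=1.0):
        # -(i / sigma**2) * exp(-i**2 / (2 * sigma**2)) for i = 0 .. g_width-1
        i = np.arange(g_width, dtype=np.float64)
        return -(i / (sigma * sigma)) * np.exp(-(i * i) / (2.0 * sigma * sigma))

    print(d_gauss_kernel()[:5])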
-
-def build_morpho_thin_masks():
-
-    """
-    MATFilter = build_morpho_thin_masks()
-
-    build 2 sets (J and K) of 8 3x3 morphology masks (structuring elements)
-    to implement thinning (medial axis transformation - MAT)
-
-
-    Parameters 
-    ----------
-
-    None
-
-    Returns 
-    ----------
-
-    MATFilter : {dictionary}
-        morphology filter kernels. there are 2 sets of 8 3x3 masks
-
-    """
-
-    # (layers, rows, cols)
-    size = (8*3*3)
-    J_mask = NP.zeros(size, dtype=NP.int16)
-    K_mask = NP.zeros(size, dtype=NP.int16)
-
-    maskCols = 3
-    # load the 8 J masks for medial axis transformation
-    Column = 0
-    J_mask[0+maskCols*(Column+0)] = 1
-    J_mask[0+maskCols*(Column+1)] = 1
-    J_mask[0+maskCols*(Column+2)] = 1
-    J_mask[1+maskCols*(Column+1)] = 1
-
-    Column += 3
-    J_mask[0+maskCols*(Column+1)] = 1
-    J_mask[1+maskCols*(Column+1)] = 1
-    J_mask[1+maskCols*(Column+2)] = 1
-
-    Column += 3
-    J_mask[0+maskCols*(Column+0)] = 1
-    J_mask[1+maskCols*(Column+0)] = 1
-    J_mask[2+maskCols*(Column+0)] = 1
-    J_mask[1+maskCols*(Column+1)] = 1
-
-    Column += 3
-    J_mask[0+maskCols*(Column+1)] = 1
-    J_mask[1+maskCols*(Column+0)] = 1
-    J_mask[1+maskCols*(Column+1)] = 1
-
-    Column += 3
-    J_mask[0+maskCols*(Column+2)] = 1
-    J_mask[1+maskCols*(Column+1)] = 1
-    J_mask[1+maskCols*(Column+2)] = 1
-    J_mask[2+maskCols*(Column+2)] = 1
-
-    Column += 3
-    J_mask[1+maskCols*(Column+0)] = 1
-    J_mask[1+maskCols*(Column+1)] = 1
-    J_mask[2+maskCols*(Column+1)] = 1
-
-    Column += 3
-    J_mask[1+maskCols*(Column+1)] = 1
-    J_mask[2+maskCols*(Column+0)] = 1
-    J_mask[2+maskCols*(Column+1)] = 1
-    J_mask[2+maskCols*(Column+2)] = 1
-
-    Column += 3
-    J_mask[1+maskCols*(Column+1)] = 1
-    J_mask[1+maskCols*(Column+2)] = 1
-    J_mask[2+maskCols*(Column+1)] = 1
-
-    # load the 8 K masks for medial axis transformation
-    Column = 0
-    K_mask[2+maskCols*(Column+0)] = 1
-    K_mask[2+maskCols*(Column+1)] = 1
-    K_mask[2+maskCols*(Column+2)] = 1
-    
-    Column += 3
-    K_mask[1+maskCols*(Column+0)] = 1
-    K_mask[2+maskCols*(Column+0)] = 1
-    K_mask[2+maskCols*(Column+1)] = 1
-    
-    Column += 3
-    K_mask[0+maskCols*(Column+2)] = 1
-    K_mask[1+maskCols*(Column+2)] = 1
-    K_mask[2+maskCols*(Column+2)] = 1
-
-    Column += 3
-    K_mask[1+maskCols*(Column+2)] = 1
-    K_mask[2+maskCols*(Column+1)] = 1
-    K_mask[2+maskCols*(Column+2)] = 1
-
-    Column += 3
-    K_mask[0+maskCols*(Column+0)] = 1
-    K_mask[1+maskCols*(Column+0)] = 1
-    K_mask[2+maskCols*(Column+0)] = 1
-
-    Column += 3
-    K_mask[0+maskCols*(Column+1)] = 1
-    K_mask[0+maskCols*(Column+2)] = 1
-    K_mask[1+maskCols*(Column+2)] = 1
-
-    Column += 3
-    K_mask[0+maskCols*(Column+0)] = 1
-    K_mask[0+maskCols*(Column+1)] = 1
-    K_mask[0+maskCols*(Column+2)] = 1
-
-    Column += 3
-    K_mask[0+maskCols*(Column+0)] = 1
-    K_mask[0+maskCols*(Column+1)] = 1
-    K_mask[1+maskCols*(Column+0)] = 1
-
-    MATFilter = {'number3x3Masks' : 8, 'jmask' : J_mask, 'kmask' : K_mask} 
-
-    return MATFilter 
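
The J and K arrays are stored flat with index row + 3*(Column + col), i.e. 9*mask + 3*col + row,
so each 3x3 mask is laid out column-major. A small self-contained check that rebuilds the first
J mask from the four flat indices set above:

    import numpy as np

    j0 = np.zeros(9, dtype=np.int16)
    j0[[0, 3, 4, 6]] = 1                 # the four flat indices set for the first J mask above
    print(j0.reshape(3, 3).T)            # transpose converts [col, row] storage to [row, col]
    # [[1 1 1]
    #  [0 1 0]
    #  [0 0 0]]
    # the full sets decode as jmask.reshape(8, 3, 3).transpose(0, 2, 1)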
-
-
-def build_laws_kernel():
-
-    """
-    LAWSFilter = build_laws_kernel()
-
-    build 6 length-7 Laws texture filter masks
-    mask names are: 'L', 'E', 'S', 'W', 'R', 'O'
-
-
-    Parameters 
-    ----------
-
-    None
-
-    Returns 
-    ----------
-
-    LAWSFilter : {dictionary}
-        a set of 6 length-7 Laws texture kernels
-
-    """
-    aperature = (6, 7)
-    coefficients = NP.zeros((aperature), dtype=NP.float64)
-    names = ('L', 'E', 'S', 'W', 'R', 'O' )
-
-    coefficients[0, :] =  ( 1.0,  6.0,  15.0, 20.0,  15.0,  6.0,  1.0)
-    coefficients[1, :] =  (-1.0, -4.0,  -5.0,  0.0,   5.0,  4.0,  1.0)
-    coefficients[2, :] =  (-1.0, -2.0,   1.0,  4.0,   1.0, -2.0, -1.0)
-    coefficients[3, :] =  (-1.0,  0.0,   3.0,  0.0,  -3.0,  0.0,  1.0)
-    coefficients[4, :] =  ( 1.0, -2.0,  -1.0,  4.0,  -1.0, -2.0,  1.0)
-    coefficients[5, :] =  (-1.0,  6.0, -15.0, 20.0, -15.0,  6.0, -1.0)
-
-    LAWSFilter= {'numKernels' : 6, 'kernelSize' : 7, 'filters' : 21,
-                 'coefficients': coefficients, 'names': names} 
-
-    return LAWSFilter
-
-def build_laws_masks(LAWSFilter):
-
-    """
-    masks = build_laws_masks(LAWSFilter)
-
-    takes the Laws Filter dictionary and builds the 21 7x7 kernel masks that are
-    used in Laws texture feature extraction. 
-
-    Parameters 
-    ----------
-
-    LAWSFilter : {dictionary}
-        a set of 6 length-7 Laws texture kernels
-
-    Returns 
-    ----------
-
-    masks : {dictionary}
-        a dictionary of 21 7x7 kernel masks (2D nd_array), keyed 0 through 20
-
-    Examples:
-    use this along with FFT Pack to view the spatial frequency response. Create a 256x256 zero
-    array, place one of the 7x7 Laws masks in its first 7x7 block, then get the 2D power
-    spectrum and display it with pylab
-
-    LAWSFilter = build_laws_kernel()
-    masks = build_laws_masks(LAWSFilter)
-
-    mask_2 = masks[2]
-
-    z = NP.zeros(256*256, dtype=NP.float32).reshape(256, 256)
-    z[0:7, 0:7] = mask_2
-    x = abs(fftshift(fft2(z)))
-    pylab.imshow(x)
-
-    """
-
-    outer_indices = range(0, LAWSFilter['numKernels'])
-    mask_array = {}
-    count = 0
-    for i in outer_indices:
-        rowFilter = LAWSFilter['coefficients'][i]
-        colFilter = LAWSFilter['coefficients'][i]
-        matrix = NP.outer(rowFilter, colFilter)
-        mask_array[count] = 2.0*matrix
-        count = count + 1 
-        inner_indices = range(i+1, LAWSFilter['numKernels'])
-        for j in inner_indices:
-            colFilter = LAWSFilter['coefficients'][j]
-            matrix = NP.outer(rowFilter, colFilter) + NP.outer(colFilter, rowFilter)
-            mask_array[count] = matrix
-            count = count + 1 
-
-    return mask_array
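
A short numpy/scipy sketch of what these masks do: convolving an impulse with the first mask
(2 * outer(L, L)) reproduces the mask itself, which is the idea behind build_test_impulses()
and the run_texture1 test in test_segment.py below. The impulse image size is illustrative.

    import numpy as np
    from scipy import ndimage

    L  = np.array([1.0, 6.0, 15.0, 20.0, 15.0, 6.0, 1.0])   # the 'L' coefficients above
    LL = 2.0 * np.outer(L, L)                                # first mask built by this function

    impulse = np.zeros((15, 15))
    impulse[7, 7] = 1.0
    response = ndimage.convolve(impulse, LL, mode='constant')
    print(np.allclose(response[7-3:7+4, 7-3:7+4], LL))       # True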
-
-
-#
-#    test pattern generators for demo and test
-#
-
-def build_test_texture_discs():
-    """
-    discs = build_test_texture_discs()
-    
-    builds 2 discs with plane wave texture. used for test and demo
-
-    Parameters 
-    ----------
-
-    None
-
-    Returns 
-    ----------
-
-    discs : {nd_array}
-        a 512x512 image with 2 textured test discs
-
-    """
-    rows = 512
-    cols = 512
-    rad  = NP.pi / 180.0
-    test_image = NP.zeros(rows*cols, dtype=NP.float32).reshape(rows, cols)
-    [a, b] = NP.mgrid[0:rows, 0:cols]
-
-    test_image[0:255,0:255]     = NP.sin(4.0*rad*a[0:255,0:255])  + NP.sin(-4.0*rad*b[0:255,0:255]) 
-    test_image[256:511,256:511] = NP.sin(24.0*rad*a[0:255,0:255]) + NP.sin(20.0*rad*b[0:255,0:255])
-
-    test_image = test_image + test_image.min()
-    discs = build_test_unit_discs()
-    discs = discs * test_image 
-
-    return discs
-
-
-def build_test_discs():
-    """
-    test_image = build_test_discs()
-    build 4 discs of equal radius and different mean values for edge/blob testing
-    
-    Parameters 
-    ----------
-
-    None
-
-    Returns 
-    ----------
-
-    test_image : {nd_array}
-        a 512x512 image with 4 test discs (one per quadrant)
-
-    """
-    radius = 50
-    rows   = 512
-    cols   = 512
-    test_image = NP.zeros(rows*cols, dtype=NP.int16).reshape(rows, cols)
-    y_indices = NP.array(range(-radius, radius+1))
-    center_x = rows / 4
-    center_y = cols / 4
-
-    for i in y_indices:
-        x = math.sqrt(float(radius)**2 - float(i)**2)
-        # different raw mean levels
-        test_image[1*center_y+i, 1*center_x-x:1*center_x+x] = 80
-        test_image[1*center_y+i, 3*center_x-x:3*center_x+x] = 90
-        test_image[3*center_y+i, 1*center_x-x:1*center_x+x] = 100
-        test_image[3*center_y+i, 3*center_x-x:3*center_x+x] = 110
-
-    return test_image
-
-
-def build_test_unit_discs():
-    """
-    test_image = build_test_unit_discs()
-    build 2 discs of equal radius and same mean values for texture testing
-    
-    Parameters 
-    ----------
-
-    None
-
-    Returns 
-    ----------
-
-    test_image : {nd_array}
-        a 512x512 image with 2 test discs (upper-left and lower-right quadrants)
-
-    """
-    radius = 50
-    rows   = 512
-    cols   = 512
-    test_image = NP.zeros(rows*cols, dtype=NP.int16).reshape(rows, cols)
-    y_indices = NP.array(range(-radius, radius+1))
-    center_x = rows / 4
-    center_y = cols / 4
-
-    for i in y_indices:
-        x = math.sqrt(float(radius)**2 - float(i)**2)
-        # different raw mean levels
-        test_image[1*center_y+i, 1*center_x-x:1*center_x+x] = 100
-        test_image[3*center_y+i, 3*center_x-x:3*center_x+x] = 100
-
-    return test_image
-
-
-def build_test_impulses():
-    """
-    test_image = build_test_impulses()
-
-    build 4 test impulses, one centered in each of the 4 test discs. used
-    for testing filter kernels, esp. the Laws filter kernels. Filtering
-    these test patterns returns the Laws kernel outer-product matrices.
-
-    Parameters 
-    ----------
-
-    None
-
-    Returns 
-    ----------
-
-    test_image : {nd_array}
-        a 512x512 image with 4 test discs (one per quadrant)
-
-    """
-    rows = 512
-    cols = 512
-    test_image = NP.zeros(rows*cols, dtype=NP.int16).reshape(rows, cols)
-    test_image[128,128] = 1 
-    test_image[378,128] = 1 
-    test_image[128,378] = 1 
-    test_image[378,378] = 1 
-
-    return test_image
-
-
-

Modified: trunk/scipy/ndimage/setup.py
===================================================================
--- trunk/scipy/ndimage/setup.py	2008-08-11 21:39:17 UTC (rev 4637)
+++ trunk/scipy/ndimage/setup.py	2008-08-11 23:08:56 UTC (rev 4638)
@@ -14,17 +14,6 @@
         include_dirs=['src']+[get_include()],
     )
 
-    config.add_extension('_segment',
-                         sources=['src/segment/Segmenter_EXT.c',
-                                  'src/segment/Segmenter_IMPL.c'],
-                         depends = ['src/segment/ndImage_Segmenter_structs.h']
-    )
-
-    config.add_extension('_register',
-                         sources=['src/register/Register_EXT.c',
-                                  'src/register/Register_IMPL.c']
-    )
-
     config.add_data_dir('tests')
 
     return config

Deleted: trunk/scipy/ndimage/tests/test_registration.py
===================================================================
--- trunk/scipy/ndimage/tests/test_registration.py	2008-08-11 21:39:17 UTC (rev 4637)
+++ trunk/scipy/ndimage/tests/test_registration.py	2008-08-11 23:08:56 UTC (rev 4638)
@@ -1,192 +0,0 @@
-import math
-import numpy as np
-import scipy.ndimage._registration as reg
-from numpy.testing import *
-
-def load_desc():
-    # this is for a 256x256x90 volume with 0.9375 x 0.9375 * 1.5 mm voxel sizes 
-    rows   = 256
-    cols   = 256
-    layers = 90
-    xsamp  = 0.9375
-    ysamp  = 0.9375
-    zsamp  = 1.5
-    desc = {'rows' : rows, 'cols' : cols, 'layers' : layers, 
-            'sample_x' : xsamp, 'sample_y' : ysamp, 'sample_z' : zsamp}
-    return desc
-
-def build_volume(imagedesc, S=[1500.0, 2500.0, 1000.0]):
-
-    """
-    build a 3D Gaussian volume. user passes in image dims in imagedesc.
-    the sigma for each axis is S[i] where i=0 is z, i=1 is y and i=2 is x
-
-    volume3D = build_volume(imagedesc, S)
-
-    Parameters 
-    ----------
-    imagedesc : {dictionary}
-        volume dimensions and sampling
-
-    S : {tuple}
-        the Gaussian sigma for Z, Y and X
-
-    Returns 
-    -------
-
-    volume3D : {nd_array}
-        the 3D volume for testing
-
-    """
-    layers = imagedesc['layers']
-    rows   = imagedesc['rows']
-    cols   = imagedesc['cols']
-
-    L = layers/2
-    R = rows/2
-    C = cols/2
-
-    # build coordinates for 3D Gaussian volume
-    # coordinates are centered at (0, 0, 0)
-    [a, b, c] = np.mgrid[-L:L, -R:R, -C:C]
-
-    sigma    = np.array([S[0], S[1], S[2]])
-    aa       = (np.square(a))/sigma[0]
-    bb       = (np.square(b))/sigma[1]
-    cc       = (np.square(c))/sigma[2]
-    volume3D = (255.0*np.exp(-(aa + bb + cc))).astype(np.uint8)
-
-    return volume3D
-
- # self.failUnless(diff(output, tcov) < eps)
-class TestRegistration(TestCase):
-
-    def test_affine_matrix_build_1(self):
-        "test_affine_matrix_build_1"
-        P = np.zeros(6)
-        M = reg.build_rotate_matrix(P)
-        E = np.eye(4)
-        match = (E==M).all()
-        assert_equal(match, True)
-        return
-
-    def test_affine_matrix_build_2(self):
-        "test_affine_matrix_build_2"
-        P = np.zeros(6)
-        P[0] = 1.0
-        M = reg.build_rotate_matrix(P)
-        E = np.array([
-                     [ 1. ,  0.        , 0.        , 0. ],
-                     [ 0. ,  0.9998477 , 0.01745241, 0. ],
-                     [ 0. , -0.01745241, 0.9998477 , 0. ],
-                     [ 0. ,  0.        , 0.        , 1. ]
-                     ])
-        assert_array_almost_equal(E, M, decimal=6)
-        return
-
-    def test_affine_matrix_build_3(self):
-        "test_affine_matrix_build_3"
-        P = np.zeros(6)
-        P[0] = 1.0
-        P[1] = 1.0
-        P[2] = 1.0
-        M = reg.build_rotate_matrix(P)
-        E = np.array([
-                     [ 0.99969541,  0.01744975,  0.01745241,  0. ],
-                     [-0.01775429,  0.9996901 ,  0.01744975,  0. ],
-                     [-0.0171425 , -0.01775429,  0.99969541,  0. ],
-                     [ 0.        ,  0.        ,  0.        ,  1. ]
-                     ])
-        assert_array_almost_equal(E, M, decimal=6)
-        return
-
-    @dec.slow
-    def test_autoalign_histogram_1(self):
-        "test_autoalign_histogram_1"
-        desc = load_desc()
-        gvol = build_volume(desc)
-        mat  = np.eye(4)
-        cost, joint = reg.check_alignment(gvol, mat, gvol, mat, ret_histo=1, lite=1)
-        # confirm that with lite=1 only have non-zero on the main diagonal
-        j_diag = joint.diagonal()
-        Z = np.diag(j_diag)
-        W = joint - Z
-        # clip the near-zero fuzz
-        W[abs(W) < 1e-10] = 0.0
-        assert_equal(W.max(), 0.0)
-        return
-
-    @dec.slow
-    def test_autoalign_histogram_2(self):
-        "test_autoalign_histogram_2"
-        desc = load_desc()
-        gvol = build_volume(desc)
-        mat  = np.eye(4)
-        cost, joint = reg.check_alignment(gvol, mat, gvol, mat, ret_histo=1, lite=0)
-        # confirm that with lite=0 DO have non-zero on the main diagonal
-        j_diag = joint.diagonal()
-        Z = np.diag(j_diag)
-        W = joint - Z
-        # clip the near-zero fuzz
-        W[abs(W) < 1e-10] = 0.0
-        s = (W.max() > 0.0)
-        # make sure there are off-diagonal values
-        assert_equal(s, True)
-        return
-
-    @dec.slow
-    def test_autoalign_ncc_value_1(self):
-        "test_autoalign_ncc_value_1"
-        desc = load_desc()
-        gvol = build_volume(desc)
-        mat  = np.eye(4)
-        cost = reg.check_alignment(gvol, mat, gvol, mat, method='ncc', lite=1)
-        # confirm the cross correlation is near 1.0 
-        t = abs(cost) + 0.0001
-        s = (t >= 1.0)
-        assert_equal(s, True)
-        return
-
-    @dec.slow
-    def test_autoalign_ncc_value_2(self):
-        "test_autoalign_ncc_value_2"
-        desc = load_desc()
-        gvol = build_volume(desc)
-        mat  = np.eye(4)
-        cost = reg.check_alignment(gvol, mat, gvol, mat, method='ncc', lite=0)
-        # confirm the cross correlation is near 1.0 
-        t = abs(cost) + 0.0001
-        s = (t >= 1.0)
-        assert_equal(s, True)
-        return
-
-    @dec.slow
-    def test_autoalign_nmi_value_1(self):
-        "test_autoalign_nmi_value_1"
-        desc = load_desc()
-        gvol = build_volume(desc)
-        mat  = np.eye(4)
-        cost = reg.check_alignment(gvol, mat, gvol, mat, method='nmi', lite=1)
-        # confirm the normalized mutual information is near -2.0 
-        assert_almost_equal(cost, -2.0, decimal=6)
-        return
-
-    @dec.slow
-    def test_autoalign_nmi_value_2(self):
-        "test_autoalign_nmi_value_2"
-        desc = load_desc()
-        gvol = build_volume(desc)
-        mat  = np.eye(4)
-        cost = reg.check_alignment(gvol, mat, gvol, mat, method='nmi', lite=0)
-        # confirm the normalized mutual information is near -2.0 
-        assert_almost_equal(cost, -1.7973048186515352, decimal=6)
-        return
-
-
-
-if __name__ == "__main__":
-    #nose.runmodule()
-    run_module_suite()
-
-
-

Deleted: trunk/scipy/ndimage/tests/test_segment.py
===================================================================
--- trunk/scipy/ndimage/tests/test_segment.py	2008-08-11 21:39:17 UTC (rev 4637)
+++ trunk/scipy/ndimage/tests/test_segment.py	2008-08-11 23:08:56 UTC (rev 4638)
@@ -1,159 +0,0 @@
-import math
-import numpy as NP
-import scipy.ndimage._segmenter as seg
-from numpy.testing import *
-
-def run_sobel():
-    img    = seg.build_test_discs()
-    filter = seg.build_2d_kernel(hiFilterCutoff=60.0)
-    fslice = seg.pre_filter(img, filter, low_threshold=0, high_threshold=255)
-    sobel_edge_image, sobel_stats = seg.sobel_image(fslice)
-    sobel_edge = seg.sobel_edges(sobel_edge_image, sobel_stats, sobel_threshold=0.8)
-    label_sobel, sobel_groups = seg.get_blobs(sobel_edge)
-    ROI      = seg.get_blob_regions(label_sobel, sobel_groups)
-    measures = seg.get_all_bounding_boxes(ROI)
-    thin_kernel = seg.build_morpho_thin_masks()
-    sobel_edges = seg.mat_filter(label_sobel, thin_kernel, ROI)
-    seg.get_voxel_measures(label_sobel, img, ROI)
-    means = ROI[:]['voxelMean']
-    return measures, means
-
-def run_canny():
-    img          = seg.build_test_discs()
-    filter       = seg.build_2d_kernel(hiFilterCutoff=60.0)
-    fslice       = seg.pre_filter(img, filter, low_threshold=0, high_threshold=255)
-    canny_kernel = seg.build_d_gauss_kernel()
-    horz, vert, imean = seg.canny_filter(fslice, canny_kernel)
-    mag, canny_stats  = seg.canny_nonmax_supress(horz, vert, imean)
-    canny_edge        = seg.canny_hysteresis(mag, canny_stats)
-    label_canny, canny_groups = seg.get_blobs(canny_edge)
-    ROI      = seg.get_blob_regions(label_canny, canny_groups)
-    measures = seg.get_all_bounding_boxes(ROI)
-    seg.get_voxel_measures(label_canny, img, ROI)
-    means = ROI[:]['voxelMean']
-    return measures, means
-
-def run_texture1():
-    filter      = seg.build_2d_kernel(hiFilterCutoff=60.0)
-    img         = seg.build_test_discs()
-    disc_mask   = seg.pre_filter(img, filter, low_threshold=50, high_threshold=255,
-                                 conv_binary=1)
-    label_disc_mask, disc_mask_groups = seg.get_blobs(disc_mask)
-    disc_ROI    = seg.get_blob_regions(label_disc_mask, disc_mask_groups)
-    laws_kernel = seg.build_laws_kernel() 
-    impulse     = seg.build_test_impulses()
-    calib       = seg.laws_texture_filter(impulse, label_disc_mask, laws_kernel,
-                                          ROI=disc_ROI, verbose=1)
-    kernels = calib[0]
-    x = laws_kernel['coefficients'][0]
-    m = NP.outer(x, x)
-    m = m * 2
-    laws_LL = kernels[0, 50-4:50+3, 50-3:50+4]
-    return m, laws_LL 
-
-
-def run_texture2():
-    filter = seg.build_2d_kernel(hiFilterCutoff=60.0)
-    img = seg.build_test_unit_discs()
-    disc = seg.pre_filter(img, filter, low_threshold=50, high_threshold=255)
-    disc_mask = seg.pre_filter(img, filter, low_threshold=50, high_threshold=255,
-                               conv_binary=1)
-    label_disc_mask, disc_mask_groups = seg.get_blobs(disc_mask)
-    disc_ROI = seg.get_blob_regions(label_disc_mask, disc_mask_groups)
-    laws_kernel = seg.build_laws_kernel() 
-    texture_img = seg.build_test_texture_discs()
-    seg.laws_texture_filter(texture_img, label_disc_mask, laws_kernel, ROI=disc_ROI,
-                            mean_feature=1, verbose=0)
-    tem = disc_ROI['TEM']
-    return tem 
-
-class TestSegment(TestCase):
-    def test_sobel(self):
-        # generate 4 discs, find the bounding boxes and 
-        # confirm the bounding boxes are at the true position
-        measures, voxel_means = run_sobel()
-        number = measures.size
-        _shortstruct = NP.dtype([('left', 'i'),
-                                 ('right', 'i'),
-                                 ('top', 'i'),
-                                 ('bottom', 'i')])
-
-        assert_equal(number, 4)
-        # load the ground truth
-        truth = NP.zeros(number, dtype=_shortstruct)
-        truth[0] = (76, 179, 179, 77) 
-        truth[1] = (332, 435, 179, 77)
-        truth[2] = (76, 179, 435, 333)
-        truth[3] = (332, 435, 435, 333)
-        match = (truth==measures).all()
-        assert_equal(match, True)
-        # load the ground truth for the bounding box test image mean value
-        voxel_truth = NP.zeros(number, dtype=NP.float64)
-        voxel_truth = (80.0, 90.0, 100.0, 110.0)
-        match = (voxel_truth==voxel_means).all()
-        assert_equal(match, True)
-
-        return
-
-    def test_canny(self):
-        # generate 4 discs, find the bounding boxes and 
-        # confirm the bounding boxes are at the true position
-        measures, voxel_means = run_canny()
-        number = measures.size
-        _shortstruct = NP.dtype([('left', 'i'),
-                                 ('right', 'i'),
-                                 ('top', 'i'),
-                                 ('bottom', 'i')])
-
-        assert_equal(number, 4)
-        # load the ground truth for the bounding box
-        truth = NP.zeros(number, dtype=_shortstruct)
-        truth[0] = (78,  177, 177, 79)
-        truth[1] = (334, 433, 177, 79)
-        truth[2] = (78,  177, 433, 335)
-        truth[3] = (334, 433, 433, 335)
-        match = (truth==measures).all()
-        assert_equal(match, True)
-        # load the ground truth for the bounding box test image mean value
-        voxel_truth = NP.zeros(number, dtype=NP.float64)
-        voxel_truth = (80.0, 90.0, 100.0, 110.0)
-        match = (voxel_truth==voxel_means).all()
-        assert_equal(match, True)
-
-        return
-
-    def test_texture1(self):
-        # [1] texture1 is delta functions and confirm the
-        #     filter result is outer product of the L kernel
-        M, Laws_LL = run_texture1()
-        match = (Laws_LL==M).all()
-        assert_equal(match, True)
-        return
-
-    def test_texture2(self):
-        # [2] texture2 is 2 plane waves and assert the 21-element feature
-        #     vector for each disc is correct
-        tem = run_texture2()
-        tem0 = tem[0]
-        tem1 = tem[1]
-        truth_tem0 = NP.array(
-                        [ 0.        ,  0.        ,  0.        ,  0.        ,  0.        ,
-                          0.        ,  0.13306101,  0.08511007,  0.05084148,  0.07550675,
-                          0.4334695 ,  0.03715914,  0.00289055,  0.02755581,  0.48142046,
-                          0.03137803,  0.00671277,  0.51568902,  0.01795249,  0.49102375,  1.
-                        ], dtype=NP.float32)
-        truth_tem1 = NP.array(
-                        [ 0.        ,  0.        ,  0.        ,  0.        ,  0.        ,
-                          0.        ,  0.02970393,  0.00164266,  0.00922416,  0.01221788,
-                          0.51485199,  0.03298925,  0.02212243,  0.01912871,  0.48350537,
-                          0.01125561,  0.00826189,  0.49437219,  0.00526817,  0.49736592,  1.
-                        ], dtype=NP.float32)
-
-        assert_array_almost_equal(tem0, truth_tem0, decimal=6)
-        assert_array_almost_equal(tem1, truth_tem1, decimal=6)
-
-        return
-
-if __name__ == "__main__":
-    nose.runmodule()
-



