[Scipy-svn] r2712 - in trunk/Lib/sandbox/timeseries: . io io/fame io/fame/src io/fame/tests

scipy-svn at scipy.org scipy-svn at scipy.org
Thu Feb 15 12:52:32 EST 2007


Author: mattknox_ca
Date: 2007-02-15 11:52:27 -0600 (Thu, 15 Feb 2007)
New Revision: 2712

Added:
   trunk/Lib/sandbox/timeseries/io/
   trunk/Lib/sandbox/timeseries/io/__init__.py
   trunk/Lib/sandbox/timeseries/io/fame/
   trunk/Lib/sandbox/timeseries/io/fame/__init__.py
   trunk/Lib/sandbox/timeseries/io/fame/fame.py
   trunk/Lib/sandbox/timeseries/io/fame/mapping.py
   trunk/Lib/sandbox/timeseries/io/fame/readme.txt
   trunk/Lib/sandbox/timeseries/io/fame/setup.py
   trunk/Lib/sandbox/timeseries/io/fame/src/
   trunk/Lib/sandbox/timeseries/io/fame/src/cfame.c
   trunk/Lib/sandbox/timeseries/io/fame/tests/
   trunk/Lib/sandbox/timeseries/io/fame/tests/test_fame.py
Log:


Added: trunk/Lib/sandbox/timeseries/io/__init__.py
===================================================================

Added: trunk/Lib/sandbox/timeseries/io/fame/__init__.py
===================================================================
--- trunk/Lib/sandbox/timeseries/io/fame/__init__.py	2007-02-15 16:42:18 UTC (rev 2711)
+++ trunk/Lib/sandbox/timeseries/io/fame/__init__.py	2007-02-15 17:52:27 UTC (rev 2712)
@@ -0,0 +1 @@
+from fame import *
\ No newline at end of file

Added: trunk/Lib/sandbox/timeseries/io/fame/fame.py
===================================================================
--- trunk/Lib/sandbox/timeseries/io/fame/fame.py	2007-02-15 16:42:18 UTC (rev 2711)
+++ trunk/Lib/sandbox/timeseries/io/fame/fame.py	2007-02-15 17:52:27 UTC (rev 2712)
@@ -0,0 +1,657 @@
+import sys, types, re, os
+
+import timeseries as ts
+import cfame
+import mapping as mp
+
+import numpy
+import maskedarray as ma
+import thread
+
+fameLock = thread.allocate_lock()
+
+class CaseInsensitiveDict(dict):
+    def __init__(self, data={}):
+        for i, v in data.iteritems():
+            self[i.upper()] = v
+            
+    def __getitem__(self, key):
+        if hasattr(key, 'upper'): key = key.upper()
+        return super(CaseInsensitiveDict, self).__getitem__(key)
+        
+    def __setitem__(self, key, item):
+        if hasattr(key, 'upper'): key = key.upper()
+        super(CaseInsensitiveDict, self).__setitem__(key, item)
+
+class DBError(Exception): pass
+
+
+class FameDb(object):
+    """Fame database object
+
+:Construction:
+    x = FameDb(conn_str, mode='r', large=True)
+
+:Parameters:
+    - `conn_str` (str) : valid connection string. Can be a physical path,
+    channel specification, etc.
+    - `mode` (str, *['r']*) : method of access to the database. Can be one
+    of the following:
+        'r' => read only
+        's' => shared
+        'o' => overwrite
+        'c' => create
+        'u' => update
+        'w' => write
+        'd' => direct
+    - `large` (boolean, *[True]*) : Applies only when `mode` is 'o' or 'c'.
+    If True, a large size database will be created. If False, a standard size
+    database will be created.
+"""
+    def __init__(self, conn_str, mode='r', large=True):
+        mode = mode.lower()
+        if mode == 'r':
+            intmode = mp.HRMODE
+        elif mode == 's':
+            intmode = mp.HSMODE
+        elif mode == 'u':
+            intmode = mp.HUMODE
+        elif mode == 'w':
+            intmode = mp.HWMODE
+        elif mode == 'd':
+            intmode = mp.HDMODE
+        elif mode == 'c':
+            intmode = mp.HCMODE
+        elif mode == 'o':
+            intmode = mp.HOMODE
+        else:
+            raise ValueError, "Database access mode not supported."
+        self.mode = mode
+        
+        try:
+            self.dbkey = cf_open(conn_str, intmode, int(large))
+            self.dbIsOpen = True
+        except:
+            self.dbIsOpen = False
+            raise
+
+        
+    def read(self, name,
+             start_date=None, end_date=None,
+             start_case=None, end_case=None, max_string_len=65):
+    
+        """read specified object(s) from database
+
+:Parameters:
+        - `name` (string or list of strings) : names of objects that will be
+          read from the database
+
+        - `start_date` (int, *[None]*) : Applies only when reading time series.
+          If specified, only data points on or after `start_date` will be read.
+          If None, data will be read from the first value of the series.
+        - `end_date` (int, *[None]*) : Applies only when reading time series.
+          If specified, only data points on or before `end_date` will be read.
+          If None, data will be read to the last value of the series.
+        - `start_case` (int, *[None]*) : Applies only when reading case series.
+          If specified, only data points on or after `start_case` will be read.
+          If None, data will be read starting from case index 1
+        - `end_case` (int, *[None]*) : Applies only when reading case series.
+          If specified, only data points on or before `end_case` will be read.
+          If None, data will be read to the last value of the series.
+        - `max_string_len` (int, *[65]*) : Applies only when readings strings
+           or series of strings. This is the maximum length of string that can
+           be read. Lower values result in less memory usage, so you should
+           specify this as low as is reasonable for your data.
+           
+:Return:
+        if `name` is a list of strings:
+            case insensitive dictionary of the objects
+        if `name` is a single string:
+            object from database that is stored as `name`"""
+
+
+        if not self.dbIsOpen:
+            raise DBError("Database is not open")
+
+        isSingle = False
+        if isinstance(name, types.StringType):
+            names = [name]
+            isSingle = True
+        else:
+            names = name
+
+        items = CaseInsensitiveDict()
+        
+        #default to -1. This will get the entire range
+        _start_case = _end_case = -1
+        _start_date = _end_date = -1
+
+        range_freq = None
+        if start_date is not None:
+            _start_date = start_date.value - mp.value_adjust[start_date.freq]
+            range_freq = mp.freqReverseMapping[start_date.freq]
+
+        if end_date is not None:
+            if start_date is not None and start_date.freq != end_date.freq:
+                raise ValueError("start_date and end_date must be same frequency")
+            _end_date = end_date.value - mp.value_adjust[end_date.freq]
+            if range_freq is None:
+                range_freq = mp.freqReverseMapping[end_date.freq]
+
+        if start_case is not None: _start_case = start_case
+        if end_case is not None: _end_case = end_case
+       
+        if len(set([_start_case, _end_case, _start_date, _end_date, -1])) != 1:
+            checkFreq = True
+        else:
+            checkFreq = False
+
+        for objName in names:
+            objName = objName.upper()
+
+            if checkFreq:
+                objFreq = self.get_freq(objName)
+
+                if objFreq == range_freq:
+                    start_index, end_index = _start_date, _end_date
+                elif objFreq == mp.HCASEX:
+                    start_index, end_index = _start_case, _end_case
+                else:
+                    start_index, end_index = -1, -1
+            else:
+                start_index, end_index = -1, -1
+
+            result = cf_read(self.dbkey, objName, start_index,
+                             end_index, max_string_len)
+
+            if result['type'] == mp.HBOOLN:
+                numpyType = numpy.bool_
+            else:
+                numpyType = mp.fametype_tonumpy(result['type'])
+
+            if result['type'] == mp.HNAMEL:
+                pyObj = [x for x in result['data'][1:-1].split(", ") \
+                         if x != '']
+
+            elif result['class'] == mp.HSCALA:
+                if isinstance(result['data'], str):
+                    if result['mask']:
+                        pyObj = None
+                    else:
+                        pyObj = result['data']
+                else:
+                    if result['mask'][0]:
+                        pyObj = None
+                    else:
+                        pyObj = result['data'][0]
+                        if result['type'] >= 8: # date type
+                            value = pyObj+ \
+                               mp.value_adjust[mp.freqMapping[result['type']]]
+                            pyObj = ts.Date(
+                                        freq=mp.freqMapping[result['type']],
+                                        value=value)
+                        else:
+                            pyObj = numpyType(pyObj)
+
+            elif result['class'] == mp.HSERIE:
+                
+                if 'data' in result:
+                    vals = result['data']
+                    mask = result['mask']
+                else:
+                    vals = []
+                    mask = ma.nomask
+                    
+                if result['type'] >= 8: # date type
+                    valadj = mp.value_adjust[mp.freqMapping[result['type']]]
+                    if len(vals) > 0: vals += valadj
+                    data = ts.DateArray(vals,
+                                        freq=mp.freqMapping[result['type']])
+                else:
+                    data = numpy.array(vals, dtype=numpyType)
+                    
+                if result['freq'] == mp.HCASEX:
+                    pyObj = ma.array(data, mask=mask)
+                else:
+                    observed = mp.observedMapping[result['observed']]
+                    basis = mp.basisMapping[result['basis']]
+                    freq = mp.freqMapping[result['freq']]
+
+                    if 'data' in result:
+                        start_date = ts.Date(
+                              freq=freq,
+                              value=result['startindex']+mp.value_adjust[freq])
+                    else:
+                        start_date = None
+                    
+                    pyObj = ts.time_series(data, freq=freq,
+                                           start_date=start_date,
+                                           observed=observed, mask=mask)
+
+            items[objName] = pyObj
+
+        if isSingle:
+            return items.values()[0]
+            
+        return items
+
+
+    def write_tser_dict(self, objdict,
+                        overwrite=False, assume_exists=False,
+                        start_date=None, end_date=None):
+        """for each key, value pair in the dictionary `objdict` write value to
+the database as key, as a time series (calls FameDb.write_tser on each key,
+value pair)
+
+:Parameters:
+        - `objdict` (dict) : dictionary of TimeSeries objects to be written. Object
+          names for keys and TimeSeries objects for values
+        - `overwrite` (boolean, *[False]*) : If True, if the key exists in the database it
+           will be overwritten. If False, data will be added to series that already exist
+           (data in objects in `objdict` will be given priority over pre-existing data in
+           the db where there is overlap)
+        - `assume_exists` (boolean, *[False]*) : If True, an error will be
+           raised if the series does not exist. If False, the series will be
+           created if it does not exist already.
+        - `start_date` (Date, *[None]*) : If None, data will be written from the start of
+           the series. If specified, only data points on or after start_date will be written.
+        - `end_date` (Date, *[None]*) : If None, data will be written until the end of
+           the series. If specified, only data points on or before end_date will be written.
+"""
+        for key, obj in objdict.iteritems():
+            self.write_tser(key, obj, overwrite=overwrite,
+                            assume_exists=assume_exists,
+                            start_date=start_date, end_date=end_date)
+
+
+    def write_cser_dict(self, objdict,
+                        overwrite=False, assume_exists=False,
+                        zero_represents=1, start_case=None, end_case=None):
+        """for each key, value pair in the dictionary `objdict` write value to
+the database as key, as a case series (calls FameDb.write_cser on each key,
+value pair)
+
+:Parameters:
+        - `objdict` (dict) : dictionary of arrays to be written as Case Series.
+           Object names for keys and arrays for values
+        - `overwrite` (boolean, *[False]*) : If True, if the key exists in the database it
+           will be overwritten. If False, data will be added to series that already exist
+           (data in objects in `objdict` will be given priority over pre-existing data in
+           the db where there is overlap)
+        - `assume_exists` (boolean, *[False]*) : If True, an error will be
+           raised if the series does not exist. If False, the series will be
+           created if it does not exist already.
+        - `zero_represents` (int, *[1]*) : the case index for FAME that index zero in
+           the array represents
+        - `start_case` (int, *[None]*) : If None, data will be written from the start of
+           the array. If specified, only data points on or after start_case will be written.
+        - `end_case` (int, *[None]*) : If None, data will be written until the end of
+           the array. If specified, only data points on or before end_case will be written.
+"""
+        for key, obj in objdict.iteritems():
+            self.write_cser(key, obj, overwrite=overwrite,
+                            assume_exists=assume_exists,
+                            zero_represents=zero_represents,
+                            start_case=start_case, end_case=end_case)
+
+    def write_scalar_dict(self, objdict):
+        """for each key, value pair in the dictionary `objdict` write value to
+the database as key, as a scalar (calls FameDb.write_scalar on each key,
+value pair)
+
+:Parameters:
+        - `objdict` (dict) : dictionary of items to be written as scalars.
+           Object names for keys and scalar items for values
+"""
+        for key, obj in objdict.iteritems():
+            self.write_scalar(key, obj)
+
+
+    def write_tser(self, name, tser,
+                   overwrite=False, assume_exists=False,
+                   start_date=None, end_date=None):
+        """write `tser` to the database as `name` as a time series.
+
+:Parameters:
+        - `name` (string) : database key that the object will be written to
+        - `tser` (TimeSeries) : TimeSeries object to be written. Cannot have missing dates.
+           Use fill_missing_dates first on your series if you suspect this is the situation.
+           TimeSeries must be 1-dimensional
+        - `overwrite` (boolean, *[False]*) : If True, if `name` exists in the database it
+           will be overwritten. If False, data will be added to series that already exist
+           (data in `tser` will be given priority over pre-existing data in the db where
+           there is overlap)
+        - `assume_exists` (boolean, *[False]*) : If True, an error will be
+           raised if the series does not exist. If False, the series will be
+           created if it does not exist already.
+        - `start_date` (Date, *[None]*) : If None, data will be written from the start of
+           `tser`. If specified, only data points on or after start_date will be written.
+        - `end_date` (Date, *[None]*) : If None, data will be written until the end of
+           `tser`. If specified, only data points on or before end_date will be written.
+"""
+        
+        self.__check_writeable()
+            
+        if not isinstance(tser, ts.TimeSeries):
+            raise ValueError("tser is not a valid time series")
+        elif tser.has_missing_dates():
+            raise ValueError("tser must not have any missing dates")
+        elif tser.ndim != 1:
+            raise ValueError("FAME db only supports 1-dimensional time series")
+
+        if assume_exists and not self.exists(name):
+            raise DBError("%s does not exist" % name)
+
+        if overwrite or not self.exists(name): create = True
+        else: create = False
+
+        fame_type = mp.fametype_fromdata(tser._data)
+        fame_freq = mp.freqReverseMapping[tser.freq]
+
+        if create:
+            
+            if hasattr(tser, "basis"):
+                fame_basis = mp.basisReverseMapping[tser.basis]
+            else:
+                fame_basis = mp.HBSDAY
+
+            if hasattr(tser, "observed"):
+                fame_observed = mp.observedReverseMapping[tser.observed]
+                if fame_observed == 0: fame_observed = mp.HOBEND
+            else:
+                fame_observed = mp.HOBEND
+
+            if self.exists(name): self.remove(name)
+            cf_create(self.dbkey, name, mp.HSERIE, fame_freq, fame_type, fame_basis, fame_observed)
+
+        def get_boundary_date(bdate, attr):
+            if bdate is not None:
+                if bdate.freq != tser.freq:
+                    raise ValueError(attr+" frequency must be same as tser frequency")
+                if tser.start_date > bdate or tser.end_date < bdate:
+                    raise ValueError(attr+" outside range of series")
+                return bdate
+            else:
+                return getattr(tser, attr)
+            
+        start_date = get_boundary_date(start_date, "start_date")
+        end_date = get_boundary_date(end_date, "end_date")
+        
+        if start_date is not None:
+
+            towrite = tser[start_date:end_date+1]
+
+            start_index = start_date.value
+            end_index = end_date.value
+
+            # convert integer types to floats since FAME does not have an integer type
+            newType = mp.fametype_tonumpy(fame_type)
+            if fame_type >= 8:
+                # date type
+                fame_data = towrite._data - mp.value_adjust[towrite._data.freq]
+            elif newType != tser._data.dtype:
+                fame_data = towrite._data.astype(newType)
+            else:
+                fame_data = towrite._data
+
+            if towrite._mask is ma.nomask:
+                fame_mask = numpy.zeros(towrite._data.shape, dtype=numpy.bool_)
+            else:
+                fame_mask = towrite._mask
+
+            start_index -= mp.value_adjust[towrite.freq]
+            end_index   -= mp.value_adjust[towrite.freq]
+
+            cfame.write_series(self.dbkey, name, fame_data, fame_mask, start_index, end_index, fame_type, fame_freq)
+
+    def write_cser(self, name, cser, overwrite=False, assume_exists=False, zero_represents=1, start_case=None, end_case=None):
+        """write `cser` to the database as `name` as a case series.
+
+:Parameters:
+        - `name` (string) : database key that the object will be written to
+        - `cser` (ndarray) : 1-dimensional ndarray (or subclass of ndarray) object to be
+           written. If `cser` is a MaskedArray, then masked values will be written as ND.
+        - `overwrite` (boolean, *[False]*) : If True, if `name` exists in the database it
+           will be overwritten. If False, data will be added to series that already exist
+           (data in `cser` will be given priority over pre-existing data in the db where
+           there is overlap)
+        - `assume_exists` (boolean, *[False]*) : If True, an error will be
+           raised if the series does not exist. If False, the series will be
+           created if it does not exist already.
+        - `zero_represents` (int, *[1]*) : the case index for FAME that index zero in
+           the array represents
+        - `start_case` (int, *[None]*) : If None, data will be written from the start of
+           `cser`. If specified, only data points on or after start_case will be written.
+        - `end_case` (int, *[None]*) : If None, data will be written until the end of
+           `cser`. If specified, only data points on or before end_case will be written.
+"""
+        
+        self.__check_writeable()
+            
+        if not isinstance(cser, numpy.ndarray):
+            raise ValueError("cser is not a valid ndarray")
+        elif cser.ndim != 1:
+            raise ValueError("FAME db only supports 1-dimensional arrays")
+
+        if assume_exists and not self.exists(name):
+            raise DBError("%s does not exist" % name)
+
+        if overwrite or not self.exists(name): create = True
+        else: create = False
+
+        if hasattr(cser, "_data"):
+            fame_data = cser._data
+            if cser._mask is ma.nomask:
+                fame_mask = numpy.zeros(fame_data.shape, dtype=numpy.bool_)
+            else:
+                fame_mask = cser._mask
+        else:
+            fame_data = cser
+            fame_mask = numpy.zeros(fame_data.shape, dtype=numpy.bool_)
+            
+        fame_type = mp.fametype_fromdata(fame_data)
+
+        if create:
+            if self.exists(name): self.remove(name)
+            cf_create(self.dbkey, name, mp.HSERIE, mp.HCASEX, fame_type, mp.HBSUND, mp.HOBUND)
+
+        def get_boundary_case(bcase, attr):
+            if bcase is not None:
+                idx = bcase - zero_represents
+                if idx < 0 or idx > cser.size:
+                    raise ValueError("%s outside range of series" % attr)
+                return bcase
+            else:
+                if cser.size == 0:
+                    return None
+                else:
+                    if attr == 'start_case':
+                        return zero_represents
+                    elif attr == 'end_case':
+                        return zero_represents + cser.size - 1
+                    else:
+                        raise ValueError("unexpected argument: %s " % attr)
+            
+        start_case = get_boundary_case(start_case, "start_case")
+        end_case = get_boundary_case(end_case, "end_case")
+
+        if start_case is not None:        
+            # convert integer types to floats since FAME does not have an integer type
+            s = start_case - zero_represents
+            e = end_case - zero_represents
+            
+            fame_data = fame_data[s:e+1]
+            fame_mask = fame_mask[s:e+1]
+            newType = mp.fametype_tonumpy(fame_type)
+            if fame_type >= 8:
+                # date type
+                fame_data = fame_data - mp.value_adjust[fame_data.freq]
+            elif newType != fame_data.dtype:
+                fame_data = fame_data.astype(newType)
+
+            cfame.write_series(self.dbkey, name, fame_data, fame_mask, start_case, end_case, fame_type, mp.HCASEX)
+
+
+    def write_scalar(self, name, scalar):
+        """write `scalar` to the database as `name` as a scalar object. If an
+object already exists in the database named as `name` then it is
+over-written, otherwise it is created.
+
+:Parameters:
+        - `name` (string) : database key that the object will be written to
+        - `scalar` : one of the following: string, numpy scalar, int, float,
+           list of strings (for name lists), Date, boolean"""
+        
+        self.__check_writeable()
+        
+        fame_type = mp.fametype_fromdata(scalar)
+
+        if isinstance(scalar, ts.Date):
+            fame_data = numpy.int32(scalar.value - mp.value_adjust[scalar.freq])
+        elif hasattr(scalar, "dtype"):
+            if scalar.ndim != 0: raise ValueError("received non-scalar data")
+            newType = mp.fametype_tonumpy(fame_type)
+            if newType != scalar.dtype: fame_data = scalar.astype(newType)
+            else: fame_data = scalar
+        elif fame_type == mp.HSTRNG:
+            fame_data = scalar
+        elif fame_type == mp.HPRECN:
+            fame_data = numpy.float64(scalar)
+        elif fame_type == mp.HBOOLN:
+            fame_data = numpy.int32(scalar)
+        elif fame_type == mp.HNAMEL:
+            fame_data = "{" + ", ".join(scalar) + "}"
+        else:
+            raise ValueError("Unrecognized data type")
+            
+        if self.exists(name): self.remove(name)
+        cf_create(self.dbkey, name, mp.HSCALA, mp.HUNDFX, fame_type, mp.HBSUND, mp.HOBUND)
+
+        # convert integer types to floats since FAME does not have an integer type
+        newType = mp.fametype_tonumpy(fame_type)
+        if hasattr(fame_data, 'dtype') and newType != fame_data.dtype:
+            fame_data = fame_data.astype(newType)
+        
+        if fame_type == mp.HNAMEL:
+            cf_write_namelist(self.dbkey, name, fame_data)
+        else:
+            cf_write_scalar(self.dbkey, name, fame_data, fame_type)
+
+
+
+    def wildlist(self, exp, wildonly=False):
+        """performs a wildlist lookup on the database, using Fame syntax
+("?" and "^"), returns a normal python list of strings"""
+        self.__check_readable()
+        res = cf_wildlist(self.dbkey, exp)
+            
+        if wildonly:
+            exp = exp.replace("?", "(.*)")
+            exp = exp.replace("^", "(.)")
+            exp = exp.replace("$","\$")
+            regex = re.compile(exp)
+            for i in range(len(res)):
+                res[i] = "".join(regex.match(res[i]).groups())
+        return res
+
+    def exists(self, objName):
+        return cf_exists(self.dbkey, objName)
+
+    def close(self):
+        if self.dbIsOpen:
+            cf_close(self.dbkey)
+        self.dbIsOpen = False
+
+    def __del__(self):
+        if self.dbIsOpen:
+            self.close()
+
+
+    def __check_writeable(self):
+        """Raises error if database is not writeable"""
+        if not self.dbIsOpen:
+            raise DBError("Database is not open")
+        if self.mode == 'r':
+            raise DBError("Cannot write to a read-only database")
+
+    def __check_readable(self):
+        """Raises error if database is not readable"""
+        if not self.dbIsOpen:
+            raise DBError("Database is not open")
+
+
+    def remove(self, name, ignoreError=True):
+        """Deletes the given series from the database"""
+        if type(name) == type(""): name = [name]
+
+        for x in name:
+            try:
+                cf_remove(self.dbkey, x)
+            except:
+                if not ignoreError: raise            
+
+    def get_freq(self, name):
+        """Finds the frequency of the object stored in the db as `name`"""
+        if not self.dbIsOpen:
+            raise DBError("Database is not open")
+
+        result = cf_size(self.dbkey, name.upper())
+        return result['freq']
+
+
+    def whats(self, name):
+        """Performs a fame "whats" command on the provided series"""
+        if type(name) == type(""): name = [name]
+
+        result = {}
+        for dbname in name:
+            if not self.dbIsOpen:
+                raise DBError("Database is not open")
+
+            result[dbname] = cf_whats(self.dbkey, dbname.upper())
+
+        if len(result) == 1:
+            return result.values()[0]
+        return result
+
+
+
+    def restore(self):
+        """Discard any changes made to the database since it was last opened or posted."""
+        return cf_restore(self.dbkey)
+
+
+class cFameCall:
+    """wrapper for cfame functions that acquires and releases a resource lock.
+This is needed because the Fame C api is not thread safe."""
+
+    def __init__ (self, func):
+        self.f = func
+        self.__doc__ = getattr(func, "__doc__", str(func))
+        self.__name__ = getattr(func, "__name__", str(func))
+    #
+    def __call__ (self, *args, **kwargs):
+        "Execute the call behavior."
+        tmp = fameLock.acquire()
+        try:
+            result = self.f(*args, **kwargs)
+            fameLock.release()
+        except:
+            fameLock.release()
+            raise
+            
+        return result
+
+cf_open = cFameCall(cfame.open)
+cf_close = cFameCall(cfame.close)
+cf_restore = cFameCall(cfame.restore)
+cf_size = cFameCall(cfame.size)
+cf_whats = cFameCall(cfame.whats)
+cf_remove = cFameCall(cfame.remove)
+cf_create = cFameCall(cfame.create)
+cf_read = cFameCall(cfame.read)
+cf_write_scalar = cFameCall(cfame.write_scalar)
+cf_write_series = cFameCall(cfame.write_series)
+cf_write_namelist = cFameCall(cfame.write_namelist)
+cf_wildlist = cFameCall(cfame.wildlist)
+cf_exists = cFameCall(cfame.exists)
\ No newline at end of file

Added: trunk/Lib/sandbox/timeseries/io/fame/mapping.py
===================================================================
--- trunk/Lib/sandbox/timeseries/io/fame/mapping.py	2007-02-15 16:42:18 UTC (rev 2711)
+++ trunk/Lib/sandbox/timeseries/io/fame/mapping.py	2007-02-15 17:52:27 UTC (rev 2712)
@@ -0,0 +1,205 @@
+
+
+# ---------------------------
+# For fametype mapping
+import types
+import numpy
+from timeseries import TimeSeries, Date, DateArray, freq_fromstr
+
+
+# ---------------------------
+# Fame specific constants
+
+HRMODE = 1 # READ        
+HCMODE = 2 # CREATE      
+HOMODE = 3 # OVERWRITE       
+HUMODE = 4 # UPDATE      
+HSMODE = 5 # SHARED      
+HWMODE = 6 # WRITE       
+HDMODE = 7 # DIRECT WRITE    
+
+#** FAME Data Object Classes **
+
+HSERIE = 1 # SERIES   
+HSCALA = 2 # SCALAR   
+HFRMLA = 3 # FORMULA  
+HITEM  = 4 # ITEM     
+HGLNAM = 5 # GLNAME   
+HGLFOR = 6 # GLFORMULA    
+
+#** FAME Data Object Types **
+
+HUNDFT = 0 # Undefined    
+HNUMRC = 1 # NUMERIC  
+HNAMEL = 2 # NAMELIST 
+HBOOLN = 3 # BOOLEAN  
+HSTRNG = 4 # STRING   
+HPRECN = 5 # PRECISION    
+HDATE  = 6 # General DATE 
+HRECRD = 7 # RECORD   
+
+#** FAME Frequencies **
+
+HUNDFX = 0 # Undefined            
+HDAILY = 8 # DAILY            
+HBUSNS = 9 # BUSINESS              
+HWKSUN = 16 #WEEKLY (SUNDAY)
+HMONTH = 129 # MONTHLY             
+HCASEX = 232 # CASE
+HSEC   = 226 # SECONDLY
+HMIN   = 227 # MINUTELY
+HHOUR  = 228 # HOURLY
+HQTOCT = 160 # QUARTERLY (OCTOBER)
+HQTNOV = 161 # QUARTERLY (NOVEMBER)
+HQTDEC = 162 # QUARTERLY (DECEMBER)
+HANJAN = 192 # ANNUAL (JANUARY)
+HANFEB = 193 # ANNUAL (FEBRUARY)  
+HANMAR = 194 # ANNUAL (MARCH) 
+HANAPR = 195 # ANNUAL (APRIL) 
+HANMAY = 196 # ANNUAL (MAY)   
+HANJUN = 197 # ANNUAL (JUNE)  
+HANJUL = 198 # ANNUAL (JULY)  
+HANAUG = 199 # ANNUAL (AUGUST)
+HANSEP = 200 # ANNUAL (SEPTEMBER) 
+HANOCT = 201 # ANNUAL (OCTOBER)
+HANNOV = 202 # ANNUAL (NOVEMBER)  
+HANDEC = 203 # ANNUAL (DECEMBER)  
+
+#** FAME BASIS Attribute Settings **
+
+HBSUND = 0 # Undefined
+HBSDAY = 1 # DAILY
+HBSBUS = 2 # BUSINESS
+
+#** FAME OBSERVED Attribute Settings **
+
+HOBUND = 0 # Undefined
+HOBBEG = 1 # BEGINNING
+HOBEND = 2 # ENDING
+HOBAVG = 3 # AVERAGED
+HOBSUM = 4 # SUMMED
+HOBANN = 5 # ANNUALIZED
+HOBFRM = 6 # FORMULA
+HOBHI  = 7 # HIGH
+HOBLO  = 8 # LOW
+
+def reverse_dict(d):
+    return dict([(y, x) for x, y in d.iteritems()])
+
+basisMapping = { HBSUND:"UNDEFINED",
+                 HBSDAY:"D",
+                 HBSBUS:"B"}
+basisReverseMapping = reverse_dict(basisMapping)
+
+observedMapping = { HOBUND:"UNDEFINED",
+                  HOBBEG: "BEGINNING",
+                  HOBEND: "ENDING",
+                  HOBAVG: "AVERAGED",
+                  HOBSUM: "SUMMED",
+                  HOBANN: "ANNUALIZED",
+                  HOBFRM: "FORMULA",
+                  HOBHI: "MAXIMUM",
+                  HOBLO: "MINIMUM"  }
+                  
+observedReverseMapping = reverse_dict(observedMapping)
+
+freqMapping = { HDAILY:"D",
+                HBUSNS:"B",
+                HMONTH:"M",
+                HWKSUN:"W",
+                HSEC  :"S",
+                HMIN  :"T",
+                HHOUR :"H",
+                HQTOCT:"Q",
+                HQTNOV:"Q",
+                HQTDEC:"Q",
+                HANJAN:"A",
+                HANFEB:"A",
+                HANMAR:"A",
+                HANAPR:"A",
+                HANMAY:"A",
+                HANJUN:"A",
+                HANJUL:"A",
+                HANAUG:"A",
+                HANSEP:"A",
+                HANOCT:"A",
+                HANNOV:"A",
+                HANDEC:"A" }
+                
+freqMapping = dict([(x, freq_fromstr(val)) for x, val in freqMapping.iteritems()])
+
+freqReverseMapping = {  "D" : HDAILY,
+                        "B" : HBUSNS,
+                        "M" : HMONTH,
+                        "W" : HWKSUN,
+                        "S" : HSEC,
+                        "T" : HMIN,
+                        "H" : HHOUR,
+                        "Q" : HQTDEC,
+                        "A" : HANDEC}
+                        
+freqReverseMapping = dict([(freq_fromstr(x), val) for x, val in freqReverseMapping.iteritems()])
+
+value_adjust = {
+    'A':1849,
+    'Q':7396,
+    'M':22188,
+    'W':96477,
+    'B':482381,
+    'D':675333,
+    'H':87648,
+    'T':5258880,
+    'S':315532800}
+
+value_adjust = dict([(freq_fromstr(x), val) for x, val in value_adjust.iteritems()])
+
+
+def fametype_fromdata(data):
+    """determine fame type code from a data object"""
+    
+    if isinstance(data, DateArray) or isinstance(data, Date):
+        return freqReverseMapping[data.freq]
+    elif hasattr(data, 'dtype'):
+        dtypeStr = str(data.dtype)
+        
+        if dtypeStr[:5] == "float":
+            if int(dtypeStr[5:]) > 32: return HPRECN
+            else: return HNUMRC
+        elif dtypeStr[:3] == "int":
+            if int(dtypeStr[3:]) > 32: return HPRECN
+            else: return HNUMRC
+        elif dtypeStr[:4] == "uint":
+            if int(dtypeStr[4:]) >= 32: return HPRECN
+            else: return HNUMRC
+        elif dtypeStr[:2] == "|S" or dtypeStr == 'object':
+            return HSTRNG
+        elif dtypeStr == "bool":
+            return HBOOLN
+        else:
+            raise ValueError("Unsupported dtype for fame database: %s", dtypeStr)
+    
+    elif type(data) == types.StringType:
+        return HSTRNG
+    elif type(data) in (types.IntType, types.FloatType):
+        return HPRECN
+    elif type(data) == types.BooleanType:
+        return HBOOLN
+    elif type(data) == types.ListType:
+        return HNAMEL
+    else:
+        raise ValueError("Unrecognized data type")
+
+def fametype_tonumpy(fametype):
+    if fametype >= 8:
+        # date types
+        return numpy.int32
+    elif fametype == HNAMEL:
+        return None
+    else:
+        typeMap = {
+            HNUMRC:numpy.float32,
+            HBOOLN:numpy.int32,
+            HSTRNG:numpy.object_,
+            HPRECN:numpy.float64}
+        return typeMap[fametype]
+

Added: trunk/Lib/sandbox/timeseries/io/fame/readme.txt
===================================================================
--- trunk/Lib/sandbox/timeseries/io/fame/readme.txt	2007-02-15 16:42:18 UTC (rev 2711)
+++ trunk/Lib/sandbox/timeseries/io/fame/readme.txt	2007-02-15 17:52:27 UTC (rev 2712)
@@ -0,0 +1,6 @@
+Requirements and warnings:
+
+1. Requires FAME version 9.2. Can be back-ported to version 9.0 with very
+   minor tweaking, but does not work with 9.0 out of the box.
+2. Requires the timeseries module, and has all the same requirements and
+   warnings listed in that module's readme file.

Added: trunk/Lib/sandbox/timeseries/io/fame/setup.py
===================================================================
--- trunk/Lib/sandbox/timeseries/io/fame/setup.py	2007-02-15 16:42:18 UTC (rev 2711)
+++ trunk/Lib/sandbox/timeseries/io/fame/setup.py	2007-02-15 17:52:27 UTC (rev 2712)
@@ -0,0 +1,53 @@
+__version__ = '1.0'
+__revision__ = "$Revision: 37 $"
+__date__     = '$Date: 2006-12-08 14:30:29 -0500 (Fri, 08 Dec 2006) $'
+
+import os, sys
+from os.path import join
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
+    nxheader = join(get_numpy_include_dirs()[0],'numpy',)
+
+    famedir = os.getenv('FAME')
+    if famedir is None:
+        raise EnvironmentError("FAME environment variable not found")
+
+    if sys.platform == 'win32': msvc_flags()
+
+    fameheader = famedir
+    confgr = Configuration(parent_package=parent_package,top_path=top_path)
+
+    sources = join('src', 'cfame.c')
+    libraries = "chli"
+    library_dirs = [famedir, join(famedir, "demo/hli")]
+    confgr.add_extension('cfame',
+                         sources=[sources],
+                         include_dirs=[nxheader, fameheader, library_dirs],
+                         libraries = [libraries],
+                         library_dirs = [library_dirs]
+                         )
+    return confgr
+    
+def msvc_flags():
+    """/DWIN32 flag is required on windows for compiling FAME
+C-hli code"""
+
+    from distutils.msvccompiler import MSVCCompiler
+
+    # remember old initialize
+    old_MSVCCompiler_initialize = MSVCCompiler.initialize
+
+    def fame_msvccompiler_initialize(self, *args, **kws):
+         apply(old_MSVCCompiler_initialize, (self,) + args, kws)
+         self.compile_options.extend(['/DWIN32'])
+
+    # "Install" new initialize
+    MSVCCompiler.initialize = fame_msvccompiler_initialize
+
+if __name__ == "__main__":
+
+    # build the cfame extension using numpy.distutils
+    from numpy.distutils.core import setup
+    config = configuration().todict() 
+    setup(**config)
+    
\ No newline at end of file

Added: trunk/Lib/sandbox/timeseries/io/fame/src/cfame.c
===================================================================
--- trunk/Lib/sandbox/timeseries/io/fame/src/cfame.c	2007-02-15 16:42:18 UTC (rev 2711)
+++ trunk/Lib/sandbox/timeseries/io/fame/src/cfame.c	2007-02-15 17:52:27 UTC (rev 2712)
@@ -0,0 +1,998 @@
+#include <Python.h>
+#include <structmember.h>
+#include <arrayobject.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <hli.h>
+#include <chlilib.c>
+
+//Constants
+#define MAXOBJNAME 64
+#define MAXNLLENGTH 1000
+
+#define CALLFAME(cmd) Py_BEGIN_ALLOW_THREADS; cmd; Py_END_ALLOW_THREADS; if (checkError(status)) return NULL
+
+#define ROUND(x) ((x)>=0?(long)((x)+0.5):(long)((x)-0.5))
+
+/**********************************************************************/
+
+static float  nmistt[3];        //Numeric
+static double pmistt[3];        //Precision
+static int    bmistt[3];        //Boolean
+static int    dmistt[3];        //Date
+
+//Numeric
+static float N_ND = 1.701419e+038;
+static float N_NC = 1.701418e+038;
+static float N_NA = 1.701417e+038;
+// Precision
+static double P_ND = 1.70141507979e+038;
+static double P_NC = 1.70141507978e+038;
+static double P_NA = 1.70141507977e+038;
+// Boolean
+static int B_ND = 127;
+static int B_NC = 126;
+static int B_NA = 125;
+// Date
+static int D_ND = -1;
+static int D_NC = -2;
+static int D_NA = -3;
+
+static char cfame_doc[] = "Module providing access to FAME functionality.";
+
+//if there was an error, we need to set the Python error status before returning
+static checkError(int status)
+{
+    if (status != HSUCC && status != HTRUNC)
+    {
+        char message[1000];
+        PyErr_SetString(PyExc_RuntimeError, getsta(status, message));
+        cfmfin(&status);
+        return 1;
+    }
+    return 0;
+}
+
+static int makeTranslationTables(void)
+{
+    //Set up translation tables for ND, NC, NA mappings
+    //(the missing-value constants for each FAME data type)
+    int status;
+
+    cfmspm(&status, P_NC, P_ND, P_NA, pmistt);  //Precision
+    cfmsnm(&status, N_NC, N_ND, N_NA, nmistt);  //Numeric
+    cfmsbm(&status, B_NC, B_ND, B_NA, bmistt);  //Boolean
+    cfmsdm(&status, D_NC, D_ND, D_NA, dmistt);  //Date
+    // NOTE(review): status is not checked after any of these calls, so a
+    // failure to install a translation table would go unnoticed
+    return 0;
+}
+///////////////////////////////////////////////////////////////////////
+
+static char cfame_open_doc[] = "open(database, access, large)\n\nOpens a FAME database and returns a FAME db identifier.";
+// doc fix: the docstring misspelled "identifier" and omitted the third
+// argument ("large") that the format string below actually parses
+static PyObject *
+cfame_open(PyObject *self, PyObject *args)
+{
+    int status;
+    int dbkey, access, large;
+    const char *dbname;
+    if (!PyArg_ParseTuple(args, "sii:open", &dbname, &access, &large)) return NULL;
+
+    // when creating or overwriting a database, select its size class first
+    if (access == HOMODE || access == HCMODE) {
+
+        if (large) { CALLFAME(cfmsopt(&status, "DBSIZE", "LARGE")); }
+        else       { CALLFAME(cfmsopt(&status, "DBSIZE", "STANDARD")); }
+    }
+
+    CALLFAME(cfmopdb (&status, &dbkey, dbname, access));
+
+    return PyInt_FromLong(dbkey);
+}
+
+static char cfame_close_doc[] = "close(database_id)\n\nCloses an open FAME database.";
+static PyObject *
+cfame_close(PyObject *self, PyObject *args)
+{
+    int status;
+    int dbkey;
+    if (!PyArg_ParseTuple(args, "i:close", &dbkey)) return NULL;
+
+    // NOTE(review): unlike most FAME calls in this module this one is not
+    // wrapped in CALLFAME, so the GIL is held while the database closes
+    cfmcldb (&status, dbkey);
+    if (checkError(status)) return NULL;
+
+    return PyInt_FromLong(0);
+}
+
+static char cfame_wildlist_doc[] = "wildlist(dbkey, wildlist expression, wildonly)\n\nPerforms a wildlist.";
+static PyObject *
+cfame_wildlist(PyObject *self, PyObject *args)
+{
+    int status;
+    int dbkey;
+    const char *expression;
+    int class, type, freq;
+    char objnam[MAXOBJNAME+1];
+    PyObject *result = PyList_New(0);
+
+    if (!PyArg_ParseTuple(args, "is:wildlist(dbkey, expression)", &dbkey, &expression)) return NULL;
+
+    // initialize wildlist
+    CALLFAME(cfminwc (&status, dbkey, expression));
+
+    // get first data object matching wildlist expression
+    cfmnxwc (&status, dbkey, objnam, &class, &type, &freq);
+
+    if (status == HNOOBJ)
+        // no matching objects, return empty list
+        return result;
+    else
+        if (checkError(status)) return NULL;
+
+    while (status != HNOOBJ)
+    {
+        // append objnam to list. bug fix: PyList_Append does not steal the
+        // reference, so the new string must be released afterwards or it
+        // leaks one reference per matched object
+        PyObject *item = PyString_FromString(objnam);
+        if (item == NULL) return NULL;
+        if (PyList_Append(result, item)) { Py_DECREF(item); return NULL; }
+        Py_DECREF(item);
+
+        // get next item
+        cfmnxwc (&status, dbkey, objnam, &class, &type, &freq);
+        if (status != HNOOBJ && status != HSUCC)
+            if (checkError(status)) return NULL;
+    }
+
+    return result;
+}
+
+// Make appropriate boolean mask for data (based on special constants).
+// Returns a new int32 ndarray of 0/1 flags (1 = missing value), or NULL
+// with a Python exception set.
+static PyObject *make_mask(void *data, int arraylen, int type) {
+
+    PyArrayObject *mask;
+    int i;
+    int *mask_raw;
+    npy_intp dims[1];
+
+    if ((mask_raw = malloc(arraylen * sizeof(int))) == NULL) return PyErr_NoMemory();
+
+    switch(type)
+    {
+
+        case HNUMRC: { // numeric
+            float *castdata = (float*)data;
+            float val;
+
+            for (i = 0; i < arraylen; i++) {
+                val = castdata[i];
+                if (val == N_ND || val == N_NC || val == N_NA) {
+                    mask_raw[i] = 1;
+                } else {
+                    mask_raw[i] = 0;
+                }
+            }
+        } break;
+        case HBOOLN: { // boolean
+            int *castdata = (int*)data;
+            int val;
+
+            for (i = 0; i < arraylen; i++) {
+                val = castdata[i];
+                if (val == B_ND || val == B_NC || val == B_NA) {
+                    mask_raw[i] = 1;
+                } else { mask_raw[i] = 0;}
+            }
+        } break;
+        case HSTRNG: { // string
+            char **castdata = (char**)data;
+            char *val;
+            for (i = 0; i < arraylen; i++) {
+                val = castdata[i];
+                // bug fix: 'val == ""' compared the pointer against the
+                // address of a string literal and was never true; an
+                // empty string marks a missing value
+                if (val[0] == '\0') {
+                    mask_raw[i] = 1;
+                } else { mask_raw[i] = 0; }
+            }
+        } break;
+        case HPRECN: { // precision
+            double *castdata = (double*)data;
+            double val;
+            for (i = 0; i < arraylen; i++) {
+                val = castdata[i];
+                if (val == P_ND || val == P_NC || val == P_NA) {
+                    mask_raw[i] = 1;
+                } else { mask_raw[i] = 0; }
+            }
+        } break;
+        default:
+            if (type >= 8) { // date types
+                int *castdata = (int*)data;
+                int val;
+                for (i = 0; i < arraylen; i++) {
+                    val = castdata[i];
+                    if (val == D_ND || val == D_NC || val == D_NA) {
+                        mask_raw[i] = 1;
+                    } else { mask_raw[i] = 0; }
+                }
+            } else {
+                // bug fix: release the scratch buffer before the error return
+                free(mask_raw);
+                PyErr_SetString(PyExc_ValueError, "unsupported datatype");
+                return NULL;
+            }
+    }
+
+    // bug fix: PyArray_SimpleNewFromData takes npy_intp dims; passing an
+    // int* breaks on platforms where sizeof(npy_intp) != sizeof(int)
+    dims[0] = arraylen;
+    mask = (PyArrayObject*)PyArray_SimpleNewFromData(1, dims, PyArray_INT32, mask_raw);
+    mask->flags = (mask->flags) | NPY_OWNDATA;  // array now owns mask_raw
+
+    return (PyObject*)mask;
+}
+
+
+static char cfame_read_doc[] = "read(dbkey, data object name, startDate, endDate, dateSeriesFlag, longStr)\n\nReturns specified object.";
+//startDate(endDate) must be the int value of the startDate(endDate) using the frequency of the underlying data
+//dateSeriesFlag is 1 for date series 0 for case series
+//longStr is 1 for string series with very long items 0 otherwise. Use 1 with care as it takes up a lot of memory.
+//NOTE(review): the argument list actually parsed below is
+//(dbkey, object_name, first_point, last_point, max_string_len); the
+//docstring above does not match it -- confirm the intended signature.
+//NOTE(review): several error returns in this function leak heap buffers
+//and the partially-built returnVal dict; a cleanup pass would be welcome.
+static PyObject *
+cfame_read(PyObject *self, PyObject *args)
+{
+    int status, dbkey, i;
+    int dataFlag;
+
+    const char *object_name;
+
+    int first_point, last_point; //this defines the custom range to read (-1 for both means read all)
+    int longStr; //1 for case series with really long items (unused here)
+
+    int max_string_len;
+
+    int class, type, freq, start_year, start_period, end_year, end_period;  // data fields returned by cfmosiz
+    int basis, observed, created_year, created_month, created_day, mod_year, mod_month, mod_day;  //additional fields for cfmwhat
+    char desc[1], doc[1];
+    PyObject * returnVal = NULL;
+    PyObject * values = NULL;
+    int numobjs, typeNum;
+    int range[3];
+    void* dbValues;
+
+    // empty description/documentation buffers for cfmwhat
+    desc[0] = 0x0;
+    doc[0]  = 0x0;
+
+    // "isiii:read" parses an int (dbkey), a string (object name) and three
+    // more ints, using "read" as the function name in error messages
+    if (!PyArg_ParseTuple(args, "isiii:read",
+                                &dbkey,
+                                &object_name,
+                                &first_point,
+                                &last_point,
+                                &max_string_len)) return NULL;   //get params
+
+    CALLFAME(cfmwhat(&status, dbkey, object_name, &class, &type, &freq, &basis, &observed, &start_year, &start_period, &end_year, &end_period, &created_year, &created_month, &created_day, &mod_year, &mod_month, &mod_day, desc, doc));
+
+    // metadata common to every object class
+    returnVal = PyDict_New();
+
+    PyDict_SetItemString(returnVal, "type", PyInt_FromLong(type));
+    PyDict_SetItemString(returnVal, "freq", PyInt_FromLong(freq));
+    PyDict_SetItemString(returnVal, "class", PyInt_FromLong(class));
+    PyDict_SetItemString(returnVal, "mod_year", PyInt_FromLong(mod_year));
+    PyDict_SetItemString(returnVal, "mod_month", PyInt_FromLong(mod_month));
+    PyDict_SetItemString(returnVal, "mod_day", PyInt_FromLong(mod_day));
+    PyDict_SetItemString(returnVal, "observed", PyInt_FromLong(observed));
+    PyDict_SetItemString(returnVal, "basis", PyInt_FromLong(basis));
+
+    if (type == HNAMEL)     //namelists
+    {
+        int length;
+        char names[MAXOBJNAME*MAXNLLENGTH+1];
+
+        CALLFAME(cfmgtnl(&status, dbkey, object_name, HNLALL, names, MAXOBJNAME*MAXNLLENGTH, &length));
+        PyDict_SetItemString(returnVal, "data", PyString_FromStringAndSize(names, length)); //just return the namelist as a comma delimited string
+    }
+    else
+    {
+        dataFlag = 1;
+
+        switch (class)
+        {
+            case HSERIE:
+                //initialize custom range
+
+                //if we are dealing with a date we need to convert
+                //'begin' and 'end' dates to year/period format
+
+                if (first_point != -1) {
+                    if (freq == HCASEX) {
+                        start_year = 0;
+                        start_period = first_point;
+                    } else {
+                        CALLFAME(cfmdatp(&status, freq, first_point, &start_year, &start_period));
+                    }
+                } else {
+                    if (freq == HCASEX) {
+                        /* for case series, if first_point not explicitly defined, always
+                        read starting at index 1 (not the first data point like with
+                        time series */
+                        start_year = 0;
+                        start_period = 1;
+                    }
+                }
+
+                if (last_point != -1) {
+                    if (freq == HCASEX) {
+                        end_year = 0;
+                        end_period = last_point;
+                    } else {
+                        CALLFAME(cfmdatp(&status, freq, last_point, &end_year, &end_period));
+                    }
+                }
+
+                // empty or inverted range: return metadata without data
+                if (end_year < start_year ||
+                    (start_year == end_year && end_period < start_period) ||
+                    (start_period == -1)) {
+                    dataFlag = 0;
+                    break;
+                }
+
+                numobjs = -1;
+                CALLFAME(cfmsrng(&status, freq, &start_year, &start_period, &end_year, &end_period, range, &numobjs)); //set the range of data to get
+                break;
+
+            case HSCALA:
+                numobjs = 1;
+                break;
+            default:  //This should never happen
+                PyErr_SetString(PyExc_RuntimeError, "Critical internal error #0 in CFAMEMODULE");
+                return NULL;
+        }
+
+        if (dataFlag)
+        {
+            switch (type)   //initialize an array of the correct type to get the data from Fame
+            {
+                case HNUMRC:
+                    if ((dbValues = malloc(numobjs * sizeof(float))) == NULL) return PyErr_NoMemory();
+                    typeNum = PyArray_FLOAT;
+                    break;
+                case HPRECN:
+                    if ((dbValues = malloc(numobjs * sizeof(double))) == NULL) return PyErr_NoMemory();
+                    typeNum = PyArray_DOUBLE;
+                    break;
+                case HSTRNG:
+                    // allocation for strings happens below, per element
+                    typeNum = PyArray_OBJECT;
+                    break;
+                default:
+                    // date types (and boolean) are read as ints
+                    if ((dbValues = malloc(numobjs * sizeof(int))) == NULL) return PyErr_NoMemory();
+                    typeNum = PyArray_INT;
+                    break;
+            }
+            if (type == HSTRNG)     //additional initialization for getting strings
+            {
+
+                if (class == HSERIE)
+                {
+                    PyObject** temp;
+                    // NOTE(review): this 'mask' is never used; an inner
+                    // declaration below shadows it
+                    PyArrayObject *mask;
+                    //string series
+                    int* missing;
+                    int* outlen;
+                    int inlen[1];
+                    int *mask_raw;
+
+                    if ( ((dbValues = malloc(numobjs * sizeof(char*))) == NULL) ||
+                         ((temp = malloc(numobjs * sizeof(PyObject*))) == NULL) ||
+                         ((mask_raw = malloc(numobjs * sizeof(int))) == NULL) ||
+                         ((missing = malloc(numobjs * sizeof(int))) == NULL) ||
+                         ((outlen = malloc(numobjs * sizeof(int))) == NULL) ) {
+                        return PyErr_NoMemory();
+                    }
+
+                    // one fixed-size buffer per element for cfmgtsts to fill
+                    for (i = 0; i < numobjs; i++) {
+                        if ((((char**)dbValues)[i] = malloc((max_string_len+1) * sizeof(char))) == NULL) {
+                            return PyErr_NoMemory();
+                        }
+                    }
+
+                    // negative length tells FAME every buffer has the same size
+                    inlen[0] = -max_string_len;
+
+                    //we need to know how big each string will be so that we can set up room for it
+                    CALLFAME(cfmgtsts(&status, dbkey, object_name, range, dbValues, missing, inlen, outlen));
+                    for (i = 0; i < numobjs; i++) {
+                        if (outlen[i] > max_string_len) {
+                            PyErr_SetString(PyExc_RuntimeError, "FAME returned a string longer than the max_string_len. Adjust max_string_len parameter.");
+                            return NULL;
+                        } else {
+
+                            if (missing[i] != HNMVAL) {
+                                // missing value: empty string + mask flag
+                                if ((temp[i] = PyString_FromString("")) == NULL) {
+                                    PyErr_SetString(PyExc_RuntimeError, "Failed to initialize missing string element.");
+                                    return NULL;
+                                }
+                                mask_raw[i] = 1;
+                            } else {
+                                if ((temp[i] = PyString_FromStringAndSize(((char**)dbValues)[i], outlen[i])) == NULL) {
+                                    return PyErr_NoMemory();
+                                }
+                                mask_raw[i] = 0;
+                            }
+
+                            free(((char**)dbValues)[i]);
+                        }
+                    }
+
+                    // swap the char* buffers for the PyObject* array
+                    free(dbValues);
+                    dbValues = temp;
+
+                    {
+                        // NOTE(review): &numobjs is int*, but
+                        // PyArray_SimpleNewFromData expects npy_intp dims --
+                        // a portability bug on 64-bit platforms
+                        PyArrayObject* data = (PyArrayObject *)PyArray_SimpleNewFromData(1, &numobjs, typeNum, dbValues);
+                        PyArrayObject* mask = (PyArrayObject*)PyArray_SimpleNewFromData(1, &numobjs, PyArray_INT32, mask_raw);
+                        PyObject* startindex = PyInt_FromLong((long)range[1]);
+
+                        // transfer ownership of dbValues to the array
+                        data->flags = (data->flags) | NPY_OWNDATA;
+                        mask->flags = (mask->flags) | NPY_OWNDATA;
+
+                        PyDict_SetItemString(returnVal, "data", (PyObject*)data);
+                        PyDict_SetItemString(returnVal, "mask", (PyObject*)mask);
+                        PyDict_SetItemString(returnVal, "startindex", startindex);
+
+                        Py_DECREF(data);
+                        Py_DECREF(mask);
+                        Py_DECREF(startindex);
+                    }
+
+                    free(missing);
+                    free(outlen);
+                }
+                else
+                {
+                    //get one string
+                    int missing;
+                    int length;
+
+                    if ((dbValues = malloc((max_string_len+1) * sizeof(char))) == NULL) return PyErr_NoMemory();
+
+                    CALLFAME(cfmgtstr(&status, dbkey, object_name, NULL, dbValues, &missing, max_string_len, &length));
+
+                    if (length > max_string_len) {
+                        PyErr_SetString(PyExc_RuntimeError, "FAME returned a string longer than the maxlength. Use extra long string parameter");
+                        return NULL;
+                    }
+
+                    {
+                        PyObject* data = PyString_FromString((char*)dbValues);
+                        PyObject* mask;
+                        // scalars have no meaningful start index
+                        PyObject* startindex = PyInt_FromLong(-1);
+
+                        if (missing != HNMVAL) { mask = PyBool_FromLong(1); }
+                        else {                   mask = PyBool_FromLong(0); }
+
+                        PyDict_SetItemString(returnVal, "data", data);
+                        PyDict_SetItemString(returnVal, "mask", mask);
+                        PyDict_SetItemString(returnVal, "startindex", startindex);
+
+                        Py_DECREF(data);
+                        Py_DECREF(mask);
+                        Py_DECREF(startindex);
+                    }
+
+                }
+            } else {
+                // non-string data: read the raw range with the matching
+                // missing-value translation table
+                switch(type)
+                {
+
+                    case HNUMRC:
+                        CALLFAME(cfmrrng(&status, dbkey, object_name, range, dbValues, HTMIS, nmistt));
+                        break;
+                    case HBOOLN:
+                        CALLFAME(cfmrrng(&status, dbkey, object_name, range, dbValues, HTMIS, bmistt));
+                        break;
+                    case HPRECN:
+                        CALLFAME(cfmrrng(&status, dbkey, object_name, range, dbValues, HTMIS, pmistt));
+                        break;
+                    default:
+                        if (type >= 8) {
+                            CALLFAME(cfmrrng(&status, dbkey, object_name, range, dbValues, HTMIS, dmistt));
+                        } else {
+                            PyErr_SetString(PyExc_ValueError, "unsupported datatype");
+                            return NULL;
+                        }
+                }
+
+                {
+                    PyArrayObject* data = (PyArrayObject *)PyArray_SimpleNewFromData(1, &numobjs, typeNum, dbValues);
+                    PyObject* mask = make_mask(dbValues, numobjs, type);
+                    PyObject* startindex = PyInt_FromLong((long)range[1]);
+
+                    // transfer ownership of dbValues to the array
+                    data->flags = (data->flags) | NPY_OWNDATA;
+
+                    PyDict_SetItemString(returnVal, "data", (PyObject*)data);
+                    PyDict_SetItemString(returnVal, "mask", mask);
+                    PyDict_SetItemString(returnVal, "startindex", startindex);
+
+                    Py_DECREF(data);
+                    Py_DECREF(mask);
+                    Py_DECREF(startindex);
+                }
+            }
+
+
+
+        } // (dataFlag)
+
+
+        //if dataFlag was set return an object with no data
+        if (!dataFlag) {
+            return returnVal;
+        }
+    } // (type == HNAMEL)     //namelists
+    return returnVal;
+}
+
+
+// replace masked values with the ND constant.
+// Returns a new contiguous copy of orig_data with masked slots overwritten
+// by the type-appropriate ND (missing) constant, or NULL on error.
+static PyArrayObject *replace_mask(PyObject *orig_data, PyObject *orig_mask, int type) {
+
+    PyArrayObject *data_copy, *data, *mask;
+    PyObject *valMask, *fillVal;
+    int i;
+
+    data_copy = (PyArrayObject *)PyArray_Copy((PyArrayObject *)orig_data);
+    data = PyArray_GETCONTIGUOUS(data_copy);
+    // bug fix: GETCONTIGUOUS returns its own reference, so ours to
+    // data_copy must be released or it leaks
+    Py_DECREF(data_copy);
+
+    // don't care if mask is contiguous or not
+    mask = (PyArrayObject *)orig_mask;
+
+    switch(type)
+    {
+
+        case HNUMRC:
+            fillVal = PyFloat_FromDouble(N_ND);
+            break;
+        case HBOOLN:
+            fillVal = PyInt_FromLong(B_ND);
+            break;
+        case HSTRNG:
+            fillVal = PyString_FromString("");
+            break;
+        case HPRECN:
+            fillVal = PyFloat_FromDouble(P_ND);
+            break;
+        default:
+            if (type >= 8) {
+                // date types
+                fillVal = PyInt_FromLong(D_ND);
+            } else {
+                // bug fix: release the copied array before the error return
+                Py_DECREF(data);
+                PyErr_SetString(PyExc_ValueError, "unsupported datatype");
+                return NULL;
+            }
+    }
+
+    for (i = 0; i < data->dimensions[0]; i++) {
+        valMask = PyArray_GETITEM(mask, PyArray_GetPtr(mask, &i));
+        if (PyInt_AsLong(valMask)) {
+            // SETITEM copies the value, so fillVal can be reused
+            PyArray_SETITEM(data, PyArray_GetPtr(data, &i), fillVal);
+        }
+        Py_DECREF(valMask);
+    }
+    // bug fix: fillVal was leaked on every call
+    Py_DECREF(fillVal);
+    return data;
+}
+
+static char cfame_write_series_doc[] = "write_series(dbkey, name, data, mask, start_index, end_index, source_type, source_freq)\n\nWrites a series to the DB";
+static PyObject *
+cfame_write_series(PyObject *self, PyObject *args)
+{
+    int status, dbkey;
+    PyObject *dataArrayTemp, *maskArrayTemp;
+    PyArrayObject *dataArray, *maskArray;
+    const char* name;
+    char errMsg[500];
+    // (removed unused locals 'ppd' and 'returnVal')
+    int class, start_index, end_index, numobjs, source_type, type,
+        source_freq, freq, start_year, start_period, end_year, end_period;
+    int range[3];
+
+    if (!PyArg_ParseTuple(args, "isOOiiii:write_series", &dbkey, &name, &dataArrayTemp, &maskArrayTemp, &start_index, &end_index, &source_type, &source_freq)) return NULL;   //get params
+    CALLFAME(cfmosiz(&status, dbkey, name, &class, &type, &freq, &start_year, &start_period, &end_year, &end_period));   //get object info
+
+    // the data being written must match the existing object's type/frequency
+    if (source_type != type) {
+        PyErr_SetString(PyExc_RuntimeError, "received a non-matching type, cannot write");
+        return NULL;
+    }
+
+    if (source_freq != freq) {
+        PyErr_SetString(PyExc_RuntimeError, "received a non-matching frequency, cannot write");
+        return NULL;
+    }
+
+    numobjs = -1;
+    if (freq == HCASEX) {
+        // case series use plain indices, not dates
+        start_year = 0;
+        end_year = 0;
+        start_period = start_index;
+        end_period = end_index;
+    } else if (freq >= 226) {  // HOURLY, MINUTELY, or SECONDLY
+        CALLFAME(timeper(&status, freq, start_index, &start_year, &start_period));
+        CALLFAME(timeper(&status, freq, end_index, &end_year, &end_period));
+    } else {   //convert int dates to fame period dates
+        CALLFAME(cfmdatp(&status, freq, start_index, &start_year, &start_period));
+        CALLFAME(cfmdatp(&status, freq, end_index, &end_year, &end_period));
+    }
+    //set the range that we will be writing to
+    CALLFAME(cfmsrng(&status, freq, &start_year, &start_period, &end_year, &end_period, range, &numobjs));
+    if (!PyArray_Check(dataArrayTemp)) {
+        PyErr_SetString(PyExc_RuntimeError, "write_series was passed something other than an ndarray");
+        return NULL;
+    }
+
+    if (type == HSTRNG) {
+
+        //setting strings requires a different function call
+        int* missing;
+        int* lengths;
+        char** values;
+        int i;
+
+        PyObject *str, *mask;
+
+        if (((missing = malloc(numobjs * sizeof(int))) == NULL) ||
+            ((lengths = malloc(numobjs * sizeof(int))) == NULL) ||
+            ((values = malloc(numobjs * sizeof(char*))) == NULL)) {
+            return PyErr_NoMemory();
+        }
+
+        dataArray = (PyArrayObject*)dataArrayTemp;
+        maskArray = (PyArrayObject*)maskArrayTemp;
+
+        for (i = 0; i < numobjs; i++) {
+            //extract a string and add it to the array to be written
+
+            str = PyArray_GETITEM(dataArray, PyArray_GetPtr(dataArray, &i));
+            mask = PyArray_GETITEM(maskArray, PyArray_GetPtr(maskArray, &i));
+
+            lengths[i] = PyString_Size(str);
+            if ((values[i] = malloc((lengths[i]+1) * sizeof(char))) == NULL) return PyErr_NoMemory();
+            // bug fix: copy the bytes into our own buffer. The original
+            // reassigned values[i] to python's internal string buffer,
+            // leaking the malloc'd block and later free()ing memory owned
+            // by the interpreter (heap corruption).
+            strcpy(values[i], PyString_AsString(str));
+
+            if (PyInt_AsLong(mask)) {
+                missing[i] = HNDVAL;
+            } else {
+                missing[i] = HNMVAL;
+            }
+
+            // bug fix: PyArray_GETITEM returns new references
+            Py_DECREF(str);
+            Py_DECREF(mask);
+        }
+        //write all the strings to Fame
+        CALLFAME(cfmwsts(&status, dbkey, name, range, values, missing, lengths));
+
+        //clear the extra memory that the strings are using
+        for (i = 0; i < numobjs; i++) {
+            free(values[i]);
+        }
+
+        free(missing);
+        free(lengths);
+        free(values);
+
+    } else {
+
+        // replace masked values with the ND constant
+        dataArray = replace_mask(dataArrayTemp, maskArrayTemp, type);
+        // bug fix: replace_mask can fail; propagate instead of crashing
+        if (dataArray == NULL) return NULL;
+
+        switch (type) {
+            case HNUMRC: { // numeric
+                    CALLFAME(cfmwrng(&status, dbkey, name, range, dataArray->data, HTMIS, nmistt));
+                    break;
+                }
+            case HPRECN: { // precision
+                    CALLFAME(cfmwrng(&status, dbkey, name, range, dataArray->data, HTMIS, pmistt));
+                    break;
+                }
+            case HBOOLN: { // boolean
+                    CALLFAME(cfmwrng(&status, dbkey, name, range, dataArray->data, HTMIS, bmistt));
+                    break;
+                }
+            default:
+                if(type >= 8) { // date type
+                    CALLFAME(cfmwrng(&status, dbkey, name, range, dataArray->data, HTMIS, dmistt));
+                } else {
+                    Py_DECREF(dataArray);
+                    sprintf(errMsg, "unsupported data type: %i", type);
+                    PyErr_SetString(PyExc_RuntimeError, errMsg);
+                    return NULL;
+                }
+
+        }
+
+        Py_DECREF(dataArray);
+
+    }
+
+    Py_RETURN_NONE;
+}
+
+
+static char cfame_write_scalar_doc[] = "write_scalar(dbkey, name, object, source_type)\n\nWrites a scalar to the DB";
+static PyObject *
+cfame_write_scalar(PyObject *self, PyObject *args)
+{
+    int status, dbkey;
+    PyObject* object;
+    const char* name;
+    int class, freq, start_year, start_period, end_year, end_period;  // data fields returned by cfmosiz
+    int source_type, type;
+    // NOTE(review): range is never initialized before being passed to the
+    // cfmw* calls below; presumably FAME ignores it for scalar writes --
+    // confirm against the CHLI documentation
+    int range[3];
+
+    if (!PyArg_ParseTuple(args, "isOi:write_scalar", &dbkey, &name, &object, &source_type)) return NULL;   //get params
+    CALLFAME(cfmosiz(&status, dbkey, name, &class, &type, &freq, &start_year, &start_period, &end_year, &end_period));   //get object info
+
+    // the value being written must match the existing object's type
+    if (source_type != type) {
+        PyErr_SetString(PyExc_RuntimeError, "received a non-matching type, cannot write");
+        return NULL;
+    }
+
+    switch (type) {
+        case HSTRNG: {
+                char* value;
+                int length;
+
+                length  = PyString_Size(object);
+                // bug fix: check the allocation, and copy the bytes instead
+                // of reassigning 'value' to python's internal buffer (the
+                // original leaked the malloc'd block and then free()d
+                // memory owned by the interpreter)
+                if ((value = malloc((length + 1) * sizeof(char))) == NULL) return PyErr_NoMemory();
+                strcpy(value, PyString_AsString(object));
+
+                CALLFAME(cfmwstr(&status, dbkey, name, range, value, HNMVAL, length));
+                free(value);
+            } break;
+        case HNUMRC: {
+                float values[1];
+                values[0] = (float)PyFloat_AsDouble(object);
+                CALLFAME(cfmwrng(&status, dbkey, name, range, values, HNTMIS, NULL));
+            } break;
+        case HPRECN: {
+                double values[1];
+                values[0] = PyFloat_AsDouble(object);
+                CALLFAME(cfmwrng(&status, dbkey, name, range, values, HNTMIS, NULL));
+            } break;
+        case HBOOLN: {
+                int values[1];
+                values[0] = (int)PyInt_AsLong(object);
+                CALLFAME(cfmwrng(&status, dbkey, name, range, values, HNTMIS, NULL));
+            } break;
+        default:
+            if (type >= 8) {
+                // date data type
+                int values[1];
+                values[0] = (int)PyInt_AsLong(object);
+                CALLFAME(cfmwrng(&status, dbkey, name, range, values, HNTMIS, NULL));
+            } else {
+                PyErr_SetString(PyExc_ValueError, "Unrecognized type, cannot write");
+                return NULL;
+            }
+    }
+    Py_RETURN_NONE;
+}
+
+
+static char cfame_write_namelist_doc[] = "write_namelist(dbkey, name, namelist_string)\n\nWrites a namelist to the DB";
+static PyObject *
+cfame_write_namelist(PyObject *self, PyObject *args)
+{
+    int status, dbkey;
+    const char* name;
+    const char* namelist;  // comma delimited list of object names
+
+    if (!PyArg_ParseTuple(args, "iss:writeNamelist", &dbkey, &name, &namelist)) return NULL;
+
+    // HNLALL: replace the entire namelist contents
+    CALLFAME(cfmwtnl(&status, dbkey, name, HNLALL, namelist));
+
+    Py_RETURN_NONE;
+}
+
+static char cfame_create_doc[] = "create(dbkey, object_name, class_arg, freq_arg, type_arg, basis_arg, observed_arg)\n\nCreates a fame object in the DB";
+static PyObject *
+cfame_create(PyObject *self, PyObject *args)
+{
+    int status, dbkey;
+    const char* object_name;
+    // class/freq/type/basis/observed are passed straight through as the
+    // FAME HLI constant values
+    int class_arg, freq_arg, type_arg, basis_arg, observed_arg;
+
+    if (!PyArg_ParseTuple(args, "isiiiii:create", &dbkey, &object_name, &class_arg, &freq_arg, &type_arg, &basis_arg, &observed_arg)) return NULL;   //get params
+    CALLFAME(cfmnwob(&status, dbkey, object_name, class_arg, freq_arg, type_arg, basis_arg, observed_arg));
+
+    Py_RETURN_NONE;
+}
+
+/* Delete the named object from the database (cfmdlob).  FAME errors are
+   converted to Python exceptions by CALLFAME. */
+static char cfame_remove_doc[] = "remove(dbkey, object_name)";
+static PyObject*
+cfame_remove(PyObject* self, PyObject* args)
+{
+    int status, dbkey;
+    const char* object_name;
+
+    if (!PyArg_ParseTuple(args, "is:remove", &dbkey, &object_name)) return NULL;   //get params
+    CALLFAME(cfmdlob(&status, dbkey, object_name));
+
+    Py_RETURN_NONE;
+}
+
+/* Test whether an object exists by probing its desc/doc lengths (cfmdlen).
+   NOTE(review): only HNOOBJ maps to False -- any *other* FAME error status
+   (bad dbkey, I/O failure, ...) falls through to True.  Confirm this is the
+   intended behaviour rather than raising on unexpected statuses. */
+static char cfame_exists_doc[] = "exists(dbkey, object_name)";
+static PyObject*
+cfame_exists(PyObject* self, PyObject* args)
+{
+    int status, dbkey;
+    const char* object_name;
+    int deslen, doclen;
+
+    if (!PyArg_ParseTuple(args, "is:exists", &dbkey, &object_name)) return NULL;   //get params
+
+    /* deliberately not wrapped in CALLFAME: a failing status is the signal */
+    cfmdlen (&status, dbkey, object_name, &deslen, &doclen);
+    if (status == HNOOBJ)
+        Py_RETURN_FALSE;
+    else
+        Py_RETURN_TRUE;
+}
+
+/* Return a dict {mod_year, mod_month, mod_day}: the last-modified date of an
+   object, obtained via cfmwhat.  Only the modification fields are exposed;
+   the other outputs of cfmwhat are discarded.
+   NOTE(review): desc/doc are 1-byte buffers pre-terminated with NUL, so no
+   description/documentation text is retrieved here -- presumably cfmwhat
+   treats a leading NUL as "accept zero chars" (cf. the memset trick in
+   cfame_whats below); confirm against the FAME CHLI docs.
+   NOTE(review): PyDict_SetItemString does NOT steal the value reference, so
+   each PyInt_FromLong result leaks one reference per call. */
+static char cfame_updated_doc[] = "updated(dbkey, object_name)";
+static PyObject*
+cfame_updated(PyObject* self, PyObject* args)
+{
+    int status, dbkey;
+    const char *object_name;
+    int class, type, freq, start_year, start_period, end_year, end_period;  // data fields returned by cfmosiz
+    int basis, observ, created_year, created_month, created_day, mod_year, mod_month, mod_day;
+    char desc[1], doc[1];
+    PyObject * returnVal = NULL;
+
+    desc[0] = 0x0;
+    doc[0]  = 0x0;
+
+    if (!PyArg_ParseTuple(args, "is:updated", &dbkey, &object_name)) return NULL;   //get params
+
+    CALLFAME(cfmwhat(&status, dbkey, object_name, &class, &type, &freq, &basis, &observ,
+                     &start_year, &start_period, &end_year, &end_period,
+                     &created_year, &created_month, &created_day,
+                     &mod_year, &mod_month, &mod_day,
+                     desc, doc));
+
+    returnVal = PyDict_New();
+
+    PyDict_SetItemString(returnVal, "mod_year", PyInt_FromLong(mod_year));
+    PyDict_SetItemString(returnVal, "mod_month", PyInt_FromLong(mod_month));
+    PyDict_SetItemString(returnVal, "mod_day", PyInt_FromLong(mod_day));
+
+    return returnVal;
+}
+
+/* Full metadata query for one object: type, freq, class, start/end period,
+   last-modified date, plus the desc and doc strings (sized via cfmdlen).
+   NOTE(review): if the second malloc fails, tempdesc is leaked; and if any
+   CALLFAME after the mallocs errors out (presumably returning NULL), both
+   buffers leak -- confirm against the CALLFAME definition.
+   NOTE(review): PyDict_SetItemString does not steal references, so every
+   PyInt_FromLong/PyString_FromString value here leaks one reference. */
+static char cfame_whats_doc[] = "whats(dbkey, data object name)\n\nReturns information about the specified object.";
+static PyObject *
+cfame_whats(PyObject *self, PyObject *args)
+{
+    //arguments
+    char *object_name;
+    int dbkey;
+
+    //return val
+    PyObject *returnVal = NULL;
+
+    int deslen, doclen; /* Length of desc and doc string return from cfmdlen */
+
+    int status;
+    void *tempdesc, *tempdoc;
+    char *desc, *doc;
+    int class, type, freq, start_year, start_period, end_year, end_period;  // data fields returned by cfmosiz
+    int basis, observ, created_year, created_month, created_day, mod_year, mod_month, mod_day;  //additional fields for cfmwhat
+
+    //get arguments.
+    //"is" means first one is integer second is string
+    //"whats" is what will appear in python error messages if this method crashes
+    if (!PyArg_ParseTuple(args, "is:whats",
+                                &dbkey,
+                                &object_name)) return NULL;
+
+    /* Get the length of the desc and doc strings */
+    CALLFAME(cfmdlen(&status, dbkey, object_name, &deslen, &doclen));
+
+    /* allocate the memory needed for the desc/doc strings */
+    if ((tempdesc = malloc((deslen + 1) * sizeof(char))) == NULL) return PyErr_NoMemory();
+    if ((tempdoc = malloc((doclen + 1) * sizeof(char))) == NULL) return PyErr_NoMemory();
+
+    /* set the memory to non-null chars to tell fame the length of string we will accept */
+    memset(tempdesc, 'A', deslen);
+    memset(tempdoc, 'A', doclen);
+
+    /* cast to char array */
+    desc = (char*)tempdesc;
+    doc = (char*)tempdoc;
+
+    /* terminate the string with a null */
+    desc[deslen] = 0x0;
+    doc[doclen] = 0x0;
+
+    CALLFAME(cfmwhat(&status, dbkey, object_name, &class, &type, &freq, &basis, &observ,
+                     &start_year, &start_period, &end_year, &end_period,
+                     &created_year, &created_month, &created_day,
+                     &mod_year, &mod_month, &mod_day,
+                     desc, doc));
+
+    returnVal = PyDict_New();
+
+    PyDict_SetItemString(returnVal, "type", PyInt_FromLong(type));
+    PyDict_SetItemString(returnVal, "freq", PyInt_FromLong(freq));
+    PyDict_SetItemString(returnVal, "class", PyInt_FromLong(class));
+    PyDict_SetItemString(returnVal, "start_year", PyInt_FromLong(start_year));
+    PyDict_SetItemString(returnVal, "start_period", PyInt_FromLong(start_period));
+    PyDict_SetItemString(returnVal, "end_year", PyInt_FromLong(end_year));
+    PyDict_SetItemString(returnVal, "end_period", PyInt_FromLong(end_period));
+    PyDict_SetItemString(returnVal, "mod_year", PyInt_FromLong(mod_year));
+    PyDict_SetItemString(returnVal, "mod_month", PyInt_FromLong(mod_month));
+    PyDict_SetItemString(returnVal, "mod_day", PyInt_FromLong(mod_day));
+    PyDict_SetItemString(returnVal, "desc", PyString_FromString(desc));
+    PyDict_SetItemString(returnVal, "doc", PyString_FromString(doc));
+
+    free((void*)desc);
+    free((void*)doc);
+
+    return returnVal;
+}
+
+/* Lightweight metadata query via cfmosiz.  Despite retrieving class, type and
+   the start/end period, the returned dict currently exposes only "freq".
+   NOTE(review): PyDict_SetItemString does not steal the reference from
+   PyInt_FromLong, so one reference leaks per call. */
+static char cfame_size_doc[] = "size(dbkey, data object name)\n\nReturns limited about the specified object.";
+static PyObject *
+cfame_size(PyObject *self, PyObject *args)
+{
+    //arguments
+    char *object_name;
+    int dbkey;
+
+    //return val
+    PyObject *returnVal = NULL;
+
+    int status;
+    int class, type, freq, start_year, start_period, end_year, end_period;  // data fields returned by cfmosiz
+
+    //get arguments.
+    //"is" means first one is integer second is string
+    //"size" is what will appear in python error messages if this method crashes
+    if (!PyArg_ParseTuple(args, "is:size",
+                                &dbkey,
+                                &object_name)) return NULL;
+
+    CALLFAME(cfmosiz(&status, dbkey, object_name, &class, &type, &freq, &start_year, &start_period, &end_year, &end_period));
+
+    returnVal = PyDict_New();
+
+    PyDict_SetItemString(returnVal, "freq", PyInt_FromLong(freq));
+    //To return other fields add them here
+    //look up cfmosiz in fame help for other fields
+
+    return returnVal;
+}
+
+/* Roll the database back to its last opened/posted state (cfmrsdb). */
+static char cfame_restore_doc[] = "restore(dbkey)\n\nDiscard any changes made to the database since it was last opened or posted.\nXXX: not sure what posted means, see FAME API";
+static PyObject *
+cfame_restore(PyObject *self, PyObject *args)
+{
+    int status, dbkey;
+
+    if (!PyArg_ParseTuple(args, "i:restore", &dbkey)) return NULL;
+
+    CALLFAME(cfmrsdb(&status, dbkey));
+    Py_RETURN_NONE;
+}
+
+///////////////////////////////////////////////////////////////////////
+
+/* Method table for the cfame extension module; every entry takes
+   positional-only args (METH_VARARGS).  The {NULL, NULL} row is the
+   required end-of-table sentinel. */
+static PyMethodDef cfame_methods[] = {
+    {"open", cfame_open, METH_VARARGS, cfame_open_doc},
+    {"close", cfame_close, METH_VARARGS, cfame_close_doc},
+    {"wildlist", cfame_wildlist, METH_VARARGS, cfame_wildlist_doc},
+    {"read", cfame_read, METH_VARARGS, cfame_read_doc},
+    {"whats", cfame_whats, METH_VARARGS, cfame_whats_doc},
+    {"size", cfame_size, METH_VARARGS, cfame_size_doc},
+    {"write_scalar", cfame_write_scalar, METH_VARARGS, cfame_write_scalar_doc},
+    {"write_series", cfame_write_series, METH_VARARGS, cfame_write_series_doc},
+    {"create", cfame_create, METH_VARARGS, cfame_create_doc},
+    {"remove", cfame_remove, METH_VARARGS, cfame_remove_doc},
+    {"exists", cfame_exists, METH_VARARGS, cfame_exists_doc},
+    {"updated", cfame_updated, METH_VARARGS, cfame_updated_doc},
+    {"write_namelist", cfame_write_namelist, METH_VARARGS, cfame_write_namelist_doc},
+    {"restore", cfame_restore, METH_VARARGS, cfame_restore_doc},
+    {NULL, NULL}
+};
+
+/* Python 2 module-init entry point: start up the FAME HLI (cfmini), register
+   the method table, and initialize the numpy C API (import_array is required
+   before any PyArray_* call).
+   NOTE(review): the status returned by cfmini is never checked, so a failed
+   FAME initialization goes unnoticed until a later call fails. */
+PyMODINIT_FUNC
+initcfame(void)
+{
+    int status;
+    cfmini(&status);
+    Py_InitModule3("cfame", cfame_methods, cfame_doc);
+    import_array();
+
+    makeTranslationTables();
+}
\ No newline at end of file

Added: trunk/Lib/sandbox/timeseries/io/fame/tests/test_fame.py
===================================================================
--- trunk/Lib/sandbox/timeseries/io/fame/tests/test_fame.py	2007-02-15 16:42:18 UTC (rev 2711)
+++ trunk/Lib/sandbox/timeseries/io/fame/tests/test_fame.py	2007-02-15 17:52:27 UTC (rev 2712)
@@ -0,0 +1,390 @@
+"""Tests suite for fame io submodule.
+
+:author: Matt Knox
+:contact: mattknox_ca_at_hotmail_dot_com
+:version: $Id: test_fame.py 2578 2007-01-17 19:25:10Z mattknox_ca $
+"""
+__author__ = "Matt Knox ($Author: mattknox_ca $)"
+__version__ = '1.0'
+__revision__ = "$Revision: 2578 $"
+__date__     = '$Date: 2007-01-17 14:25:10 -0500 (Wed, 17 Jan 2007) $'
+
+import numpy as N
+from numpy import bool_, complex_, float_, int_, object_
+import numpy.core.fromnumeric  as fromnumeric
+import numpy.core.numeric as numeric
+from numpy.testing import NumpyTest, NumpyTestCase
+from numpy.testing.utils import build_err_msg
+
+from timeseries.io import fame
+from timeseries import Report
+import timeseries as ts
+import maskedarray as ma
+import numpy as np
+
+import maskedarray
+from maskedarray import masked_array, masked, nomask
+
+import maskedarray.testutils
+from maskedarray.testutils import assert_equal, assert_array_equal, approx, assert_mask_equal
+
+# setup all the data to be used for reading and writing
+data = {'dates':{}, 'darrays':{}, 'freqs':{}, 'cser':{}, 'tser':{}, 'scalars':{}}
+
+# one reference Date per supported frequency: annual, quarterly, monthly,
+# weekly, business-daily, daily, hourly, minutely, secondly
+data['dates']['a'] = ts.Date(freq='A', year=2004)
+data['dates']['q'] = ts.Date(freq='Q', year=2004, quarter=1)
+data['dates']['m'] = ts.Date(freq='M', year=2004, month=1)
+data['dates']['w'] = ts.Date(freq='W', year=2004, month=1, day=1)
+data['dates']['b'] = ts.Date(freq='b', year=2004, month=1, day=1)
+data['dates']['d'] = ts.Date(freq='d', year=2004, month=1, day=1)
+data['dates']['h'] = ts.Date(freq='h', year=2004, month=1, day=1, hour=0)
+data['dates']['t'] = ts.Date(freq='t', year=2004, month=1, day=1, hour=0, minute=0)
+data['dates']['s'] = ts.Date(freq='s', year=2004, month=1, day=1, hour=0, minute=0, second=0)
+
+# a 10-element DateArray per frequency; DateArrays double as a case-series dtype
+for freq in data['dates']:
+    data['darrays'][freq] = ts.date_array(start_date=data['dates'][freq], length=10)
+    data['cser']['date_'+freq] = data['darrays'][freq]
+
+# case-series fixtures covering every supported element dtype
+data['cser']['bool'] = [True, False, True, False, True, True]
+data['cser']['int32'] = np.arange(6).astype(np.int32)
+data['cser']['int64'] = np.arange(6).astype(np.int64)
+data['cser']['float32'] = np.arange(6).astype(np.float32)
+data['cser']['float64'] = np.arange(6).astype(np.float64)
+data['cser']['str'] = ["asdf", "aasssssssss", "zzzzzzzzzzzz", "", "blah"]
+
+# mirror each case series as an annual time series
+for x in data['cser']:
+    data['cser'][x] = ma.masked_array(data['cser'][x])
+    data['tser'][x] = ts.time_series(data['cser'][x], start_date=data['dates']['a'])
+
+for freq in data['dates']:
+    data['freqs'][freq] = ts.time_series(np.arange(20).astype(np.float32), start_date=data['dates'][freq])
+
+# test writing for all data types as time series and as case series
+# (element 1 is masked everywhere so masked-value round-tripping is exercised)
+for x in data['tser']:
+    data['tser'][x][1] = ma.masked
+    data['cser'][x][1] = ma.masked
+
+# series for testing appending data to an existing series
+appendTSer = ts.time_series(np.arange(10, 15).astype(np.float32), freq='A', start_date=ts.Date(freq='A', year=2007))
+appendCSer = np.arange(10, 15).astype(np.float32)
+
+# series for testing writing over a specified range
+rangeTSer = ts.time_series(np.arange(20).astype(np.float32), freq='A', start_date=ts.Date(freq='A', year=2004))
+rangeCSer = np.arange(20).astype(np.float32)
+
+# scalar fixtures: numpy scalars, native python values, strings, namelists,
+# booleans, and one date per frequency
+data['scalars']['int32'] = np.int32(5)
+data['scalars']['int64'] = np.int64(5)
+data['scalars']['float32'] = np.float32(5)
+data['scalars']['float64'] = np.float64(5)
+data['scalars']['pyInt'] = 5
+data['scalars']['pyFloat'] = 5234.6323
+data['scalars']['string'] = "mystring"
+data['scalars']['namelist'] = ["mystring", "$asdf","gggggggg"]
+data['scalars']['boolean'] = True
+for f in data['dates']:
+    data['scalars']['date_'+f] = data['dates'][f]
+
+class test_write(NumpyTestCase):
+    """Round-trip write/read tests against a single FAME test database.
+
+    All sub-tests share one on-disk database ("testdb.db") and depend on
+    execution order (writes must precede reads), so they are private
+    ``_test_*`` methods driven by the single ``test_main`` entry point.
+    """
+    
+    def setUp(self):
+        # presumably 'o' opens testdb.db in overwrite/create mode, giving each
+        # run a fresh database -- verify against fame.FameDb
+        self.db = fame.FameDb("testdb.db",'o')
+        
+    def test_main(self):
+        "execute all the tests. Order is important here"
+
+        self._test_write_scalars()
+        self._test_read_scalars()
+        
+        self._test_dict_scalars()
+
+        self._test_write_freqs_tser()
+        self._test_read_freqs_tser()
+
+        self._test_write_dtypes_tser()
+        self._test_read_dtypes_tser()
+        
+        self._test_read_range_tser()
+
+        self._test_write_append_tser()
+        self._test_read_append_tser()
+        
+        self._test_write_range_tser()
+        self._test_verify_write_range_tser()
+        
+        self._test_write_empty_tser()
+        self._test_read_empty_tser()
+        
+        self._test_overwrite_tser()
+        
+        self._test_assume_exists_tser()
+        
+        self._test_dict_tser()
+        
+        self._test_write_dtypes_cser()
+        self._test_read_dtypes_cser()
+        
+        self._test_read_range_cser()
+
+        self._test_write_append_cser()
+        self._test_read_append_cser()
+        
+        self._test_write_range_cser()
+        self._test_verify_write_range_cser()
+
+        self._test_write_empty_cser()
+        self._test_read_empty_cser()
+        
+        self._test_overwrite_cser()
+        
+        self._test_assume_exists_cser()
+        
+        self._test_dict_cser()
+
+    def _test_write_scalars(self):
+        "test writing all types of scalar values"
+        for s in data['scalars']:
+            self.db.write_scalar('$scalar_'+s, data['scalars'][s])
+            
+    def _test_dict_scalars(self):
+        "test writing multiple scalars at once using write_scalar_dict"
+        self.db.write_scalar_dict({'$scalar_1':data['scalars']['float32'],
+                                   '$scalar_2':data['scalars']['float32']})
+        result = self.db.read(['$scalar_1', '$scalar_2'])
+        assert_equal(result['$scalar_1'], data['scalars']['float32'])
+        assert_equal(result['$scalar_2'], data['scalars']['float32'])
+
+    def _test_read_scalars(self):
+        "read scalars of every data type"
+        for s in data['scalars']:
+            sclr = self.db.read('$scalar_'+s)
+            orig = data['scalars'][s]
+
+            # expected type coercions on read-back, as encoded by these
+            # assertions: int32 comes back as float32; python ints/floats and
+            # int64 come back as float64; namelist entries come back uppercased
+            if s == 'int32':
+                assert_equal(sclr, orig.astype(np.float32))
+            elif s in ('pyInt', 'pyFloat', 'int64'):
+                assert_equal(sclr, np.float64(orig))
+            elif s == 'namelist':
+                assert_equal(sclr, [x.upper() for x in orig])
+            else:
+                assert_equal(sclr, orig)
+
+    def _test_write_freqs_tser(self):
+        "test writing time series for all frequencies"
+        for x in data['freqs']:
+            self.db.write_tser('$freq_'+x, data['freqs'][x])
+
+    def _test_read_freqs_tser(self):
+        """read series at every frequency and ensure they are the
+        same as what was written"""
+        for x in data['freqs']:
+            ser = self.db.read('$freq_'+x)
+            assert_mask_equal(ser.mask, data['freqs'][x].mask)
+            assert((ser == data['freqs'][x]).all())
+
+    def _test_write_dtypes_tser(self):
+        "test writing for all dtypes for time series"
+        for x in data['tser']:
+            self.db.write_tser('$tser_'+x, data['tser'][x])
+
+    def _test_read_dtypes_tser(self):
+        "read time series of every data type"
+        for x in data['tser']:
+            ser = self.db.read('$tser_'+x)
+            # int series come back as floats; convert back before comparing
+            if str(ser.dtype)[:5] == 'float' and str(data['tser'][x].dtype)[:3] == 'int':
+                ser = ser.astype(data['tser'][x].dtype)
+                
+            assert_mask_equal(ser.mask, data['tser'][x].mask)
+            assert((ser == data['tser'][x]).all())
+            
+    def _test_read_range_tser(self):
+        "test reading a time series over specified ranges"
+        src = data['tser']['float32']
+        # endpoints both inside and outside the stored span
+        s1 = src.start_date+2
+        s2 = src.start_date-2
+        e1 = src.end_date+2
+        e2 = src.end_date-2
+        
+        dateList = [(s1, e1),
+                    (s1, e2),
+                    (s2, e1),
+                    (s2, e2)]
+                    
+        for s, e in dateList:
+            res = ts.adjust_endpoints(src, start_date=s, end_date=e)
+            ser = self.db.read('$tser_float32', start_date=s, end_date=e)
+            assert_array_equal(res, ser)
+            
+
+    def _test_write_append_tser(self):
+        "test appending data to an existing time series"
+        self.db.write_tser('$appendTSer', data['tser']['float32'])
+        self.db.write_tser('$appendTSer', appendTSer)
+        
+    def _test_read_append_tser(self):
+        "test reading of appended time series"
+        result = ts.adjust_endpoints(data['tser']['float32'],
+                                     start_date=data['tser']['float32'].start_date,
+                                     end_date=appendTSer.end_date)
+        result[appendTSer.start_date:appendTSer.end_date+1] = appendTSer
+        
+        ser = self.db.read('$appendTSer')
+        
+        assert_array_equal(result, ser)
+
+
+    def _test_write_range_tser(self):
+        "test writing a time series over a specified range"
+        self.db.write_tser('$rangeTSer', rangeTSer,
+                           start_date=ts.Date(freq='A', year=2008),
+                           end_date=ts.Date(freq='A', year=2012))
+
+    def _test_verify_write_range_tser(self):
+        "verify that _test_write_range_write_tser worked as expected"
+        
+        ser = self.db.read('$rangeTSer')
+        
+        sDate = ts.Date(freq='A', year=2008)
+        eDate = ts.Date(freq='A', year=2012)
+        
+        assert_array_equal(ser, rangeTSer[sDate:eDate+1])
+
+    def _test_write_empty_tser(self):
+        "test writing a time series with no data"
+        emptySer = ts.time_series([], freq='A')
+        self.db.write_tser('$emptyTSer', emptySer)
+
+    def _test_read_empty_tser(self):
+        "test reading a time series with no data"
+        ser = self.db.read('$emptyTSer')
+        assert(ser.start_date is None)
+        
+    def _test_overwrite_tser(self):
+        "test overwriting a time series"
+        self.db.write_tser('$tser_float32', data['tser']['bool'], overwrite=True)
+        ser = self.db.read('$tser_float32')
+        assert_array_equal(ser, data['tser']['bool'])
+
+    def _test_assume_exists_tser(self):
+        "check to see if the assume_exists flag works for write_tser"
+        # writing to a missing object with assume_exists=True must raise
+        exception = False
+        try:
+            self.db.write_tser('$doesNotExist', appendTSer, assume_exists=True)
+        except fame.DBError:
+            exception = True
+        assert(exception)
+        
+    def _test_dict_tser(self):
+        "test writing multiple time series at once using write_tser_dict"
+        self.db.write_tser_dict({'$tser_1':data['tser']['float32'],
+                                   '$tser_2':data['tser']['float32']})
+        result = self.db.read(['$tser_1', '$tser_2'])
+        assert_array_equal(result['$tser_1'], data['tser']['float32'])
+        assert_array_equal(result['$tser_2'], data['tser']['float32'])
+
+    def _test_write_dtypes_cser(self):
+        "test writing for all dtypes for case series"
+        for x in data['cser']:
+            self.db.write_cser('$cser_'+x, data['cser'][x])
+
+    def _test_read_dtypes_cser(self):
+        "read case series of every data type"
+        for x in data['cser']:
+            ser = self.db.read('$cser_'+x)
+            # int series come back as floats; convert back before comparing
+            if str(ser.dtype)[:5] == 'float' and str(data['cser'][x].dtype)[:3] == 'int':
+                ser = ser.astype(data['cser'][x].dtype)
+                
+            assert_mask_equal(ser.mask, data['cser'][x].mask)
+            assert((ser == data['cser'][x]).all())
+
+    def _test_read_range_cser(self):
+        "test reading case series over specified ranges"
+        src = data['cser']['float32']
+        # case numbers both inside and beyond the stored size (src has 6 elements)
+        s1 = 3
+        s2 = 1
+        e1 = 8
+        e2 = 4
+        
+        caseList = [(s1, e1),
+                    (s1, e2),
+                    (s2, e1),
+                    (s2, e2)]
+                    
+        for s, e in caseList:
+            size = (e - s + 1)
+            # build the expected result: fully masked, then fill the overlap
+            # NOTE(review): np.float32 is passed as the *second positional*
+            # argument of ma.array -- confirm that maps to dtype (not mask)
+            # in this maskedarray version
+            res = ma.array([0]*size , np.float32, mask=[1]*size )
+
+            if e < src.size: _e = size
+            else: _e = size - max(e-size, 0, size - src.size)
+
+            res[0:_e] = src[s-1:min(e, src.size)]
+            ser = self.db.read('$cser_float32', start_case=s, end_case=e)
+
+            assert_array_equal(res, ser)
+
+    def _test_write_append_cser(self):
+        "test appending to an existing case series"
+        self.db.write_cser('$appendCSer', data['cser']['float32'])
+        self.db.write_cser('$appendCSer', appendCSer, zero_represents=4)
+        
+    def _test_read_append_cser(self):
+        "test reading of appended case series"
+        
+        result = ma.concatenate([data['cser']['float32'][:3], appendCSer])
+        ser = self.db.read('$appendCSer')
+        assert_array_equal(result, ser)
+        
+    def _test_write_range_cser(self):
+        "test writing over a specified range"
+        self.db.write_cser('$rangeCSer', rangeCSer,
+                           start_case=5, end_case=9)
+
+    def _test_verify_write_range_cser(self):
+        "verify that _test_write_range_write_cser worked as expected"
+        
+        ser = self.db.read('$rangeCSer')
+        # cases 1-4 were never written, so they read back masked
+        result = ma.arange(9).astype(np.float32)
+        result[:4] = ma.masked
+        
+        assert_array_equal(ser, result)
+
+    def _test_write_empty_cser(self):
+        "test writing a case series with no data"
+        self.db.write_cser('$emptyCSer', ma.array([]))
+
+    def _test_read_empty_cser(self):
+        "test reading a case series with no data"
+        ser = self.db.read('$emptyCSer')
+        assert_equal(ser.size, 0)
+    
+    def _test_overwrite_cser(self):
+        "test overwriting a case series"
+        self.db.write_cser('$cser_float32', data['cser']['bool'], overwrite=True)
+        ser = self.db.read('$cser_float32')
+        assert_array_equal(ser, data['cser']['bool'])
+        
+    def _test_assume_exists_cser(self):
+        "check to see if the assume_exists flag works for write_cser"
+        # writing to a missing object with assume_exists=True must raise
+        exception = False
+        try:
+            self.db.write_cser('$doesNotExist', appendCSer, assume_exists=True)
+        except fame.DBError:
+            exception = True
+        assert(exception)
+
+    def _test_dict_cser(self):
+        "test writing multiple case series at once using write_cser_dict"
+        self.db.write_cser_dict({'$cser_1':data['cser']['float32'],
+                                   '$cser_2':data['cser']['float32']})
+        result = self.db.read(['$cser_1', '$cser_2'])
+        assert_array_equal(result['$cser_1'], data['cser']['float32'])
+        assert_array_equal(result['$cser_2'], data['cser']['float32'])
+    
+    def tearDown(self):
+        # release the FAME db handle after every test
+        self.db.close()
+
+        
+        
+###############################################################################
+#------------------------------------------------------------------------------
+if __name__ == "__main__":
+    # discover and run this module's tests via numpy's (pre-nose) test runner
+    NumpyTest().run()




More information about the Scipy-svn mailing list