[Python-3000-checkins] r66089 - in python/branches/py3k: Lib/bsddb/__init__.py Lib/bsddb/db.py Lib/bsddb/dbobj.py Lib/bsddb/dbrecio.py Lib/bsddb/dbshelve.py Lib/bsddb/dbtables.py Lib/bsddb/dbutils.py Lib/bsddb/test/test_1413192.py Lib/bsddb/test/test_all.py Lib/bsddb/test/test_associate.py Lib/bsddb/test/test_basics.py Lib/bsddb/test/test_compare.py Lib/bsddb/test/test_compat.py Lib/bsddb/test/test_cursor_pget_bug.py Lib/bsddb/test/test_dbobj.py Lib/bsddb/test/test_dbshelve.py Lib/bsddb/test/test_dbtables.py Lib/bsddb/test/test_distributed_transactions.py Lib/bsddb/test/test_early_close.py Lib/bsddb/test/test_get_none.py Lib/bsddb/test/test_join.py Lib/bsddb/test/test_lock.py Lib/bsddb/test/test_misc.py Lib/bsddb/test/test_pickle.py Lib/bsddb/test/test_queue.py Lib/bsddb/test/test_recno.py Lib/bsddb/test/test_replication.py Lib/bsddb/test/test_sequence.py Lib/bsddb/test/test_thread.py Misc/NEWS Modules/_bsddb.c Modules/bsddb.h setup.py

jesus.cea python-3000-checkins at python.org
Sun Aug 31 16:12:13 CEST 2008


Author: jesus.cea
Date: Sun Aug 31 16:12:11 2008
New Revision: 66089

Log:
bsddb code updated to version 4.7.3pre2. This code is the same as the
Python 2.6 code, since the intention is to keep a unified 2.x/3.x
codebase.

The Python code is automatically translated using "2to3". Please do not
update this code in Python 3.0 by hand; update the 2.6 code and then run
"2to3".



Added:
   python/branches/py3k/Lib/bsddb/test/test_distributed_transactions.py
   python/branches/py3k/Lib/bsddb/test/test_early_close.py
   python/branches/py3k/Lib/bsddb/test/test_replication.py
Removed:
   python/branches/py3k/Lib/bsddb/test/test_1413192.py
Modified:
   python/branches/py3k/Lib/bsddb/__init__.py
   python/branches/py3k/Lib/bsddb/db.py
   python/branches/py3k/Lib/bsddb/dbobj.py
   python/branches/py3k/Lib/bsddb/dbrecio.py
   python/branches/py3k/Lib/bsddb/dbshelve.py
   python/branches/py3k/Lib/bsddb/dbtables.py
   python/branches/py3k/Lib/bsddb/dbutils.py
   python/branches/py3k/Lib/bsddb/test/test_all.py
   python/branches/py3k/Lib/bsddb/test/test_associate.py
   python/branches/py3k/Lib/bsddb/test/test_basics.py
   python/branches/py3k/Lib/bsddb/test/test_compare.py
   python/branches/py3k/Lib/bsddb/test/test_compat.py
   python/branches/py3k/Lib/bsddb/test/test_cursor_pget_bug.py
   python/branches/py3k/Lib/bsddb/test/test_dbobj.py
   python/branches/py3k/Lib/bsddb/test/test_dbshelve.py
   python/branches/py3k/Lib/bsddb/test/test_dbtables.py
   python/branches/py3k/Lib/bsddb/test/test_get_none.py
   python/branches/py3k/Lib/bsddb/test/test_join.py
   python/branches/py3k/Lib/bsddb/test/test_lock.py
   python/branches/py3k/Lib/bsddb/test/test_misc.py
   python/branches/py3k/Lib/bsddb/test/test_pickle.py
   python/branches/py3k/Lib/bsddb/test/test_queue.py
   python/branches/py3k/Lib/bsddb/test/test_recno.py
   python/branches/py3k/Lib/bsddb/test/test_sequence.py
   python/branches/py3k/Lib/bsddb/test/test_thread.py
   python/branches/py3k/Misc/NEWS
   python/branches/py3k/Modules/_bsddb.c
   python/branches/py3k/Modules/bsddb.h
   python/branches/py3k/setup.py

Modified: python/branches/py3k/Lib/bsddb/__init__.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/__init__.py	(original)
+++ python/branches/py3k/Lib/bsddb/__init__.py	Sun Aug 31 16:12:11 2008
@@ -33,18 +33,25 @@
 #----------------------------------------------------------------------
 
 
-"""Support for BerkeleyDB 3.3 through 4.4 with a simple interface.
+"""Support for Berkeley DB 4.0 through 4.7 with a simple interface.
 
 For the full featured object oriented interface use the bsddb.db module
-instead.  It mirrors the Sleepycat BerkeleyDB C API.
+instead.  It mirrors the Oracle Berkeley DB C API.
 """
 
+import sys
+absolute_import = (sys.version_info[0] >= 3)
+
 try:
     if __name__ == 'bsddb3':
         # import _pybsddb binary as it should be the more recent version from
         # a standalone pybsddb addon package than the version included with
         # python as bsddb._bsddb.
-        import _pybsddb
+        if absolute_import :
+            # Because this syntaxis is not valid before Python 2.5
+            exec("from . import _pybsddb")
+        else :
+            import _pybsddb
         _bsddb = _pybsddb
         from bsddb3.dbutils import DeadlockWrap as _DeadlockWrap
     else:
@@ -64,10 +71,18 @@
 
 #----------------------------------------------------------------------
 
-import sys, os, collections
+import sys, os
+
 from weakref import ref
 
-class _iter_mixin(collections.MutableMapping):
+if sys.version_info[0:2] <= (2, 5) :
+    import UserDict
+    MutableMapping = UserDict.DictMixin
+else :
+    import collections
+    MutableMapping = collections.MutableMapping
+
+class _iter_mixin(MutableMapping):
     def _make_iter_cursor(self):
         cur = _DeadlockWrap(self.db.cursor)
         key = id(cur)
@@ -81,64 +96,89 @@
         return lambda ref: self._cursor_refs.pop(key, None)
 
     def __iter__(self):
+        self._kill_iteration = False
+        self._in_iter += 1
         try:
-            cur = self._make_iter_cursor()
+            try:
+                cur = self._make_iter_cursor()
 
-            # FIXME-20031102-greg: race condition.  cursor could
-            # be closed by another thread before this call.
+                # FIXME-20031102-greg: race condition.  cursor could
+                # be closed by another thread before this call.
 
-            # since we're only returning keys, we call the cursor
-            # methods with flags=0, dlen=0, dofs=0
-            key = _DeadlockWrap(cur.first, 0,0,0)[0]
-            yield key
+                # since we're only returning keys, we call the cursor
+                # methods with flags=0, dlen=0, dofs=0
+                key = _DeadlockWrap(cur.first, 0,0,0)[0]
+                yield key
+
+                next = cur.__next__
+                while 1:
+                    try:
+                        key = _DeadlockWrap(next, 0,0,0)[0]
+                        yield key
+                    except _bsddb.DBCursorClosedError:
+                        if self._kill_iteration:
+                            raise RuntimeError('Database changed size '
+                                               'during iteration.')
+                        cur = self._make_iter_cursor()
+                        # FIXME-20031101-greg: race condition.  cursor could
+                        # be closed by another thread before this call.
+                        _DeadlockWrap(cur.set, key,0,0,0)
+                        next = cur.__next__
+            except _bsddb.DBNotFoundError:
+                pass
+            except _bsddb.DBCursorClosedError:
+                # the database was modified during iteration.  abort.
+                pass
+# When Python 2.3 not supported in bsddb3, we can change this to "finally"
+        except :
+            self._in_iter -= 1
+            raise
 
-            next = cur.next
-            while 1:
-                try:
-                    key = _DeadlockWrap(next, 0,0,0)[0]
-                    yield key
-                except _bsddb.DBCursorClosedError:
-                    cur = self._make_iter_cursor()
-                    # FIXME-20031101-greg: race condition.  cursor could
-                    # be closed by another thread before this call.
-                    _DeadlockWrap(cur.set, key,0,0,0)
-                    next = cur.next
-        except _bsddb.DBNotFoundError:
-            return
-        except _bsddb.DBCursorClosedError:
-            # the database was modified during iteration.  abort.
-            return
+        self._in_iter -= 1
 
     def iteritems(self):
         if not self.db:
             return
+        self._kill_iteration = False
+        self._in_iter += 1
         try:
-            cur = self._make_iter_cursor()
+            try:
+                cur = self._make_iter_cursor()
 
-            # FIXME-20031102-greg: race condition.  cursor could
-            # be closed by another thread before this call.
+                # FIXME-20031102-greg: race condition.  cursor could
+                # be closed by another thread before this call.
 
-            kv = _DeadlockWrap(cur.first)
-            key = kv[0]
-            yield kv
+                kv = _DeadlockWrap(cur.first)
+                key = kv[0]
+                yield kv
+
+                next = cur.__next__
+                while 1:
+                    try:
+                        kv = _DeadlockWrap(next)
+                        key = kv[0]
+                        yield kv
+                    except _bsddb.DBCursorClosedError:
+                        if self._kill_iteration:
+                            raise RuntimeError('Database changed size '
+                                               'during iteration.')
+                        cur = self._make_iter_cursor()
+                        # FIXME-20031101-greg: race condition.  cursor could
+                        # be closed by another thread before this call.
+                        _DeadlockWrap(cur.set, key,0,0,0)
+                        next = cur.__next__
+            except _bsddb.DBNotFoundError:
+                pass
+            except _bsddb.DBCursorClosedError:
+                # the database was modified during iteration.  abort.
+                pass
+# When Python 2.3 not supported in bsddb3, we can change this to "finally"
+        except :
+            self._in_iter -= 1
+            raise
+
+        self._in_iter -= 1
 
-            next = cur.next
-            while 1:
-                try:
-                    kv = _DeadlockWrap(next)
-                    key = kv[0]
-                    yield kv
-                except _bsddb.DBCursorClosedError:
-                    cur = self._make_iter_cursor()
-                    # FIXME-20031101-greg: race condition.  cursor could
-                    # be closed by another thread before this call.
-                    _DeadlockWrap(cur.set, key,0,0,0)
-                    next = cur.next
-        except _bsddb.DBNotFoundError:
-            return
-        except _bsddb.DBCursorClosedError:
-            # the database was modified during iteration.  abort.
-            return
 
 class _DBWithCursor(_iter_mixin):
     """
@@ -166,13 +206,12 @@
         # a collection of all DBCursor objects currently allocated
         # by the _iter_mixin interface.
         self._cursor_refs = {}
+        self._in_iter = 0
+        self._kill_iteration = False
 
     def __del__(self):
         self.close()
 
-    def __repr__(self):
-        return repr(dict(self.iteritems()))
-
     def _checkCursor(self):
         if self.dbc is None:
             self.dbc = _DeadlockWrap(self.db.cursor)
@@ -181,7 +220,7 @@
                 self.saved_dbc_key = None
 
     # This method is needed for all non-cursor DB calls to avoid
-    # BerkeleyDB deadlocks (due to being opened with DB_INIT_LOCK
+    # Berkeley DB deadlocks (due to being opened with DB_INIT_LOCK
     # and DB_THREAD to be thread safe) when intermixing database
     # operations that use the cursor internally with those that don't.
     def _closeCursors(self, save=1):
@@ -195,7 +234,7 @@
                     pass
             _DeadlockWrap(c.close)
             del c
-        for cref in self._cursor_refs.values():
+        for cref in list(self._cursor_refs.values()):
             c = cref()
             if c is not None:
                 _DeadlockWrap(c.close)
@@ -211,6 +250,12 @@
         self._checkOpen()
         return _DeadlockWrap(lambda: len(self.db))  # len(self.db)
 
+    if sys.version_info[0:2] >= (2, 6) :
+        def __repr__(self) :
+            if self.isOpen() :
+                return repr(dict(_DeadlockWrap(self.db.items)))
+            return repr(dict())
+
     def __getitem__(self, key):
         self._checkOpen()
         return _DeadlockWrap(lambda: self.db[key])  # self.db[key]
@@ -218,6 +263,8 @@
     def __setitem__(self, key, value):
         self._checkOpen()
         self._closeCursors()
+        if self._in_iter and key not in self:
+            self._kill_iteration = True
         def wrapF():
             self.db[key] = value
         _DeadlockWrap(wrapF)  # self.db[key] = value
@@ -225,6 +272,8 @@
     def __delitem__(self, key):
         self._checkOpen()
         self._closeCursors()
+        if self._in_iter and key in self:
+            self._kill_iteration = True
         def wrapF():
             del self.db[key]
         _DeadlockWrap(wrapF)  # del self.db[key]
@@ -248,17 +297,15 @@
         self._checkOpen()
         return _DeadlockWrap(self.db.has_key, key)
 
-    __contains__ = has_key
-
     def set_location(self, key):
         self._checkOpen()
         self._checkCursor()
         return _DeadlockWrap(self.dbc.set_range, key)
 
-    def next(self):
+    def __next__(self):
         self._checkOpen()
         self._checkCursor()
-        rv = _DeadlockWrap(self.dbc.next)
+        rv = _DeadlockWrap(self.dbc.__next__)
         return rv
 
     def previous(self):
@@ -287,146 +334,6 @@
         self._checkOpen()
         return _DeadlockWrap(self.db.sync)
 
-class _ExposedProperties:
-    @property
-    def _cursor_refs(self):
-        return self.db._cursor_refs
-
-class StringKeys(collections.MutableMapping, _ExposedProperties):
-    """Wrapper around DB object that automatically encodes
-    all keys as UTF-8; the keys must be strings."""
-
-    def __init__(self, db):
-        self.db = db
-
-    def __len__(self):
-        return len(self.db)
-
-    def __getitem__(self, key):
-        return self.db[key.encode("utf-8")]
-
-    def __setitem__(self, key, value):
-        self.db[key.encode("utf-8")] = value
-
-    def __delitem__(self, key):
-        del self.db[key.encode("utf-8")]
-
-    def __iter__(self):
-        for k in self.db:
-            yield k.decode("utf-8")
-
-    def close(self):
-        self.db.close()
-
-    def keys(self):
-        for k in self.db.keys():
-            yield k.decode("utf-8")
-
-    def has_key(self, key):
-        return self.db.has_key(key.encode("utf-8"))
-
-    __contains__ = has_key
-
-    def values(self):
-        return self.db.values()
-
-    def items(self):
-        for k,v in self.db.items():
-            yield k.decode("utf-8"), v
-
-    def set_location(self, key):
-        return self.db.set_location(key.encode("utf-8"))
-
-    def next(self):
-        key, value = self.db.next()
-        return key.decode("utf-8"), value
-
-    def previous(self):
-        key, value = self.db.previous()
-        return key.decode("utf-8"), value
-
-    def first(self):
-        key, value = self.db.first()
-        return key.decode("utf-8"), value
-
-    def last(self):
-        key, value = self.db.last()
-        return key.decode("utf-8"), value
-
-    def set_location(self, key):
-        key, value = self.db.set_location(key.encode("utf-8"))
-        return key.decode("utf-8"), value
-
-    def sync(self):
-        return self.db.sync()
-
-class StringValues(collections.MutableMapping, _ExposedProperties):
-    """Wrapper around DB object that automatically encodes
-    and decodes all values as UTF-8; input values must be strings."""
-
-    def __init__(self, db):
-        self.db = db
-
-    def __len__(self):
-        return len(self.db)
-
-    def __getitem__(self, key):
-        return self.db[key].decode("utf-8")
-
-    def __setitem__(self, key, value):
-        self.db[key] = value.encode("utf-8")
-
-    def __delitem__(self, key):
-        del self.db[key]
-
-    def __iter__(self):
-        return iter(self.db)
-
-    def close(self):
-        self.db.close()
-
-    def keys(self):
-        return self.db.keys()
-
-    def has_key(self, key):
-        return self.db.has_key(key)
-
-    __contains__ = has_key
-
-    def values(self):
-        for v in self.db.values():
-            yield v.decode("utf-8")
-
-    def items(self):
-        for k,v in self.db.items():
-            yield k, v.decode("utf-8")
-
-    def set_location(self, key):
-        return self.db.set_location(key)
-
-    def next(self):
-        key, value = self.db.next()
-        return key, value.decode("utf-8")
-
-    def previous(self):
-        key, value = self.db.previous()
-        return key, value.decode("utf-8")
-
-    def first(self):
-        key, value = self.db.first()
-        return key, value.decode("utf-8")
-
-    def last(self):
-        key, value = self.db.last()
-        return key, value.decode("utf-8")
-
-    def set_location(self, key):
-        key, value = self.db.set_location(key)
-        return key, value.decode("utf-8")
-
-    def sync(self):
-        return self.db.sync()
-
 
 #----------------------------------------------------------------------
 # Compatibility object factory functions
@@ -507,12 +414,12 @@
     elif flag == 'n':
         flags = db.DB_CREATE
         #flags = db.DB_CREATE | db.DB_TRUNCATE
-        # we used db.DB_TRUNCATE flag for this before but BerkeleyDB
+        # we used db.DB_TRUNCATE flag for this before but Berkeley DB
         # 4.2.52 changed to disallowed truncate with txn environments.
         if file is not None and os.path.isfile(file):
             os.unlink(file)
     else:
-        raise error("flags should be one of 'r', 'w', 'c' or 'n', not "+repr(flag))
+        raise error("flags should be one of 'r', 'w', 'c' or 'n'")
     return flags | db.DB_THREAD
 
 #----------------------------------------------------------------------
@@ -520,16 +427,14 @@
 
 # This is a silly little hack that allows apps to continue to use the
 # DB_THREAD flag even on systems without threads without freaking out
-# BerkeleyDB.
+# Berkeley DB.
 #
 # This assumes that if Python was built with thread support then
-# BerkeleyDB was too.
+# Berkeley DB was too.
 
 try:
     import _thread
     del _thread
-    if db.version() < (3, 3, 0):
-        db.DB_THREAD = 0
 except ImportError:
     db.DB_THREAD = 0
 

Modified: python/branches/py3k/Lib/bsddb/db.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/db.py	(original)
+++ python/branches/py3k/Lib/bsddb/db.py	Sun Aug 31 16:12:11 2008
@@ -37,15 +37,24 @@
 # case we ever want to augment the stuff in _db in any way.  For now
 # it just simply imports everything from _db.
 
-if __name__.startswith('bsddb3.'):
-    # import _pybsddb binary as it should be the more recent version from
-    # a standalone pybsddb addon package than the version included with
-    # python as bsddb._bsddb.
-    from _pybsddb import *
-    from _pybsddb import __version__
-else:
-    from _bsddb import *
-    from _bsddb import __version__
+import sys
+absolute_import = (sys.version_info[0] >= 3)
 
-if version() < (3, 2, 0):
-    raise ImportError("correct BerkeleyDB symbols not found.  Perhaps python was statically linked with an older version?")
+if not absolute_import :
+    if __name__.startswith('bsddb3.') :
+        # import _pybsddb binary as it should be the more recent version from
+        # a standalone pybsddb addon package than the version included with
+        # python as bsddb._bsddb.
+        from _pybsddb import *
+        from _pybsddb import __version__
+    else:
+        from _bsddb import *
+        from _bsddb import __version__
+else :
+    # Because this syntaxis is not valid before Python 2.5
+    if __name__.startswith('bsddb3.') :
+        exec("from ._pybsddb import *")
+        exec("from ._pybsddb import __version__")
+    else :
+        exec("from ._bsddb import *")
+        exec("from ._bsddb import __version__")

Modified: python/branches/py3k/Lib/bsddb/dbobj.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/dbobj.py	(original)
+++ python/branches/py3k/Lib/bsddb/dbobj.py	Sun Aug 31 16:12:11 2008
@@ -21,12 +21,24 @@
 # added to _bsddb.c.
 #
 
-from . import db
-
-try:
-    from collections import MutableMapping
-except ImportError:
-    class MutableMapping: pass
+import sys
+absolute_import = (sys.version_info[0] >= 3)
+if absolute_import :
+    # Because this syntaxis is not valid before Python 2.5
+    exec("from . import db")
+else :
+    from . import db
+
+if sys.version_info[0:2] <= (2, 5) :
+    try:
+        from UserDict import DictMixin
+    except ImportError:
+        # DictMixin is new in Python 2.3
+        class DictMixin: pass
+    MutableMapping = DictMixin
+else :
+    import collections
+    MutableMapping = collections.MutableMapping
 
 class DBEnv:
     def __init__(self, *args, **kwargs):
@@ -95,9 +107,8 @@
     def set_get_returns_none(self, *args, **kwargs):
         return self._cobj.set_get_returns_none(*args, **kwargs)
 
-    if db.version() >= (4,0):
-        def log_stat(self, *args, **kwargs):
-            return self._cobj.log_stat(*args, **kwargs)
+    def log_stat(self, *args, **kwargs):
+        return self._cobj.log_stat(*args, **kwargs)
 
     if db.version() >= (4,1):
         def dbremove(self, *args, **kwargs):
@@ -115,7 +126,7 @@
 class DB(MutableMapping):
     def __init__(self, dbenv, *args, **kwargs):
         # give it the proper DBEnv C object that its expecting
-        self._cobj = db.DB(dbenv._cobj, *args, **kwargs)
+        self._cobj = db.DB(*(dbenv._cobj,) + args, **kwargs)
 
     # TODO are there other dict methods that need to be overridden?
     def __len__(self):
@@ -126,8 +137,10 @@
         self._cobj[key] = value
     def __delitem__(self, arg):
         del self._cobj[arg]
-    def __iter__(self):
-        return iter(self.keys())
+
+    if sys.version_info[0:2] >= (2, 6) :
+        def __iter__(self) :
+            return self._cobj.__iter__()
 
     def append(self, *args, **kwargs):
         return self._cobj.append(*args, **kwargs)
@@ -163,8 +176,6 @@
         return self._cobj.key_range(*args, **kwargs)
     def has_key(self, *args, **kwargs):
         return self._cobj.has_key(*args, **kwargs)
-    def __contains__(self, key):
-        return self._cobj.has_key(key)
     def items(self, *args, **kwargs):
         return self._cobj.items(*args, **kwargs)
     def keys(self, *args, **kwargs):

Modified: python/branches/py3k/Lib/bsddb/dbrecio.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/dbrecio.py	(original)
+++ python/branches/py3k/Lib/bsddb/dbrecio.py	Sun Aug 31 16:12:11 2008
@@ -29,6 +29,7 @@
 """
 
 import errno
+import string
 
 class DBRecIO:
     def __init__(self, db, key, txn=None):
@@ -38,6 +39,7 @@
         self.len = None
         self.pos = 0
         self.closed = 0
+        self.softspace = 0
 
     def close(self):
         if not self.closed:
@@ -82,9 +84,9 @@
         if self.closed:
             raise ValueError, "I/O operation on closed file"
         if self.buflist:
-            self.buf = self.buf + ''.join(self.buflist)
+            self.buf = self.buf + string.joinfields(self.buflist, '')
             self.buflist = []
-        i = self.buf.find('\n', self.pos)
+        i = string.find(self.buf, '\n', self.pos)
         if i < 0:
             newpos = self.len
         else:
@@ -133,7 +135,7 @@
         self.pos = newpos
 
     def writelines(self, list):
-        self.write(''.join(list))
+        self.write(string.joinfields(list, ''))
 
     def flush(self):
         if self.closed:
@@ -158,14 +160,14 @@
     if f.getvalue() != text:
         raise RuntimeError, 'write failed'
     length = f.tell()
-    print('File length =', length)
+    print 'File length =', length
     f.seek(len(lines[0]))
     f.write(lines[1])
     f.seek(0)
-    print('First line =', repr(f.readline()))
+    print 'First line =', repr(f.readline())
     here = f.tell()
     line = f.readline()
-    print('Second line =', repr(line))
+    print 'Second line =', repr(line)
     f.seek(-len(line), 1)
     line2 = f.read(len(line))
     if line != line2:
@@ -177,8 +179,8 @@
     line2 = f.read()
     if line != line2:
         raise RuntimeError, 'bad result after seek back from EOF'
-    print('Read', len(list), 'more lines')
-    print('File length =', f.tell())
+    print 'Read', len(list), 'more lines'
+    print 'File length =', f.tell()
     if f.tell() != length:
         raise RuntimeError, 'bad length'
     f.close()

Modified: python/branches/py3k/Lib/bsddb/dbshelve.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/dbshelve.py	(original)
+++ python/branches/py3k/Lib/bsddb/dbshelve.py	Sun Aug 31 16:12:11 2008
@@ -32,21 +32,43 @@
 import pickle
 import sys
 
+import sys
+absolute_import = (sys.version_info[0] >= 3)
+if absolute_import :
+    # Because this syntaxis is not valid before Python 2.5
+    exec("from . import db")
+else :
+    from . import db
+
 #At version 2.3 cPickle switched to using protocol instead of bin
 if sys.version_info[:3] >= (2, 3, 0):
     HIGHEST_PROTOCOL = pickle.HIGHEST_PROTOCOL
-    def _dumps(object, protocol):
-        return pickle.dumps(object, protocol=protocol)
-    from collections import MutableMapping
+# In python 2.3.*, "cPickle.dumps" accepts no
+# named parameters. "pickle.dumps" accepts them,
+# so this seems a bug.
+    if sys.version_info[:3] < (2, 4, 0):
+        def _dumps(object, protocol):
+            return pickle.dumps(object, protocol)
+    else :
+        def _dumps(object, protocol):
+            return pickle.dumps(object, protocol=protocol)
+
 else:
     HIGHEST_PROTOCOL = None
     def _dumps(object, protocol):
         return pickle.dumps(object, bin=protocol)
-    class MutableMapping: pass
 
-from . import db
 
-_unspecified = object()
+if sys.version_info[0:2] <= (2, 5) :
+    try:
+        from UserDict import DictMixin
+    except ImportError:
+        # DictMixin is new in Python 2.3
+        class DictMixin: pass
+    MutableMapping = DictMixin
+else :
+    import collections
+    MutableMapping = collections.MutableMapping
 
 #------------------------------------------------------------------------
 
@@ -135,13 +157,15 @@
 
 
     def keys(self, txn=None):
-        if txn is not None:
+        if txn != None:
             return self.db.keys(txn)
         else:
-            return self.db.keys()
+            return list(self.db.keys())
+
+    if sys.version_info[0:2] >= (2, 6) :
+        def __iter__(self) :
+            return self.db.__iter__()
 
-    def __iter__(self):
-        return iter(self.keys())
 
     def open(self, *args, **kwargs):
         self.db.open(*args, **kwargs)
@@ -157,14 +181,14 @@
         if self._closed:
             return '<DBShelf @ 0x%x - closed>' % (id(self))
         else:
-            return repr(dict(self.iteritems()))
+            return repr(dict(iter(self.items())))
 
 
     def items(self, txn=None):
-        if txn is not None:
+        if txn != None:
             items = self.db.items(txn)
         else:
-            items = self.db.items()
+            items = list(self.db.items())
         newitems = []
 
         for k, v in items:
@@ -172,12 +196,12 @@
         return newitems
 
     def values(self, txn=None):
-        if txn is not None:
+        if txn != None:
             values = self.db.values(txn)
         else:
-            values = self.db.values()
+            values = list(self.db.values())
 
-        return map(pickle.loads, values)
+        return list(map(pickle.loads, values))
 
     #-----------------------------------
     # Other methods
@@ -194,24 +218,28 @@
 
     def associate(self, secondaryDB, callback, flags=0):
         def _shelf_callback(priKey, priData, realCallback=callback):
-            data = pickle.loads(priData)
+            # Safe in Python 2.x because expresion short circuit
+            if sys.version_info[0] < 3 or isinstance(priData, bytes) :
+                data = pickle.loads(priData)
+            else :
+                data = pickle.loads(bytes(priData, "iso8859-1"))  # 8 bits
             return realCallback(priKey, data)
+
         return self.db.associate(secondaryDB, _shelf_callback, flags)
 
 
-    def get(self, key, default=_unspecified, txn=None, flags=0):
-        # If no default is given, we must not pass one to the
-        # extension module, so that an exception can be raised if
-        # set_get_returns_none is turned off.
-        if default is _unspecified:
-            data = self.db.get(key, txn=txn, flags=flags)
-            # if this returns, the default value would be None
-            default = None
-        else:
-            data = self.db.get(key, default, txn=txn, flags=flags)
-        if data is default:
-            return data
-        return pickle.loads(data)
+    #def get(self, key, default=None, txn=None, flags=0):
+    def get(self, *args, **kw):
+        # We do it with *args and **kw so if the default value wasn't
+        # given nothing is passed to the extension module.  That way
+        # an exception can be raised if set_get_returns_none is turned
+        # off.
+        data = self.db.get(*args, **kw)
+        try:
+            return pickle.loads(data)
+        except (EOFError, TypeError, pickle.UnpicklingError):
+            return data  # we may be getting the default value, or None,
+                         # so it doesn't need unpickled.
 
     def get_both(self, key, value, txn=None, flags=0):
         data = _dumps(value, self.protocol)
@@ -234,10 +262,6 @@
         raise NotImplementedError
 
 
-    def __contains__(self, key):
-        return self.db.has_key(key)
-
-
     #----------------------------------------------
     # Methods allowed to pass-through to self.db
     #
@@ -331,7 +355,11 @@
             return None
         else:
             key, data = rec
-            return key, pickle.loads(data)
+            # Safe in Python 2.x because expresion short circuit
+            if sys.version_info[0] < 3 or isinstance(data, bytes) :
+                return key, pickle.loads(data)
+            else :
+                return key, pickle.loads(bytes(data, "iso8859-1"))  # 8 bits
 
     #----------------------------------------------
     # Methods allowed to pass-through to self.dbc

Modified: python/branches/py3k/Lib/bsddb/dbtables.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/dbtables.py	(original)
+++ python/branches/py3k/Lib/bsddb/dbtables.py	Sun Aug 31 16:12:11 2008
@@ -13,30 +13,29 @@
 #   --  Gregory P. Smith <greg at krypto.org>
 
 # This provides a simple database table interface built on top of
-# the Python BerkeleyDB 3 interface.
+# the Python Berkeley DB 3 interface.
 #
 _cvsid = '$Id$'
 
 import re
 import sys
 import copy
-import struct
 import random
-import pickle
-
-from bsddb.db import *
+import struct
+import pickle as pickle
 
-# All table names, row names etc. must be ASCII strings
-# However, rowids, when represented as strings, are latin-1 encoded
-def _E(s):
-    return s.encode("ascii")
+try:
+    # For Pythons w/distutils pybsddb
+    from bsddb3 import db
+except ImportError:
+    # For Python 2.3
+    from bsddb import db
 
 # XXX(nnorwitz): is this correct? DBIncompleteError is conditional in _bsddb.c
-try:
-    DBIncompleteError
-except NameError:
+if not hasattr(db,"DBIncompleteError") :
     class DBIncompleteError(Exception):
         pass
+    db.DBIncompleteError = DBIncompleteError
 
 class TableDBError(Exception):
     pass
@@ -51,22 +50,22 @@
 
 class ExactCond(Cond):
     """Acts as an exact match condition function"""
-    def __init__(self, strtomatch, encoding="utf-8"):
-        self.strtomatch = strtomatch.encode(encoding)
+    def __init__(self, strtomatch):
+        self.strtomatch = strtomatch
     def __call__(self, s):
         return s == self.strtomatch
 
 class PrefixCond(Cond):
     """Acts as a condition function for matching a string prefix"""
-    def __init__(self, prefix, encoding="utf-8"):
-        self.prefix = prefix.encode(encoding)
+    def __init__(self, prefix):
+        self.prefix = prefix
     def __call__(self, s):
         return s[:len(self.prefix)] == self.prefix
 
 class PostfixCond(Cond):
     """Acts as a condition function for matching a string postfix"""
-    def __init__(self, postfix, encoding="utf-8"):
-        self.postfix = postfix.encode(encoding)
+    def __init__(self, postfix):
+        self.postfix = postfix
     def __call__(self, s):
         return s[-len(self.postfix):] == self.postfix
 
@@ -76,7 +75,7 @@
     string.  Case insensitive and % signs are wild cards.
     This isn't perfect but it should work for the simple common cases.
     """
-    def __init__(self, likestr, re_flags=re.IGNORECASE, encoding="utf-8"):
+    def __init__(self, likestr, re_flags=re.IGNORECASE):
         # escape python re characters
         chars_to_escape = '.*+()[]?'
         for char in chars_to_escape :
@@ -84,18 +83,8 @@
         # convert %s to wildcards
         self.likestr = likestr.replace('%', '.*')
         self.re = re.compile('^'+self.likestr+'$', re_flags)
-        self.encoding = encoding
     def __call__(self, s):
-        return self.re.match(s.decode(self.encoding))
-
-def CmpToKey(mycmp):
-    'Convert a cmp= function into a key= function'
-    class K(object):
-        def __init__(self, obj, *args):
-            self.obj = obj
-        def __lt__(self, other):
-            return mycmp(self.obj, other.obj) == -1
-    return K
+        return self.re.match(s)
 
 #
 # keys used to store database metadata
@@ -104,7 +93,7 @@
 _columns = '._COLUMNS__'  # table_name+this key contains a list of columns
 
 def _columns_key(table):
-    return _E(table + _columns)
+    return table + _columns
 
 #
 # these keys are found within table sub databases
@@ -114,20 +103,21 @@
                      # row in the table.  (no data is stored)
 _rowid_str_len = 8   # length in bytes of the unique rowid strings
 
+
 def _data_key(table, col, rowid):
-    return _E(table + _data + col + _data) + rowid
+    return table + _data + col + _data + rowid
 
 def _search_col_data_key(table, col):
-    return _E(table + _data + col + _data)
+    return table + _data + col + _data
 
 def _search_all_data_key(table):
-    return _E(table + _data)
+    return table + _data
 
 def _rowid_key(table, rowid):
-    return _E(table + _rowid) + rowid + _E(_rowid)
+    return table + _rowid + rowid + _rowid
 
 def _search_rowid_key(table):
-    return _E(table + _rowid)
+    return table + _rowid
 
 def contains_metastrings(s) :
     """Verify that the given string does not contain any
@@ -146,43 +136,110 @@
 class bsdTableDB :
     def __init__(self, filename, dbhome, create=0, truncate=0, mode=0o600,
                  recover=0, dbflags=0):
-        """bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0o600)
+        """bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600)
 
-        Open database name in the dbhome BerkeleyDB directory.
+        Open database name in the dbhome Berkeley DB directory.
         Use keyword arguments when calling this constructor.
         """
         self.db = None
-        myflags = DB_THREAD
+        myflags = db.DB_THREAD
         if create:
-            myflags |= DB_CREATE
-        flagsforenv = (DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG |
-                       DB_INIT_TXN | dbflags)
+            myflags |= db.DB_CREATE
+        flagsforenv = (db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_LOG |
+                       db.DB_INIT_TXN | dbflags)
         # DB_AUTO_COMMIT isn't a valid flag for env.open()
         try:
-            dbflags |= DB_AUTO_COMMIT
+            dbflags |= db.DB_AUTO_COMMIT
         except AttributeError:
             pass
         if recover:
-            flagsforenv = flagsforenv | DB_RECOVER
-        self.env = DBEnv()
+            flagsforenv = flagsforenv | db.DB_RECOVER
+        self.env = db.DBEnv()
         # enable auto deadlock avoidance
-        self.env.set_lk_detect(DB_LOCK_DEFAULT)
+        self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
         self.env.open(dbhome, myflags | flagsforenv)
         if truncate:
-            myflags |= DB_TRUNCATE
-        self.db = DB(self.env)
+            myflags |= db.DB_TRUNCATE
+        self.db = db.DB(self.env)
         # this code relies on DBCursor.set* methods to raise exceptions
         # rather than returning None
         self.db.set_get_returns_none(1)
         # allow duplicate entries [warning: be careful w/ metadata]
-        self.db.set_flags(DB_DUP)
-        self.db.open(filename, DB_BTREE, dbflags | myflags, mode)
+        self.db.set_flags(db.DB_DUP)
+        self.db.open(filename, db.DB_BTREE, dbflags | myflags, mode)
         self.dbfilename = filename
+
+        if sys.version_info[0] >= 3 :
+            class cursor_py3k(object) :
+                def __init__(self, dbcursor) :
+                    self._dbcursor = dbcursor
+
+                def close(self) :
+                    return self._dbcursor.close()
+
+                def set_range(self, search) :
+                    v = self._dbcursor.set_range(bytes(search, "iso8859-1"))
+                    if v != None :
+                        v = (v[0].decode("iso8859-1"),
+                                v[1].decode("iso8859-1"))
+                    return v
+
+                def __next__(self) :
+                    v = getattr(self._dbcursor, "next")()
+                    if v != None :
+                        v = (v[0].decode("iso8859-1"),
+                                v[1].decode("iso8859-1"))
+                    return v
+
+            class db_py3k(object) :
+                def __init__(self, db) :
+                    self._db = db
+
+                def cursor(self, txn=None) :
+                    return cursor_py3k(self._db.cursor(txn=txn))
+
+                def has_key(self, key, txn=None) :
+                    return getattr(self._db,"has_key")(bytes(key, "iso8859-1"),
+                            txn=txn)
+
+                def put(self, key, value, flags=0, txn=None) :
+                    key = bytes(key, "iso8859-1")
+                    if value != None :
+                        value = bytes(value, "iso8859-1")
+                    return self._db.put(key, value, flags=flags, txn=txn)
+
+                def put_bytes(self, key, value, txn=None) :
+                    key = bytes(key, "iso8859-1")
+                    return self._db.put(key, value, txn=txn)
+
+                def get(self, key, txn=None, flags=0) :
+                    key = bytes(key, "iso8859-1")
+                    v = self._db.get(key, txn=txn, flags=flags)
+                    if v != None :
+                        v = v.decode("iso8859-1")
+                    return v
+
+                def get_bytes(self, key, txn=None, flags=0) :
+                    key = bytes(key, "iso8859-1")
+                    return self._db.get(key, txn=txn, flags=flags)
+
+                def delete(self, key, txn=None) :
+                    key = bytes(key, "iso8859-1")
+                    return self._db.delete(key, txn=txn)
+
+                def close (self) :
+                    return self._db.close()
+
+            self.db = db_py3k(self.db)
+        else :  # Python 2.x
+            pass
+
         # Initialize the table names list if this is a new database
         txn = self.env.txn_begin()
         try:
-            if not self.db.has_key(_E(_table_names_key), txn):
-                self.db.put(_E(_table_names_key), pickle.dumps([], 1), txn=txn)
+            if not getattr(self.db, "has_key")(_table_names_key, txn):
+                getattr(self.db, "put_bytes", self.db.put) \
+                        (_table_names_key, pickle.dumps([], 1), txn=txn)
         # Yes, bare except
         except:
             txn.abort()
@@ -206,13 +263,13 @@
     def checkpoint(self, mins=0):
         try:
             self.env.txn_checkpoint(mins)
-        except DBIncompleteError:
+        except db.DBIncompleteError:
             pass
 
     def sync(self):
         try:
             self.db.sync()
-        except DBIncompleteError:
+        except db.DBIncompleteError:
             pass
 
     def _db_print(self) :
@@ -223,13 +280,13 @@
             key, data = cur.first()
             while 1:
                 print(repr({key: data}))
-                next = cur.next()
+                next = next(cur)
                 if next:
                     key, data = next
                 else:
                     cur.close()
                     return
-        except DBNotFoundError:
+        except db.DBNotFoundError:
             cur.close()
 
 
@@ -239,6 +296,7 @@
         raises TableDBError if it already exists or for other DB errors.
         """
         assert isinstance(columns, list)
+
         txn = None
         try:
             # checking sanity of the table and column names here on
@@ -252,29 +310,33 @@
                         "bad column name: contains reserved metastrings")
 
             columnlist_key = _columns_key(table)
-            if self.db.has_key(columnlist_key):
+            if getattr(self.db, "has_key")(columnlist_key):
                 raise TableAlreadyExists("table already exists")
 
             txn = self.env.txn_begin()
             # store the table's column info
-            self.db.put(columnlist_key, pickle.dumps(columns, 1), txn=txn)
+            getattr(self.db, "put_bytes", self.db.put)(columnlist_key,
+                    pickle.dumps(columns, 1), txn=txn)
 
             # add the table name to the tablelist
-            tablelist = pickle.loads(self.db.get(_E(_table_names_key), txn=txn,
-                                                 flags=DB_RMW))
+            tablelist = pickle.loads(getattr(self.db, "get_bytes",
+                self.db.get) (_table_names_key, txn=txn, flags=db.DB_RMW))
             tablelist.append(table)
             # delete 1st, in case we opened with DB_DUP
-            self.db.delete(_E(_table_names_key), txn=txn)
-            self.db.put(_E(_table_names_key), pickle.dumps(tablelist, 1), txn=txn)
+            self.db.delete(_table_names_key, txn=txn)
+            getattr(self.db, "put_bytes", self.db.put)(_table_names_key,
+                    pickle.dumps(tablelist, 1), txn=txn)
 
             txn.commit()
             txn = None
-        except DBError as dberror:
-            raise TableDBError(dberror.args[1])
-        finally:
+        except db.DBError as dberror:
             if txn:
                 txn.abort()
-                txn = None
+            if sys.version_info[0] < 3 :
+                raise TableDBError(dberror[1])
+            else :
+                raise TableDBError(dberror.args[1])
+
 
     def ListTableColumns(self, table):
         """Return a list of columns in the given table.
@@ -285,9 +347,10 @@
             raise ValueError("bad table name: contains reserved metastrings")
 
         columnlist_key = _columns_key(table)
-        if not self.db.has_key(columnlist_key):
+        if not getattr(self.db, "has_key")(columnlist_key):
             return []
-        pickledcolumnlist = self.db.get(columnlist_key)
+        pickledcolumnlist = getattr(self.db, "get_bytes",
+                self.db.get)(columnlist_key)
         if pickledcolumnlist:
             return pickle.loads(pickledcolumnlist)
         else:
@@ -295,7 +358,7 @@
 
     def ListTables(self):
         """Return a list of tables in this database."""
-        pickledtablelist = self.db.get(_E(_table_names_key))
+        pickledtablelist = self.db.get_get(_table_names_key)
         if pickledtablelist:
             return pickle.loads(pickledtablelist)
         else:
@@ -311,6 +374,7 @@
         all of its current columns.
         """
         assert isinstance(columns, list)
+
         try:
             self.CreateTable(table, columns)
         except TableAlreadyExists:
@@ -322,7 +386,8 @@
 
                 # load the current column list
                 oldcolumnlist = pickle.loads(
-                    self.db.get(columnlist_key, txn=txn, flags=DB_RMW))
+                    getattr(self.db, "get_bytes",
+                        self.db.get)(columnlist_key, txn=txn, flags=db.DB_RMW))
                 # create a hash table for fast lookups of column names in the
                 # loop below
                 oldcolumnhash = {}
@@ -340,7 +405,7 @@
                 if newcolumnlist != oldcolumnlist :
                     # delete the old one first since we opened with DB_DUP
                     self.db.delete(columnlist_key, txn=txn)
-                    self.db.put(columnlist_key,
+                    getattr(self.db, "put_bytes", self.db.put)(columnlist_key,
                                 pickle.dumps(newcolumnlist, 1),
                                 txn=txn)
 
@@ -348,19 +413,22 @@
                 txn = None
 
                 self.__load_column_info(table)
-            except DBError as dberror:
-                raise TableDBError(dberror.args[1])
-            finally:
+            except db.DBError as dberror:
                 if txn:
                     txn.abort()
+                if sys.version_info[0] < 3 :
+                    raise TableDBError(dberror[1])
+                else :
+                    raise TableDBError(dberror.args[1])
 
 
     def __load_column_info(self, table) :
         """initialize the self.__tablecolumns dict"""
         # check the column names
         try:
-            tcolpickles = self.db.get(_columns_key(table))
-        except DBNotFoundError:
+            tcolpickles = getattr(self.db, "get_bytes",
+                    self.db.get)(_columns_key(table))
+        except db.DBNotFoundError:
             raise TableDBError("unknown table: %r" % (table,))
         if not tcolpickles:
             raise TableDBError("unknown table: %r" % (table,))
@@ -376,13 +444,16 @@
             blist = []
             for x in range(_rowid_str_len):
                 blist.append(random.randint(0,255))
-            newid = bytes(blist)
+            newid = struct.pack('B'*_rowid_str_len, *blist)
+
+            if sys.version_info[0] >= 3 :
+                newid = newid.decode("iso8859-1")  # 8 bits
 
             # Guarantee uniqueness by adding this key to the database
             try:
                 self.db.put(_rowid_key(table, newid), None, txn=txn,
-                            flags=DB_NOOVERWRITE)
-            except DBKeyExistError:
+                            flags=db.DB_NOOVERWRITE)
+            except db.DBKeyExistError:
                 pass
             else:
                 unique = 1
@@ -394,15 +465,16 @@
         """Insert(table, datadict) - Insert a new row into the table
         using the keys+values from rowdict as the column values.
         """
+
         txn = None
         try:
-            if not self.db.has_key(_columns_key(table)):
+            if not getattr(self.db, "has_key")(_columns_key(table)):
                 raise TableDBError("unknown table")
 
             # check the validity of each column name
             if table not in self.__tablecolumns:
                 self.__load_column_info(table)
-            for column in rowdict.keys() :
+            for column in list(rowdict.keys()) :
                 if not self.__tablecolumns[table].count(column):
                     raise TableDBError("unknown column: %r" % (column,))
 
@@ -411,14 +483,14 @@
             rowid = self.__new_rowid(table, txn=txn)
 
             # insert the row values into the table database
-            for column, dataitem in rowdict.items():
+            for column, dataitem in list(rowdict.items()):
                 # store the value
                 self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)
 
             txn.commit()
             txn = None
 
-        except DBError as dberror:
+        except db.DBError as dberror:
             # WIBNI we could just abort the txn and re-raise the exception?
             # But no, because TableDBError is not related to DBError via
             # inheritance, so it would be backwards incompatible.  Do the next
@@ -427,11 +499,10 @@
             if txn:
                 txn.abort()
                 self.db.delete(_rowid_key(table, rowid))
-                txn = None
-            raise TableDBError(dberror.args[1]).with_traceback(info[2])
-        finally:
-            if txn:
-                txn.abort()
+            if sys.version_info[0] < 3 :
+                raise TableDBError(dberror[1]).with_traceback(info[2])
+            else :
+                raise TableDBError(dberror.args[1]).with_traceback(info[2])
 
 
     def Modify(self, table, conditions={}, mappings={}):
@@ -445,13 +516,13 @@
           condition callable expecting the data string as an argument and
           returning the new string for that column.
         """
+
         try:
             matching_rowids = self.__Select(table, [], conditions)
 
             # modify only requested columns
-            columns = mappings.keys()
-            for rowid in matching_rowids.keys():
-                rowid = rowid.encode("latin-1")
+            columns = list(mappings.keys())
+            for rowid in list(matching_rowids.keys()):
                 txn = None
                 try:
                     for column in columns:
@@ -464,7 +535,7 @@
                             self.db.delete(
                                 _data_key(table, column, rowid),
                                 txn=txn)
-                        except DBNotFoundError:
+                        except db.DBNotFoundError:
                              # XXXXXXX row key somehow didn't exist, assume no
                              # error
                             dataitem = None
@@ -477,12 +548,16 @@
                         txn = None
 
                 # catch all exceptions here since we call unknown callables
-                finally:
+                except:
                     if txn:
                         txn.abort()
+                    raise
 
-        except DBError as dberror:
-            raise TableDBError(dberror.args[1])
+        except db.DBError as dberror:
+            if sys.version_info[0] < 3 :
+                raise TableDBError(dberror[1])
+            else :
+                raise TableDBError(dberror.args[1])
 
     def Delete(self, table, conditions={}):
         """Delete(table, conditions) - Delete items matching the given
@@ -492,37 +567,41 @@
           condition functions expecting the data string as an
           argument and returning a boolean.
         """
+
         try:
             matching_rowids = self.__Select(table, [], conditions)
 
             # delete row data from all columns
             columns = self.__tablecolumns[table]
-            for rowid in matching_rowids.keys():
+            for rowid in list(matching_rowids.keys()):
                 txn = None
                 try:
                     txn = self.env.txn_begin()
                     for column in columns:
                         # delete the data key
                         try:
-                            self.db.delete(_data_key(table, column,
-                                                     rowid.encode("latin-1")),
+                            self.db.delete(_data_key(table, column, rowid),
                                            txn=txn)
-                        except DBNotFoundError:
+                        except db.DBNotFoundError:
                             # XXXXXXX column may not exist, assume no error
                             pass
 
                     try:
-                        self.db.delete(_rowid_key(table, rowid.encode("latin-1")), txn=txn)
-                    except DBNotFoundError:
+                        self.db.delete(_rowid_key(table, rowid), txn=txn)
+                    except db.DBNotFoundError:
                         # XXXXXXX row key somehow didn't exist, assume no error
                         pass
                     txn.commit()
                     txn = None
-                finally:
+                except db.DBError as dberror:
                     if txn:
                         txn.abort()
-        except DBError as dberror:
-            raise TableDBError(dberror.args[1])
+                    raise
+        except db.DBError as dberror:
+            if sys.version_info[0] < 3 :
+                raise TableDBError(dberror[1])
+            else :
+                raise TableDBError(dberror.args[1])
 
 
     def Select(self, table, columns, conditions={}):
@@ -541,10 +620,13 @@
             if columns is None:
                 columns = self.__tablecolumns[table]
             matching_rowids = self.__Select(table, columns, conditions)
-        except DBError as dberror:
-            raise TableDBError(dberror.args[1])
+        except db.DBError as dberror:
+            if sys.version_info[0] < 3 :
+                raise TableDBError(dberror[1])
+            else :
+                raise TableDBError(dberror.args[1])
         # return the matches as a list of dictionaries
-        return matching_rowids.values()
+        return list(matching_rowids.values())
 
 
     def __Select(self, table, columns, conditions):
@@ -595,8 +677,19 @@
             # leave all unknown condition callables alone as equals
             return 0
 
-        conditionlist = list(conditions.items())
-        conditionlist.sort(key=CmpToKey(cmp_conditions))
+        if sys.version_info[0] < 3 :
+            conditionlist = list(conditions.items())
+            conditionlist.sort(cmp_conditions)
+        else :  # Insertion Sort. Please, improve
+            conditionlist = []
+            for i in list(conditions.items()) :
+                for j, k in enumerate(conditionlist) :
+                    r = cmp_conditions(k, i)
+                    if r == 1 :
+                        conditionlist.insert(j, i)
+                        break
+                else :
+                    conditionlist.append(i)
 
         # Apply conditions to column data to find what we want
         cur = self.db.cursor()
@@ -614,7 +707,7 @@
                 key, data = cur.set_range(searchkey)
                 while key[:len(searchkey)] == searchkey:
                     # extract the rowid from the key
-                    rowid = key[-_rowid_str_len:].decode("latin-1")
+                    rowid = key[-_rowid_str_len:]
 
                     if rowid not in rejected_rowids:
                         # if no condition was specified or the condition
@@ -629,11 +722,15 @@
                                 del matching_rowids[rowid]
                             rejected_rowids[rowid] = rowid
 
-                    key, data = cur.next()
+                    key, data = next(cur)
 
-            except DBError as dberror:
-                if dberror.args[0] != DB_NOTFOUND:
-                    raise
+            except db.DBError as dberror:
+                if sys.version_info[0] < 3 :
+                    if dberror[0] != db.DB_NOTFOUND:
+                        raise
+                else :
+                    if dberror.args[0] != db.DB_NOTFOUND:
+                        raise
                 continue
 
         cur.close()
@@ -644,17 +741,20 @@
         # extract any remaining desired column data from the
         # database for the matching rows.
         if len(columns) > 0:
-            for rowid, rowdata in matching_rowids.items():
-                rowid = rowid.encode("latin-1")
+            for rowid, rowdata in list(matching_rowids.items()):
                 for column in columns:
                     if column in rowdata:
                         continue
                     try:
                         rowdata[column] = self.db.get(
                             _data_key(table, column, rowid))
-                    except DBError as dberror:
-                        if dberror.args[0] != DB_NOTFOUND:
-                            raise
+                    except db.DBError as dberror:
+                        if sys.version_info[0] < 3 :
+                            if dberror[0] != db.DB_NOTFOUND:
+                                raise
+                        else :
+                            if dberror.args[0] != db.DB_NOTFOUND:
+                                raise
                         rowdata[column] = None
 
         # return the matches
@@ -677,7 +777,7 @@
             while 1:
                 try:
                     key, data = cur.set_range(table_key)
-                except DBNotFoundError:
+                except db.DBNotFoundError:
                     break
                 # only delete items in this table
                 if key[:len(table_key)] != table_key:
@@ -689,7 +789,7 @@
             while 1:
                 try:
                     key, data = cur.set_range(table_key)
-                except DBNotFoundError:
+                except db.DBNotFoundError:
                     break
                 # only delete items in this table
                 if key[:len(table_key)] != table_key:
@@ -700,15 +800,17 @@
 
             # delete the tablename from the table name list
             tablelist = pickle.loads(
-                self.db.get(_E(_table_names_key), txn=txn, flags=DB_RMW))
+                getattr(self.db, "get_bytes", self.db.get)(_table_names_key,
+                    txn=txn, flags=db.DB_RMW))
             try:
                 tablelist.remove(table)
             except ValueError:
                 # hmm, it wasn't there, oh well, that's what we want.
                 pass
             # delete 1st, incase we opened with DB_DUP
-            self.db.delete(_E(_table_names_key), txn=txn)
-            self.db.put(_E(_table_names_key), pickle.dumps(tablelist, 1), txn=txn)
+            self.db.delete(_table_names_key, txn=txn)
+            getattr(self.db, "put_bytes", self.db.put)(_table_names_key,
+                    pickle.dumps(tablelist, 1), txn=txn)
 
             txn.commit()
             txn = None
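
The getattr(self.db, "get_bytes", self.db.get) and getattr(self.db, "put_bytes", self.db.put) calls above choose the *_bytes accessors when the underlying DB object provides them and quietly fall back to plain get/put otherwise. A tiny self-contained illustration of that idiom (the class is invented for the example):

    class FakeDB(object):
        def get(self, key, **kw):
            return b"fetched via get"
        # no get_bytes method defined, on purpose

    d = FakeDB()
    getter = getattr(d, "get_bytes", d.get)   # falls back to d.get
    print(getter("some key"))                 # -> b'fetched via get'
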
@@ -716,8 +818,10 @@
             if table in self.__tablecolumns:
                 del self.__tablecolumns[table]
 
-        except DBError as dberror:
-            raise TableDBError(dberror.args[1])
-        finally:
+        except db.DBError as dberror:
             if txn:
                 txn.abort()
+            if sys.version_info[0] < 3 :
+                raise TableDBError(dberror[1])
+            else :
+                raise TableDBError(dberror.args[1])


Modified: python/branches/py3k/Lib/bsddb/dbutils.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/dbutils.py	(original)
+++ python/branches/py3k/Lib/bsddb/dbutils.py	Sun Aug 31 16:12:11 2008
@@ -19,8 +19,20 @@
 #
 #------------------------------------------------------------------------
 
-import time
-from . import db
+
+#
+# import the time.sleep function in a namespace safe way to allow
+# "from bsddb.dbutils import *"
+#
+from time import sleep as _sleep
+
+import sys
+absolute_import = (sys.version_info[0] >= 3)
+if absolute_import :
+    # Because this syntax is not valid before Python 2.5
+    exec("from . import db")
+else :
+    from . import db
 
 # always sleep at least N seconds between retrys
 _deadlock_MinSleepTime = 1.0/128
@@ -54,22 +66,17 @@
     while True:
         try:
             return function(*_args, **_kwargs)
-        except db.DBLockDeadlockError as e:
+        except db.DBLockDeadlockError:
             if _deadlock_VerboseFile:
                 _deadlock_VerboseFile.write(
-                    'bsddb.dbutils.DeadlockWrap: ' +
-                    'sleeping %1.3f\n' % sleeptime)
-            time.sleep(sleeptime)
+                    'dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
+            _sleep(sleeptime)
             # exponential backoff in the sleep time
             sleeptime *= 2
             if sleeptime > _deadlock_MaxSleepTime:
                 sleeptime = _deadlock_MaxSleepTime
             max_retries -= 1
             if max_retries == -1:
-                if _deadlock_VerboseFile:
-                    _deadlock_VerboseFile.write(
-                    'bsddb.dbutils.DeadlockWrap: ' +
-                    'max_retries reached, reraising %s\n' % e)
                 raise
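
DeadlockWrap, shown above, keeps retrying the wrapped call when Berkeley DB reports a lock deadlock, doubling the sleep between attempts up to a ceiling and re-raising once the retry budget is spent. A standalone sketch of that backoff loop, with placeholder limits and a generic exception standing in for db.DBLockDeadlockError:

    import time

    def retry_with_backoff(function, *args, **kwargs):
        sleeptime = 1.0 / 128     # placeholder, mirrors _deadlock_MinSleepTime
        max_sleep = 1.0           # placeholder ceiling
        max_retries = 10          # placeholder retry budget
        while True:
            try:
                return function(*args, **kwargs)
            except RuntimeError:  # stand-in for db.DBLockDeadlockError
                time.sleep(sleeptime)
                sleeptime = min(sleeptime * 2, max_sleep)  # exponential backoff
                max_retries -= 1
                if max_retries == -1:
                    raise
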
 
 

Deleted: python/branches/py3k/Lib/bsddb/test/test_1413192.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_1413192.py	Sun Aug 31 16:12:11 2008
+++ (empty file)
@@ -1,48 +0,0 @@
-# http://bugs.python.org/issue1413192
-#
-# See the bug report for details.
-# The problem was that the env was deallocated prior to the txn.
-
-import shutil
-import tempfile
-from test.support import catch_warning
-import warnings
-
-try:
-    # For Pythons w/distutils and add-on pybsddb
-    from bsddb3 import db
-except ImportError:
-    # For Python >= 2.3 builtin bsddb distribution
-    from bsddb import db
-
-env_name = tempfile.mkdtemp()
-
-# Wrap test operation in a class so we can control destruction rather than
-# waiting for the controlling Python executable to exit
-
-class Context:
-
-    def __init__(self):
-        self.env = db.DBEnv()
-        self.env.open(env_name,
-                      db.DB_CREATE | db.DB_INIT_TXN | db.DB_INIT_MPOOL)
-        self.the_txn = self.env.txn_begin()
-
-        self.map = db.DB(self.env)
-        self.map.open('xxx.db', "p",
-                      db.DB_HASH, db.DB_CREATE, 0o666, txn=self.the_txn)
-        del self.env
-        del self.the_txn
-
-
-with catch_warning():
-    warnings.filterwarnings('ignore', 'DBTxn aborted in destructor')
-    context = Context()
-    del context
-
-
-# try not to leave a turd
-try:
-    shutil.rmtree(env_name)
-except EnvironmentError:
-    pass

Modified: python/branches/py3k/Lib/bsddb/test/test_all.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_all.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_all.py	Sun Aug 31 16:12:11 2008
@@ -6,18 +6,377 @@
 import unittest
 try:
     # For Pythons w/distutils pybsddb
-    from bsddb3 import db
+    import bsddb3 as bsddb
 except ImportError:
     # For Python 2.3
-    from bsddb import db
+    import bsddb
 
-verbose = False
+
+if sys.version_info[0] >= 3 :
+    charset = "iso8859-1"  # Full 8 bit
+
+    class cursor_py3k(object) :
+        def __init__(self, db, *args, **kwargs) :
+            self._dbcursor = db.cursor(*args, **kwargs)
+
+        def __getattr__(self, v) :
+            return getattr(self._dbcursor, v)
+
+        def _fix(self, v) :
+            if v == None : return None
+            key, value = v
+            if isinstance(key, bytes) :
+                key = key.decode(charset)
+            return (key, value.decode(charset))
+
+        def __next__(self) :
+            v = getattr(self._dbcursor, "next")()
+            return self._fix(v)
+
+        def previous(self) :
+            v = self._dbcursor.previous()
+            return self._fix(v)
+
+        def last(self) :
+            v = self._dbcursor.last()
+            return self._fix(v)
+
+        def set(self, k) :
+            if isinstance(k, str) :
+                k = bytes(k, charset)
+            v = self._dbcursor.set(k)
+            return self._fix(v)
+
+        def set_recno(self, num) :
+            v = self._dbcursor.set_recno(num)
+            return self._fix(v)
+
+        def set_range(self, k, dlen=-1, doff=-1) :
+            if isinstance(k, str) :
+                k = bytes(k, charset)
+            v = self._dbcursor.set_range(k, dlen=dlen, doff=doff)
+            return self._fix(v)
+
+        def dup(self, flags=0) :
+            cursor = self._dbcursor.dup(flags)
+            return dup_cursor_py3k(cursor)
+
+        def next_dup(self) :
+            v = self._dbcursor.next_dup()
+            return self._fix(v)
+
+        def put(self, key, value, flags=0, dlen=-1, doff=-1) :
+            if isinstance(key, str) :
+                key = bytes(key, charset)
+            if isinstance(value, str) :
+                value = bytes(value, charset)
+            return self._dbcursor.put(key, value, flags=flags, dlen=dlen,
+                    doff=doff)
+
+        def current(self, flags=0, dlen=-1, doff=-1) :
+            v = self._dbcursor.current(flags=flags, dlen=dlen, doff=doff)
+            return self._fix(v)
+
+        def first(self) :
+            v = self._dbcursor.first()
+            return self._fix(v)
+
+        def pget(self, key=None, data=None, flags=0) :
+            # Incorrect because key can be a bare number,
+            # but enough to pass testsuite
+            if isinstance(key, int) and (data==None) and (flags==0) :
+                flags = key
+                key = None
+            if isinstance(key, str) :
+                key = bytes(key, charset)
+            if isinstance(data, int) and (flags==0) :
+                flags = data
+                data = None
+            if isinstance(data, str) :
+                data = bytes(data, charset)
+            v=self._dbcursor.pget(key=key, data=data, flags=flags)
+            if v != None :
+                v1, v2, v3 = v
+                if isinstance(v1, bytes) :
+                    v1 = v1.decode(charset)
+                if isinstance(v2, bytes) :
+                    v2 = v2.decode(charset)
+
+                v = (v1, v2, v3.decode(charset))
+
+            return v
+
+        def join_item(self) :
+            v = self._dbcursor.join_item()
+            if v != None :
+                v = v.decode(charset)
+            return v
+
+        def get(self, *args, **kwargs) :
+            l = len(args)
+            if l == 2 :
+                k, f = args
+                if isinstance(k, str) :
+                    k = bytes(k, "iso8859-1")
+                args = (k, f)
+            elif l == 3 :
+                k, d, f = args
+                if isinstance(k, str) :
+                    k = bytes(k, charset)
+                if isinstance(d, str) :
+                    d = bytes(d, charset)
+                args =(k, d, f)
+
+            v = self._dbcursor.get(*args, **kwargs)
+            if v != None :
+                k, v = v
+                if isinstance(k, bytes) :
+                    k = k.decode(charset)
+                v = (k, v.decode(charset))
+            return v
+
+        def get_both(self, key, value) :
+            if isinstance(key, str) :
+                key = bytes(key, charset)
+            if isinstance(value, str) :
+                value = bytes(value, charset)
+            v=self._dbcursor.get_both(key, value)
+            return self._fix(v)
+
+    class dup_cursor_py3k(cursor_py3k) :
+        def __init__(self, dbcursor) :
+            self._dbcursor = dbcursor
+
+    class DB_py3k(object) :
+        def __init__(self, *args, **kwargs) :
+            args2=[]
+            for i in args :
+                if isinstance(i, DBEnv_py3k) :
+                    i = i._dbenv
+                args2.append(i)
+            args = tuple(args2)
+            for k, v in list(kwargs.items()) :
+                if isinstance(v, DBEnv_py3k) :
+                    kwargs[k] = v._dbenv
+
+            self._db = bsddb._db.DB_orig(*args, **kwargs)
+
+        def __contains__(self, k) :
+            if isinstance(k, str) :
+                k = bytes(k, charset)
+            return getattr(self._db, "has_key")(k)
+
+        def __getitem__(self, k) :
+            if isinstance(k, str) :
+                k = bytes(k, charset)
+            v = self._db[k]
+            if v != None :
+                v = v.decode(charset)
+            return v
+
+        def __setitem__(self, k, v) :
+            if isinstance(k, str) :
+                k = bytes(k, charset)
+            if isinstance(v, str) :
+                v = bytes(v, charset)
+            self._db[k] = v
+
+        def __delitem__(self, k) :
+            if isinstance(k, str) :
+                k = bytes(k, charset)
+            del self._db[k]
+
+        def __getattr__(self, v) :
+            return getattr(self._db, v)
+
+        def __len__(self) :
+            return len(self._db)
+
+        def has_key(self, k, txn=None) :
+            if isinstance(k, str) :
+                k = bytes(k, charset)
+            return self._db.has_key(k, txn=txn)
+
+        def put(self, key, value, txn=None, flags=0, dlen=-1, doff=-1) :
+            if isinstance(key, str) :
+                key = bytes(key, charset)
+            if isinstance(value, str) :
+                value = bytes(value, charset)
+            return self._db.put(key, value, flags=flags, txn=txn, dlen=dlen,
+                    doff=doff)
+
+        def append(self, value, txn=None) :
+            if isinstance(value, str) :
+                value = bytes(value, charset)
+            return self._db.append(value, txn=txn)
+
+        def get_size(self, key) :
+            if isinstance(key, str) :
+                key = bytes(key, charset)
+            return self._db.get_size(key)
+
+        def get(self, key, default="MagicCookie", txn=None, flags=0, dlen=-1, doff=-1) :
+            if isinstance(key, str) :
+                key = bytes(key, charset)
+            if default != "MagicCookie" :  # Magic for 'test_get_none.py'
+                v=self._db.get(key, default=default, txn=txn, flags=flags,
+                        dlen=dlen, doff=doff)
+            else :
+                v=self._db.get(key, txn=txn, flags=flags,
+                        dlen=dlen, doff=doff)
+            if (v != None) and isinstance(v, bytes) :
+                v = v.decode(charset)
+            return v
+
+        def pget(self, key, txn=None) :
+            if isinstance(key, str) :
+                key = bytes(key, charset)
+            v=self._db.pget(key, txn=txn)
+            if v != None :
+                v1, v2 = v
+                if isinstance(v1, bytes) :
+                    v1 = v1.decode(charset)
+
+                v = (v1, v2.decode(charset))
+            return v
+
+        def get_both(self, key, value, txn=None, flags=0) :
+            if isinstance(key, str) :
+                key = bytes(key, charset)
+            if isinstance(value, str) :
+                value = bytes(value, charset)
+            v=self._db.get_both(key, value, txn=txn, flags=flags)
+            if v != None :
+                v = v.decode(charset)
+            return v
+
+        def delete(self, key, txn=None) :
+            if isinstance(key, str) :
+                key = bytes(key, charset)
+            return self._db.delete(key, txn=txn)
+
+        def keys(self) :
+            k = list(self._db.keys())
+            if len(k) and isinstance(k[0], bytes) :
+                return [i.decode(charset) for i in list(self._db.keys())]
+            else :
+                return k
+
+        def items(self) :
+            data = list(self._db.items())
+            if not len(data) : return data
+            data2 = []
+            for k, v in data :
+                if isinstance(k, bytes) :
+                    k = k.decode(charset)
+                data2.append((k, v.decode(charset)))
+            return data2
+
+        def associate(self, secondarydb, callback, flags=0, txn=None) :
+            class associate_callback(object) :
+                def __init__(self, callback) :
+                    self._callback = callback
+
+                def callback(self, key, data) :
+                    if isinstance(key, str) :
+                        key = key.decode(charset)
+                    data = data.decode(charset)
+                    key = self._callback(key, data)
+                    if (key != bsddb._db.DB_DONOTINDEX) and isinstance(key,
+                            str) :
+                        key = bytes(key, charset)
+                    return key
+
+            return self._db.associate(secondarydb._db,
+                    associate_callback(callback).callback, flags=flags, txn=txn)
+
+        def cursor(self, txn=None, flags=0) :
+            return cursor_py3k(self._db, txn=txn, flags=flags)
+
+        def join(self, cursor_list) :
+            cursor_list = [i._dbcursor for i in cursor_list]
+            return dup_cursor_py3k(self._db.join(cursor_list))
+
+    class DBEnv_py3k(object) :
+        def __init__(self, *args, **kwargs) :
+            self._dbenv = bsddb._db.DBEnv_orig(*args, **kwargs)
+
+        def __getattr__(self, v) :
+            return getattr(self._dbenv, v)
+
+    class DBSequence_py3k(object) :
+        def __init__(self, db, *args, **kwargs) :
+            self._db=db
+            self._dbsequence = bsddb._db.DBSequence_orig(db._db, *args, **kwargs)
+
+        def __getattr__(self, v) :
+            return getattr(self._dbsequence, v)
+
+        def open(self, key, *args, **kwargs) :
+            return self._dbsequence.open(bytes(key, charset), *args, **kwargs)
+
+        def get_key(self) :
+            return  self._dbsequence.get_key().decode(charset)
+
+        def get_dbp(self) :
+            return self._db
+
+    import string
+    string.letters=[chr(i) for i in range(65,91)]
+
+    bsddb._db.DBEnv_orig = bsddb._db.DBEnv
+    bsddb._db.DB_orig = bsddb._db.DB
+    bsddb._db.DBSequence_orig = bsddb._db.DBSequence
+
+    def do_proxy_db_py3k(flag) :
+        flag2 = do_proxy_db_py3k.flag
+        do_proxy_db_py3k.flag = flag
+        if flag :
+            bsddb.DBEnv = bsddb.db.DBEnv = bsddb._db.DBEnv = DBEnv_py3k
+            bsddb.DB = bsddb.db.DB = bsddb._db.DB = DB_py3k
+            bsddb._db.DBSequence = DBSequence_py3k
+        else :
+            bsddb.DBEnv = bsddb.db.DBEnv = bsddb._db.DBEnv = bsddb._db.DBEnv_orig
+            bsddb.DB = bsddb.db.DB = bsddb._db.DB = bsddb._db.DB_orig
+            bsddb._db.DBSequence = bsddb._db.DBSequence_orig
+        return flag2
+
+    do_proxy_db_py3k.flag = False
+    do_proxy_db_py3k(True)
+
+try:
+    # For Pythons w/distutils pybsddb
+    from bsddb3 import db, dbtables, dbutils, dbshelve, \
+            hashopen, btopen, rnopen, dbobj
+except ImportError:
+    # For Python 2.3
+    from bsddb import db, dbtables, dbutils, dbshelve, \
+            hashopen, btopen, rnopen, dbobj
+
+try:
+    from bsddb3 import test_support
+except ImportError:
+    from test import test_support
+
+
+try:
+    if sys.version_info[0] < 3 :
+        from threading import Thread, currentThread
+        del Thread, currentThread
+    else :
+        from threading import Thread, current_thread
+        del Thread, current_thread
+    have_threads = True
+except ImportError:
+    have_threads = False
+
+verbose = 0
 if 'verbose' in sys.argv:
-    verbose = True
+    verbose = 1
     sys.argv.remove('verbose')
 
 if 'silent' in sys.argv:  # take care of old flag, just in case
-    verbose = False
+    verbose = 0
     sys.argv.remove('silent')
 
 
@@ -28,11 +387,71 @@
     print('bsddb.db.version():   %s' % (db.version(), ))
     print('bsddb.db.__version__: %s' % db.__version__)
     print('bsddb.db.cvsid:       %s' % db.cvsid)
+    print('py module:            %s' % bsddb.__file__)
+    print('extension module:     %s' % bsddb._bsddb.__file__)
     print('python version:       %s' % sys.version)
     print('My pid:               %s' % os.getpid())
     print('-=' * 38)
 
 
+def get_new_path(name) :
+    get_new_path.mutex.acquire()
+    try :
+        import os
+        path=os.path.join(get_new_path.prefix,
+                name+"_"+str(os.getpid())+"_"+str(get_new_path.num))
+        get_new_path.num+=1
+    finally :
+        get_new_path.mutex.release()
+    return path
+
+def get_new_environment_path() :
+    path=get_new_path("environment")
+    import os
+    try:
+        os.makedirs(path,mode=0o700)
+    except os.error:
+        test_support.rmtree(path)
+        os.makedirs(path)
+    return path
+
+def get_new_database_path() :
+    path=get_new_path("database")
+    import os
+    if os.path.exists(path) :
+        os.remove(path)
+    return path
+
+
+# This path can be overridden via "set_test_path_prefix()".
+import os, os.path
+get_new_path.prefix=os.path.join(os.sep,"tmp","z-Berkeley_DB")
+get_new_path.num=0
+
+def get_test_path_prefix() :
+    return get_new_path.prefix
+
+def set_test_path_prefix(path) :
+    get_new_path.prefix=path
+
+def remove_test_path_directory() :
+    test_support.rmtree(get_new_path.prefix)
+
+if have_threads :
+    import threading
+    get_new_path.mutex=threading.Lock()
+    del threading
+else :
+    class Lock(object) :
+        def acquire(self) :
+            pass
+        def release(self) :
+            pass
+    get_new_path.mutex=Lock()
+    del Lock
+
+
+
 class PrintInfoFakeTest(unittest.TestCase):
     def testPrintVersions(self):
         print_versions()
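
get_new_environment_path() and get_new_database_path(), added above, replace the per-test tempfile/mkdir boilerplate: every call hands back a unique path under a shared prefix (pid plus a counter, protected by a lock when threading is available). A sketch of how a test case uses them, mirroring the setUp/tearDown rewrites later in this commit:

    import unittest
    # db, test_support and the path helpers are re-exported by
    # bsddb.test.test_all (the test modules below import them with
    # "from .test_all import ...").
    from bsddb.test.test_all import db, test_support, get_new_environment_path

    class ExampleTestCase(unittest.TestCase):
        def setUp(self):
            self.homeDir = get_new_environment_path()   # fresh, unique dir
            self.env = db.DBEnv()
            self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)

        def tearDown(self):
            self.env.close()
            test_support.rmtree(self.homeDir)           # remove the home dir
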
@@ -41,30 +460,26 @@
 # This little hack is for when this module is run as main and all the
 # other modules import it so they will still be able to get the right
 # verbose setting.  It's confusing but it works.
-try:
-    import test_all
-except ImportError:
-    pass
-else:
+if sys.version_info[0] < 3 :
+    from . import test_all
     test_all.verbose = verbose
+else :
+    import sys
+    print("Work to do!", file=sys.stderr)
 
 
-def suite():
-    try:
-        # this is special, it used to segfault the interpreter
-        import test_1413192
-    except:
-        pass
-
+def suite(module_prefix='', timing_check=None):
     test_modules = [
         'test_associate',
         'test_basics',
-        'test_compat',
         'test_compare',
+        'test_compat',
+        'test_cursor_pget_bug',
         'test_dbobj',
         'test_dbshelve',
         'test_dbtables',
-        'test_env_close',
+        'test_distributed_transactions',
+        'test_early_close',
         'test_get_none',
         'test_join',
         'test_lock',
@@ -72,15 +487,21 @@
         'test_pickle',
         'test_queue',
         'test_recno',
-        'test_thread',
+        'test_replication',
         'test_sequence',
-        'test_cursor_pget_bug',
+        'test_thread',
         ]
 
     alltests = unittest.TestSuite()
     for name in test_modules:
-        module = __import__(name)
+        #module = __import__(name)
+        # Do it this way so that suite may be called externally via
+        # python's Lib/test/test_bsddb3.
+        module = __import__(module_prefix+name, globals(), locals(), name)
+
         alltests.addTest(module.test_suite())
+        if timing_check:
+            alltests.addTest(unittest.makeSuite(timing_check))
     return alltests
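
As the inline comment notes, suite() now accepts a module_prefix so that Lib/test/test_bsddb3 can import the test modules by their full dotted names, plus an optional timing_check TestCase interleaved between modules. A hypothetical external invocation (the prefix value is an assumption for illustration, not taken from this commit):

    import unittest
    from bsddb.test import test_all

    alltests = test_all.suite(module_prefix='bsddb.test.')
    unittest.TextTestRunner(verbosity=1).run(alltests)
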
 
 

Modified: python/branches/py3k/Lib/bsddb/test/test_associate.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_associate.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_associate.py	Sun Aug 31 16:12:11 2008
@@ -2,32 +2,13 @@
 TestCases for DB.associate.
 """
 
-import shutil
-import sys, os
-import tempfile
+import sys, os, string
 import time
 from pprint import pprint
 
-try:
-    from threading import Thread, current_thread
-    have_threads = 1
-except ImportError:
-    have_threads = 0
-
 import unittest
-from bsddb.test.test_all import verbose
-
-try:
-    # For Pythons w/distutils pybsddb
-    from bsddb3 import db, dbshelve
-except ImportError:
-    # For Python 2.3
-    from bsddb import db, dbshelve
-
-try:
-    from bsddb3 import test_support
-except ImportError:
-    from test import support as test_support
+from .test_all import db, dbshelve, test_support, verbose, have_threads, \
+        get_new_environment_path
 
 
 #----------------------------------------------------------------------
@@ -97,15 +78,7 @@
 class AssociateErrorTestCase(unittest.TestCase):
     def setUp(self):
         self.filename = self.__class__.__name__ + '.db'
-        homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
-        self.homeDir = homeDir
-        try:
-            os.mkdir(homeDir)
-        except os.error:
-            import glob
-            files = glob.glob(os.path.join(self.homeDir, '*'))
-            for file in files:
-                os.remove(file)
+        self.homeDir = get_new_environment_path()
         self.env = db.DBEnv()
         self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
 
@@ -128,7 +101,7 @@
         secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE)
 
         # dupDB has been configured to allow duplicates, it can't
-        # associate with a secondary.  BerkeleyDB will return an error.
+        # associate with a secondary.  Berkeley DB will return an error.
         try:
             def f(a,b): return a+b
             dupDB.associate(secDB, f)
@@ -153,15 +126,7 @@
 
     def setUp(self):
         self.filename = self.__class__.__name__ + '.db'
-        homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
-        self.homeDir = homeDir
-        try:
-            os.mkdir(homeDir)
-        except os.error:
-            import glob
-            files = glob.glob(os.path.join(self.homeDir, '*'))
-            for file in files:
-                os.remove(file)
+        self.homeDir = get_new_environment_path()
         self.env = db.DBEnv()
         self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
                                db.DB_INIT_LOCK | db.DB_THREAD | self.envFlags)
@@ -170,13 +135,13 @@
         self.closeDB()
         self.env.close()
         self.env = None
-        shutil.rmtree(self.homeDir)
+        test_support.rmtree(self.homeDir)
 
     def addDataToDB(self, d, txn=None):
-        for key, value in musicdata.items():
+        for key, value in list(musicdata.items()):
             if type(self.keytype) == type(''):
-                key = ("%02d" % key).encode("utf-8")
-            d.put(key, '|'.join(value).encode("utf-8"), txn=txn)
+                key = "%02d" % key
+            d.put(key, '|'.join(value), txn=txn)
 
     def createDB(self, txn=None):
         self.cur = None
@@ -246,14 +211,14 @@
 
     def finish_test(self, secDB, txn=None):
         # 'Blues' should not be in the secondary database
-        vals = secDB.pget(b'Blues', txn=txn)
-        assert vals == None, vals
+        vals = secDB.pget('Blues', txn=txn)
+        self.assertEqual(vals, None, vals)
 
-        vals = secDB.pget(b'Unknown', txn=txn)
-        assert vals[0] == 99 or vals[0] == b'99', vals
-        vals[1].index(b'Unknown')
-        vals[1].index(b'Unnamed')
-        vals[1].index(b'unknown')
+        vals = secDB.pget('Unknown', txn=txn)
+        self.assert_(vals[0] == 99 or vals[0] == '99', vals)
+        vals[1].index('Unknown')
+        vals[1].index('Unnamed')
+        vals[1].index('unknown')
 
         if verbose:
             print("Primary key traversal:")
@@ -262,14 +227,14 @@
         rec = self.cur.first()
         while rec is not None:
             if type(self.keytype) == type(''):
-                assert int(rec[0])  # for primary db, key is a number
+                self.assert_(int(rec[0]))  # for primary db, key is a number
             else:
-                assert rec[0] and type(rec[0]) == type(0)
+                self.assert_(rec[0] and type(rec[0]) == type(0))
             count = count + 1
             if verbose:
                 print(rec)
-            rec = self.cur.next()
-        assert count == len(musicdata) # all items accounted for
+            rec = getattr(self.cur, "next")()
+        self.assertEqual(count, len(musicdata))  # all items accounted for
 
 
         if verbose:
@@ -278,38 +243,39 @@
         count = 0
 
         # test cursor pget
-        vals = self.cur.pget(b'Unknown', flags=db.DB_LAST)
-        assert vals[1] == 99 or vals[1] == b'99', vals
-        assert vals[0] == b'Unknown'
-        vals[2].index(b'Unknown')
-        vals[2].index(b'Unnamed')
-        vals[2].index(b'unknown')
+        vals = self.cur.pget('Unknown', flags=db.DB_LAST)
+        self.assert_(vals[1] == 99 or vals[1] == '99', vals)
+        self.assertEqual(vals[0], 'Unknown')
+        vals[2].index('Unknown')
+        vals[2].index('Unnamed')
+        vals[2].index('unknown')
 
-        vals = self.cur.pget(b'Unknown', data=b'wrong value', flags=db.DB_GET_BOTH)
-        assert vals == None, vals
+        vals = self.cur.pget('Unknown', data='wrong value', flags=db.DB_GET_BOTH)
+        self.assertEqual(vals, None, vals)
 
         rec = self.cur.first()
-        assert rec[0] == b"Jazz"
+        self.assertEqual(rec[0], "Jazz")
         while rec is not None:
             count = count + 1
             if verbose:
                 print(rec)
-            rec = self.cur.next()
+            rec = getattr(self.cur, "next")()
         # all items accounted for EXCEPT for 1 with "Blues" genre
-        assert count == len(musicdata)-1
+        self.assertEqual(count, len(musicdata)-1)
 
         self.cur = None
 
     def getGenre(self, priKey, priData):
-        assert type(priData) == type(b"")
-        priData = priData.decode("utf-8")
+        self.assertEqual(type(priData), type(""))
+        genre = priData.split('|')[2]
+
         if verbose:
             print('getGenre key: %r data: %r' % (priKey, priData))
-        genre = priData.split('|')[2]
+
         if genre == 'Blues':
             return db.DB_DONOTINDEX
         else:
-            return genre.encode("utf-8")
+            return genre
 
 
 #----------------------------------------------------------------------
@@ -380,21 +346,21 @@
                                      filetype=self.dbtype)
 
     def addDataToDB(self, d):
-        for key, value in musicdata.items():
+        for key, value in list(musicdata.items()):
             if type(self.keytype) == type(''):
-                key = ("%02d" % key).encode("utf-8")
+                key = "%02d" % key
             d.put(key, value)    # save the value as is this time
 
 
     def getGenre(self, priKey, priData):
-        assert type(priData) == type(())
+        self.assertEqual(type(priData), type(()))
         if verbose:
             print('getGenre key: %r data: %r' % (priKey, priData))
         genre = priData[2]
         if genre == 'Blues':
             return db.DB_DONOTINDEX
         else:
-            return genre.encode("utf-8")
+            return genre
 
 
 class ShelveAssociateHashTestCase(ShelveAssociateTestCase):
@@ -418,15 +384,17 @@
         t2 = Thread(target = self.writer2,
                     args = (d, ))
 
+        t1.setDaemon(True)
+        t2.setDaemon(True)
         t1.start()
         t2.start()
         t1.join()
         t2.join()
 
     def writer1(self, d):
-        for key, value in musicdata.items():
+        for key, value in list(musicdata.items()):
             if type(self.keytype) == type(''):
-                key = ("%02d" % key).encode("utf-8")
+                key = "%02d" % key
             d.put(key, '|'.join(value))
 
     def writer2(self, d):
@@ -452,24 +420,23 @@
 def test_suite():
     suite = unittest.TestSuite()
 
-    if db.version() >= (3, 3, 11):
-        suite.addTest(unittest.makeSuite(AssociateErrorTestCase))
-
-        suite.addTest(unittest.makeSuite(AssociateHashTestCase))
-        suite.addTest(unittest.makeSuite(AssociateBTreeTestCase))
-        suite.addTest(unittest.makeSuite(AssociateRecnoTestCase))
-
-        if db.version() >= (4, 1):
-            suite.addTest(unittest.makeSuite(AssociateBTreeTxnTestCase))
+    suite.addTest(unittest.makeSuite(AssociateErrorTestCase))
 
-        suite.addTest(unittest.makeSuite(ShelveAssociateHashTestCase))
-        suite.addTest(unittest.makeSuite(ShelveAssociateBTreeTestCase))
-        suite.addTest(unittest.makeSuite(ShelveAssociateRecnoTestCase))
-
-        if have_threads:
-            suite.addTest(unittest.makeSuite(ThreadedAssociateHashTestCase))
-            suite.addTest(unittest.makeSuite(ThreadedAssociateBTreeTestCase))
-            suite.addTest(unittest.makeSuite(ThreadedAssociateRecnoTestCase))
+    suite.addTest(unittest.makeSuite(AssociateHashTestCase))
+    suite.addTest(unittest.makeSuite(AssociateBTreeTestCase))
+    suite.addTest(unittest.makeSuite(AssociateRecnoTestCase))
+
+    if db.version() >= (4, 1):
+        suite.addTest(unittest.makeSuite(AssociateBTreeTxnTestCase))
+
+    suite.addTest(unittest.makeSuite(ShelveAssociateHashTestCase))
+    suite.addTest(unittest.makeSuite(ShelveAssociateBTreeTestCase))
+    suite.addTest(unittest.makeSuite(ShelveAssociateRecnoTestCase))
+
+    if have_threads:
+        suite.addTest(unittest.makeSuite(ThreadedAssociateHashTestCase))
+        suite.addTest(unittest.makeSuite(ThreadedAssociateBTreeTestCase))
+        suite.addTest(unittest.makeSuite(ThreadedAssociateRecnoTestCase))
 
     return suite
 

Modified: python/branches/py3k/Lib/bsddb/test/test_basics.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_basics.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_basics.py	Sun Aug 31 16:12:11 2008
@@ -4,29 +4,17 @@
 """
 
 import os
-import sys
 import errno
 import string
-import tempfile
 from pprint import pprint
 import unittest
 import time
 
-try:
-    # For Pythons w/distutils pybsddb
-    from bsddb3 import db
-except ImportError:
-    # For Python 2.3
-    from bsddb import db
-
-from bsddb.test.test_all import verbose
-try:
-    from bsddb3 import test_support
-except ImportError:
-    from test import support as test_support
+from .test_all import db, test_support, verbose, get_new_environment_path, \
+        get_new_database_path
+
+DASH = '-'
 
-DASH = b'-'
-letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
 
 #----------------------------------------------------------------------
 
@@ -38,8 +26,8 @@
             print('bsddb.db.version(): %s' % (info, ))
             print(db.DB_VERSION_STRING)
             print('-=' * 20)
-        assert info == (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR,
-                        db.DB_VERSION_PATCH)
+        self.assertEqual(info, (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR,
+                        db.DB_VERSION_PATCH))
 
 #----------------------------------------------------------------------
 
@@ -57,10 +45,7 @@
 
     def setUp(self):
         if self.useEnv:
-            homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
-            self.homeDir = homeDir
-            test_support.rmtree(homeDir)
-            os.mkdir(homeDir)
+            self.homeDir=get_new_environment_path()
             try:
                 self.env = db.DBEnv()
                 self.env.set_lg_max(1024*1024)
@@ -68,17 +53,14 @@
                 self.env.set_tx_timestamp(int(time.time()))
                 self.env.set_flags(self.envsetflags, 1)
                 self.env.open(self.homeDir, self.envflags | db.DB_CREATE)
-                old_tempfile_tempdir = tempfile.tempdir
-                tempfile.tempdir = self.homeDir
-                self.filename = os.path.split(tempfile.mktemp())[1]
-                tempfile.tempdir = old_tempfile_tempdir
+                self.filename = "test"
             # Yes, a bare except is intended, since we're re-raising the exc.
             except:
-                test_support.rmtree(homeDir)
+                test_support.rmtree(self.homeDir)
                 raise
         else:
             self.env = None
-            self.filename = tempfile.mktemp()
+            self.filename = get_new_database_path()
 
         # create and open the DB
         self.d = db.DB(self.env)
@@ -100,13 +82,6 @@
         if self.env is not None:
             self.env.close()
             test_support.rmtree(self.homeDir)
-            ## XXX(nnorwitz): is this comment stil valid?
-            ## Make a new DBEnv to remove the env files from the home dir.
-            ## (It can't be done while the env is open, nor after it has been
-            ## closed, so we make a new one to do it.)
-            #e = db.DBEnv()
-            #e.remove(self.homeDir)
-            #os.remove(os.path.join(self.homeDir, self.filename))
         else:
             os.remove(self.filename)
 
@@ -117,15 +92,13 @@
 
         for x in range(self._numKeys//2):
             key = '%04d' % (self._numKeys - x)  # insert keys in reverse order
-            key = key.encode("utf-8")
             data = self.makeData(key)
             d.put(key, data, _txn)
 
-        d.put(b'empty value', b'', _txn)
+        d.put('empty value', '', _txn)
 
         for x in range(self._numKeys//2-1):
             key = '%04d' % x  # and now some in forward order
-            key = key.encode("utf-8")
             data = self.makeData(key)
             d.put(key, data, _txn)
 
@@ -151,49 +124,57 @@
             print('\n', '-=' * 30)
             print("Running %s.test01_GetsAndPuts..." % self.__class__.__name__)
 
-        for key in [b'0001', b'0100', b'0400', b'0700', b'0999']:
+        for key in ['0001', '0100', '0400', '0700', '0999']:
             data = d.get(key)
             if verbose:
                 print(data)
 
-        assert d.get(b'0321') == b'0321-0321-0321-0321-0321'
+        self.assertEqual(d.get('0321'), '0321-0321-0321-0321-0321')
 
         # By default non-existant keys return None...
-        assert d.get(b'abcd') == None
+        self.assertEqual(d.get('abcd'), None)
 
         # ...but they raise exceptions in other situations.  Call
         # set_get_returns_none() to change it.
         try:
-            d.delete(b'abcd')
+            d.delete('abcd')
         except db.DBNotFoundError as val:
-            assert val.args[0] == db.DB_NOTFOUND
+            import sys
+            if sys.version_info[0] < 3 :
+                self.assertEqual(val[0], db.DB_NOTFOUND)
+            else :
+                self.assertEqual(val.args[0], db.DB_NOTFOUND)
             if verbose: print(val)
         else:
             self.fail("expected exception")
 
 
-        d.put(b'abcd', b'a new record')
-        assert d.get(b'abcd') == b'a new record'
+        d.put('abcd', 'a new record')
+        self.assertEqual(d.get('abcd'), 'a new record')
 
-        d.put(b'abcd', b'same key')
+        d.put('abcd', 'same key')
         if self.dbsetflags & db.DB_DUP:
-            assert d.get(b'abcd') == b'a new record'
+            self.assertEqual(d.get('abcd'), 'a new record')
         else:
-            assert d.get(b'abcd') == b'same key'
+            self.assertEqual(d.get('abcd'), 'same key')
 
 
         try:
-            d.put(b'abcd', b'this should fail', flags=db.DB_NOOVERWRITE)
+            d.put('abcd', 'this should fail', flags=db.DB_NOOVERWRITE)
         except db.DBKeyExistError as val:
-            assert val.args[0] == db.DB_KEYEXIST
+            import sys
+            if sys.version_info[0] < 3 :
+                self.assertEqual(val[0], db.DB_KEYEXIST)
+            else :
+                self.assertEqual(val.args[0], db.DB_KEYEXIST)
             if verbose: print(val)
         else:
             self.fail("expected exception")
 
         if self.dbsetflags & db.DB_DUP:
-            assert d.get(b'abcd') == b'a new record'
+            self.assertEqual(d.get('abcd'), 'a new record')
         else:
-            assert d.get(b'abcd') == b'same key'
+            self.assertEqual(d.get('abcd'), 'same key')
 
 
         d.sync()
@@ -207,28 +188,28 @@
             self.d.open(self.filename)
         d = self.d
 
-        assert d.get(b'0321') == b'0321-0321-0321-0321-0321'
+        self.assertEqual(d.get('0321'), '0321-0321-0321-0321-0321')
         if self.dbsetflags & db.DB_DUP:
-            assert d.get(b'abcd') == b'a new record'
+            self.assertEqual(d.get('abcd'), 'a new record')
         else:
-            assert d.get(b'abcd') == b'same key'
+            self.assertEqual(d.get('abcd'), 'same key')
 
-        rec = d.get_both(b'0555', b'0555-0555-0555-0555-0555')
+        rec = d.get_both('0555', '0555-0555-0555-0555-0555')
         if verbose:
             print(rec)
 
-        assert d.get_both(b'0555', b'bad data') == None
+        self.assertEqual(d.get_both('0555', 'bad data'), None)
 
         # test default value
-        data = d.get(b'bad key', b'bad data')
-        assert data == b'bad data'
+        data = d.get('bad key', 'bad data')
+        self.assertEqual(data, 'bad data')
 
         # any object can pass through
-        data = d.get(b'bad key', self)
-        assert data == self
+        data = d.get('bad key', self)
+        self.assertEqual(data, self)
 
         s = d.stat()
-        assert type(s) == type({})
+        self.assertEqual(type(s), type({}))
         if verbose:
             print('d.stat() returned this dictionary:')
             pprint(s)
@@ -244,49 +225,51 @@
             print("Running %s.test02_DictionaryMethods..." % \
                   self.__class__.__name__)
 
-        for key in [b'0002', b'0101', b'0401', b'0701', b'0998']:
+        for key in ['0002', '0101', '0401', '0701', '0998']:
             data = d[key]
-            assert data == self.makeData(key)
+            self.assertEqual(data, self.makeData(key))
             if verbose:
                 print(data)
 
-        assert len(d) == self._numKeys
-        keys = d.keys()
-        assert len(keys) == self._numKeys
-        assert type(keys) == type([])
-
-        d[b'new record'] = b'a new record'
-        assert len(d) == self._numKeys+1
-        keys = d.keys()
-        assert len(keys) == self._numKeys+1
-
-        d[b'new record'] = b'a replacement record'
-        assert len(d) == self._numKeys+1
-        keys = d.keys()
-        assert len(keys) == self._numKeys+1
+        self.assertEqual(len(d), self._numKeys)
+        keys = list(d.keys())
+        self.assertEqual(len(keys), self._numKeys)
+        self.assertEqual(type(keys), type([]))
+
+        d['new record'] = 'a new record'
+        self.assertEqual(len(d), self._numKeys+1)
+        keys = list(d.keys())
+        self.assertEqual(len(keys), self._numKeys+1)
+
+        d['new record'] = 'a replacement record'
+        self.assertEqual(len(d), self._numKeys+1)
+        keys = list(d.keys())
+        self.assertEqual(len(keys), self._numKeys+1)
 
         if verbose:
             print("the first 10 keys are:")
             pprint(keys[:10])
 
-        assert d[b'new record'] == b'a replacement record'
-
-        assert d.has_key(b'0001') == 1
-        assert d.has_key(b'spam') == 0
+        self.assertEqual(d['new record'], 'a replacement record')
 
-        items = d.items()
-        assert len(items) == self._numKeys+1
-        assert type(items) == type([])
-        assert type(items[0]) == type(())
-        assert len(items[0]) == 2
+# We also check the positional parameter
+        self.assertEqual(d.has_key('0001', None), 1)
+# We also check the keyword parameter
+        self.assertEqual(d.has_key('spam', txn=None), 0)
+
+        items = list(d.items())
+        self.assertEqual(len(items), self._numKeys+1)
+        self.assertEqual(type(items), type([]))
+        self.assertEqual(type(items[0]), type(()))
+        self.assertEqual(len(items[0]), 2)
 
         if verbose:
             print("the first 10 items are:")
             pprint(items[:10])
 
-        values = d.values()
-        assert len(values) == self._numKeys+1
-        assert type(values) == type([])
+        values = list(d.values())
+        self.assertEqual(len(values), self._numKeys+1)
+        self.assertEqual(type(values), type([]))
 
         if verbose:
             print("the first 10 values are:")
@@ -315,17 +298,22 @@
             if verbose and count % 100 == 0:
                 print(rec)
             try:
-                rec = c.next()
+                rec = next(c)
             except db.DBNotFoundError as val:
                 if get_raises_error:
-                    assert val.args[0] == db.DB_NOTFOUND
+                    import sys
+                    if sys.version_info[0] < 3 :
+                        self.assertEqual(val[0], db.DB_NOTFOUND)
+                    else :
+                        self.assertEqual(val.args[0], db.DB_NOTFOUND)
                     if verbose: print(val)
                     rec = None
                 else:
                     self.fail("unexpected DBNotFoundError")
-            assert c.get_current_size() == len(c.current()[1]), "%s != len(%r)" % (c.get_current_size(), c.current()[1])
+            self.assertEqual(c.get_current_size(), len(c.current()[1]),
+                    "%s != len(%r)" % (c.get_current_size(), c.current()[1]))
 
-        assert count == self._numKeys
+        self.assertEqual(count, self._numKeys)
 
 
         rec = c.last()
@@ -338,73 +326,89 @@
                 rec = c.prev()
             except db.DBNotFoundError as val:
                 if get_raises_error:
-                    assert val.args[0] == db.DB_NOTFOUND
+                    import sys
+                    if sys.version_info[0] < 3 :
+                        self.assertEqual(val[0], db.DB_NOTFOUND)
+                    else :
+                        self.assertEqual(val.args[0], db.DB_NOTFOUND)
                     if verbose: print(val)
                     rec = None
                 else:
                     self.fail("unexpected DBNotFoundError")
 
-        assert count == self._numKeys
+        self.assertEqual(count, self._numKeys)
 
-        rec = c.set(b'0505')
+        rec = c.set('0505')
         rec2 = c.current()
-        assert rec == rec2, (repr(rec),repr(rec2))
-        assert rec[0] == b'0505'
-        assert rec[1] == self.makeData(b'0505')
-        assert c.get_current_size() == len(rec[1])
+        self.assertEqual(rec, rec2)
+        self.assertEqual(rec[0], '0505')
+        self.assertEqual(rec[1], self.makeData('0505'))
+        self.assertEqual(c.get_current_size(), len(rec[1]))
 
         # make sure we get empty values properly
-        rec = c.set(b'empty value')
-        assert rec[1] == b''
-        assert c.get_current_size() == 0
+        rec = c.set('empty value')
+        self.assertEqual(rec[1], '')
+        self.assertEqual(c.get_current_size(), 0)
 
         try:
-            n = c.set(b'bad key')
+            n = c.set('bad key')
         except db.DBNotFoundError as val:
-            assert val.args[0] == db.DB_NOTFOUND
+            import sys
+            if sys.version_info[0] < 3 :
+                self.assertEqual(val[0], db.DB_NOTFOUND)
+            else :
+                self.assertEqual(val.args[0], db.DB_NOTFOUND)
             if verbose: print(val)
         else:
             if set_raises_error:
                 self.fail("expected exception")
-            if n is not None:
+            if n != None:
                 self.fail("expected None: %r" % (n,))
 
-        rec = c.get_both(b'0404', self.makeData(b'0404'))
-        assert rec == (b'0404', self.makeData(b'0404'))
+        rec = c.get_both('0404', self.makeData('0404'))
+        self.assertEqual(rec, ('0404', self.makeData('0404')))
 
         try:
-            n = c.get_both(b'0404', b'bad data')
+            n = c.get_both('0404', 'bad data')
         except db.DBNotFoundError as val:
-            assert val.args[0] == db.DB_NOTFOUND
+            import sys
+            if sys.version_info[0] < 3 :
+                self.assertEqual(val[0], db.DB_NOTFOUND)
+            else :
+                self.assertEqual(val.args[0], db.DB_NOTFOUND)
             if verbose: print(val)
         else:
             if get_raises_error:
                 self.fail("expected exception")
-            if n is not None:
+            if n != None:
                 self.fail("expected None: %r" % (n,))
 
         if self.d.get_type() == db.DB_BTREE:
-            rec = c.set_range(b'011')
+            rec = c.set_range('011')
             if verbose:
                 print("searched for '011', found: ", rec)
 
-            rec = c.set_range(b'011',dlen=0,doff=0)
+            rec = c.set_range('011',dlen=0,doff=0)
             if verbose:
                 print("searched (partial) for '011', found: ", rec)
-            if rec[1] != b'': self.fail('expected empty data portion')
+            if rec[1] != '': self.fail('expected empty data portion')
 
-            ev = c.set_range(b'empty value')
+            ev = c.set_range('empty value')
             if verbose:
                 print("search for 'empty value' returned", ev)
-            if ev[1] != b'': self.fail('empty value lookup failed')
+            if ev[1] != '': self.fail('empty value lookup failed')
 
-        c.set(b'0499')
+        c.set('0499')
         c.delete()
         try:
             rec = c.current()
         except db.DBKeyEmptyError as val:
             if get_raises_error:
-                assert val.args[0] == db.DB_KEYEMPTY
+                import sys
+                if sys.version_info[0] < 3 :
+                    self.assertEqual(val[0], db.DB_KEYEMPTY)
+                else :
+                    self.assertEqual(val.args[0], db.DB_KEYEMPTY)
                 if verbose: print(val)
             else:
                 self.fail("unexpected DBKeyEmptyError")
@@ -412,16 +416,16 @@
             if get_raises_error:
                 self.fail('DBKeyEmptyError exception expected')
 
-        c.next()
+        next(c)
         c2 = c.dup(db.DB_POSITION)
-        assert c.current() == c2.current()
+        self.assertEqual(c.current(), c2.current())
 
-        c2.put(b'', b'a new value', db.DB_CURRENT)
-        assert c.current() == c2.current()
-        assert c.current()[1] == b'a new value'
+        c2.put('', 'a new value', db.DB_CURRENT)
+        self.assertEqual(c.current(), c2.current())
+        self.assertEqual(c.current()[1], 'a new value')
 
-        c2.put(b'', b'er', db.DB_CURRENT, dlen=0, doff=5)
-        assert c2.current()[1] == b'a newer value'
+        c2.put('', 'er', db.DB_CURRENT, dlen=0, doff=5)
+        self.assertEqual(c2.current()[1], 'a newer value')
 
         c.close()
         c2.close()
@@ -441,7 +445,7 @@
             'put':('', 'spam', db.DB_CURRENT),
             'set': ("0505",),
         }
-        for method, args in methods_to_test.items():
+        for method, args in list(methods_to_test.items()):
             try:
                 if verbose:
                     print("attempting to use a closed cursor's %s method" % \
@@ -449,7 +453,11 @@
                 # a bug may cause a NULL pointer dereference...
                 getattr(c, method)(*args)
             except db.DBError as val:
-                assert val.args[0] == 0
+                import sys
+                if sys.version_info[0] < 3 :
+                    self.assertEqual(val[0], 0)
+                else :
+                    self.assertEqual(val.args[0], 0)
                 if verbose: print(val)
             else:
                 self.fail("no exception raised when using a buggy cursor's"
@@ -474,7 +482,7 @@
                   self.__class__.__name__)
 
         old = self.d.set_get_returns_none(0)
-        assert old == 2
+        self.assertEqual(old, 2)
         self.test03_SimpleCursorStuff(get_raises_error=1, set_raises_error=1)
 
     def test03b_SimpleCursorWithGetReturnsNone1(self):
@@ -496,9 +504,9 @@
                   self.__class__.__name__)
 
         old = self.d.set_get_returns_none(1)
-        assert old == 2
+        self.assertEqual(old, 2)
         old = self.d.set_get_returns_none(2)
-        assert old == 1
+        self.assertEqual(old, 1)
         self.test03_SimpleCursorStuff(get_raises_error=0, set_raises_error=0)
 
     #----------------------------------------
@@ -510,26 +518,27 @@
             print("Running %s.test04_PartialGetAndPut..." % \
                   self.__class__.__name__)
 
-        key = b"partialTest"
-        data = b"1" * 1000 + b"2" * 1000
+        key = "partialTest"
+        data = "1" * 1000 + "2" * 1000
         d.put(key, data)
-        assert d.get(key) == data
-        assert d.get(key, dlen=20, doff=990) == (b"1" * 10) + (b"2" * 10)
+        self.assertEqual(d.get(key), data)
+        self.assertEqual(d.get(key, dlen=20, doff=990),
+                ("1" * 10) + ("2" * 10))
 
-        d.put(b"partialtest2", (b"1" * 30000) + b"robin" )
-        assert d.get(b"partialtest2", dlen=5, doff=30000) == b"robin"
+        d.put("partialtest2", ("1" * 30000) + "robin" )
+        self.assertEqual(d.get("partialtest2", dlen=5, doff=30000), "robin")
 
         # There seems to be a bug in DB here...  Commented out the test for
         # now.
-        ##assert d.get("partialtest2", dlen=5, doff=30010) == ""
+        ##self.assertEqual(d.get("partialtest2", dlen=5, doff=30010), "")
 
         if self.dbsetflags != db.DB_DUP:
             # Partial put with duplicate records requires a cursor
-            d.put(key, b"0000", dlen=2000, doff=0)
-            assert d.get(key) == b"0000"
+            d.put(key, "0000", dlen=2000, doff=0)
+            self.assertEqual(d.get(key), "0000")
 
-            d.put(key, b"1111", dlen=1, doff=2)
-            assert d.get(key) == b"0011110"
+            d.put(key, "1111", dlen=1, doff=2)
+            self.assertEqual(d.get(key), "0011110")
 
     #----------------------------------------
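
The dlen/doff keywords exercised above select a byte range of the stored value: get(key, dlen=20, doff=990) returns the 20 bytes starting at offset 990, and put(key, "0000", dlen=2000, doff=0) replaces the 2000 bytes at offset 0 with the four supplied characters. The results asserted above can be restated as plain slicing:

    key = "partialTest"
    data = "1" * 1000 + "2" * 1000

    # partial get: 20 bytes starting at offset 990
    assert data[990:990 + 20] == ("1" * 10) + ("2" * 10)

    # partial puts: 2000 bytes at offset 0 become "0000", then 1 byte at
    # offset 2 is replaced by "1111", giving "0011110"
    after_first_put = "0000"
    after_second_put = after_first_put[:2] + "1111" + after_first_put[2 + 1:]
    assert after_second_put == "0011110"
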
 
@@ -540,30 +549,27 @@
             print("Running %s.test05_GetSize..." % self.__class__.__name__)
 
         for i in range(1, 50000, 500):
-            key = ("size%s" % i).encode("utf-8")
+            key = "size%s" % i
             #print "before ", i,
-            d.put(key, b"1" * i)
+            d.put(key, "1" * i)
             #print "after",
-            assert d.get_size(key) == i
+            self.assertEqual(d.get_size(key), i)
             #print "done"
 
     #----------------------------------------
 
     def test06_Truncate(self):
-        if db.version() < (3,3):
-            # truncate is a feature of BerkeleyDB 3.3 and above
-            return
-
         d = self.d
         if verbose:
             print('\n', '-=' * 30)
             print("Running %s.test99_Truncate..." % self.__class__.__name__)
 
-        d.put(b"abcde", b"ABCDE");
+        d.put("abcde", "ABCDE");
         num = d.truncate()
-        assert num >= 1, "truncate returned <= 0 on non-empty database"
+        self.assert_(num >= 1, "truncate returned <= 0 on non-empty database")
         num = d.truncate()
-        assert num == 0, "truncate on empty DB returned nonzero (%r)" % (num,)
+        self.assertEqual(num, 0,
+                "truncate on empty DB returned nonzero (%r)" % (num,))
 
     #----------------------------------------
 
@@ -628,6 +634,11 @@
 #----------------------------------------------------------------------
 
 class BasicTransactionTestCase(BasicTestCase):
+    import sys
+    if sys.version_info[:3] < (2, 4, 0):
+        def assertTrue(self, expr, msg=None):
+            self.failUnless(expr,msg=msg)
+
     dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
     useEnv = 1
     envflags = (db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
@@ -653,19 +664,21 @@
             print('\n', '-=' * 30)
             print("Running %s.test06_Transactions..." % self.__class__.__name__)
 
-        assert d.get(b'new rec', txn=self.txn) == None
-        d.put(b'new rec', b'this is a new record', self.txn)
-        assert d.get(b'new rec', txn=self.txn) == b'this is a new record'
+        self.assertEqual(d.get('new rec', txn=self.txn), None)
+        d.put('new rec', 'this is a new record', self.txn)
+        self.assertEqual(d.get('new rec', txn=self.txn),
+                'this is a new record')
         self.txn.abort()
-        assert d.get(b'new rec') == None
+        self.assertEqual(d.get('new rec'), None)
 
         self.txn = self.env.txn_begin()
 
-        assert d.get(b'new rec', txn=self.txn) == None
-        d.put(b'new rec', b'this is a new record', self.txn)
-        assert d.get(b'new rec', txn=self.txn) == b'this is a new record'
+        self.assertEqual(d.get('new rec', txn=self.txn), None)
+        d.put('new rec', 'this is a new record', self.txn)
+        self.assertEqual(d.get('new rec', txn=self.txn),
+                'this is a new record')
         self.txn.commit()
-        assert d.get(b'new rec') == b'this is a new record'
+        self.assertEqual(d.get('new rec'), 'this is a new record')
 
         self.txn = self.env.txn_begin()
         c = d.cursor(self.txn)
@@ -675,8 +688,8 @@
             count = count + 1
             if verbose and count % 100 == 0:
                 print(rec)
-            rec = c.next()
-        assert count == self._numKeys+1
+            rec = next(c)
+        self.assertEqual(count, self._numKeys+1)
 
         c.close()                # Cursors *MUST* be closed before commit!
         self.txn.commit()
@@ -687,43 +700,39 @@
         except db.DBIncompleteError:
             pass
 
-        if db.version() >= (4,0):
-            statDict = self.env.log_stat(0);
-            assert 'magic' in statDict
-            assert 'version' in statDict
-            assert 'cur_file' in statDict
-            assert 'region_nowait' in statDict
+        statDict = self.env.log_stat(0);
+        self.assert_('magic' in statDict)
+        self.assert_('version' in statDict)
+        self.assert_('cur_file' in statDict)
+        self.assert_('region_nowait' in statDict)
 
         # must have at least one log file present:
         logs = self.env.log_archive(db.DB_ARCH_ABS | db.DB_ARCH_LOG)
-        assert logs != None
+        self.assertNotEqual(logs, None)
         for log in logs:
             if verbose:
                 print('log file: ' + log)
         if db.version() >= (4,2):
             logs = self.env.log_archive(db.DB_ARCH_REMOVE)
-            assert not logs
+            self.assertTrue(not logs)
 
         self.txn = self.env.txn_begin()
 
     #----------------------------------------
 
     def test07_TxnTruncate(self):
-        if db.version() < (3,3):
-            # truncate is a feature of BerkeleyDB 3.3 and above
-            return
-
         d = self.d
         if verbose:
             print('\n', '-=' * 30)
             print("Running %s.test07_TxnTruncate..." % self.__class__.__name__)
 
-        d.put(b"abcde", b"ABCDE");
+        d.put("abcde", "ABCDE");
         txn = self.env.txn_begin()
         num = d.truncate(txn)
-        assert num >= 1, "truncate returned <= 0 on non-empty database"
+        self.assert_(num >= 1, "truncate returned <= 0 on non-empty database")
         num = d.truncate(txn)
-        assert num == 0, "truncate on empty DB returned nonzero (%r)" % (num,)
+        self.assertEqual(num, 0,
+                "truncate on empty DB returned nonzero (%r)" % (num,))
         txn.commit()
 
     #----------------------------------------
@@ -769,20 +778,20 @@
             print("Running %s.test07_RecnoInBTree..." % self.__class__.__name__)
 
         rec = d.get(200)
-        assert type(rec) == type(())
-        assert len(rec) == 2
+        self.assertEqual(type(rec), type(()))
+        self.assertEqual(len(rec), 2)
         if verbose:
             print("Record #200 is ", rec)
 
         c = d.cursor()
-        c.set(b'0200')
+        c.set('0200')
         num = c.get_recno()
-        assert type(num) == type(1)
+        self.assertEqual(type(num), type(1))
         if verbose:
             print("recno of d['0200'] is ", num)
 
         rec = c.current()
-        assert c.set_recno(num) == rec
+        self.assertEqual(c.set_recno(num), rec)
 
         c.close()
 
@@ -803,40 +812,39 @@
             print("Running %s.test08_DuplicateKeys..." % \
                   self.__class__.__name__)
 
-        d.put(b"dup0", b"before")
+        d.put("dup0", "before")
         for x in "The quick brown fox jumped over the lazy dog.".split():
-            x = x.encode("ascii")
-            d.put(b"dup1", x)
-        d.put(b"dup2", b"after")
+            d.put("dup1", x)
+        d.put("dup2", "after")
 
-        data = d.get(b"dup1")
-        assert data == b"The"
+        data = d.get("dup1")
+        self.assertEqual(data, "The")
         if verbose:
             print(data)
 
         c = d.cursor()
-        rec = c.set(b"dup1")
-        assert rec == (b'dup1', b'The')
+        rec = c.set("dup1")
+        self.assertEqual(rec, ('dup1', 'The'))
 
-        next = c.next()
-        assert next == (b'dup1', b'quick')
+        next_reg = next(c)
+        self.assertEqual(next_reg, ('dup1', 'quick'))
 
-        rec = c.set(b"dup1")
+        rec = c.set("dup1")
         count = c.count()
-        assert count == 9
+        self.assertEqual(count, 9)
 
         next_dup = c.next_dup()
-        assert next_dup == (b'dup1', b'quick')
+        self.assertEqual(next_dup, ('dup1', 'quick'))
 
-        rec = c.set(b'dup1')
+        rec = c.set('dup1')
         while rec is not None:
             if verbose:
                 print(rec)
             rec = c.next_dup()
 
-        c.set(b'dup1')
+        c.set('dup1')
         rec = c.next_nodup()
-        assert rec[0] != b'dup1'
+        self.assertNotEqual(rec[0], 'dup1')
         if verbose:
             print(rec)
 
@@ -884,11 +892,9 @@
                 self.dbopenflags|db.DB_CREATE)
 
         for x in "The quick brown fox jumped over the lazy dog".split():
-            x = x.encode("ascii")
             d2.put(x, self.makeData(x))
 
-        for x in letters:
-            x = x.encode("ascii")
+        for x in string.letters:
             d3.put(x, x*70)
 
         d1.sync()
@@ -917,8 +923,8 @@
             count = count + 1
             if verbose and (count % 50) == 0:
                 print(rec)
-            rec = c1.next()
-        assert count == self._numKeys
+            rec = next(c1)
+        self.assertEqual(count, self._numKeys)
 
         count = 0
         rec = c2.first()
@@ -926,8 +932,8 @@
             count = count + 1
             if verbose:
                 print(rec)
-            rec = c2.next()
-        assert count == 9
+            rec = next(c2)
+        self.assertEqual(count, 9)
 
         count = 0
         rec = c3.first()
@@ -935,15 +941,14 @@
             count = count + 1
             if verbose:
                 print(rec)
-            rec = c3.next()
-        assert count == 52
+            rec = next(c3)
+        self.assertEqual(count, len(string.letters))
 
 
         c1.close()
         c2.close()
         c3.close()
 
-        d1.close()
         d2.close()
         d3.close()
 
@@ -965,6 +970,55 @@
     envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
 
 
+class PrivateObject(unittest.TestCase) :
+    import sys
+    if sys.version_info[:3] < (2, 4, 0):
+        def assertTrue(self, expr, msg=None):
+            self.failUnless(expr,msg=msg)
+
+    def tearDown(self) :
+        del self.obj
+
+    def test01_DefaultIsNone(self) :
+        self.assertEqual(self.obj.get_private(), None)
+
+    def test02_assignment(self) :
+        a = "example of private object"
+        self.obj.set_private(a)
+        b = self.obj.get_private()
+        self.assertTrue(a is b)  # Object identity
+
+    def test03_leak_assignment(self) :
+        import sys
+        a = "example of private object"
+        refcount = sys.getrefcount(a)
+        self.obj.set_private(a)
+        self.assertEqual(refcount+1, sys.getrefcount(a))
+        self.obj.set_private(None)
+        self.assertEqual(refcount, sys.getrefcount(a))
+
+    def test04_leak_GC(self) :
+        import sys
+        a = "example of private object"
+        refcount = sys.getrefcount(a)
+        self.obj.set_private(a)
+        self.obj = None
+        self.assertEqual(refcount, sys.getrefcount(a))
+
+class DBEnvPrivateObject(PrivateObject) :
+    def setUp(self) :
+        self.obj = db.DBEnv()
+
+class DBPrivateObject(PrivateObject) :
+    def setUp(self) :
+        self.obj = db.DB()
+
+class CrashAndBurn(unittest.TestCase) :
+    def test01_OpenCrash(self) :
+        # See http://bugs.python.org/issue3307
+        self.assertRaises(db.DBInvalidArgError, db.DB, None, 65535)
+
+
 #----------------------------------------------------------------------
 #----------------------------------------------------------------------
 
@@ -988,6 +1042,9 @@
     suite.addTest(unittest.makeSuite(HashDUPWithThreadTestCase))
     suite.addTest(unittest.makeSuite(BTreeMultiDBTestCase))
     suite.addTest(unittest.makeSuite(HashMultiDBTestCase))
+    suite.addTest(unittest.makeSuite(DBEnvPrivateObject))
+    suite.addTest(unittest.makeSuite(DBPrivateObject))
+    #suite.addTest(unittest.makeSuite(CrashAndBurn))
 
     return suite
 

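A note for readers following the new PrivateObject tests above: the leak checks lean on CPython reference counting via sys.getrefcount(). Below is a minimal standalone sketch of the same pattern; the _Holder class is hypothetical, only so the sketch runs without bsddb, and the check is CPython-specific.

    import sys
    import unittest

    class _Holder(object):
        """Hypothetical stand-in for db.DB/db.DBEnv and their private slot."""
        def __init__(self):
            self._private = None
        def set_private(self, obj):
            self._private = obj
        def get_private(self):
            return self._private

    class PrivateObjectLeakSketch(unittest.TestCase):
        def test_no_reference_leak(self):
            holder = _Holder()
            payload = object()                      # unique object, never interned
            baseline = sys.getrefcount(payload)
            holder.set_private(payload)             # holder now owns one extra reference
            self.assertEqual(sys.getrefcount(payload), baseline + 1)
            holder.set_private(None)                # releasing it must drop that reference
            self.assertEqual(sys.getrefcount(payload), baseline)

    if __name__ == '__main__':
        unittest.main()

The real tests exercise the same invariant against the C-level set_private()/get_private() slots of DB and DBEnv objects.
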
Modified: python/branches/py3k/Lib/bsddb/test/test_compare.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_compare.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_compare.py	Sun Aug 31 16:12:11 2008
@@ -2,55 +2,51 @@
 TestCases for python DB Btree key comparison function.
 """
 
-import shutil
 import sys, os, re
-from io import StringIO
-import tempfile
 from . import test_all
+from io import StringIO
 
 import unittest
-try:
-    # For Pythons w/distutils pybsddb
-    from bsddb3 import db, dbshelve
-except ImportError:
-    # For Python 2.3
-    from bsddb import db, dbshelve
-
-try:
-    from bsddb3 import test_support
-except ImportError:
-    from test import support as test_support
+
+from .test_all import db, dbshelve, test_support, \
+        get_new_environment_path, get_new_database_path
+
 
 lexical_cmp = cmp
 
 def lowercase_cmp(left, right):
-    return cmp (str(left, encoding='ascii').lower(),
-                str(right, encoding='ascii').lower())
+    return cmp (left.lower(), right.lower())
 
 def make_reverse_comparator (cmp):
     def reverse (left, right, delegate=cmp):
         return - delegate (left, right)
     return reverse
 
-_expected_lexical_test_data = [s.encode('ascii') for s in
-        ('', 'CCCP', 'a', 'aaa', 'b', 'c', 'cccce', 'ccccf')]
-_expected_lowercase_test_data = [s.encode('ascii') for s in
-        ('', 'a', 'aaa', 'b', 'c', 'CC', 'cccce', 'ccccf', 'CCCP')]
-
-
-def CmpToKey(mycmp):
-    'Convert a cmp= function into a key= function'
-    class K(object):
-        def __init__(self, obj, *args):
-            self.obj = obj
-        def __lt__(self, other):
-            return mycmp(self.obj, other.obj) == -1
-    return K
+_expected_lexical_test_data = ['', 'CCCP', 'a', 'aaa', 'b', 'c', 'cccce', 'ccccf']
+_expected_lowercase_test_data = ['', 'a', 'aaa', 'b', 'c', 'CC', 'cccce', 'ccccf', 'CCCP']
 
 class ComparatorTests (unittest.TestCase):
     def comparator_test_helper (self, comparator, expected_data):
         data = expected_data[:]
-        data.sort (key=CmpToKey(comparator))
+
+        import sys
+        if sys.version_info[0] < 3 :
+            if sys.version_info[:3] < (2, 4, 0):
+                data.sort(comparator)
+            else :
+                data.sort(cmp=comparator)
+        else :  # Insertion Sort. Please, improve
+            data2 = []
+            for i in data :
+                for j, k in enumerate(data2) :
+                    r = comparator(k, i)
+                    if r == 1 :
+                        data2.insert(j, i)
+                        break
+                else :
+                    data2.append(i)
+            data = data2
+
         self.failUnless (data == expected_data,
                          "comparator `%s' is not right: %s vs. %s"
                          % (comparator, expected_data, data))
@@ -71,30 +67,24 @@
 
     def setUp (self):
         self.filename = self.__class__.__name__ + '.db'
-        homeDir = os.path.join (tempfile.gettempdir(), 'db_home%d'%os.getpid())
-        self.homeDir = homeDir
-        try:
-            os.mkdir (homeDir)
-        except os.error:
-            pass
-
-        env = db.DBEnv ()
+        self.homeDir = get_new_environment_path()
+        env = db.DBEnv()
         env.open (self.homeDir,
                   db.DB_CREATE | db.DB_INIT_MPOOL
                   | db.DB_INIT_LOCK | db.DB_THREAD)
         self.env = env
 
     def tearDown (self):
-        self.closeDB ()
+        self.closeDB()
         if self.env is not None:
-            self.env.close ()
+            self.env.close()
             self.env = None
         test_support.rmtree(self.homeDir)
 
     def addDataToDB (self, data):
         i = 0
         for item in data:
-            self.db.put (item, str(i).encode("ascii"))
+            self.db.put (item, str (i))
             i = i + 1
 
     def createDB (self, key_comparator):
@@ -128,10 +118,10 @@
                 self.failUnless (index < len (expected),
                                  "to many values returned from cursor")
                 self.failUnless (expected[index] == key,
-                                 "expected value %r at %d but got %r"
+                                 "expected value `%s' at %d but got `%s'"
                                  % (expected[index], index, key))
                 index = index + 1
-                rec = curs.next ()
+                rec = next(curs)
             self.failUnless (index == len (expected),
                              "not enough values returned from cursor")
         finally:
@@ -158,10 +148,10 @@
         def socialist_comparator (l, r):
             return 0
         self.createDB (socialist_comparator)
-        self.addDataToDB ([b'b', b'a', b'd'])
+        self.addDataToDB (['b', 'a', 'd'])
         # all things being equal the first key will be the only key
         # in the database...  (with the last key's value fwiw)
-        self.finishTest ([b'b'])
+        self.finishTest (['b'])
 
 
 class BtreeExceptionsTestCase (AbstractBtreeKeyCompareTestCase):
@@ -200,9 +190,9 @@
         finally:
             temp = sys.stderr
             sys.stderr = stdErr
-        errorOut = temp.getvalue()
-        if not successRe.search(errorOut):
-            self.fail("unexpected stderr output: %r" % errorOut)
+            errorOut = temp.getvalue()
+            if not successRe.search(errorOut):
+                self.fail("unexpected stderr output:\n"+errorOut)
 
     def _test_compare_function_exception (self):
         self.startTest ()
@@ -213,7 +203,7 @@
             raise RuntimeError("i'm a naughty comparison function")
         self.createDB (bad_comparator)
         #print "\n*** test should print 2 uncatchable tracebacks ***"
-        self.addDataToDB ([b'a', b'b', b'c'])  # this should raise, but...
+        self.addDataToDB (['a', 'b', 'c'])  # this should raise, but...
         self.finishTest ()
 
     def test_compare_function_exception(self):
@@ -231,7 +221,7 @@
             return l
         self.createDB (bad_comparator)
         #print "\n*** test should print 2 errors about returning an int ***"
-        self.addDataToDB ([b'a', b'b', b'c'])  # this should raise, but...
+        self.addDataToDB (['a', 'b', 'c'])  # this should raise, but...
         self.finishTest ()
 
     def test_compare_function_bad_return(self):
@@ -250,7 +240,7 @@
         self.createDB (my_compare)
         try:
             self.db.set_bt_compare (my_compare)
-            assert False, "this set should fail"
+            self.assert_(0, "this set should fail")
 
         except RuntimeError as msg:
             pass
@@ -259,10 +249,9 @@
     res = unittest.TestSuite ()
 
     res.addTest (unittest.makeSuite (ComparatorTests))
-    if db.version () >= (3, 3, 11):
-        res.addTest (unittest.makeSuite (BtreeExceptionsTestCase))
-        res.addTest (unittest.makeSuite (BtreeKeyCompareTestCase))
+    res.addTest (unittest.makeSuite (BtreeExceptionsTestCase))
+    res.addTest (unittest.makeSuite (BtreeKeyCompareTestCase))
     return res
 
 if __name__ == '__main__':
-    unittest.main (defaultTest = 'test_suite')
+    unittest.main (defaultTest = 'suite')

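The insertion-sort fallback above works around the removal of list.sort(cmp=...) in Python 3.0. The same cmp-style comparators can instead be adapted to a key= callable; here is a short sketch of that approach (essentially what the removed CmpToKey helper did, and what functools.cmp_to_key offers in later Pythons). The cmp_to_key and lexical_cmp names below are local stand-ins, not part of the test module.

    def cmp_to_key(comparator):
        """Wrap a cmp-style comparator as a key= callable."""
        class K(object):
            def __init__(self, obj):
                self.obj = obj
            def __lt__(self, other):
                return comparator(self.obj, other.obj) < 0
        return K

    def lexical_cmp(left, right):            # stand-in for the module's comparator
        return (left > right) - (left < right)

    data = ['b', 'CCCP', 'a', 'ccccf', 'aaa', 'c', 'cccce', '']
    data.sort(key=cmp_to_key(lexical_cmp))
    assert data == ['', 'CCCP', 'a', 'aaa', 'b', 'c', 'cccce', 'ccccf']
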
Modified: python/branches/py3k/Lib/bsddb/test/test_compat.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_compat.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_compat.py	Sun Aug 31 16:12:11 2008
@@ -3,18 +3,16 @@
 regression test suite.
 """
 
-import sys, os
+import os, string
 import unittest
-import tempfile
 
-from bsddb.test.test_all import verbose
-
-from bsddb import db, hashopen, btopen, rnopen
+from .test_all import db, hashopen, btopen, rnopen, verbose, \
+        get_new_database_path
 
 
 class CompatibilityTestCase(unittest.TestCase):
     def setUp(self):
-        self.filename = tempfile.mktemp()
+        self.filename = get_new_database_path()
 
     def tearDown(self):
         try:
@@ -36,31 +34,31 @@
 
         f = rnopen(self.filename, 'c')
         for x in range(len(data)):
-            f[x+1] = data[x].encode("ascii")
+            f[x+1] = data[x]
 
         getTest = (f[1], f[2], f[3])
         if verbose:
             print('%s %s %s' % getTest)
 
-        assert getTest[1] == b'quick', 'data mismatch!'
+        self.assertEqual(getTest[1], 'quick', 'data mismatch!')
 
         rv = f.set_location(3)
-        if rv != (3, b'brown'):
+        if rv != (3, 'brown'):
             self.fail('recno database set_location failed: '+repr(rv))
 
-        f[25] = b'twenty-five'
+        f[25] = 'twenty-five'
         f.close()
         del f
 
         f = rnopen(self.filename, 'w')
-        f[20] = b'twenty'
+        f[20] = 'twenty'
 
         def noRec(f):
             rec = f[15]
         self.assertRaises(KeyError, noRec, f)
 
         def badKey(f):
-            rec = f[b'a string']
+            rec = f['a string']
         self.assertRaises(TypeError, badKey, f)
 
         del f[3]
@@ -70,7 +68,7 @@
             if verbose:
                 print(rec)
             try:
-                rec = f.next()
+                rec = next(f)
             except KeyError:
                 break
 
@@ -96,42 +94,42 @@
         else:
             if verbose: print("truth test: false")
 
-        f[b'0'] = b''
-        f[b'a'] = b'Guido'
-        f[b'b'] = b'van'
-        f[b'c'] = b'Rossum'
-        f[b'd'] = b'invented'
+        f['0'] = ''
+        f['a'] = 'Guido'
+        f['b'] = 'van'
+        f['c'] = 'Rossum'
+        f['d'] = 'invented'
         # 'e' intentionally left out
-        f[b'f'] = b'Python'
+        f['f'] = 'Python'
         if verbose:
             print('%s %s %s' % (f['a'], f['b'], f['c']))
 
         if verbose:
             print('key ordering...')
         start = f.set_location(f.first()[0])
-        if start != (b'0', b''):
+        if start != ('0', ''):
             self.fail("incorrect first() result: "+repr(start))
         while 1:
             try:
-                rec = f.next()
+                rec = next(f)
             except KeyError:
-                assert rec == f.last(), 'Error, last != last!'
+                self.assertEqual(rec, f.last(), 'Error, last <> last!')
                 f.previous()
                 break
             if verbose:
                 print(rec)
 
-        assert f.has_key(b'f'), 'Error, missing key!'
+        self.assert_('f' in f, 'Error, missing key!')
 
         # test that set_location() returns the next nearest key, value
         # on btree databases and raises KeyError on others.
         if factory == btopen:
-            e = f.set_location(b'e')
-            if e != (b'f', b'Python'):
+            e = f.set_location('e')
+            if e != ('f', 'Python'):
                 self.fail('wrong key,value returned: '+repr(e))
         else:
             try:
-                e = f.set_location(b'e')
+                e = f.set_location('e')
             except KeyError:
                 pass
             else:
@@ -155,17 +153,17 @@
         if verbose:
             print('modification...')
         f = factory(self.filename, 'w')
-        f[b'd'] = b'discovered'
+        f['d'] = 'discovered'
 
         if verbose:
             print('access...')
-        for key in f.keys():
+        for key in list(f.keys()):
             word = f[key]
             if verbose:
                 print(word)
 
         def noRec(f):
-            rec = f[b'no such key']
+            rec = f['no such key']
         self.assertRaises(KeyError, noRec, f)
 
         def badKey(f):

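On the set_location() behaviour these compat tests pin down: a btree database answers a missing key with the next nearest key/value pair, while the other formats raise KeyError. A Python 2.6-flavoured sketch, assuming the legacy bsddb compat interface is importable:

    import os
    import tempfile
    from bsddb import btopen

    filename = tempfile.mktemp()
    try:
        f = btopen(filename, 'c')
        for key in ('a', 'b', 'd'):
            f[key] = key.upper()
        # 'c' is missing, so the btree hands back the next nearest pair
        assert f.set_location('c') == ('d', 'D')
        f.close()
    finally:
        os.unlink(filename)
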
Modified: python/branches/py3k/Lib/bsddb/test/test_cursor_pget_bug.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_cursor_pget_bug.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_cursor_pget_bug.py	Sun Aug 31 16:12:11 2008
@@ -1,16 +1,8 @@
 import unittest
-import tempfile
-import sys, os, glob
-import shutil
-import tempfile
-
-from bsddb import db
-
-try:
-    from bsddb3 import test_support
-except ImportError:
-    from test import support as test_support
+import os, glob
 
+from .test_all import db, test_support, get_new_environment_path, \
+        get_new_database_path
 
 #----------------------------------------------------------------------
 
@@ -19,11 +11,7 @@
     db_name = 'test-cursor_pget.db'
 
     def setUp(self):
-        self.homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
-        try:
-            os.mkdir(self.homeDir)
-        except os.error:
-            pass
+        self.homeDir = get_new_environment_path()
         self.env = db.DBEnv()
         self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
         self.primary_db = db.DB(self.env)
@@ -32,9 +20,9 @@
         self.secondary_db.set_flags(db.DB_DUP)
         self.secondary_db.open(self.db_name, 'secondary', db.DB_BTREE, db.DB_CREATE)
         self.primary_db.associate(self.secondary_db, lambda key, data: data)
-        self.primary_db.put(b'salad', b'eggs')
-        self.primary_db.put(b'spam', b'ham')
-        self.primary_db.put(b'omelet', b'eggs')
+        self.primary_db.put('salad', 'eggs')
+        self.primary_db.put('spam', 'ham')
+        self.primary_db.put('omelet', 'eggs')
 
 
     def tearDown(self):
@@ -49,11 +37,11 @@
     def test_pget(self):
         cursor = self.secondary_db.cursor()
 
-        self.assertEquals((b'eggs', b'salad', b'eggs'), cursor.pget(key=b'eggs', flags=db.DB_SET))
-        self.assertEquals((b'eggs', b'omelet', b'eggs'), cursor.pget(db.DB_NEXT_DUP))
+        self.assertEquals(('eggs', 'salad', 'eggs'), cursor.pget(key='eggs', flags=db.DB_SET))
+        self.assertEquals(('eggs', 'omelet', 'eggs'), cursor.pget(db.DB_NEXT_DUP))
         self.assertEquals(None, cursor.pget(db.DB_NEXT_DUP))
 
-        self.assertEquals((b'ham', b'spam', b'ham'), cursor.pget(b'ham', b'spam', flags=db.DB_SET))
+        self.assertEquals(('ham', 'spam', 'ham'), cursor.pget('ham', 'spam', flags=db.DB_SET))
         self.assertEquals(None, cursor.pget(db.DB_NEXT_DUP))
 
         cursor.close()

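For reference, the (secondary key, primary key, primary data) triples that pget() returns above come from the secondary-index association built in setUp(). A condensed, Python 2.6-flavoured sketch of the same fixture, assuming a working bsddb:

    import shutil
    import tempfile
    from bsddb import db

    homeDir = tempfile.mkdtemp()
    try:
        env = db.DBEnv()
        env.open(homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
        primary = db.DB(env)
        primary.open('test-pget.db', 'primary', db.DB_BTREE, db.DB_CREATE)
        secondary = db.DB(env)
        secondary.set_flags(db.DB_DUP)
        secondary.open('test-pget.db', 'secondary', db.DB_BTREE, db.DB_CREATE)
        # index primary records by their data; pget() then yields
        # (secondary key, primary key, primary data) triples
        primary.associate(secondary, lambda key, data: data)
        primary.put('salad', 'eggs')
        primary.put('omelet', 'eggs')
        cursor = secondary.cursor()
        assert cursor.pget(key='eggs', flags=db.DB_SET) == ('eggs', 'salad', 'eggs')
        assert cursor.pget(db.DB_NEXT_DUP) == ('eggs', 'omelet', 'eggs')
        cursor.close()
        secondary.close()
        primary.close()
        env.close()
    finally:
        shutil.rmtree(homeDir)
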
Modified: python/branches/py3k/Lib/bsddb/test/test_dbobj.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_dbobj.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_dbobj.py	Sun Aug 31 16:12:11 2008
@@ -1,21 +1,9 @@
 
-import shutil
-import sys, os
+import os, string
 import unittest
-import tempfile
-
-try:
-    # For Pythons w/distutils pybsddb
-    from bsddb3 import db, dbobj
-except ImportError:
-    # For Python 2.3
-    from bsddb import db, dbobj
-
-try:
-    from bsddb3 import test_support
-except ImportError:
-    from test import support as test_support
 
+from .test_all import db, dbobj, test_support, get_new_environment_path, \
+        get_new_database_path
 
 #----------------------------------------------------------------------
 
@@ -24,10 +12,7 @@
     db_name = 'test-dbobj.db'
 
     def setUp(self):
-        homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
-        self.homeDir = homeDir
-        try: os.mkdir(homeDir)
-        except os.error: pass
+        self.homeDir = get_new_environment_path()
 
     def tearDown(self):
         if hasattr(self, 'db'):
@@ -40,18 +25,18 @@
         class TestDBEnv(dbobj.DBEnv): pass
         class TestDB(dbobj.DB):
             def put(self, key, *args, **kwargs):
-                key = key.decode("ascii").upper().encode("ascii")
+                key = key.upper()
                 # call our parent classes put method with an upper case key
-                return dbobj.DB.put(self, key, *args, **kwargs)
+                return dbobj.DB.put(*(self, key) + args, **kwargs)
         self.env = TestDBEnv()
         self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
         self.db = TestDB(self.env)
         self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE)
-        self.db.put(b'spam', b'eggs')
-        assert self.db.get(b'spam') == None, \
-               "overridden dbobj.DB.put() method failed [1]"
-        assert self.db.get(b'SPAM') == b'eggs', \
-               "overridden dbobj.DB.put() method failed [2]"
+        self.db.put('spam', 'eggs')
+        self.assertEqual(self.db.get('spam'), None,
+               "overridden dbobj.DB.put() method failed [1]")
+        self.assertEqual(self.db.get('SPAM'), 'eggs',
+               "overridden dbobj.DB.put() method failed [2]")
         self.db.close()
         self.env.close()
 
@@ -61,14 +46,14 @@
         self.db = dbobj.DB(self.env)
         self.db.open(self.db_name+'02', db.DB_HASH, db.DB_CREATE)
         # __setitem__
-        self.db[b'spam'] = b'eggs'
+        self.db['spam'] = 'eggs'
         # __len__
-        assert len(self.db) == 1
+        self.assertEqual(len(self.db), 1)
         # __getitem__
-        assert self.db[b'spam'] == b'eggs'
+        self.assertEqual(self.db['spam'], 'eggs')
         # __del__
-        del self.db[b'spam']
-        assert self.db.get(b'spam') == None, "dbobj __del__ failed"
+        del self.db['spam']
+        self.assertEqual(self.db.get('spam'), None, "dbobj __del__ failed")
         self.db.close()
         self.env.close()
 

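A side note on the put() override in the hunk above: dbobj.DB.put(*(self, key) + args, **kwargs) is just the splatted form of the explicit dbobj.DB.put(self, key, *args, **kwargs); both forward the same arguments. A tiny pure-Python sketch of the equivalence (the Base/Child names are only illustrative):

    class Base(object):
        def put(self, key, value, flag=None):
            return (key, value, flag)

    class Child(Base):
        def put_explicit(self, key, *args, **kwargs):
            return Base.put(self, key.upper(), *args, **kwargs)
        def put_splatted(self, key, *args, **kwargs):
            # same call, with the leading positionals folded into the * tuple
            return Base.put(*(self, key.upper()) + args, **kwargs)

    c = Child()
    assert c.put_explicit('spam', 'eggs') == c.put_splatted('spam', 'eggs') == ('SPAM', 'eggs', None)
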
Modified: python/branches/py3k/Lib/bsddb/test/test_dbshelve.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_dbshelve.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_dbshelve.py	Sun Aug 31 16:12:11 2008
@@ -2,19 +2,14 @@
 TestCases for checking dbShelve objects.
 """
 
-import os
-import shutil
-import tempfile, random
+import os, string
+import random
 import unittest
 
-from bsddb import db, dbshelve
 
-try:
-    from bsddb3 import test_support
-except ImportError:
-    from test import support as test_support
+from .test_all import db, dbshelve, test_support, verbose, \
+        get_new_environment_path, get_new_database_path
 
-from bsddb.test.test_all import verbose
 
 
 #----------------------------------------------------------------------
@@ -22,43 +17,44 @@
 # We want the objects to be comparable so we can test dbshelve.values
 # later on.
 class DataClass:
-
     def __init__(self):
         self.value = random.random()
 
-    def __repr__(self):
-        return "DataClass(%r)" % self.value
+    def __repr__(self) :  # For Python 3.0 comparison
+        return "DataClass %f" %self.value
 
-    def __eq__(self, other):
-        value = self.value
-        if isinstance(other, DataClass):
-            other = other.value
-        return value == other
-
-    def __lt__(self, other):
-        value = self.value
-        if isinstance(other, DataClass):
-            other = other.value
-        return value < other
+    def __cmp__(self, other):  # For Python 2.x comparison
+        return cmp(self.value, other)
 
-letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
 
 class DBShelveTestCase(unittest.TestCase):
     def setUp(self):
-        self.filename = tempfile.mktemp()
+        import sys
+        if sys.version_info[0] >= 3 :
+            from .test_all import do_proxy_db_py3k
+            self._flag_proxy_db_py3k = do_proxy_db_py3k(False)
+        self.filename = get_new_database_path()
         self.do_open()
 
     def tearDown(self):
+        import sys
+        if sys.version_info[0] >= 3 :
+            from .test_all import do_proxy_db_py3k
+            do_proxy_db_py3k(self._flag_proxy_db_py3k)
         self.do_close()
         test_support.unlink(self.filename)
 
     def mk(self, key):
         """Turn key into an appropriate key type for this db"""
         # override in child class for RECNO
-        return key.encode("ascii")
+        import sys
+        if sys.version_info[0] < 3 :
+            return key
+        else :
+            return bytes(key, "iso8859-1")  # 8 bits
 
     def populateDB(self, d):
-        for x in letters:
+        for x in string.letters:
             d[self.mk('S' + x)] = 10 * x           # add a string
             d[self.mk('I' + x)] = ord(x)           # add an integer
             d[self.mk('L' + x)] = [x] * 10         # add a list
@@ -86,19 +82,13 @@
             print("Running %s.test01_basics..." % self.__class__.__name__)
 
         self.populateDB(self.d)
-        if verbose:
-            print(1, self.d.keys())
         self.d.sync()
-        if verbose:
-            print(2, self.d.keys())
         self.do_close()
         self.do_open()
-        if verbose:
-            print(3, self.d.keys())
         d = self.d
 
         l = len(d)
-        k = d.keys()
+        k = list(d.keys())
         s = d.stat()
         f = d.fd()
 
@@ -107,30 +97,37 @@
             print("keys:", k)
             print("stats:", s)
 
-        self.assertFalse(d.has_key(self.mk('bad key')))
-        self.assertTrue(d.has_key(self.mk('IA')), d.keys())
-        self.assertTrue(d.has_key(self.mk('OA')))
+        self.assertEqual(0, self.mk('bad key') in d)
+        self.assertEqual(1, self.mk('IA') in d)
+        self.assertEqual(1, self.mk('OA') in d)
 
         d.delete(self.mk('IA'))
         del d[self.mk('OA')]
-        self.assertFalse(d.has_key(self.mk('IA')))
-        self.assertFalse(d.has_key(self.mk('OA')))
+        self.assertEqual(0, self.mk('IA') in d)
+        self.assertEqual(0, self.mk('OA') in d)
         self.assertEqual(len(d), l-2)
 
         values = []
-        for key in d.keys():
+        for key in list(d.keys()):
             value = d[key]
             values.append(value)
             if verbose:
                 print("%s: %s" % (key, value))
             self.checkrec(key, value)
 
-        dbvalues = sorted(d.values(), key=lambda x: (str(type(x)), x))
-        self.assertEqual(len(dbvalues), len(d.keys()))
-        values.sort(key=lambda x: (str(type(x)), x))
-        self.assertEqual(values, dbvalues, "%r != %r" % (values, dbvalues))
+        dbvalues = list(d.values())
+        self.assertEqual(len(dbvalues), len(list(d.keys())))
+        import sys
+        if sys.version_info[0] < 3 :
+            values.sort()
+            dbvalues.sort()
+            self.assertEqual(values, dbvalues)
+        else :  # XXX: Convert all to strings. Please, improve
+            values.sort(key=lambda x : str(x))
+            dbvalues.sort(key=lambda x : str(x))
+            self.assertEqual(repr(values), repr(dbvalues))
 
-        items = d.items()
+        items = list(d.items())
         self.assertEqual(len(items), len(values))
 
         for key, value in items:
@@ -138,16 +135,16 @@
 
         self.assertEqual(d.get(self.mk('bad key')), None)
         self.assertEqual(d.get(self.mk('bad key'), None), None)
-        self.assertEqual(d.get(self.mk('bad key'), b'a string'), b'a string')
+        self.assertEqual(d.get(self.mk('bad key'), 'a string'), 'a string')
         self.assertEqual(d.get(self.mk('bad key'), [1, 2, 3]), [1, 2, 3])
 
         d.set_get_returns_none(0)
         self.assertRaises(db.DBNotFoundError, d.get, self.mk('bad key'))
         d.set_get_returns_none(1)
 
-        d.put(self.mk('new key'), b'new data')
-        self.assertEqual(d.get(self.mk('new key')), b'new data')
-        self.assertEqual(d[self.mk('new key')], b'new data')
+        d.put(self.mk('new key'), 'new data')
+        self.assertEqual(d.get(self.mk('new key')), 'new data')
+        self.assertEqual(d[self.mk('new key')], 'new data')
 
 
 
@@ -165,10 +162,11 @@
         while rec is not None:
             count = count + 1
             if verbose:
-                print(repr(rec))
+                print(rec)
             key, value = rec
             self.checkrec(key, value)
-            rec = c.next()
+            # Hack to avoid conversion by 2to3 tool
+            rec = getattr(c, "next")()
         del c
 
         self.assertEqual(count, len(d))
@@ -191,6 +189,7 @@
         self.checkrec(key, value)
         del c
 
+
     def test03_append(self):
         # NOTE: this is overridden in RECNO subclass, don't change its name.
         if verbose:
@@ -198,31 +197,44 @@
             print("Running %s.test03_append..." % self.__class__.__name__)
 
         self.assertRaises(dbshelve.DBShelveError,
-                          self.d.append, b'unit test was here')
+                          self.d.append, 'unit test was here')
 
 
     def checkrec(self, key, value):
         # override this in a subclass if the key type is different
-        x = key[1:]
-        if key[0:1] == b'S':
-            self.assertEquals(type(value), str)
-            self.assertEquals(value, 10 * x.decode("ascii"))
-
-        elif key[0:1] == b'I':
-            self.assertEquals(type(value), int)
-            self.assertEquals(value, ord(x))
-
-        elif key[0:1] == b'L':
-            self.assertEquals(type(value), list)
-            self.assertEquals(value, [x.decode("ascii")] * 10)
-
-        elif key[0:1] == b'O':
-            self.assertEquals(value.S, 10 * x.decode("ascii"))
-            self.assertEquals(value.I, ord(x))
-            self.assertEquals(value.L, [x.decode("ascii")] * 10)
+
+        import sys
+        if sys.version_info[0] >= 3 :
+            if isinstance(key, bytes) :
+                key = key.decode("iso8859-1")  # 8 bits
+
+        x = key[1]
+        if key[0] == 'S':
+            self.assertEqual(type(value), str)
+            self.assertEqual(value, 10 * x)
+
+        elif key[0] == 'I':
+            self.assertEqual(type(value), int)
+            self.assertEqual(value, ord(x))
+
+        elif key[0] == 'L':
+            self.assertEqual(type(value), list)
+            self.assertEqual(value, [x] * 10)
+
+        elif key[0] == 'O':
+            import sys
+            if sys.version_info[0] < 3 :
+                from types import InstanceType
+                self.assertEqual(type(value), InstanceType)
+            else :
+                self.assertEqual(type(value), DataClass)
+
+            self.assertEqual(value.S, 10 * x)
+            self.assertEqual(value.I, ord(x))
+            self.assertEqual(value.L, [x] * 10)
 
         else:
-            self.fail('Unknown key type, fix the test')
+            self.assert_(0, 'Unknown key type, fix the test')
 
 #----------------------------------------------------------------------
 
@@ -258,19 +270,12 @@
 #----------------------------------------------------------------------
 
 class BasicEnvShelveTestCase(DBShelveTestCase):
-    def setUp(self):
-        self.homeDir = tempfile.mkdtemp()
-        self.filename = 'dbshelve_db_file.db'
-        self.do_open()
-
     def do_open(self):
-        self.homeDir = homeDir = os.path.join(
-            tempfile.gettempdir(), 'db_home%d'%os.getpid())
-        try: os.mkdir(homeDir)
-        except os.error: pass
         self.env = db.DBEnv()
-        self.env.open(self.homeDir, self.envflags | db.DB_INIT_MPOOL | db.DB_CREATE)
+        self.env.open(self.homeDir,
+                self.envflags | db.DB_INIT_MPOOL | db.DB_CREATE)
 
+        self.filename = os.path.split(self.filename)[1]
         self.d = dbshelve.DBShelf(self.env)
         self.d.open(self.filename, self.dbtype, self.dbflags)
 
@@ -280,7 +285,15 @@
         self.env.close()
 
 
+    def setUp(self) :
+        self.homeDir = get_new_environment_path()
+        DBShelveTestCase.setUp(self)
+
     def tearDown(self):
+        import sys
+        if sys.version_info[0] >= 3 :
+            from .test_all import do_proxy_db_py3k
+            do_proxy_db_py3k(self._flag_proxy_db_py3k)
         self.do_close()
         test_support.rmtree(self.homeDir)
 
@@ -327,7 +340,7 @@
     def mk(self, key):
         if key not in self.key_map:
             self.key_map[key] = self.key_pool.pop(0)
-            self.intkey_map[self.key_map[key]] = key.encode('ascii')
+            self.intkey_map[self.key_map[key]] = key
         return self.key_map[key]
 
     def checkrec(self, intkey, value):
@@ -339,14 +352,14 @@
             print('\n', '-=' * 30)
             print("Running %s.test03_append..." % self.__class__.__name__)
 
-        self.d[1] = b'spam'
-        self.d[5] = b'eggs'
-        self.assertEqual(6, self.d.append(b'spam'))
-        self.assertEqual(7, self.d.append(b'baked beans'))
-        self.assertEqual(b'spam', self.d.get(6))
-        self.assertEqual(b'spam', self.d.get(1))
-        self.assertEqual(b'baked beans', self.d.get(7))
-        self.assertEqual(b'eggs', self.d.get(5))
+        self.d[1] = 'spam'
+        self.d[5] = 'eggs'
+        self.assertEqual(6, self.d.append('spam'))
+        self.assertEqual(7, self.d.append('baked beans'))
+        self.assertEqual('spam', self.d.get(6))
+        self.assertEqual('spam', self.d.get(1))
+        self.assertEqual('baked beans', self.d.get(7))
+        self.assertEqual('eggs', self.d.get(5))
 
 
 #----------------------------------------------------------------------

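As a quick orientation for the shelf behaviour tested above: dbshelve stores arbitrary picklable values under string keys, much like the standard shelve module but backed by Berkeley DB. A minimal Python 2.6-flavoured sketch, assuming the module-level dbshelve.open() convenience opener:

    import os
    import tempfile
    from bsddb import dbshelve

    filename = tempfile.mktemp()
    try:
        d = dbshelve.open(filename)        # hash database by default
        d['Sx'] = 'x' * 10                 # a string
        d['Ix'] = ord('x')                 # an integer
        d['Lx'] = ['x'] * 10               # a list, pickled transparently
        d.sync()
        assert d['Ix'] == 120 and d['Lx'] == ['x'] * 10
        d.close()
    finally:
        os.unlink(filename)
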
Modified: python/branches/py3k/Lib/bsddb/test/test_dbtables.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_dbtables.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_dbtables.py	Sun Aug 31 16:12:11 2008
@@ -20,25 +20,16 @@
 #
 # $Id$
 
-import sys, os, re
-import pickle
-import tempfile
-
-import unittest
-from bsddb.test.test_all import verbose
-
-try:
-    # For Pythons w/distutils pybsddb
-    from bsddb3 import db, dbtables
-except ImportError:
-    # For Python 2.3
-    from bsddb import db, dbtables
-
+import os, re
 try:
-    from bsddb3 import test_support
+    import pickle
+    pickle = pickle
 except ImportError:
-    from test import support as test_support
+    import pickle
 
+import unittest
+from .test_all import db, dbtables, test_support, verbose, \
+        get_new_environment_path, get_new_database_path
 
 #----------------------------------------------------------------------
 
@@ -46,16 +37,21 @@
     db_name = 'test-table.db'
 
     def setUp(self):
-        homeDir = tempfile.mkdtemp()
-        self.testHomeDir = homeDir
-        try: os.mkdir(homeDir)
-        except os.error: pass
+        import sys
+        if sys.version_info[0] >= 3 :
+            from .test_all import do_proxy_db_py3k
+            self._flag_proxy_db_py3k = do_proxy_db_py3k(False)
 
+        self.testHomeDir = get_new_environment_path()
         self.tdb = dbtables.bsdTableDB(
-            filename='tabletest.db', dbhome=homeDir, create=1)
+            filename='tabletest.db', dbhome=self.testHomeDir, create=1)
 
     def tearDown(self):
         self.tdb.close()
+        import sys
+        if sys.version_info[0] >= 3 :
+            from .test_all import do_proxy_db_py3k
+            do_proxy_db_py3k(self._flag_proxy_db_py3k)
         test_support.rmtree(self.testHomeDir)
 
     def test01(self):
@@ -66,21 +62,26 @@
         except dbtables.TableDBError:
             pass
         self.tdb.CreateTable(tabname, [colname])
-        try:
+        import sys
+        if sys.version_info[0] < 3 :
             self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159, 1)})
-        except Exception:
-            import traceback
-            traceback.print_exc()
+        else :
+            self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159,
+                1).decode("iso8859-1")})  # 8 bits
 
         if verbose:
             self.tdb._db_print()
 
         values = self.tdb.Select(
             tabname, [colname], conditions={colname: None})
-        values = list(values)
 
-        colval = pickle.loads(values[0][colname])
-        self.assertTrue(colval > 3.141 and colval < 3.142)
+        import sys
+        if sys.version_info[0] < 3 :
+            colval = pickle.loads(values[0][colname])
+        else :
+            colval = pickle.loads(bytes(values[0][colname], "iso8859-1"))
+        self.assert_(colval > 3.141)
+        self.assert_(colval < 3.142)
 
 
     def test02(self):
@@ -88,11 +89,23 @@
         col0 = 'coolness factor'
         col1 = 'but can it fly?'
         col2 = 'Species'
-        testinfo = [
-            {col0: pickle.dumps(8, 1), col1: b'no', col2: b'Penguin'},
-            {col0: pickle.dumps(-1, 1), col1: b'no', col2: b'Turkey'},
-            {col0: pickle.dumps(9, 1), col1: b'yes', col2: b'SR-71A Blackbird'}
-        ]
+
+        import sys
+        if sys.version_info[0] < 3 :
+            testinfo = [
+                {col0: pickle.dumps(8, 1), col1: 'no', col2: 'Penguin'},
+                {col0: pickle.dumps(-1, 1), col1: 'no', col2: 'Turkey'},
+                {col0: pickle.dumps(9, 1), col1: 'yes', col2: 'SR-71A Blackbird'}
+            ]
+        else :
+            testinfo = [
+                {col0: pickle.dumps(8, 1).decode("iso8859-1"),
+                    col1: 'no', col2: 'Penguin'},
+                {col0: pickle.dumps(-1, 1).decode("iso8859-1"),
+                    col1: 'no', col2: 'Turkey'},
+                {col0: pickle.dumps(9, 1).decode("iso8859-1"),
+                    col1: 'yes', col2: 'SR-71A Blackbird'}
+            ]
 
         try:
             self.tdb.Drop(tabname)
@@ -102,19 +115,24 @@
         for row in testinfo :
             self.tdb.Insert(tabname, row)
 
-        values = self.tdb.Select(tabname, [col2],
-            conditions={col0: lambda x: pickle.loads(x) >= 8})
-        values = list(values)
-
-        self.assertEquals(len(values), 2)
-        if values[0]['Species'] == b'Penguin' :
-            self.assertEquals(values[1]['Species'], b'SR-71A Blackbird')
-        elif values[0]['Species'] == b'SR-71A Blackbird' :
-            self.assertEquals(values[1]['Species'], b'Penguin')
+        import sys
+        if sys.version_info[0] < 3 :
+            values = self.tdb.Select(tabname, [col2],
+                conditions={col0: lambda x: pickle.loads(x) >= 8})
+        else :
+            values = self.tdb.Select(tabname, [col2],
+                conditions={col0: lambda x:
+                    pickle.loads(bytes(x, "iso8859-1")) >= 8})
+
+        self.assertEqual(len(values), 2)
+        if values[0]['Species'] == 'Penguin' :
+            self.assertEqual(values[1]['Species'], 'SR-71A Blackbird')
+        elif values[0]['Species'] == 'SR-71A Blackbird' :
+            self.assertEqual(values[1]['Species'], 'Penguin')
         else :
             if verbose:
                 print("values= %r" % (values,))
-            self.fail("Wrong values returned!")
+            raise RuntimeError("Wrong values returned!")
 
     def test03(self):
         tabname = "test03"
@@ -140,57 +158,55 @@
                             {'a': "",
                              'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
                              'f': "Zero"})
-            self.fail("exception not raised")
+            self.fail('Expected an exception')
         except dbtables.TableDBError:
             pass
 
         try:
             self.tdb.Select(tabname, [], conditions={'foo': '123'})
-            self.fail("exception not raised")
+            self.fail('Expected an exception')
         except dbtables.TableDBError:
             pass
 
         self.tdb.Insert(tabname,
-                        {'a': b'42',
-                         'b': b'bad',
-                         'c': b'meep',
-                         'e': b'Fuzzy wuzzy was a bear'})
+                        {'a': '42',
+                         'b': "bad",
+                         'c': "meep",
+                         'e': 'Fuzzy wuzzy was a bear'})
         self.tdb.Insert(tabname,
-                        {'a': b'581750',
-                         'b': b'good',
-                         'd': b'bla',
-                         'c': b'black',
-                         'e': b'fuzzy was here'})
+                        {'a': '581750',
+                         'b': "good",
+                         'd': "bla",
+                         'c': "black",
+                         'e': 'fuzzy was here'})
         self.tdb.Insert(tabname,
-                        {'a': b'800000',
-                         'b': b'good',
-                         'd': b'bla',
-                         'c': b'black',
-                         'e': b'Fuzzy wuzzy is a bear'})
+                        {'a': '800000',
+                         'b': "good",
+                         'd': "bla",
+                         'c': "black",
+                         'e': 'Fuzzy wuzzy is a bear'})
 
         if verbose:
             self.tdb._db_print()
 
         # this should return two rows
         values = self.tdb.Select(tabname, ['b', 'a', 'd'],
-            conditions={'e': re.compile(b'wuzzy').search,
-                        'a': re.compile(b'^[0-9]+$').match})
-        self.assertEquals(len(values), 2)
+            conditions={'e': re.compile('wuzzy').search,
+                        'a': re.compile('^[0-9]+$').match})
+        self.assertEqual(len(values), 2)
 
         # now lets delete one of them and try again
         self.tdb.Delete(tabname, conditions={'b': dbtables.ExactCond('good')})
         values = self.tdb.Select(
             tabname, ['a', 'd', 'b'],
             conditions={'e': dbtables.PrefixCond('Fuzzy')})
-        values = list(values)
-        self.assertEquals(len(values), 1)
-        self.assertEquals(values[0]['d'], None)
+        self.assertEqual(len(values), 1)
+        self.assertEqual(values[0]['d'], None)
 
         values = self.tdb.Select(tabname, ['b'],
-            conditions={'c': lambda c: c.decode("ascii") == 'meep'})
-        values = list(values)
-        self.assertEquals(len(values), 1)
-        self.assertEquals(values[0]['b'], b"bad")
+            conditions={'c': lambda c: c == 'meep'})
+        self.assertEqual(len(values), 1)
+        self.assertEqual(values[0]['b'], "bad")
 
 
     def test04_MultiCondSelect(self):
@@ -203,19 +219,19 @@
 
         try:
             self.tdb.Insert(tabname,
-                            {'a': b"",
+                            {'a': "",
                              'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
-                             'f': b"Zero"})
-            self.fail("exception not raised")
+                             'f': "Zero"})
+            self.fail('Expected an exception')
         except dbtables.TableDBError:
             pass
 
-        self.tdb.Insert(tabname, {'a': b"A", 'b': b"B", 'c': b"C",
-                                  'd': b"D", 'e': b"E"})
-        self.tdb.Insert(tabname, {'a': b"-A", 'b': b"-B", 'c': b"-C",
-                                  'd': b"-D", 'e': b"-E"})
-        self.tdb.Insert(tabname, {'a': b"A-", 'b': b"B-", 'c': b"C-",
-                                  'd': b"D-", 'e': b"E-"})
+        self.tdb.Insert(tabname, {'a': "A", 'b': "B", 'c': "C", 'd': "D",
+                                  'e': "E"})
+        self.tdb.Insert(tabname, {'a': "-A", 'b': "-B", 'c': "-C", 'd': "-D",
+                                  'e': "-E"})
+        self.tdb.Insert(tabname, {'a': "A-", 'b': "B-", 'c': "C-", 'd': "D-",
+                                  'e': "E-"})
 
         if verbose:
             self.tdb._db_print()
@@ -230,7 +246,7 @@
                         'a': dbtables.ExactCond('A'),
                         'd': dbtables.PrefixCond('-')
                        } )
-        self.assertEquals(len(values), 0, values)
+        self.assertEqual(len(values), 0, values)
 
 
     def test_CreateOrExtend(self):
@@ -240,9 +256,9 @@
             tabname, ['name', 'taste', 'filling', 'alcohol content', 'price'])
         try:
             self.tdb.Insert(tabname,
-                            {'taste': b'crap',
-                             'filling': b'no',
-                             'is it Guinness?': b'no'})
+                            {'taste': 'crap',
+                             'filling': 'no',
+                             'is it Guinness?': 'no'})
             self.fail("Insert should've failed due to bad column name")
         except:
             pass
@@ -250,11 +266,11 @@
                                      ['name', 'taste', 'is it Guinness?'])
 
         # these should both succeed as the table should contain the union of both sets of columns.
-        self.tdb.Insert(tabname, {'taste': b'crap', 'filling': b'no',
-                                  'is it Guinness?': b'no'})
-        self.tdb.Insert(tabname, {'taste': b'great', 'filling': b'yes',
-                                  'is it Guinness?': b'yes',
-                                  'name': b'Guinness'})
+        self.tdb.Insert(tabname, {'taste': 'crap', 'filling': 'no',
+                                  'is it Guinness?': 'no'})
+        self.tdb.Insert(tabname, {'taste': 'great', 'filling': 'yes',
+                                  'is it Guinness?': 'yes',
+                                  'name': 'Guinness'})
 
 
     def test_CondObjs(self):
@@ -262,33 +278,31 @@
 
         self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e', 'p'])
 
-        self.tdb.Insert(tabname, {'a': b"the letter A",
-                                  'b': b"the letter B",
-                                  'c': b"is for cookie"})
-        self.tdb.Insert(tabname, {'a': b"is for aardvark",
-                                  'e': b"the letter E",
-                                  'c': b"is for cookie",
-                                  'd': b"is for dog"})
-        self.tdb.Insert(tabname, {'a': b"the letter A",
-                                  'e': b"the letter E",
-                                  'c': b"is for cookie",
-                                  'p': b"is for Python"})
+        self.tdb.Insert(tabname, {'a': "the letter A",
+                                  'b': "the letter B",
+                                  'c': "is for cookie"})
+        self.tdb.Insert(tabname, {'a': "is for aardvark",
+                                  'e': "the letter E",
+                                  'c': "is for cookie",
+                                  'd': "is for dog"})
+        self.tdb.Insert(tabname, {'a': "the letter A",
+                                  'e': "the letter E",
+                                  'c': "is for cookie",
+                                  'p': "is for Python"})
 
         values = self.tdb.Select(
             tabname, ['p', 'e'],
             conditions={'e': dbtables.PrefixCond('the l')})
-        values = list(values)
-        self.assertEquals(len(values), 2)
-        self.assertEquals(values[0]['e'], values[1]['e'])
-        self.assertNotEquals(values[0]['p'], values[1]['p'])
+        self.assertEqual(len(values), 2, values)
+        self.assertEqual(values[0]['e'], values[1]['e'], values)
+        self.assertNotEqual(values[0]['p'], values[1]['p'], values)
 
         values = self.tdb.Select(
             tabname, ['d', 'a'],
             conditions={'a': dbtables.LikeCond('%aardvark%')})
-        values = list(values)
-        self.assertEquals(len(values), 1)
-        self.assertEquals(values[0]['d'], b"is for dog")
-        self.assertEquals(values[0]['a'], b"is for aardvark")
+        self.assertEqual(len(values), 1, values)
+        self.assertEqual(values[0]['d'], "is for dog", values)
+        self.assertEqual(values[0]['a'], "is for aardvark", values)
 
         values = self.tdb.Select(tabname, None,
                                  {'b': dbtables.Cond(),
@@ -297,10 +311,9 @@
                                   'd':dbtables.ExactCond('is for dog'),
                                   'c':dbtables.PrefixCond('is for'),
                                   'p':lambda s: not s})
-        values = list(values)
-        self.assertEquals(len(values), 1)
-        self.assertEquals(values[0]['d'], b"is for dog")
-        self.assertEquals(values[0]['a'], b"is for aardvark")
+        self.assertEqual(len(values), 1, values)
+        self.assertEqual(values[0]['d'], "is for dog", values)
+        self.assertEqual(values[0]['a'], "is for aardvark", values)
 
     def test_Delete(self):
         tabname = "test_Delete"
@@ -310,30 +323,30 @@
         # fail if it encountered any rows that did not have values in
         # every column.
         # Hunted and Squashed by <Donwulff> (Jukka Santala - donwulff at nic.fi)
-        self.tdb.Insert(tabname, {'x': b'X1', 'y':b'Y1'})
-        self.tdb.Insert(tabname, {'x': b'X2', 'y':b'Y2', 'z': b'Z2'})
+        self.tdb.Insert(tabname, {'x': 'X1', 'y':'Y1'})
+        self.tdb.Insert(tabname, {'x': 'X2', 'y':'Y2', 'z': 'Z2'})
 
         self.tdb.Delete(tabname, conditions={'x': dbtables.PrefixCond('X')})
         values = self.tdb.Select(tabname, ['y'],
                                  conditions={'x': dbtables.PrefixCond('X')})
-        self.assertEquals(len(values), 0)
+        self.assertEqual(len(values), 0)
 
     def test_Modify(self):
         tabname = "test_Modify"
         self.tdb.CreateTable(tabname, ['Name', 'Type', 'Access'])
 
-        self.tdb.Insert(tabname, {'Name': b'Index to MP3 files.doc',
-                                  'Type': b'Word', 'Access': b'8'})
-        self.tdb.Insert(tabname, {'Name': b'Nifty.MP3', 'Access': b'1'})
-        self.tdb.Insert(tabname, {'Type': b'Unknown', 'Access': b'0'})
+        self.tdb.Insert(tabname, {'Name': 'Index to MP3 files.doc',
+                                  'Type': 'Word', 'Access': '8'})
+        self.tdb.Insert(tabname, {'Name': 'Nifty.MP3', 'Access': '1'})
+        self.tdb.Insert(tabname, {'Type': 'Unknown', 'Access': '0'})
 
         def set_type(type):
-            if type is None:
-                return b'MP3'
+            if type == None:
+                return 'MP3'
             return type
 
         def increment_access(count):
-            return str(int(count)+1).encode('ascii')
+            return str(int(count)+1)
 
         def remove_value(value):
             return None
@@ -351,7 +364,7 @@
         try:
             self.tdb.Modify(tabname,
                             conditions={'Name': dbtables.LikeCond('%')},
-                            mappings={'Access': b'What is your quest?'})
+                            mappings={'Access': 'What is your quest?'})
         except TypeError:
             # success, the string value in mappings isn't callable
             pass
@@ -362,27 +375,24 @@
         values = self.tdb.Select(
             tabname, None,
             conditions={'Type': dbtables.ExactCond('Unknown')})
-        values = list(values)
-        self.assertEquals(len(values), 1)
-        self.assertEquals(values[0]['Name'], None)
-        self.assertEquals(values[0]['Access'], None)
+        self.assertEqual(len(values), 1, values)
+        self.assertEqual(values[0]['Name'], None, values)
+        self.assertEqual(values[0]['Access'], None, values)
 
         # Modify value by select conditions
         values = self.tdb.Select(
             tabname, None,
             conditions={'Name': dbtables.ExactCond('Nifty.MP3')})
-        values = list(values)
-        self.assertEquals(len(values), 1)
-        self.assertEquals(values[0]['Type'], b"MP3")
-        self.assertEquals(values[0]['Access'], b"2")
+        self.assertEqual(len(values), 1, values)
+        self.assertEqual(values[0]['Type'], "MP3", values)
+        self.assertEqual(values[0]['Access'], "2", values)
 
         # Make sure change applied only to select conditions
         values = self.tdb.Select(
             tabname, None, conditions={'Name': dbtables.LikeCond('%doc%')})
-        values = list(values)
-        self.assertEquals(len(values), 1)
-        self.assertEquals(values[0]['Type'], b"Word")
-        self.assertEquals(values[0]['Access'], b"9")
+        self.assertEqual(len(values), 1, values)
+        self.assertEqual(values[0]['Type'], "Word", values)
+        self.assertEqual(values[0]['Access'], "9", values)
 
 
 def test_suite():

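For readers new to dbtables: the module layers a small record/column store with condition-based selects on top of Berkeley DB. A compact sketch of the CreateTable/Insert/Select cycle the tests above exercise (Python 2.6-flavoured; the 'zoo' table and its columns are only illustrative):

    import shutil
    import tempfile
    from bsddb import dbtables

    home = tempfile.mkdtemp()
    try:
        tdb = dbtables.bsdTableDB(filename='tabletest.db', dbhome=home, create=1)
        tdb.CreateTable('zoo', ['Species', 'but can it fly?'])
        tdb.Insert('zoo', {'Species': 'Penguin', 'but can it fly?': 'no'})
        tdb.Insert('zoo', {'Species': 'SR-71A Blackbird', 'but can it fly?': 'yes'})
        rows = tdb.Select('zoo', ['Species'],
                          conditions={'but can it fly?': dbtables.ExactCond('yes')})
        assert len(rows) == 1 and rows[0]['Species'] == 'SR-71A Blackbird'
        tdb.close()
    finally:
        shutil.rmtree(home)
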
Added: python/branches/py3k/Lib/bsddb/test/test_distributed_transactions.py
==============================================================================
--- (empty file)
+++ python/branches/py3k/Lib/bsddb/test/test_distributed_transactions.py	Sun Aug 31 16:12:11 2008
@@ -0,0 +1,163 @@
+"""TestCases for distributed transactions.
+"""
+
+import os
+import unittest
+
+from .test_all import db, test_support, get_new_environment_path, \
+        get_new_database_path
+
+try :
+    a=set()
+except : # Python 2.3
+    from sets import Set as set
+else :
+    del a
+
+from .test_all import verbose
+
+#----------------------------------------------------------------------
+
+class DBTxn_distributed(unittest.TestCase):
+    num_txns=1234
+    nosync=True
+    must_open_db=False
+    def _create_env(self, must_open_db) :
+        self.dbenv = db.DBEnv()
+        self.dbenv.set_tx_max(self.num_txns)
+        self.dbenv.set_lk_max_lockers(self.num_txns*2)
+        self.dbenv.set_lk_max_locks(self.num_txns*2)
+        self.dbenv.set_lk_max_objects(self.num_txns*2)
+        if self.nosync :
+            self.dbenv.set_flags(db.DB_TXN_NOSYNC,True)
+        self.dbenv.open(self.homeDir, db.DB_CREATE | db.DB_THREAD |
+                db.DB_RECOVER |
+                db.DB_INIT_TXN | db.DB_INIT_LOG | db.DB_INIT_MPOOL |
+                db.DB_INIT_LOCK, 0o666)
+        self.db = db.DB(self.dbenv)
+        self.db.set_re_len(db.DB_XIDDATASIZE)
+        if must_open_db :
+            if db.version() > (4,1) :
+                txn=self.dbenv.txn_begin()
+                self.db.open(self.filename,
+                        db.DB_QUEUE, db.DB_CREATE | db.DB_THREAD, 0o666,
+                        txn=txn)
+                txn.commit()
+            else :
+                self.db.open(self.filename,
+                        db.DB_QUEUE, db.DB_CREATE | db.DB_THREAD, 0o666)
+
+    def setUp(self) :
+        self.homeDir = get_new_environment_path()
+        self.filename = "test"
+        return self._create_env(must_open_db=True)
+
+    def _destroy_env(self):
+        if self.nosync or (db.version()[:2] == (4,6)):  # Known bug
+            self.dbenv.log_flush()
+        self.db.close()
+        self.dbenv.close()
+
+    def tearDown(self):
+        self._destroy_env()
+        test_support.rmtree(self.homeDir)
+
+    def _recreate_env(self,must_open_db) :
+        self._destroy_env()
+        self._create_env(must_open_db)
+
+    def test01_distributed_transactions(self) :
+        txns=set()
+        adapt = lambda x : x
+        import sys
+        if sys.version_info[0] >= 3 :
+            adapt = lambda x : bytes(x, "ascii")
+    # Create transactions, "prepare" them, and
+    # let them be garbage collected.
+        for i in range(self.num_txns) :
+            txn = self.dbenv.txn_begin()
+            gid = "%%%dd" %db.DB_XIDDATASIZE
+            gid = adapt(gid %i)
+            self.db.put(i, gid, txn=txn, flags=db.DB_APPEND)
+            txns.add(gid)
+            txn.prepare(gid)
+        del txn
+
+        self._recreate_env(self.must_open_db)
+
+    # Get "to be recovered" transactions but
+    # let them be garbage collected.
+        recovered_txns=self.dbenv.txn_recover()
+        self.assertEquals(self.num_txns,len(recovered_txns))
+        for gid,txn in recovered_txns :
+            self.assert_(gid in txns)
+        del txn
+        del recovered_txns
+
+        self._recreate_env(self.must_open_db)
+
+    # Get "to be recovered" transactions. Commit, abort and
+    # discard them.
+        recovered_txns=self.dbenv.txn_recover()
+        self.assertEquals(self.num_txns,len(recovered_txns))
+        discard_txns=set()
+        committed_txns=set()
+        state=0
+        for gid,txn in recovered_txns :
+            if state==0 or state==1:
+                committed_txns.add(gid)
+                txn.commit()
+            elif state==2 :
+                txn.abort()
+            elif state==3 :
+                txn.discard()
+                discard_txns.add(gid)
+                state=-1
+            state+=1
+        del txn
+        del recovered_txns
+
+        self._recreate_env(self.must_open_db)
+
+    # Verify the discarded transactions are still
+    # around, and dispose of them.
+        recovered_txns=self.dbenv.txn_recover()
+        self.assertEquals(len(discard_txns),len(recovered_txns))
+        for gid,txn in recovered_txns :
+            txn.abort()
+        del txn
+        del recovered_txns
+
+        self._recreate_env(must_open_db=True)
+
+    # Be sure there are no pending transactions.
+    # Also check the database size.
+        recovered_txns=self.dbenv.txn_recover()
+        self.assert_(len(recovered_txns)==0)
+        self.assertEquals(len(committed_txns),self.db.stat()["nkeys"])
+
+class DBTxn_distributedSYNC(DBTxn_distributed):
+    nosync=False
+
+class DBTxn_distributed_must_open_db(DBTxn_distributed):
+    must_open_db=True
+
+class DBTxn_distributedSYNC_must_open_db(DBTxn_distributed):
+    nosync=False
+    must_open_db=True
+
+#----------------------------------------------------------------------
+
+def test_suite():
+    suite = unittest.TestSuite()
+    if db.version() >= (4,5) :
+        suite.addTest(unittest.makeSuite(DBTxn_distributed))
+        suite.addTest(unittest.makeSuite(DBTxn_distributedSYNC))
+    if db.version() >= (4,6) :
+        suite.addTest(unittest.makeSuite(DBTxn_distributed_must_open_db))
+        suite.addTest(unittest.makeSuite(DBTxn_distributedSYNC_must_open_db))
+    return suite
+
+
+if __name__ == '__main__':
+    unittest.main(defaultTest='test_suite')
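
For reference, a minimal sketch (not part of this patch) of the two-phase-commit
cycle the new test exercises; the environment directory, database name and the
key/value payload are illustrative, and on 3.0 the gid must be bytes, as the
adapt() helper above shows:

import os
from bsddb import db

home = "dtxn_home"
if not os.path.isdir(home):
    os.mkdir(home)
env_flags = (db.DB_CREATE | db.DB_RECOVER | db.DB_INIT_TXN | db.DB_INIT_LOG |
             db.DB_INIT_MPOOL | db.DB_INIT_LOCK)

env = db.DBEnv()
env.open(home, env_flags, 0o666)
txn = env.txn_begin()
d = db.DB(env)
d.open("test", db.DB_BTREE, db.DB_CREATE, 0o666, txn=txn)
d.put("key", "value", txn=txn)
gid = "%*d" % (db.DB_XIDDATASIZE, 1)    # global id, padded to the XID size
txn.prepare(gid)                        # phase 1: durable, but still undecided
del txn                                 # the "coordinator" goes away
d.close()
env.close()

# After recovery the prepared transaction is still pending; txn_recover()
# hands it back so the coordinator can decide its fate (phase 2).
env = db.DBEnv()
env.open(home, env_flags, 0o666)
for recovered_gid, recovered_txn in env.txn_recover():
    recovered_txn.commit()              # or .abort() / .discard()
env.close()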

Added: python/branches/py3k/Lib/bsddb/test/test_early_close.py
==============================================================================
--- (empty file)
+++ python/branches/py3k/Lib/bsddb/test/test_early_close.py	Sun Aug 31 16:12:11 2008
@@ -0,0 +1,195 @@
+"""TestCases for checking that it does not segfault when a DBEnv object
+is closed before its DB objects.
+"""
+
+import os
+import unittest
+
+from .test_all import db, test_support, verbose, get_new_environment_path, get_new_database_path
+
+# We're going to get warnings in this module about trying to close the db when
+# its env is already closed.  Let's just ignore those.
+try:
+    import warnings
+except ImportError:
+    pass
+else:
+    warnings.filterwarnings('ignore',
+                            message='DB could not be closed in',
+                            category=RuntimeWarning)
+
+
+#----------------------------------------------------------------------
+
+class DBEnvClosedEarlyCrash(unittest.TestCase):
+    def setUp(self):
+        self.homeDir = get_new_environment_path()
+        self.filename = "test"
+
+    def tearDown(self):
+        test_support.rmtree(self.homeDir)
+
+    def test01_close_dbenv_before_db(self):
+        dbenv = db.DBEnv()
+        dbenv.open(self.homeDir,
+                   db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
+                   0o666)
+
+        d = db.DB(dbenv)
+        d2 = db.DB(dbenv)
+        d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
+
+        self.assertRaises(db.DBNoSuchFileError, d2.open,
+                self.filename+"2", db.DB_BTREE, db.DB_THREAD, 0o666)
+
+        d.put("test","this is a test")
+        self.assertEqual(d.get("test"), "this is a test", "put!=get")
+        dbenv.close()  # This "close" should close the child db handle also
+        self.assertRaises(db.DBError, d.get, "test")
+
+    def test02_close_dbenv_before_dbcursor(self):
+        dbenv = db.DBEnv()
+        dbenv.open(self.homeDir,
+                   db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
+                   0o666)
+
+        d = db.DB(dbenv)
+        d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
+
+        d.put("test","this is a test")
+        d.put("test2","another test")
+        d.put("test3","another one")
+        self.assertEqual(d.get("test"), "this is a test", "put!=get")
+        c=d.cursor()
+        c.first()
+        next(c)
+        d.close()  # This "close" should close the child db handle also
+     # db.close should close the child cursor
+        self.assertRaises(db.DBError,c.__next__)
+
+        d = db.DB(dbenv)
+        d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
+        c=d.cursor()
+        c.first()
+        next(c)
+        dbenv.close()
+    # The "close" should close the child db handle also, with cursors
+        self.assertRaises(db.DBError, c.__next__)
+
+    def test03_close_db_before_dbcursor_without_env(self):
+        import os.path
+        path=os.path.join(self.homeDir,self.filename)
+        d = db.DB()
+        d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
+
+        d.put("test","this is a test")
+        d.put("test2","another test")
+        d.put("test3","another one")
+        self.assertEqual(d.get("test"), "this is a test", "put!=get")
+        c=d.cursor()
+        c.first()
+        next(c)
+        d.close()
+    # The "close" should close the child db handle also
+        self.assertRaises(db.DBError, c.__next__)
+
+    def test04_close_massive(self):
+        dbenv = db.DBEnv()
+        dbenv.open(self.homeDir,
+                   db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
+                   0o666)
+
+        dbs=[db.DB(dbenv) for i in range(16)]
+        cursors=[]
+        for i in dbs :
+            i.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
+
+        dbs[10].put("test","this is a test")
+        dbs[10].put("test2","another test")
+        dbs[10].put("test3","another one")
+        self.assertEqual(dbs[4].get("test"), "this is a test", "put!=get")
+
+        for i in dbs :
+            cursors.extend([i.cursor() for j in range(32)])
+
+        for i in dbs[::3] :
+            i.close()
+        for i in cursors[::3] :
+            i.close()
+
+    # Check for missing exception in DB! (after DB close)
+        self.assertRaises(db.DBError, dbs[9].get, "test")
+
+    # Check for missing exception in DBCursor! (after DB close)
+        self.assertRaises(db.DBError, cursors[101].first)
+
+        cursors[80].first()
+        next(cursors[80])
+        dbenv.close()  # This "close" should close the child db handle also
+    # Check for missing exception! (after DBEnv close)
+        self.assertRaises(db.DBError, cursors[80].__next__)
+
+    def test05_close_dbenv_delete_db_success(self):
+        dbenv = db.DBEnv()
+        dbenv.open(self.homeDir,
+                   db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
+                   0o666)
+
+        d = db.DB(dbenv)
+        d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
+
+        dbenv.close()  # This "close" should close the child db handle also
+
+        del d
+        try:
+            import gc
+        except ImportError:
+            gc = None
+        if gc:
+            # force d.__del__ [DB_dealloc] to be called
+            gc.collect()
+
+    def test06_close_txn_before_dup_cursor(self) :
+        dbenv = db.DBEnv()
+        dbenv.open(self.homeDir,db.DB_INIT_TXN | db.DB_INIT_MPOOL |
+                db.DB_INIT_LOG | db.DB_CREATE)
+        d = db.DB(dbenv)
+        txn = dbenv.txn_begin()
+        if db.version() < (4,1) :
+            d.open(self.filename, dbtype = db.DB_HASH, flags = db.DB_CREATE)
+        else :
+            d.open(self.filename, dbtype = db.DB_HASH, flags = db.DB_CREATE,
+                    txn=txn)
+        d.put("XXX", "yyy", txn=txn)
+        txn.commit()
+        txn = dbenv.txn_begin()
+        c1 = d.cursor(txn)
+        c2 = c1.dup()
+        self.assertEquals(("XXX", "yyy"), c1.first())
+        import warnings
+        # Not interested in warnings about implicit close.
+        warnings.simplefilter("ignore")
+        txn.commit()
+        warnings.resetwarnings()
+        self.assertRaises(db.DBCursorClosedError, c2.first)
+
+    if db.version() > (4,3,0) :
+        def test07_close_db_before_sequence(self):
+            import os.path
+            path=os.path.join(self.homeDir,self.filename)
+            d = db.DB()
+            d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
+            dbs=db.DBSequence(d)
+            d.close()  # This "close" should close the child DBSequence also
+            dbs.close()  # If not closed, core dump (in Berkeley DB 4.6.*)
+
+#----------------------------------------------------------------------
+
+def test_suite():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(DBEnvClosedEarlyCrash))
+    return suite
+
+
+if __name__ == '__main__':
+    unittest.main(defaultTest='test_suite')
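
For reference, a minimal sketch (not part of this patch) of the guarantee this
new test pins down: closing a DBEnv implicitly closes its child handles, and
using such a handle afterwards must raise DBError instead of crashing the
interpreter.  The environment directory and database name are illustrative:

import os
from bsddb import db

home = "early_close_home"
if not os.path.isdir(home):
    os.mkdir(home)

env = db.DBEnv()
env.open(home, db.DB_INIT_CDB | db.DB_INIT_MPOOL | db.DB_CREATE, 0o666)
d = db.DB(env)
d.open("test", db.DB_BTREE, db.DB_CREATE, 0o666)
d.put("test", "this is a test")

env.close()                             # also closes the child DB handle
try:
    d.get("test")                       # must raise, not segfault
except db.DBError:
    print("child handle invalidated, as expected")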

Modified: python/branches/py3k/Lib/bsddb/test/test_get_none.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_get_none.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_get_none.py	Sun Aug 31 16:12:11 2008
@@ -2,22 +2,17 @@
 TestCases for checking set_get_returns_none.
 """
 
-import sys, os, string
-import tempfile
-from pprint import pprint
+import os, string
 import unittest
 
-from bsddb import db
+from .test_all import db, verbose, get_new_database_path
 
-from bsddb.test.test_all import verbose
-
-letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
 
 #----------------------------------------------------------------------
 
 class GetReturnsNoneTestCase(unittest.TestCase):
     def setUp(self):
-        self.filename = tempfile.mktemp()
+        self.filename = get_new_database_path()
 
     def tearDown(self):
         try:
@@ -31,25 +26,24 @@
         d.open(self.filename, db.DB_BTREE, db.DB_CREATE)
         d.set_get_returns_none(1)
 
-        for x in letters:
-            x = x.encode("ascii")
+        for x in string.letters:
             d.put(x, x * 40)
 
-        data = d.get(b'bad key')
-        assert data == None
+        data = d.get('bad key')
+        self.assertEqual(data, None)
 
-        data = d.get(b'a')
-        assert data == b'a'*40
+        data = d.get(string.letters[0])
+        self.assertEqual(data, string.letters[0]*40)
 
         count = 0
         c = d.cursor()
         rec = c.first()
         while rec:
             count = count + 1
-            rec = c.next()
+            rec = next(c)
 
-        assert rec == None
-        assert count == 52
+        self.assertEqual(rec, None)
+        self.assertEqual(count, len(string.letters))
 
         c.close()
         d.close()
@@ -60,15 +54,14 @@
         d.open(self.filename, db.DB_BTREE, db.DB_CREATE)
         d.set_get_returns_none(0)
 
-        for x in letters:
-            x = x.encode("ascii")
+        for x in string.letters:
             d.put(x, x * 40)
 
-        self.assertRaises(db.DBNotFoundError, d.get, b'bad key')
-        self.assertRaises(KeyError, d.get, b'bad key')
+        self.assertRaises(db.DBNotFoundError, d.get, 'bad key')
+        self.assertRaises(KeyError, d.get, 'bad key')
 
-        data = d.get(b'a')
-        assert data == b'a'*40
+        data = d.get(string.letters[0])
+        self.assertEqual(data, string.letters[0]*40)
 
         count = 0
         exceptionHappened = 0
@@ -77,14 +70,14 @@
         while rec:
             count = count + 1
             try:
-                rec = c.next()
+                rec = next(c)
             except db.DBNotFoundError:  # end of the records
                 exceptionHappened = 1
                 break
 
-        assert rec != None
-        assert exceptionHappened
-        assert count == 52
+        self.assertNotEqual(rec, None)
+        self.assert_(exceptionHappened)
+        self.assertEqual(count, len(string.letters))
 
         c.close()
         d.close()
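
For reference, a short sketch (not part of this patch) of the
set_get_returns_none() switch this module tests; the file name is illustrative:

from bsddb import db

d = db.DB()
d.open("example.db", db.DB_BTREE, db.DB_CREATE)

d.set_get_returns_none(1)               # dict-like: a missing key returns None
print(d.get("no such key"))             # None, no exception

d.set_get_returns_none(0)               # legacy mode: a missing key raises
try:
    d.get("no such key")
except db.DBNotFoundError:              # also a KeyError subclass
    print("raised, as configured")
d.close()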

Modified: python/branches/py3k/Lib/bsddb/test/test_join.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_join.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_join.py	Sun Aug 31 16:12:11 2008
@@ -1,27 +1,12 @@
 """TestCases for using the DB.join and DBCursor.join_item methods.
 """
 
-import shutil
-import sys, os
-import tempfile
-import time
-from pprint import pprint
-
-try:
-    from threading import Thread, current_thread
-    have_threads = 1
-except ImportError:
-    have_threads = 0
+import os
 
 import unittest
-from bsddb.test.test_all import verbose
 
-from bsddb import db, dbshelve, StringKeys
-
-try:
-    from bsddb3 import test_support
-except ImportError:
-    from test import support as test_support
+from .test_all import db, dbshelve, test_support, verbose, \
+        get_new_environment_path, get_new_database_path
 
 #----------------------------------------------------------------------
 
@@ -44,18 +29,12 @@
     ('black', "shotgun"),
 ]
 
-def ASCII(s):
-    return s.encode("ascii")
-
 class JoinTestCase(unittest.TestCase):
     keytype = ''
 
     def setUp(self):
         self.filename = self.__class__.__name__ + '.db'
-        homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
-        self.homeDir = homeDir
-        try: os.mkdir(homeDir)
-        except os.error: pass
+        self.homeDir = get_new_environment_path()
         self.env = db.DBEnv()
         self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK )
 
@@ -72,13 +51,13 @@
         # create and populate primary index
         priDB = db.DB(self.env)
         priDB.open(self.filename, "primary", db.DB_BTREE, db.DB_CREATE)
-        [priDB.put(ASCII(k),ASCII(v)) for k,v in ProductIndex]
+        list(map(lambda t, priDB=priDB: priDB.put(*t), ProductIndex))
 
         # create and populate secondary index
         secDB = db.DB(self.env)
         secDB.set_flags(db.DB_DUP | db.DB_DUPSORT)
         secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE)
-        [secDB.put(ASCII(k),ASCII(v)) for k,v in ColorIndex]
+        list(map(lambda t, secDB=secDB: secDB.put(*t), ColorIndex))
 
         sCursor = None
         jCursor = None
@@ -87,19 +66,19 @@
             sCursor = secDB.cursor()
             # Don't do the .set() in an assert, or you can get a bogus failure
             # when running python -O
-            tmp = sCursor.set(b'red')
-            assert tmp
+            tmp = sCursor.set('red')
+            self.assert_(tmp)
 
             # FIXME: jCursor doesn't properly hold a reference to its
             # cursors, if they are closed before jcursor is used it
             # can cause a crash.
             jCursor = priDB.join([sCursor])
 
-            if jCursor.get(0) != (b'apple', b"Convenience Store"):
+            if jCursor.get(0) != ('apple', "Convenience Store"):
                 self.fail("join cursor positioned wrong")
-            if jCursor.join_item() != b'chainsaw':
+            if jCursor.join_item() != 'chainsaw':
                 self.fail("DBCursor.join_item returned wrong item")
-            if jCursor.get(0)[0] != b'strawberry':
+            if jCursor.get(0)[0] != 'strawberry':
                 self.fail("join cursor returned wrong thing")
             if jCursor.get(0):  # there were only three red items to return
                 self.fail("join cursor returned too many items")

Modified: python/branches/py3k/Lib/bsddb/test/test_lock.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_lock.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_lock.py	Sun Aug 31 16:12:11 2008
@@ -2,39 +2,31 @@
 TestCases for testing the locking sub-system.
 """
 
-import sys
-import tempfile
 import time
 
-try:
-    from threading import Thread, current_thread
-    have_threads = 1
-except ImportError:
-    have_threads = 0
-
-
 import unittest
-from bsddb.test.test_all import verbose
-
-try:
-    # For Pythons w/distutils pybsddb
-    from bsddb3 import db
-except ImportError:
-    # For Python 2.3
-    from bsddb import db
-
-try:
-    from bsddb3 import test_support
-except ImportError:
-    from test import support as test_support
+from .test_all import db, test_support, verbose, have_threads, \
+        get_new_environment_path, get_new_database_path
 
+if have_threads :
+    from threading import Thread
+    import sys
+    if sys.version_info[0] < 3 :
+        from threading import currentThread
+    else :
+        from threading import current_thread as currentThread
 
 #----------------------------------------------------------------------
 
 class LockingTestCase(unittest.TestCase):
+    import sys
+    if sys.version_info[:3] < (2, 4, 0):
+        def assertTrue(self, expr, msg=None):
+            self.failUnless(expr,msg=msg)
+
 
     def setUp(self):
-        self.homeDir = tempfile.mkdtemp('.test_lock')
+        self.homeDir = get_new_environment_path()
         self.env = db.DBEnv()
         self.env.open(self.homeDir, db.DB_THREAD | db.DB_INIT_MPOOL |
                                     db.DB_INIT_LOCK | db.DB_CREATE)
@@ -53,15 +45,13 @@
         anID = self.env.lock_id()
         if verbose:
             print("locker ID: %s" % anID)
-        lock = self.env.lock_get(anID, b"some locked thing", db.DB_LOCK_WRITE)
+        lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE)
         if verbose:
             print("Aquired lock: %s" % lock)
-        time.sleep(1)
         self.env.lock_put(lock)
         if verbose:
             print("Released lock: %s" % lock)
-        if db.version() >= (4,0):
-            self.env.lock_id_free(anID)
+        self.env.lock_id_free(anID)
 
 
     def test02_threaded(self):
@@ -71,34 +61,35 @@
 
         threads = []
         threads.append(Thread(target = self.theThread,
-                              args=(5, db.DB_LOCK_WRITE)))
+                              args=(db.DB_LOCK_WRITE,)))
         threads.append(Thread(target = self.theThread,
-                              args=(1, db.DB_LOCK_READ)))
+                              args=(db.DB_LOCK_READ,)))
         threads.append(Thread(target = self.theThread,
-                              args=(1, db.DB_LOCK_READ)))
+                              args=(db.DB_LOCK_READ,)))
         threads.append(Thread(target = self.theThread,
-                              args=(1, db.DB_LOCK_WRITE)))
+                              args=(db.DB_LOCK_WRITE,)))
         threads.append(Thread(target = self.theThread,
-                              args=(1, db.DB_LOCK_READ)))
+                              args=(db.DB_LOCK_READ,)))
         threads.append(Thread(target = self.theThread,
-                              args=(1, db.DB_LOCK_READ)))
+                              args=(db.DB_LOCK_READ,)))
         threads.append(Thread(target = self.theThread,
-                              args=(1, db.DB_LOCK_WRITE)))
+                              args=(db.DB_LOCK_WRITE,)))
         threads.append(Thread(target = self.theThread,
-                              args=(1, db.DB_LOCK_WRITE)))
+                              args=(db.DB_LOCK_WRITE,)))
         threads.append(Thread(target = self.theThread,
-                              args=(1, db.DB_LOCK_WRITE)))
+                              args=(db.DB_LOCK_WRITE,)))
 
         for t in threads:
+            import sys
+            if sys.version_info[0] < 3 :
+                t.setDaemon(True)
+            else :
+                t.daemon = True
             t.start()
         for t in threads:
             t.join()
 
-    def _DISABLED_test03_lock_timeout(self):
-        # Disabled as this test crashes the python interpreter built in
-        # debug mode with:
-        #  Fatal Python error: UNREF invalid object
-        # the error occurs as marked below.
+    def test03_lock_timeout(self):
         self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
         self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
         self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
@@ -117,7 +108,11 @@
         deadlock_detection.end=False
         deadlock_detection.count=0
         t=Thread(target=deadlock_detection)
-        t.daemon = True
+        import sys
+        if sys.version_info[0] < 3 :
+            t.setDaemon(True)
+        else :
+            t.daemon = True
         t.start()
         self.env.set_timeout(100000, db.DB_SET_LOCK_TIMEOUT)
         anID = self.env.lock_id()
@@ -125,8 +120,6 @@
         self.assertNotEqual(anID, anID2)
         lock = self.env.lock_get(anID, "shared lock", db.DB_LOCK_WRITE)
         start_time=time.time()
-        # FIXME: I see the UNREF crash as the interpreter trys to exit
-        # from this call to lock_get.
         self.assertRaises(db.DBLockNotGrantedError,
                 self.env.lock_get,anID2, "shared lock", db.DB_LOCK_READ)
         end_time=time.time()
@@ -135,15 +128,19 @@
         self.env.lock_put(lock)
         t.join()
 
-        if db.version() >= (4,0):
-            self.env.lock_id_free(anID)
-            self.env.lock_id_free(anID2)
+        self.env.lock_id_free(anID)
+        self.env.lock_id_free(anID2)
 
         if db.version() >= (4,6):
             self.assertTrue(deadlock_detection.count>0)
 
-    def theThread(self, sleepTime, lockType):
-        name = current_thread().name
+    def theThread(self, lockType):
+        import sys
+        if sys.version_info[0] < 3 :
+            name = currentThread().getName()
+        else :
+            name = currentThread().name
+
         if lockType ==  db.DB_LOCK_WRITE:
             lt = "write"
         else:
@@ -153,17 +150,16 @@
         if verbose:
             print("%s: locker ID: %s" % (name, anID))
 
-        lock = self.env.lock_get(anID, b"some locked thing", lockType)
-        if verbose:
-            print("%s: Aquired %s lock: %s" % (name, lt, lock))
-
-        time.sleep(sleepTime)
+        for i in range(1000) :
+            lock = self.env.lock_get(anID, "some locked thing", lockType)
+            if verbose:
+                print("%s: Aquired %s lock: %s" % (name, lt, lock))
+
+            self.env.lock_put(lock)
+            if verbose:
+                print("%s: Released %s lock: %s" % (name, lt, lock))
 
-        self.env.lock_put(lock)
-        if verbose:
-            print("%s: Released %s lock: %s" % (name, lt, lock))
-        if db.version() >= (4,0):
-            self.env.lock_id_free(anID)
+        self.env.lock_id_free(anID)
 
 
 #----------------------------------------------------------------------
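
For reference, a minimal sketch (not part of this patch) of the locking calls
the rewritten theThread() now hammers in a loop; the environment directory is
illustrative and the timeout value is the one test03 uses (microseconds):

import os
from bsddb import db

home = "lock_home"
if not os.path.isdir(home):
    os.mkdir(home)

env = db.DBEnv()
env.open(home, db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_CREATE)
env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)

locker = env.lock_id()                  # allocate a locker id
lock = env.lock_get(locker, "some locked thing", db.DB_LOCK_WRITE)
# ... the work protected by the lock goes here ...
env.lock_put(lock)                      # release the lock
env.lock_id_free(locker)                # and release the locker id
env.close()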

Modified: python/branches/py3k/Lib/bsddb/test/test_misc.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_misc.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_misc.py	Sun Aug 31 16:12:11 2008
@@ -2,34 +2,16 @@
 """
 
 import os
-import shutil
-import sys
 import unittest
-import tempfile
 
-try:
-    # For Pythons w/distutils pybsddb
-    from bsddb3 import db, dbshelve, hashopen
-except ImportError:
-    # For the bundled bsddb
-    from bsddb import db, dbshelve, hashopen
-
-try:
-    from bsddb3 import test_support
-except ImportError:
-    from test import support as test_support
+from .test_all import db, dbshelve, hashopen, test_support, get_new_environment_path, get_new_database_path
 
 #----------------------------------------------------------------------
 
 class MiscTestCase(unittest.TestCase):
     def setUp(self):
         self.filename = self.__class__.__name__ + '.db'
-        homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
-        self.homeDir = homeDir
-        try:
-            os.mkdir(homeDir)
-        except OSError:
-            pass
+        self.homeDir = get_new_environment_path()
 
     def tearDown(self):
         test_support.unlink(self.filename)
@@ -38,14 +20,18 @@
     def test01_badpointer(self):
         dbs = dbshelve.open(self.filename)
         dbs.close()
-        self.assertRaises(db.DBError, dbs.get, b"foo")
+        self.assertRaises(db.DBError, dbs.get, "foo")
 
     def test02_db_home(self):
         env = db.DBEnv()
         # check for crash fixed when db_home is used before open()
-        assert env.db_home is None
+        self.assert_(env.db_home is None)
         env.open(self.homeDir, db.DB_CREATE)
-        assert self.homeDir == env.db_home
+        import sys
+        if sys.version_info[0] < 3 :
+            self.assertEqual(self.homeDir, env.db_home)
+        else :
+            self.assertEqual(bytes(self.homeDir, "ascii"), env.db_home)
 
     def test03_repr_closed_db(self):
         db = hashopen(self.filename)
@@ -53,6 +39,18 @@
         rp = repr(db)
         self.assertEquals(rp, "{}")
 
+    def test04_repr_db(self) :
+        db = hashopen(self.filename)
+        d = {}
+        for i in range(100) :
+            db[repr(i)] = repr(100*i)
+            d[repr(i)] = repr(100*i)
+        db.close()
+        db = hashopen(self.filename)
+        rp = repr(db)
+        self.assertEquals(rp, repr(d))
+        db.close()
+
     # http://sourceforge.net/tracker/index.php?func=detail&aid=1708868&group_id=13900&atid=313900
     #
     # See the bug report for details.
@@ -60,65 +58,65 @@
     # The problem was that make_key_dbt() was not allocating a copy of
     # string keys but FREE_DBT() was always being told to free it when the
     # database was opened with DB_THREAD.
-    def test04_double_free_make_key_dbt(self):
+    def test05_double_free_make_key_dbt(self):
         try:
             db1 = db.DB()
             db1.open(self.filename, None, db.DB_BTREE,
                      db.DB_CREATE | db.DB_THREAD)
 
             curs = db1.cursor()
-            t = curs.get(b"/foo", db.DB_SET)
+            t = curs.get("/foo", db.DB_SET)
             # double free happened during exit from DBC_get
         finally:
             db1.close()
-            os.unlink(self.filename)
+            test_support.unlink(self.filename)
 
-    def test05_key_with_null_bytes(self):
+    def test06_key_with_null_bytes(self):
         try:
             db1 = db.DB()
             db1.open(self.filename, None, db.DB_HASH, db.DB_CREATE)
-            db1[b'a'] = b'eh?'
-            db1[b'a\x00'] = b'eh zed.'
-            db1[b'a\x00a'] = b'eh zed eh?'
-            db1[b'aaa'] = b'eh eh eh!'
-            keys = db1.keys()
+            db1['a'] = 'eh?'
+            db1['a\x00'] = 'eh zed.'
+            db1['a\x00a'] = 'eh zed eh?'
+            db1['aaa'] = 'eh eh eh!'
+            keys = list(db1.keys())
             keys.sort()
-            self.assertEqual([b'a', b'a\x00', b'a\x00a', b'aaa'], keys)
-            self.assertEqual(db1[b'a'], b'eh?')
-            self.assertEqual(db1[b'a\x00'], b'eh zed.')
-            self.assertEqual(db1[b'a\x00a'], b'eh zed eh?')
-            self.assertEqual(db1[b'aaa'], b'eh eh eh!')
+            self.assertEqual(['a', 'a\x00', 'a\x00a', 'aaa'], keys)
+            self.assertEqual(db1['a'], 'eh?')
+            self.assertEqual(db1['a\x00'], 'eh zed.')
+            self.assertEqual(db1['a\x00a'], 'eh zed eh?')
+            self.assertEqual(db1['aaa'], 'eh eh eh!')
         finally:
             db1.close()
-            os.unlink(self.filename)
+            test_support.unlink(self.filename)
 
-    def test_DB_set_flags_persists(self):
+    def test07_DB_set_flags_persists(self):
         if db.version() < (4,2):
             # The get_flags API required for this to work is only available
-            # in BerkeleyDB >= 4.2
+            # in Berkeley DB >= 4.2
             return
         try:
             db1 = db.DB()
             db1.set_flags(db.DB_DUPSORT)
             db1.open(self.filename, db.DB_HASH, db.DB_CREATE)
-            db1[b'a'] = b'eh'
-            db1[b'a'] = b'A'
-            self.assertEqual([(b'a', b'A')], db1.items())
-            db1.put(b'a', b'Aa')
-            self.assertEqual([(b'a', b'A'), (b'a', b'Aa')], db1.items())
+            db1['a'] = 'eh'
+            db1['a'] = 'A'
+            self.assertEqual([('a', 'A')], list(db1.items()))
+            db1.put('a', 'Aa')
+            self.assertEqual([('a', 'A'), ('a', 'Aa')], list(db1.items()))
             db1.close()
             db1 = db.DB()
             # no set_flags call, we're testing that it reads and obeys
             # the flags on open.
             db1.open(self.filename, db.DB_HASH)
-            self.assertEqual([(b'a', b'A'), (b'a', b'Aa')], db1.items())
+            self.assertEqual([('a', 'A'), ('a', 'Aa')], list(db1.items()))
             # if it read the flags right this will replace all values
-            # for key b'a' instead of adding a new one.  (as a dict should)
-            db1[b'a'] = b'new A'
-            self.assertEqual([(b'a', b'new A')], db1.items())
+            # for key 'a' instead of adding a new one.  (as a dict should)
+            db1['a'] = 'new A'
+            self.assertEqual([('a', 'new A')], list(db1.items()))
         finally:
             db1.close()
-            os.unlink(self.filename)
+            test_support.unlink(self.filename)
 
 
 #----------------------------------------------------------------------
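
For reference, a small sketch (not part of this patch) of the behaviour
test07_DB_set_flags_persists checks: flags set at creation time are stored in
the database file and honoured by a later open that does not call set_flags().
The file name is illustrative:

from bsddb import db

d = db.DB()
d.set_flags(db.DB_DUPSORT)              # set once, when the file is created
d.open("dups.db", db.DB_HASH, db.DB_CREATE)
d.put("a", "A")
d.put("a", "Aa")                        # kept as a sorted duplicate
d.close()

d = db.DB()
d.open("dups.db", db.DB_HASH)           # note: no set_flags() call here
print(list(d.items()))                  # both ('a', ...) records are present
d.close()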

Modified: python/branches/py3k/Lib/bsddb/test/test_pickle.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_pickle.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_pickle.py	Sun Aug 31 16:12:11 2008
@@ -1,23 +1,13 @@
 
-import shutil
-import sys, os
+import os
 import pickle
-import tempfile
-import unittest
-import tempfile
-
-try:
-    # For Pythons w/distutils pybsddb
-    from bsddb3 import db
-except ImportError as e:
-    # For Python 2.3
-    from bsddb import db
-
 try:
-    from bsddb3 import test_support
+    import pickle
 except ImportError:
-    from test import support as test_support
+    pickle = None
+import unittest
 
+from .test_all import db, test_support, get_new_environment_path, get_new_database_path
 
 #----------------------------------------------------------------------
 
@@ -26,10 +16,7 @@
     db_name = 'test-dbobj.db'
 
     def setUp(self):
-        homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
-        self.homeDir = homeDir
-        try: os.mkdir(homeDir)
-        except os.error: pass
+        self.homeDir = get_new_environment_path()
 
     def tearDown(self):
         if hasattr(self, 'db'):
@@ -43,10 +30,10 @@
         self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
         self.db = db.DB(self.env)
         self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE)
-        self.db.put(b'spam', b'eggs')
-        self.assertEqual(self.db[b'spam'], b'eggs')
+        self.db.put('spam', 'eggs')
+        self.assertEqual(self.db['spam'], 'eggs')
         try:
-            self.db.put(b'spam', b'ham', flags=db.DB_NOOVERWRITE)
+            self.db.put('spam', 'ham', flags=db.DB_NOOVERWRITE)
         except db.DBError as egg:
             pickledEgg = pickle.dumps(egg)
             #print repr(pickledEgg)
@@ -54,7 +41,7 @@
             if rottenEgg.args != egg.args or type(rottenEgg) != type(egg):
                 raise Exception(rottenEgg, '!=', egg)
         else:
-            self.fail("where's my DBError exception?!?")
+            raise Exception("where's my DBError exception?!?")
 
         self.db.close()
         self.env.close()
@@ -62,6 +49,10 @@
     def test01_pickle_DBError(self):
         self._base_test_pickle_DBError(pickle=pickle)
 
+    if pickle:
+        def test02_cPickle_DBError(self):
+            self._base_test_pickle_DBError(pickle=pickle)
+
 #----------------------------------------------------------------------
 
 def test_suite():

Modified: python/branches/py3k/Lib/bsddb/test/test_queue.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_queue.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_queue.py	Sun Aug 31 16:12:11 2008
@@ -2,27 +2,17 @@
 TestCases for exercising a Queue DB.
 """
 
-import sys, os, string
-import tempfile
+import os, string
 from pprint import pprint
 import unittest
 
-try:
-    # For Pythons w/distutils pybsddb
-    from bsddb3 import db
-except ImportError:
-    # For Python 2.3
-    from bsddb import db
-
-from bsddb.test.test_all import verbose
-
-letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+from .test_all import db, verbose, get_new_database_path
 
 #----------------------------------------------------------------------
 
 class SimpleQueueTestCase(unittest.TestCase):
     def setUp(self):
-        self.filename = tempfile.mktemp()
+        self.filename = get_new_database_path()
 
     def tearDown(self):
         try:
@@ -46,17 +36,17 @@
             print("before appends" + '-' * 30)
             pprint(d.stat())
 
-        for x in letters:
-            d.append(x.encode('ascii') * 40)
+        for x in string.letters:
+            d.append(x * 40)
 
-        assert len(d) == 52
+        self.assertEqual(len(d), len(string.letters))
 
-        d.put(100, b"some more data")
-        d.put(101, b"and some more ")
-        d.put(75,  b"out of order")
-        d.put(1,   b"replacement data")
+        d.put(100, "some more data")
+        d.put(101, "and some more ")
+        d.put(75,  "out of order")
+        d.put(1,   "replacement data")
 
-        assert len(d) == 55
+        self.assertEqual(len(d), len(string.letters)+3)
 
         if verbose:
             print("before close" + '-' * 30)
@@ -71,7 +61,11 @@
             print("after open" + '-' * 30)
             pprint(d.stat())
 
-        d.append(b"one more")
+        # Test "txn" as a positional parameter
+        d.append("one more", None)
+        # Test "txn" as a keyword parameter
+        d.append("another one", txn=None)
+
         c = d.cursor()
 
         if verbose:
@@ -89,9 +83,9 @@
             print("after consume loop" + '-' * 30)
             pprint(d.stat())
 
-        assert len(d) == 0, \
+        self.assertEqual(len(d), 0, \
                "if you see this message then you need to rebuild " \
-               "BerkeleyDB 3.1.17 with the patch in patches/qam_stat.diff"
+               "Berkeley DB 3.1.17 with the patch in patches/qam_stat.diff")
 
         d.close()
 
@@ -118,17 +112,17 @@
             print("before appends" + '-' * 30)
             pprint(d.stat())
 
-        for x in letters:
-            d.append(x.encode('ascii') * 40)
+        for x in string.letters:
+            d.append(x * 40)
 
-        assert len(d) == 52
+        self.assertEqual(len(d), len(string.letters))
 
-        d.put(100, b"some more data")
-        d.put(101, b"and some more ")
-        d.put(75,  b"out of order")
-        d.put(1,   b"replacement data")
+        d.put(100, "some more data")
+        d.put(101, "and some more ")
+        d.put(75,  "out of order")
+        d.put(1,   "replacement data")
 
-        assert len(d) == 55
+        self.assertEqual(len(d), len(string.letters)+3)
 
         if verbose:
             print("before close" + '-' * 30)
@@ -144,7 +138,7 @@
             print("after open" + '-' * 30)
             pprint(d.stat())
 
-        d.append(b"one more")
+        d.append("one more")
 
         if verbose:
             print("after append" + '-' * 30)

Modified: python/branches/py3k/Lib/bsddb/test/test_recno.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_recno.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_recno.py	Sun Aug 31 16:12:11 2008
@@ -2,26 +2,11 @@
 """
 
 import os
-import shutil
-import sys
 import errno
-import tempfile
 from pprint import pprint
 import unittest
 
-from bsddb.test.test_all import verbose
-
-try:
-    # For Pythons w/distutils pybsddb
-    from bsddb3 import db
-except ImportError:
-    # For Python 2.3
-    from bsddb import db
-
-try:
-    from bsddb3 import test_support
-except ImportError:
-    from test import support as test_support
+from .test_all import db, test_support, verbose, get_new_environment_path, get_new_database_path
 
 letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
 
@@ -29,8 +14,13 @@
 #----------------------------------------------------------------------
 
 class SimpleRecnoTestCase(unittest.TestCase):
+    import sys
+    if sys.version_info[:3] < (2, 4, 0):
+        def assertFalse(self, expr, msg=None):
+            self.failIf(expr,msg=msg)
+
     def setUp(self):
-        self.filename = tempfile.mktemp()
+        self.filename = get_new_database_path()
         self.homeDir = None
 
     def tearDown(self):
@@ -47,9 +37,9 @@
         d.open(self.filename, db.DB_RECNO, db.DB_CREATE)
 
         for x in letters:
-            recno = d.append(x.encode('ascii') * 60)
-            assert type(recno) == type(0)
-            assert recno >= 1
+            recno = d.append(x * 60)
+            self.assertEqual(type(recno), type(0))
+            self.assert_(recno >= 1)
             if verbose:
                 print(recno, end=' ')
 
@@ -64,20 +54,24 @@
             if verbose:
                 print(data)
 
-            assert type(data) == bytes
-            assert data == d.get(recno)
+            self.assertEqual(type(data), type(""))
+            self.assertEqual(data, d.get(recno))
 
         try:
             data = d[0]  # This should raise a KeyError!?!?!
         except db.DBInvalidArgError as val:
-            assert val.args[0] == db.EINVAL
+            import sys
+            if sys.version_info[0] < 3 :
+                self.assertEqual(val[0], db.EINVAL)
+            else :
+                self.assertEqual(val.args[0], db.EINVAL)
             if verbose: print(val)
         else:
             self.fail("expected exception")
 
         # test that has_key raises DB exceptions (fixed in pybsddb 4.3.2)
         try:
-            d.has_key(0)
+            0 in d
         except db.DBError as val:
             pass
         else:
@@ -96,35 +90,35 @@
             if get_returns_none:
                 self.fail("unexpected exception")
         else:
-            assert data == None
+            self.assertEqual(data, None)
 
-        keys = d.keys()
+        keys = list(d.keys())
         if verbose:
             print(keys)
-        assert type(keys) == type([])
-        assert type(keys[0]) == type(123)
-        assert len(keys) == len(d)
+        self.assertEqual(type(keys), type([]))
+        self.assertEqual(type(keys[0]), type(123))
+        self.assertEqual(len(keys), len(d))
 
-        items = d.items()
+        items = list(d.items())
         if verbose:
             pprint(items)
-        assert type(items) == type([])
-        assert type(items[0]) == type(())
-        assert len(items[0]) == 2
-        assert type(items[0][0]) == type(123)
-        assert type(items[0][1]) == bytes
-        assert len(items) == len(d)
+        self.assertEqual(type(items), type([]))
+        self.assertEqual(type(items[0]), type(()))
+        self.assertEqual(len(items[0]), 2)
+        self.assertEqual(type(items[0][0]), type(123))
+        self.assertEqual(type(items[0][1]), type(""))
+        self.assertEqual(len(items), len(d))
 
-        assert d.has_key(25)
+        self.assert_(25 in d)
 
         del d[25]
-        assert not d.has_key(25)
+        self.assertFalse(25 in d)
 
         d.delete(13)
-        assert not d.has_key(13)
+        self.assertFalse(13 in d)
 
-        data = d.get_both(26, b"z" * 60)
-        assert data == b"z" * 60, 'was %r' % data
+        data = d.get_both(26, "z" * 60)
+        self.assertEqual(data, "z" * 60, 'was %r' % data)
         if verbose:
             print(data)
 
@@ -137,18 +131,18 @@
         while rec:
             if verbose:
                 print(rec)
-            rec = c.next()
+            rec = next(c)
 
         c.set(50)
         rec = c.current()
         if verbose:
             print(rec)
 
-        c.put(-1, b"a replacement record", db.DB_CURRENT)
+        c.put(-1, "a replacement record", db.DB_CURRENT)
 
         c.set(50)
         rec = c.current()
-        assert rec == (50, b"a replacement record")
+        self.assertEqual(rec, (50, "a replacement record"))
         if verbose:
             print(rec)
 
@@ -159,7 +153,7 @@
         # test that non-existant key lookups work (and that
         # DBC_set_range doesn't have a memleak under valgrind)
         rec = c.set_range(999999)
-        assert rec == None
+        self.assertEqual(rec, None)
         if verbose:
             print(rec)
 
@@ -171,8 +165,8 @@
         c = d.cursor()
 
         # put a record beyond the consecutive end of the recno's
-        d[100] = b"way out there"
-        assert d[100] == b"way out there"
+        d[100] = "way out there"
+        self.assertEqual(d[100], "way out there")
 
         try:
             data = d[99]
@@ -187,7 +181,7 @@
             if get_returns_none:
                 self.fail("unexpected DBKeyEmptyError exception")
             else:
-                assert val.args[0] == db.DB_KEYEMPTY
+                self.assertEqual(val[0], db.DB_KEYEMPTY)
                 if verbose: print(val)
         else:
             if not get_returns_none:
@@ -197,7 +191,7 @@
         while rec:
             if verbose:
                 print(rec)
-            rec = c.next()
+            rec = next(c)
 
         c.close()
         d.close()
@@ -209,7 +203,7 @@
         just a line in the file, but you can set a different record delimiter
         if needed.
         """
-        homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
+        homeDir = get_new_environment_path()
         self.homeDir = homeDir
         source = os.path.join(homeDir, 'test_recno.txt')
         if not os.path.isdir(homeDir):
@@ -226,7 +220,7 @@
 
         data = "The quick brown fox jumped over the lazy dog".split()
         for datum in data:
-            d.append(datum.encode('ascii'))
+            d.append(datum)
         d.sync()
         d.close()
 
@@ -238,15 +232,15 @@
             print(data)
             print(text.split('\n'))
 
-        assert text.split('\n') == data
+        self.assertEqual(text.split('\n'), data)
 
         # open as a DB again
         d = db.DB()
         d.set_re_source(source)
         d.open(self.filename, db.DB_RECNO)
 
-        d[3] = b'reddish-brown'
-        d[8] = b'comatose'
+        d[3] = 'reddish-brown'
+        d[8] = 'comatose'
 
         d.sync()
         d.close()
@@ -257,8 +251,8 @@
             print(text)
             print(text.split('\n'))
 
-        assert text.split('\n') == \
-             "The quick reddish-brown fox jumped over the comatose dog".split()
+        self.assertEqual(text.split('\n'),
+           "The quick reddish-brown fox jumped over the comatose dog".split())
 
     def test03_FixedLength(self):
         d = db.DB()
@@ -268,14 +262,18 @@
         d.open(self.filename, db.DB_RECNO, db.DB_CREATE)
 
         for x in letters:
-            d.append(x.encode('ascii') * 35)    # These will be padded
+            d.append(x * 35)    # These will be padded
 
-        d.append(b'.' * 40)      # this one will be exact
+        d.append('.' * 40)      # this one will be exact
 
         try:                    # this one will fail
-            d.append(b'bad' * 20)
+            d.append('bad' * 20)
         except db.DBInvalidArgError as val:
-            assert val.args[0] == db.EINVAL
+            import sys
+            if sys.version_info[0] < 3 :
+                self.assertEqual(val[0], db.EINVAL)
+            else :
+                self.assertEqual(val.args[0], db.EINVAL)
             if verbose: print(val)
         else:
             self.fail("expected exception")
@@ -285,7 +283,7 @@
         while rec:
             if verbose:
                 print(rec)
-            rec = c.next()
+            rec = next(c)
 
         c.close()
         d.close()
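
For reference, a minimal sketch (not part of this patch) of the flat-text
backing file feature test02 uses: every line of the source file is one recno
record, and sync() writes modifications back.  The file names are illustrative:

from bsddb import db

source = "backing.txt"
with open(source, "w") as f:
    f.write("The quick brown fox\n")    # one line == one record

d = db.DB()
d.set_re_source(source)
d.open("words.db", db.DB_RECNO, db.DB_CREATE)

print(d[1])                             # record numbers start at 1
d[1] = "The quick reddish-brown fox"
d.sync()                                # flush the change back to backing.txt
d.close()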

Added: python/branches/py3k/Lib/bsddb/test/test_replication.py
==============================================================================
--- (empty file)
+++ python/branches/py3k/Lib/bsddb/test/test_replication.py	Sun Aug 31 16:12:11 2008
@@ -0,0 +1,444 @@
+"""TestCases for distributed transactions.
+"""
+
+import os
+import time
+import unittest
+
+from .test_all import db, test_support, have_threads, verbose, \
+        get_new_environment_path, get_new_database_path
+
+
+#----------------------------------------------------------------------
+
+class DBReplicationManager(unittest.TestCase):
+    import sys
+    if sys.version_info[:3] < (2, 4, 0):
+        def assertTrue(self, expr, msg=None):
+            self.failUnless(expr,msg=msg)
+
+    def setUp(self) :
+        self.homeDirMaster = get_new_environment_path()
+        self.homeDirClient = get_new_environment_path()
+
+        self.dbenvMaster = db.DBEnv()
+        self.dbenvClient = db.DBEnv()
+
+        # Must use "DB_THREAD" because the Replication Manager will
+        # be executed in other threads but will use the same environment.
+        # http://forums.oracle.com/forums/thread.jspa?threadID=645788&tstart=0
+        self.dbenvMaster.open(self.homeDirMaster, db.DB_CREATE | db.DB_INIT_TXN
+                | db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
+                db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0o666)
+        self.dbenvClient.open(self.homeDirClient, db.DB_CREATE | db.DB_INIT_TXN
+                | db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
+                db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0o666)
+
+        self.confirmed_master=self.client_startupdone=False
+        def confirmed_master(a,b,c) :
+            if b==db.DB_EVENT_REP_MASTER :
+                self.confirmed_master=True
+
+        def client_startupdone(a,b,c) :
+            if b==db.DB_EVENT_REP_STARTUPDONE :
+                self.client_startupdone=True
+
+        self.dbenvMaster.set_event_notify(confirmed_master)
+        self.dbenvClient.set_event_notify(client_startupdone)
+
+        #self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
+        #self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
+        #self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
+        #self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
+
+        self.dbMaster = self.dbClient = None
+
+
+    def tearDown(self):
+        if self.dbClient :
+            self.dbClient.close()
+        if self.dbMaster :
+            self.dbMaster.close()
+        self.dbenvClient.close()
+        self.dbenvMaster.close()
+        test_support.rmtree(self.homeDirClient)
+        test_support.rmtree(self.homeDirMaster)
+
+    def test01_basic_replication(self) :
+        master_port = test_support.find_unused_port()
+        self.dbenvMaster.repmgr_set_local_site("127.0.0.1", master_port)
+        client_port = test_support.find_unused_port()
+        self.dbenvClient.repmgr_set_local_site("127.0.0.1", client_port)
+        self.dbenvMaster.repmgr_add_remote_site("127.0.0.1", client_port)
+        self.dbenvClient.repmgr_add_remote_site("127.0.0.1", master_port)
+        self.dbenvMaster.rep_set_nsites(2)
+        self.dbenvClient.rep_set_nsites(2)
+        self.dbenvMaster.rep_set_priority(10)
+        self.dbenvClient.rep_set_priority(0)
+
+        self.dbenvMaster.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100123)
+        self.dbenvClient.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100321)
+        self.assertEquals(self.dbenvMaster.rep_get_timeout(
+            db.DB_REP_CONNECTION_RETRY), 100123)
+        self.assertEquals(self.dbenvClient.rep_get_timeout(
+            db.DB_REP_CONNECTION_RETRY), 100321)
+
+        self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100234)
+        self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100432)
+        self.assertEquals(self.dbenvMaster.rep_get_timeout(
+            db.DB_REP_ELECTION_TIMEOUT), 100234)
+        self.assertEquals(self.dbenvClient.rep_get_timeout(
+            db.DB_REP_ELECTION_TIMEOUT), 100432)
+
+        self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100345)
+        self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100543)
+        self.assertEquals(self.dbenvMaster.rep_get_timeout(
+            db.DB_REP_ELECTION_RETRY), 100345)
+        self.assertEquals(self.dbenvClient.rep_get_timeout(
+            db.DB_REP_ELECTION_RETRY), 100543)
+
+        self.dbenvMaster.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
+        self.dbenvClient.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
+
+        self.dbenvMaster.repmgr_start(1, db.DB_REP_MASTER);
+        self.dbenvClient.repmgr_start(1, db.DB_REP_CLIENT);
+
+        self.assertEquals(self.dbenvMaster.rep_get_nsites(),2)
+        self.assertEquals(self.dbenvClient.rep_get_nsites(),2)
+        self.assertEquals(self.dbenvMaster.rep_get_priority(),10)
+        self.assertEquals(self.dbenvClient.rep_get_priority(),0)
+        self.assertEquals(self.dbenvMaster.repmgr_get_ack_policy(),
+                db.DB_REPMGR_ACKS_ALL)
+        self.assertEquals(self.dbenvClient.repmgr_get_ack_policy(),
+                db.DB_REPMGR_ACKS_ALL)
+
+        # The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
+        # is not generated if the master has no new transactions.
+        # This is solved in BDB 4.6 (#15542).
+        import time
+        timeout = time.time()+10
+        while (time.time()<timeout) and not (self.confirmed_master and self.client_startupdone) :
+            time.sleep(0.02)
+        self.assertTrue(time.time()<timeout)
+
+        d = self.dbenvMaster.repmgr_site_list()
+        self.assertEquals(len(d), 1)
+        self.assertEquals(d[0][0], "127.0.0.1")
+        self.assertEquals(d[0][1], client_port)
+        self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \
+                (d[0][2]==db.DB_REPMGR_DISCONNECTED))
+
+        d = self.dbenvClient.repmgr_site_list()
+        self.assertEquals(len(d), 1)
+        self.assertEquals(d[0][0], "127.0.0.1")
+        self.assertEquals(d[0][1], master_port)
+        self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \
+                (d[0][2]==db.DB_REPMGR_DISCONNECTED))
+
+        if db.version() >= (4,6) :
+            d = self.dbenvMaster.repmgr_stat(flags=db.DB_STAT_CLEAR);
+            self.assertTrue("msgs_queued" in d)
+
+        self.dbMaster=db.DB(self.dbenvMaster)
+        txn=self.dbenvMaster.txn_begin()
+        self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0o666, txn=txn)
+        txn.commit()
+
+        import time,os.path
+        timeout=time.time()+10
+        while (time.time()<timeout) and \
+          not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
+            time.sleep(0.01)
+
+        self.dbClient=db.DB(self.dbenvClient)
+        while True :
+            txn=self.dbenvClient.txn_begin()
+            try :
+                self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
+                        mode=0o666, txn=txn)
+            except db.DBRepHandleDeadError :
+                txn.abort()
+                self.dbClient.close()
+                self.dbClient=db.DB(self.dbenvClient)
+                continue
+
+            txn.commit()
+            break
+
+        txn=self.dbenvMaster.txn_begin()
+        self.dbMaster.put("ABC", "123", txn=txn)
+        txn.commit()
+        import time
+        timeout=time.time()+10
+        v=None
+        while (time.time()<timeout) and (v==None) :
+            txn=self.dbenvClient.txn_begin()
+            v=self.dbClient.get("ABC", txn=txn)
+            txn.commit()
+            if v==None :
+                time.sleep(0.02)
+        self.assertTrue(time.time()<timeout)
+        self.assertEquals("123", v)
+
+        txn=self.dbenvMaster.txn_begin()
+        self.dbMaster.delete("ABC", txn=txn)
+        txn.commit()
+        timeout=time.time()+10
+        while (time.time()<timeout) and (v!=None) :
+            txn=self.dbenvClient.txn_begin()
+            v=self.dbClient.get("ABC", txn=txn)
+            txn.commit()
+            if v==None :
+                time.sleep(0.02)
+        self.assertTrue(time.time()<timeout)
+        self.assertEquals(None, v)
+
+class DBBaseReplication(DBReplicationManager):
+    def setUp(self) :
+        DBReplicationManager.setUp(self)
+        def confirmed_master(a,b,c) :
+            if (b == db.DB_EVENT_REP_MASTER) or (b == db.DB_EVENT_REP_ELECTED) :
+                self.confirmed_master = True
+
+        def client_startupdone(a,b,c) :
+            if b == db.DB_EVENT_REP_STARTUPDONE :
+                self.client_startupdone = True
+
+        self.dbenvMaster.set_event_notify(confirmed_master)
+        self.dbenvClient.set_event_notify(client_startupdone)
+
+        import queue
+        self.m2c = queue.Queue()
+        self.c2m = queue.Queue()
+
+        # There are only two nodes, so we don't need to
+        # make any routing decisions
+        def m2c(dbenv, control, rec, lsnp, envid, flags) :
+            self.m2c.put((control, rec))
+
+        def c2m(dbenv, control, rec, lsnp, envid, flags) :
+            self.c2m.put((control, rec))
+
+        self.dbenvMaster.rep_set_transport(13,m2c)
+        self.dbenvMaster.rep_set_priority(10)
+        self.dbenvClient.rep_set_transport(3,c2m)
+        self.dbenvClient.rep_set_priority(0)
+
+        self.assertEquals(self.dbenvMaster.rep_get_priority(),10)
+        self.assertEquals(self.dbenvClient.rep_get_priority(),0)
+
+        #self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
+        #self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
+        #self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
+        #self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
+
+        def thread_master() :
+            return self.thread_do(self.dbenvMaster, self.c2m, 3,
+                    self.master_doing_election, True)
+
+        def thread_client() :
+            return self.thread_do(self.dbenvClient, self.m2c, 13,
+                    self.client_doing_election, False)
+
+        from threading import Thread
+        t_m=Thread(target=thread_master)
+        t_c=Thread(target=thread_client)
+        import sys
+        if sys.version_info[0] < 3 :
+            t_m.setDaemon(True)
+            t_c.setDaemon(True)
+        else :
+            t_m.daemon = True
+            t_c.daemon = True
+
+        self.t_m = t_m
+        self.t_c = t_c
+
+        self.dbMaster = self.dbClient = None
+
+        self.master_doing_election=[False]
+        self.client_doing_election=[False]
+
+
+    def tearDown(self):
+        if self.dbClient :
+            self.dbClient.close()
+        if self.dbMaster :
+            self.dbMaster.close()
+        self.m2c.put(None)
+        self.c2m.put(None)
+        self.t_m.join()
+        self.t_c.join()
+        self.dbenvClient.close()
+        self.dbenvMaster.close()
+        test_support.rmtree(self.homeDirClient)
+        test_support.rmtree(self.homeDirMaster)
+
+    def basic_rep_threading(self) :
+        self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
+        self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
+
+        def thread_do(env, q, envid, election_status, must_be_master) :
+            while True :
+                v=q.get()
+                if v == None : return
+                env.rep_process_message(v[0], v[1], envid)
+
+        self.thread_do = thread_do
+
+        self.t_m.start()
+        self.t_c.start()
+
+    def test01_basic_replication(self) :
+        self.basic_rep_threading()
+
+        # The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
+        # is not generated if the master has no new transactions.
+        # This is solved in BDB 4.6 (#15542).
+        import time
+        timeout = time.time()+10
+        while (time.time()<timeout) and not (self.confirmed_master and
+                self.client_startupdone) :
+            time.sleep(0.02)
+        self.assertTrue(time.time()<timeout)
+
+        self.dbMaster=db.DB(self.dbenvMaster)
+        txn=self.dbenvMaster.txn_begin()
+        self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0o666, txn=txn)
+        txn.commit()
+
+        import time,os.path
+        timeout=time.time()+10
+        while (time.time()<timeout) and \
+          not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
+            time.sleep(0.01)
+
+        self.dbClient=db.DB(self.dbenvClient)
+        while True :
+            txn=self.dbenvClient.txn_begin()
+            try :
+                self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
+                        mode=0o666, txn=txn)
+            except db.DBRepHandleDeadError :
+                txn.abort()
+                self.dbClient.close()
+                self.dbClient=db.DB(self.dbenvClient)
+                continue
+
+            txn.commit()
+            break
+
+        txn=self.dbenvMaster.txn_begin()
+        self.dbMaster.put("ABC", "123", txn=txn)
+        txn.commit()
+        import time
+        timeout=time.time()+10
+        v=None
+        while (time.time()<timeout) and (v==None) :
+            txn=self.dbenvClient.txn_begin()
+            v=self.dbClient.get("ABC", txn=txn)
+            txn.commit()
+            if v==None :
+                time.sleep(0.02)
+        self.assertTrue(time.time()<timeout)
+        self.assertEquals("123", v)
+
+        txn=self.dbenvMaster.txn_begin()
+        self.dbMaster.delete("ABC", txn=txn)
+        txn.commit()
+        timeout=time.time()+10
+        while (time.time()<timeout) and (v!=None) :
+            txn=self.dbenvClient.txn_begin()
+            v=self.dbClient.get("ABC", txn=txn)
+            txn.commit()
+            if v==None :
+                time.sleep(0.02)
+        self.assertTrue(time.time()<timeout)
+        self.assertEquals(None, v)
+
+    if db.version() >= (4,7) :
+        def test02_test_request(self) :
+            self.basic_rep_threading()
+            (minimum, maximum) = self.dbenvClient.rep_get_request()
+            self.dbenvClient.rep_set_request(minimum-1, maximum+1)
+            self.assertEqual(self.dbenvClient.rep_get_request(),
+                    (minimum-1, maximum+1))
+
+    if db.version() >= (4,6) :
+        def test03_master_election(self) :
+            # Get ready to hold an election
+            #self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
+            self.dbenvMaster.rep_start(flags=db.DB_REP_CLIENT)
+            self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
+
+            def thread_do(env, q, envid, election_status, must_be_master) :
+                while True :
+                    v=q.get()
+                    if v == None : return
+                    r = env.rep_process_message(v[0],v[1],envid)
+                    if must_be_master and self.confirmed_master :
+                        self.dbenvMaster.rep_start(flags = db.DB_REP_MASTER)
+                        must_be_master = False
+
+                    if r[0] == db.DB_REP_HOLDELECTION :
+                        def elect() :
+                            while True :
+                                try :
+                                    env.rep_elect(2, 1)
+                                    election_status[0] = False
+                                    break
+                                except db.DBRepUnavailError :
+                                    pass
+                        if not election_status[0] and not self.confirmed_master :
+                            from threading import Thread
+                            election_status[0] = True
+                            t=Thread(target=elect)
+                            import sys
+                            if sys.version_info[0] < 3 :
+                                t.setDaemon(True)
+                            else :
+                                t.daemon = True
+                            t.start()
+
+            self.thread_do = thread_do
+
+            self.t_m.start()
+            self.t_c.start()
+
+            self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
+            self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
+            self.client_doing_election[0] = True
+            while True :
+                try :
+                    self.dbenvClient.rep_elect(2, 1)
+                    self.client_doing_election[0] = False
+                    break
+                except db.DBRepUnavailError :
+                    pass
+
+            self.assertTrue(self.confirmed_master)
+
+#----------------------------------------------------------------------
+
+def test_suite():
+    suite = unittest.TestSuite()
+    if db.version() >= (4, 6) :
+        dbenv = db.DBEnv()
+        try :
+            dbenv.repmgr_get_ack_policy()
+            ReplicationManager_available=True
+        except :
+            ReplicationManager_available=False
+        dbenv.close()
+        del dbenv
+        if ReplicationManager_available :
+            suite.addTest(unittest.makeSuite(DBReplicationManager))
+
+        if have_threads :
+            suite.addTest(unittest.makeSuite(DBBaseReplication))
+
+    return suite
+
+
+if __name__ == '__main__':
+    unittest.main(defaultTest='test_suite')

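For reference, the Base Replication plumbing that DBBaseReplication exercises
boils down to one transport callback per environment plus a thread that feeds
incoming messages to rep_process_message(). The following is only a sketch: it
assumes `env_master` and `env_client` are DBEnv handles already opened with
replication support (that setup is not part of this file), and it reuses the
envids from the test (13 for the master, 3 for the client).

    import queue
    from threading import Thread
    from bsddb import db

    m2c = queue.Queue()   # messages travelling master -> client
    c2m = queue.Queue()   # messages travelling client -> master

    # With only two nodes, each transport callback simply queues
    # (control, rec) for the peer; no routing decision is needed.
    def master_to_client(env, control, rec, lsn, envid, flags):
        m2c.put((control, rec))

    def client_to_master(env, control, rec, lsn, envid, flags):
        c2m.put((control, rec))

    env_master.rep_set_transport(13, master_to_client)
    env_client.rep_set_transport(3, client_to_master)

    env_master.rep_start(flags=db.DB_REP_MASTER)
    env_client.rep_start(flags=db.DB_REP_CLIENT)

    def pump(env, q, envid):
        # Deliver queued replication messages until a None sentinel arrives.
        while True:
            msg = q.get()
            if msg is None:
                return
            env.rep_process_message(msg[0], msg[1], envid)

    for args in ((env_master, c2m, 3), (env_client, m2c, 13)):
        t = Thread(target=pump, args=args)
        t.daemon = True    # t.setDaemon(True) on Python 2.x
        t.start()
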
Modified: python/branches/py3k/Lib/bsddb/test/test_sequence.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_sequence.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_sequence.py	Sun Aug 31 16:12:11 2008
@@ -1,33 +1,19 @@
 import unittest
 import os
-import shutil
-import sys
-import tempfile
-
-try:
-    # For Pythons w/distutils pybsddb
-    from bsddb3 import db
-except ImportError:
-    from bsddb import db
-
-from bsddb.test.test_all import verbose
-try:
-    from bsddb3 import test_support
-except ImportError:
-    from test import support as test_support
+
+from .test_all import db, test_support, get_new_environment_path, get_new_database_path
 
 
 class DBSequenceTest(unittest.TestCase):
+    import sys
+    if sys.version_info[:3] < (2, 4, 0):
+        def assertTrue(self, expr, msg=None):
+            self.failUnless(expr,msg=msg)
+
     def setUp(self):
         self.int_32_max = 0x100000000
-        self.homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
-        try:
-            os.mkdir(self.homeDir)
-        except os.error:
-            pass
-        tempfile.tempdir = self.homeDir
-        self.filename = os.path.split(tempfile.mktemp())[1]
-        tempfile.tempdir = None
+        self.homeDir = get_new_environment_path()
+        self.filename = "test"
 
         self.dbenv = db.DBEnv()
         self.dbenv.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL, 0o666)
@@ -52,39 +38,39 @@
         start_value = 10 * self.int_32_max
         self.assertEqual(0xA00000000, start_value)
         self.assertEquals(None, self.seq.init_value(start_value))
-        self.assertEquals(None, self.seq.open(key=b'id', txn=None, flags=db.DB_CREATE))
+        self.assertEquals(None, self.seq.open(key='id', txn=None, flags=db.DB_CREATE))
         self.assertEquals(start_value, self.seq.get(5))
         self.assertEquals(start_value + 5, self.seq.get())
 
     def test_remove(self):
         self.seq = db.DBSequence(self.d, flags=0)
-        self.assertEquals(None, self.seq.open(key=b'foo', txn=None, flags=db.DB_CREATE))
+        self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
         self.assertEquals(None, self.seq.remove(txn=None, flags=0))
         del self.seq
 
     def test_get_key(self):
         self.seq = db.DBSequence(self.d, flags=0)
-        key = b'foo'
+        key = 'foo'
         self.assertEquals(None, self.seq.open(key=key, txn=None, flags=db.DB_CREATE))
         self.assertEquals(key, self.seq.get_key())
 
     def test_get_dbp(self):
         self.seq = db.DBSequence(self.d, flags=0)
-        self.assertEquals(None, self.seq.open(key=b'foo', txn=None, flags=db.DB_CREATE))
+        self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
         self.assertEquals(self.d, self.seq.get_dbp())
 
     def test_cachesize(self):
         self.seq = db.DBSequence(self.d, flags=0)
         cashe_size = 10
         self.assertEquals(None, self.seq.set_cachesize(cashe_size))
-        self.assertEquals(None, self.seq.open(key=b'foo', txn=None, flags=db.DB_CREATE))
+        self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
         self.assertEquals(cashe_size, self.seq.get_cachesize())
 
     def test_flags(self):
         self.seq = db.DBSequence(self.d, flags=0)
         flag = db.DB_SEQ_WRAP;
         self.assertEquals(None, self.seq.set_flags(flag))
-        self.assertEquals(None, self.seq.open(key=b'foo', txn=None, flags=db.DB_CREATE))
+        self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
         self.assertEquals(flag, self.seq.get_flags() & flag)
 
     def test_range(self):
@@ -92,17 +78,59 @@
         seq_range = (10 * self.int_32_max, 11 * self.int_32_max - 1)
         self.assertEquals(None, self.seq.set_range(seq_range))
         self.seq.init_value(seq_range[0])
-        self.assertEquals(None, self.seq.open(key=b'foo', txn=None, flags=db.DB_CREATE))
+        self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
         self.assertEquals(seq_range, self.seq.get_range())
 
     def test_stat(self):
         self.seq = db.DBSequence(self.d, flags=0)
-        self.assertEquals(None, self.seq.open(key=b'foo', txn=None, flags=db.DB_CREATE))
+        self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
         stat = self.seq.stat()
         for param in ('nowait', 'min', 'max', 'value', 'current',
                       'flags', 'cache_size', 'last_value', 'wait'):
             self.assertTrue(param in stat, "parameter %s isn't in stat info" % param)
 
+    if db.version() >= (4,7) :
+        # This code checks for a crash fixed in Berkeley DB 4.7
+        def test_stat_crash(self) :
+            d=db.DB()
+            d.open(None,dbtype=db.DB_HASH,flags=db.DB_CREATE)  # In RAM
+            seq = db.DBSequence(d, flags=0)
+
+            self.assertRaises(db.DBNotFoundError, seq.open,
+                    key='id', txn=None, flags=0)
+
+            self.assertRaises(db.DBInvalidArgError, seq.stat)
+
+            d.close()
+
+    def test_64bits(self) :
+        # We don't use both extremes because they are problematic
+        value_plus=(1<<63)-2
+        self.assertEquals(9223372036854775806,value_plus)
+        value_minus=(-1<<63)+1  # Two's complement
+        self.assertEquals(-9223372036854775807,value_minus)
+        self.seq = db.DBSequence(self.d, flags=0)
+        self.assertEquals(None, self.seq.init_value(value_plus-1))
+        self.assertEquals(None, self.seq.open(key='id', txn=None,
+            flags=db.DB_CREATE))
+        self.assertEquals(value_plus-1, self.seq.get(1))
+        self.assertEquals(value_plus, self.seq.get(1))
+
+        self.seq.remove(txn=None, flags=0)
+
+        self.seq = db.DBSequence(self.d, flags=0)
+        self.assertEquals(None, self.seq.init_value(value_minus))
+        self.assertEquals(None, self.seq.open(key='id', txn=None,
+            flags=db.DB_CREATE))
+        self.assertEquals(value_minus, self.seq.get(1))
+        self.assertEquals(value_minus+1, self.seq.get(1))
+
+    def test_multiple_close(self):
+        self.seq = db.DBSequence(self.d)
+        self.seq.close()  # You can close a Sequence multiple times
+        self.seq.close()
+        self.seq.close()
+
 def test_suite():
     suite = unittest.TestSuite()
     if db.version() >= (4,3):

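For orientation, the DBSequence calls exercised above fit together as follows.
This is only a sketch, using an in-memory hash database the same way
test_stat_crash does; the key name 'counter' and the range are arbitrary.

    from bsddb import db

    d = db.DB()
    d.open(None, dbtype=db.DB_HASH, flags=db.DB_CREATE)   # in RAM

    seq = db.DBSequence(d, flags=0)
    seq.set_range((1, 1000000))    # bounds, as in test_range
    seq.init_value(1)              # first value to hand out
    seq.open(key='counter', txn=None, flags=db.DB_CREATE)

    assert seq.get(5) == 1         # reserves values 1..5
    assert seq.get() == 6          # the next block starts after them
    assert seq.get_key() == 'counter'

    seq.remove(txn=None, flags=0)  # drop the sequence, then the database
    d.close()
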
Modified: python/branches/py3k/Lib/bsddb/test/test_thread.py
==============================================================================
--- python/branches/py3k/Lib/bsddb/test/test_thread.py	(original)
+++ python/branches/py3k/Lib/bsddb/test/test_thread.py	Sun Aug 31 16:12:11 2008
@@ -5,17 +5,9 @@
 import sys
 import time
 import errno
-import tempfile
-from pprint import pprint
 from random import random
 
-DASH = b'-'
-
-try:
-    from threading import Thread, current_thread
-    have_threads = True
-except ImportError:
-    have_threads = False
+DASH = '-'
 
 try:
     WindowsError
@@ -24,19 +16,16 @@
         pass
 
 import unittest
-from bsddb.test.test_all import verbose
+from .test_all import db, dbutils, test_support, verbose, have_threads, \
+        get_new_environment_path, get_new_database_path
 
-try:
-    # For Pythons w/distutils pybsddb
-    from bsddb3 import db, dbutils
-except ImportError:
-    # For Python 2.3
-    from bsddb import db, dbutils
-
-try:
-    from bsddb3 import test_support
-except ImportError:
-    from test import support as test_support
+if have_threads :
+    from threading import Thread
+    import sys
+    if sys.version_info[0] < 3 :
+        from threading import currentThread
+    else :
+        from threading import current_thread as currentThread
 
 
 #----------------------------------------------------------------------
@@ -47,16 +36,16 @@
     dbsetflags   = 0
     envflags     = 0
 
+    import sys
+    if sys.version_info[:3] < (2, 4, 0):
+        def assertTrue(self, expr, msg=None):
+            self.failUnless(expr,msg=msg)
+
     def setUp(self):
         if verbose:
             dbutils._deadlock_VerboseFile = sys.stdout
 
-        homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
-        self.homeDir = homeDir
-        try:
-            os.mkdir(homeDir)
-        except OSError as e:
-            if e.errno != errno.EEXIST: raise
+        self.homeDir = get_new_environment_path()
         self.env = db.DBEnv()
         self.setEnvOpts()
         self.env.open(self.homeDir, self.envflags | db.DB_CREATE)
@@ -78,33 +67,6 @@
     def makeData(self, key):
         return DASH.join([key] * 5)
 
-    def _writerThread(self, *args, **kwargs):
-        raise RuntimeError("must override this in a subclass")
-
-    def _readerThread(self, *args, **kwargs):
-        raise RuntimeError("must override this in a subclass")
-
-    def writerThread(self, *args, **kwargs):
-        try:
-            self._writerThread(*args, **kwargs)
-        except db.DBLockDeadlockError:
-            if verbose:
-                print(current_thread().name, 'died from', e)
-        else:
-            if verbose:
-                print(current_thread().name, "finished.")
-
-    def readerThread(self, *args, **kwargs):
-        try:
-            self._readerThread(*args, **kwargs)
-        except db.DBLockDeadlockError as e:
-            if verbose:
-                print(current_thread().name, 'died from', e)
-        else:
-            if verbose:
-                print(current_thread().name, "finished.")
-
-
 
 #----------------------------------------------------------------------
 
@@ -121,60 +83,91 @@
             print('\n', '-=' * 30)
             print("Running %s.test01_1WriterMultiReaders..." % \
                   self.__class__.__name__)
-            print('Using:', self.homeDir, self.filename)
 
-        threads = []
-        wt = Thread(target = self.writerThread,
-                    args = (self.d, self.records),
-                    name = 'the writer',
-                    )#verbose = verbose)
-        threads.append(wt)
+        keys=list(range(self.records))
+        import random
+        random.shuffle(keys)
+        records_per_writer=self.records//self.writers
+        readers_per_writer=self.readers//self.writers
+        self.assertEqual(self.records,self.writers*records_per_writer)
+        self.assertEqual(self.readers,self.writers*readers_per_writer)
+        self.assertTrue((records_per_writer%readers_per_writer)==0)
+        readers = []
 
         for x in range(self.readers):
             rt = Thread(target = self.readerThread,
                         args = (self.d, x),
                         name = 'reader %d' % x,
                         )#verbose = verbose)
-            threads.append(rt)
+            import sys
+            if sys.version_info[0] < 3 :
+                rt.setDaemon(True)
+            else :
+                rt.daemon = True
+            readers.append(rt)
 
-        for t in threads:
+        writers=[]
+        for x in range(self.writers):
+            a=keys[records_per_writer*x:records_per_writer*(x+1)]
+            a.sort()  # Generate conflicts
+            b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
+            wt = Thread(target = self.writerThread,
+                        args = (self.d, a, b),
+                        name = 'writer %d' % x,
+                        )#verbose = verbose)
+            writers.append(wt)
+
+        for t in writers:
+            import sys
+            if sys.version_info[0] < 3 :
+                t.setDaemon(True)
+            else :
+                t.daemon = True
             t.start()
-        for t in threads:
+
+        for t in writers:
             t.join()
+        for t in readers:
+            t.join()
+
+    def writerThread(self, d, keys, readers):
+        import sys
+        if sys.version_info[0] < 3 :
+            name = currentThread().getName()
+        else :
+            name = currentThread().name
+
+        if verbose:
+            print("%s: creating %d records" % (name, len(keys)))
+
+        count=len(keys)//len(readers)
+        count2=count
+        for x in keys :
+            key = '%04d' % x
+            dbutils.DeadlockWrap(d.put, key, self.makeData(key),
+                                 max_retries=12)
+            if verbose and x % 100 == 0:
+                print("%s: record %d finished" % (name, x))
 
-    def _writerThread(self, d, howMany):
-        name = current_thread().name
-        start = 0
-        stop = howMany
-        if verbose:
-            print(name+": creating records", start, "-", stop)
-
-        for x in range(start, stop):
-            key = ('%04d' % x).encode("ascii")
-            d.put(key, self.makeData(key))
-            if verbose and x > start and x % 50 == 0:
-                print(name+": records", start, "-", x, "finished")
+            count2-=1
+            if not count2 :
+                readers.pop().start()
+                count2=count
 
         if verbose:
             print("%s: finished creating records" % name)
 
-##         # Each write-cursor will be exclusive, the only one that can update the DB...
-##         if verbose: print "%s: deleting a few records" % name
-##         c = d.cursor(flags = db.DB_WRITECURSOR)
-##         for x in range(10):
-##             key = int(random() * howMany) + start
-##             key = '%04d' % key
-##             if d.has_key(key):
-##                 c.set(key)
-##                 c.delete()
-
-##         c.close()
-
-    def _readerThread(self, d, readerNum):
-        time.sleep(0.01 * readerNum)
-        name = current_thread().name
+        if verbose:
+            print("%s: thread finished" % name)
 
-        for loop in range(5):
+    def readerThread(self, d, readerNum):
+        import sys
+        if sys.version_info[0] < 3 :
+            name = currentThread().getName()
+        else :
+            name = currentThread().name
+
+        for i in range(5) :
             c = d.cursor()
             count = 0
             rec = c.first()
@@ -182,24 +175,26 @@
                 count += 1
                 key, data = rec
                 self.assertEqual(self.makeData(key), data)
-                rec = c.next()
+                rec = next(c)
             if verbose:
-                print(name+": found", count, "records")
+                print("%s: found %d records" % (name, count))
             c.close()
-            time.sleep(0.05)
+
+        if verbose:
+            print("%s: thread finished" % name)
 
 
 class BTreeConcurrentDataStore(ConcurrentDataStoreBase):
     dbtype  = db.DB_BTREE
-    writers = 1
+    writers = 2
     readers = 10
     records = 1000
 
 
 class HashConcurrentDataStore(ConcurrentDataStoreBase):
     dbtype  = db.DB_HASH
-    writers = 1
-    readers = 0
+    writers = 2
+    readers = 10
     records = 1000
 
 
@@ -208,8 +203,8 @@
 class SimpleThreadedBase(BaseThreadedTestCase):
     dbopenflags = db.DB_THREAD
     envflags    = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
-    readers = 5
-    writers = 3
+    readers = 10
+    writers = 2
     records = 1000
 
     def setEnvOpts(self):
@@ -220,87 +215,98 @@
             print('\n', '-=' * 30)
             print("Running %s.test02_SimpleLocks..." % self.__class__.__name__)
 
-        threads = []
-        for x in range(self.writers):
-            wt = Thread(target = self.writerThread,
-                        args = (self.d, self.records, x),
-                        name = 'writer %d' % x,
-                        )#verbose = verbose)
-            threads.append(wt)
+
+        keys=list(range(self.records))
+        import random
+        random.shuffle(keys)
+        records_per_writer=self.records//self.writers
+        readers_per_writer=self.readers//self.writers
+        self.assertEqual(self.records,self.writers*records_per_writer)
+        self.assertEqual(self.readers,self.writers*readers_per_writer)
+        self.assertTrue((records_per_writer%readers_per_writer)==0)
+
+        readers = []
         for x in range(self.readers):
             rt = Thread(target = self.readerThread,
                         args = (self.d, x),
                         name = 'reader %d' % x,
                         )#verbose = verbose)
-            threads.append(rt)
+            import sys
+            if sys.version_info[0] < 3 :
+                rt.setDaemon(True)
+            else :
+                rt.daemon = True
+            readers.append(rt)
+
+        writers = []
+        for x in range(self.writers):
+            a=keys[records_per_writer*x:records_per_writer*(x+1)]
+            a.sort()  # Generate conflicts
+            b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
+            wt = Thread(target = self.writerThread,
+                        args = (self.d, a, b),
+                        name = 'writer %d' % x,
+                        )#verbose = verbose)
+            writers.append(wt)
 
-        for t in threads:
+        for t in writers:
+            import sys
+            if sys.version_info[0] < 3 :
+                t.setDaemon(True)
+            else :
+                t.daemon = True
             t.start()
-        for t in threads:
+
+        for t in writers:
+            t.join()
+        for t in readers:
             t.join()
 
-    def _writerThread(self, d, howMany, writerNum):
-        name = current_thread().name
-        start = howMany * writerNum
-        stop = howMany * (writerNum + 1) - 1
+    def writerThread(self, d, keys, readers):
+        import sys
+        if sys.version_info[0] < 3 :
+            name = currentThread().getName()
+        else :
+            name = currentThread().name
         if verbose:
             print("%s: creating records %d - %d" % (name, start, stop))
 
-        # create a bunch of records
-        for x in range(start, stop):
-            key = ('%04d' % x).encode("ascii")
+        count=len(keys)//len(readers)
+        count2=count
+        for x in keys :
+            key = '%04d' % x
             dbutils.DeadlockWrap(d.put, key, self.makeData(key),
-                                 max_retries=20)
+                                 max_retries=12)
 
             if verbose and x % 100 == 0:
                 print("%s: records %d - %d finished" % (name, start, x))
 
-            # do a bit or reading too
-            if random() <= 0.05:
-                for y in range(start, x):
-                    key = ('%04d' % x).encode("ascii")
-                    data = dbutils.DeadlockWrap(d.get, key, max_retries=20)
-                    self.assertEqual(data, self.makeData(key))
-
-        # flush them
-        try:
-            dbutils.DeadlockWrap(d.sync, max_retries=20)
-        except db.DBIncompleteError as val:
-            if verbose:
-                print("could not complete sync()...")
-
-        # read them back, deleting a few
-        for x in range(start, stop):
-            key = ('%04d' % x).encode("ascii")
-            data = dbutils.DeadlockWrap(d.get, key, max_retries=20)
-            if verbose and x % 100 == 0:
-                print("%s: fetched record (%s, %s)" % (name, key, data))
-            self.assertEqual(data, self.makeData(key))
-            if random() <= 0.10:
-                dbutils.DeadlockWrap(d.delete, key, max_retries=20)
-                if verbose:
-                    print("%s: deleted record %s" % (name, key))
+            count2-=1
+            if not count2 :
+                readers.pop().start()
+                count2=count
 
         if verbose:
             print("%s: thread finished" % name)
 
-    def _readerThread(self, d, readerNum):
-        time.sleep(0.01 * readerNum)
-        name = current_thread().name
-
-        for loop in range(5):
-            c = d.cursor()
-            count = 0
-            rec = dbutils.DeadlockWrap(c.first, max_retries=20)
-            while rec:
-                count += 1
-                key, data = rec
-                self.assertEqual(self.makeData(key), data)
-                rec = dbutils.DeadlockWrap(c.next, max_retries=20)
-            if verbose:
-                print("%s: found %d records" % (name, count))
-            c.close()
-            time.sleep(0.05)
+    def readerThread(self, d, readerNum):
+        import sys
+        if sys.version_info[0] < 3 :
+            name = currentThread().getName()
+        else :
+            name = currentThread().name
+
+        c = d.cursor()
+        count = 0
+        rec = dbutils.DeadlockWrap(c.first, max_retries=10)
+        while rec:
+            count += 1
+            key, data = rec
+            self.assertEqual(self.makeData(key), data)
+            rec = dbutils.DeadlockWrap(c.__next__, max_retries=10)
+        if verbose:
+            print("%s: found %d records" % (name, count))
+        c.close()
 
         if verbose:
             print("%s: thread finished" % name)
@@ -340,120 +346,118 @@
             print("Running %s.test03_ThreadedTransactions..." % \
                   self.__class__.__name__)
 
-        threads = []
-        for x in range(self.writers):
-            wt = Thread(target = self.writerThread,
-                        args = (self.d, self.records, x),
-                        name = 'writer %d' % x,
-                        )#verbose = verbose)
-            threads.append(wt)
+        keys=list(range(self.records))
+        import random
+        random.shuffle(keys)
+        records_per_writer=self.records//self.writers
+        readers_per_writer=self.readers//self.writers
+        self.assertEqual(self.records,self.writers*records_per_writer)
+        self.assertEqual(self.readers,self.writers*readers_per_writer)
+        self.assertTrue((records_per_writer%readers_per_writer)==0)
 
+        readers=[]
         for x in range(self.readers):
             rt = Thread(target = self.readerThread,
                         args = (self.d, x),
                         name = 'reader %d' % x,
                         )#verbose = verbose)
-            threads.append(rt)
+            import sys
+            if sys.version_info[0] < 3 :
+                rt.setDaemon(True)
+            else :
+                rt.daemon = True
+            readers.append(rt)
+
+        writers = []
+        for x in range(self.writers):
+            a=keys[records_per_writer*x:records_per_writer*(x+1)]
+            b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
+            wt = Thread(target = self.writerThread,
+                        args = (self.d, a, b),
+                        name = 'writer %d' % x,
+                        )#verbose = verbose)
+            writers.append(wt)
 
         dt = Thread(target = self.deadlockThread)
+        import sys
+        if sys.version_info[0] < 3 :
+            dt.setDaemon(True)
+        else :
+            dt.daemon = True
         dt.start()
 
-        for t in threads:
+        for t in writers:
+            import sys
+            if sys.version_info[0] < 3 :
+                t.setDaemon(True)
+            else :
+                t.daemon = True
             t.start()
-        for t in threads:
+
+        for t in writers:
+            t.join()
+        for t in readers:
             t.join()
 
         self.doLockDetect = False
         dt.join()
 
-    def doWrite(self, d, name, start, stop):
-        finished = False
-        while not finished:
+    def writerThread(self, d, keys, readers):
+        import sys
+        if sys.version_info[0] < 3 :
+            name = currentThread().getName()
+        else :
+            name = currentThread().name
+
+        count=len(keys)//len(readers)
+        while len(keys):
             try:
                 txn = self.env.txn_begin(None, self.txnFlag)
-                for x in range(start, stop):
-                    key = ('%04d' % x).encode("ascii")
+                keys2=keys[:count]
+                for x in keys2 :
+                    key = '%04d' % x
                     d.put(key, self.makeData(key), txn)
                     if verbose and x % 100 == 0:
                         print("%s: records %d - %d finished" % (name, start, x))
                 txn.commit()
-                finished = True
+                keys=keys[count:]
+                readers.pop().start()
             except (db.DBLockDeadlockError, db.DBLockNotGrantedError) as val:
                 if verbose:
-                    print("%s: Aborting transaction (%s)" % (name, val))
+                    print("%s: Aborting transaction (%s)" % (name, val.args[1]))
                 txn.abort()
-                time.sleep(0.05)
 
-    def _writerThread(self, d, howMany, writerNum):
-        name = current_thread().name
-        start = howMany * writerNum
-        stop = howMany * (writerNum + 1) - 1
         if verbose:
-            print("%s: creating records %d - %d" % (name, start, stop))
-
-        step = 100
-        for x in range(start, stop, step):
-            self.doWrite(d, name, x, min(stop, x+step))
+            print("%s: thread finished" % name)
 
-        if verbose:
-            print("%s: finished creating records" % name)
-        if verbose:
-            print("%s: deleting a few records" % name)
+    def readerThread(self, d, readerNum):
+        import sys
+        if sys.version_info[0] < 3 :
+            name = currentThread().getName()
+        else :
+            name = currentThread().name
 
         finished = False
         while not finished:
             try:
-                recs = []
                 txn = self.env.txn_begin(None, self.txnFlag)
-                for x in range(10):
-                    key = int(random() * howMany) + start
-                    key = ('%04d' % key).encode("ascii")
-                    data = d.get(key, None, txn, db.DB_RMW)
-                    if data is not None:
-                        d.delete(key, txn)
-                        recs.append(key)
+                c = d.cursor(txn)
+                count = 0
+                rec = c.first()
+                while rec:
+                    count += 1
+                    key, data = rec
+                    self.assertEqual(self.makeData(key), data)
+                    rec = next(c)
+                if verbose: print("%s: found %d records" % (name, count))
+                c.close()
                 txn.commit()
                 finished = True
-                if verbose:
-                    print("%s: deleted records %s" % (name, recs))
             except (db.DBLockDeadlockError, db.DBLockNotGrantedError) as val:
                 if verbose:
-                    print("%s: Aborting transaction (%s)" % (name, val))
+                    print("%s: Aborting transaction (%s)" % (name, val.args[1]))
+                c.close()
                 txn.abort()
-                time.sleep(0.05)
-
-        if verbose:
-            print("%s: thread finished" % name)
-
-    def _readerThread(self, d, readerNum):
-        time.sleep(0.01 * readerNum + 0.05)
-        name = current_thread().name
-
-        for loop in range(5):
-            finished = False
-            while not finished:
-                try:
-                    txn = self.env.txn_begin(None, self.txnFlag)
-                    c = d.cursor(txn)
-                    count = 0
-                    rec = c.first()
-                    while rec:
-                        count += 1
-                        key, data = rec
-                        self.assertEqual(self.makeData(key), data)
-                        rec = c.next()
-                    if verbose: print("%s: found %d records" % (name, count))
-                    c.close()
-                    txn.commit()
-                    finished = True
-                except (db.DBLockDeadlockError, db.DBLockNotGrantedError) as val:
-                    if verbose:
-                        print("%s: Aborting transaction (%s)" % (name, val))
-                    c.close()
-                    txn.abort()
-                    time.sleep(0.05)
-
-            time.sleep(0.05)
 
         if verbose:
             print("%s: thread finished" % name)
@@ -461,7 +465,7 @@
     def deadlockThread(self):
         self.doLockDetect = True
         while self.doLockDetect:
-            time.sleep(0.5)
+            time.sleep(0.05)
             try:
                 aborted = self.env.lock_detect(
                     db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
@@ -474,28 +478,28 @@
 
 class BTreeThreadedTransactions(ThreadedTransactionsBase):
     dbtype = db.DB_BTREE
-    writers = 3
-    readers = 5
-    records = 2000
+    writers = 2
+    readers = 10
+    records = 1000
 
 class HashThreadedTransactions(ThreadedTransactionsBase):
     dbtype = db.DB_HASH
-    writers = 1
-    readers = 5
-    records = 2000
+    writers = 2
+    readers = 10
+    records = 1000
 
 class BTreeThreadedNoWaitTransactions(ThreadedTransactionsBase):
     dbtype = db.DB_BTREE
-    writers = 3
-    readers = 5
-    records = 2000
+    writers = 2
+    readers = 10
+    records = 1000
     txnFlag = db.DB_TXN_NOWAIT
 
 class HashThreadedNoWaitTransactions(ThreadedTransactionsBase):
     dbtype = db.DB_HASH
-    writers = 1
-    readers = 5
-    records = 2000
+    writers = 2
+    readers = 10
+    records = 1000
     txnFlag = db.DB_TXN_NOWAIT
 
 

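The rewritten threads above lean on two deadlock-handling idioms:
non-transactional writers funnel every put() through dbutils.DeadlockWrap(),
transactional writers abort and retry the whole transaction, and the
deadlockThread periodically calls lock_detect(). A minimal sketch, assuming
`env` and `d` are an already-opened locking/transactional environment and
database (setup not shown):

    from bsddb import db, dbutils

    # 1) Non-transactional store: retry just the failed operation.
    dbutils.DeadlockWrap(d.put, '0001', '0001-0001-0001-0001-0001',
                         max_retries=12)

    # 2) Transactional store: abort the whole transaction and start over.
    while True:
        txn = env.txn_begin()
        try:
            d.put('0002', '0002-0002-0002-0002-0002', txn)
            txn.commit()
            break
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            txn.abort()

    # 3) Something has to break deadlocks; the tests run this in a loop.
    env.lock_detect(db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
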
Modified: python/branches/py3k/Misc/NEWS
==============================================================================
--- python/branches/py3k/Misc/NEWS	(original)
+++ python/branches/py3k/Misc/NEWS	Sun Aug 31 16:12:11 2008
@@ -63,6 +63,12 @@
 - Issue #3643: Added a few more checks to _testcapi to prevent segfaults by
   exploitation of poor argument checking.
 
+- bsddb code updated to version 4.7.3pre2. This code is the same as the
+  Python 2.6 one, since the intention is to keep a unified 2.x/3.x codebase.
+  The Python code is automatically translated using "2to3". Please do not
+  update this code in Python 3.0 by hand. Update the 2.6 version and then
+  run "2to3".
+
 Tools/Demos
 -----------
 

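Because the same source has to run on 2.x and survive "2to3" for 3.0, the
translated tests above keep repeating one small compatibility idiom for the
threading API. Collected here purely as a sketch (the helper names are made
up):

    import sys
    from threading import Thread
    if sys.version_info[0] < 3:
        from threading import currentThread
    else:
        from threading import current_thread as currentThread

    def start_daemon(target, name=None):
        # Daemonize portably: setDaemon() on 2.x, the attribute on 3.x.
        t = Thread(target=target, name=name)
        if sys.version_info[0] < 3:
            t.setDaemon(True)
        else:
            t.daemon = True
        t.start()
        return t

    def thread_name():
        # getName() on 2.x, the plain attribute on 3.x.
        if sys.version_info[0] < 3:
            return currentThread().getName()
        return currentThread().name
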
Modified: python/branches/py3k/Modules/_bsddb.c
==============================================================================
--- python/branches/py3k/Modules/_bsddb.c	(original)
+++ python/branches/py3k/Modules/_bsddb.c	Sun Aug 31 16:12:11 2008
@@ -36,7 +36,7 @@
 /*
  * Handwritten code to wrap version 3.x of the Berkeley DB library,
  * written to replace a SWIG-generated file.  It has since been updated
- * to compile with BerkeleyDB versions 3.2 through 4.2.
+ * to compile with Berkeley DB versions 3.2 through 4.2.
  *
  * This module was started by Andrew Kuchling to remove the dependency
  * on SWIG in a package by Gregory P. Smith who based his work on a
@@ -48,7 +48,10 @@
  * the DB 3.x API and to build a solid unit test suite.  Robin has
  * since gone onto other projects (wxPython).
  *
- * Gregory P. Smith <greg at krypto.org> is once again the maintainer.
+ * Gregory P. Smith <greg at krypto.org> was once again the maintainer.
+ *
+ * Since January 2008, the new maintainer is Jesus Cea <jcea at jcea.es>.
+ * Jesus Cea licenses this code to PSF under a Contributor Agreement.
  *
  * Use the pybsddb-users at lists.sf.net mailing list for all questions.
  * Things can change faster than the header of this file is updated.  This
@@ -92,7 +95,7 @@
 #include "bsddb.h"
 #undef COMPILING_BSDDB_C
 
-static char *svn_id = "$Id$";
+static char *rcs_id = "$Id$";
 
 /* --------------------------------------------------------------------- */
 /* Various macro definitions */
@@ -101,6 +104,27 @@
 typedef int Py_ssize_t;
 #endif
 
+#if (PY_VERSION_HEX < 0x02060000)  /* really: before python trunk r63675 */
+/* This code now uses PyBytes* API function names instead of PyString*.
+ * These #defines map to their equivalents on earlier Python versions.   */
+#define PyBytes_FromStringAndSize PyString_FromStringAndSize
+#define PyBytes_FromString PyString_FromString
+#define PyBytes_AsStringAndSize PyString_AsStringAndSize
+#define PyBytes_Check PyString_Check
+#define PyBytes_GET_SIZE PyString_GET_SIZE
+#define PyBytes_AS_STRING PyString_AS_STRING
+#endif
+
+#if (PY_VERSION_HEX >= 0x03000000)
+#define NUMBER_Check    PyLong_Check
+#define NUMBER_AsLong   PyLong_AsLong
+#define NUMBER_FromLong PyLong_FromLong
+#else
+#define NUMBER_Check    PyInt_Check
+#define NUMBER_AsLong   PyInt_AsLong
+#define NUMBER_FromLong PyInt_FromLong
+#endif
+
 #ifdef WITH_THREAD
 
 /* These are for when calling Python --> C */
@@ -164,10 +188,8 @@
 static PyObject* DBNoServerError;       /* DB_NOSERVER */
 static PyObject* DBNoServerHomeError;   /* DB_NOSERVER_HOME */
 static PyObject* DBNoServerIDError;     /* DB_NOSERVER_ID */
-#if (DBVER >= 33)
 static PyObject* DBPageNotFoundError;   /* DB_PAGE_NOTFOUND */
 static PyObject* DBSecondaryBadError;   /* DB_SECONDARY_BAD */
-#endif
 
 #if !INCOMPLETE_IS_WARNING
 static PyObject* DBIncompleteError;     /* DB_INCOMPLETE */
@@ -183,6 +205,12 @@
 static PyObject* DBNoSuchFileError;     /* ENOENT */
 static PyObject* DBPermissionsError;    /* EPERM  */
 
+#if (DBVER >= 42)
+static PyObject* DBRepHandleDeadError;  /* DB_REP_HANDLE_DEAD */
+#endif
+
+static PyObject* DBRepUnavailError;     /* DB_REP_UNAVAIL */
+
 #if (DBVER < 43)
 #define	DB_BUFFER_SMALL		ENOMEM
 #endif
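
The two new error objects map DB_REP_HANDLE_DEAD and DB_REP_UNAVAIL to
db.DBRepHandleDeadError and db.DBRepUnavailError. On the Python side they are
handled the way test_replication.py does; a condensed sketch, assuming
`dbenv_client` is an already-opened replication client environment:

    from bsddb import db

    while True:
        txn = dbenv_client.txn_begin()
        handle = db.DB(dbenv_client)
        try:
            handle.open("test", db.DB_HASH, flags=db.DB_RDONLY, txn=txn)
        except db.DBRepHandleDeadError:
            txn.abort()       # the handle went stale after a master change
            handle.close()
            continue          # recreate the handle and try again
        txn.commit()
        break

    try:
        dbenv_client.rep_elect(2, 1)   # two sites, one vote, as in the test
    except db.DBRepUnavailError:
        pass                           # not enough sites reachable; retry later
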
@@ -201,7 +229,24 @@
 #define DEFAULT_CURSOR_SET_RETURNS_NONE         1   /* 0 in pybsddb < 4.2, python < 2.4 */
 
 
-static PyTypeObject DB_Type, DBCursor_Type, DBEnv_Type, DBTxn_Type, DBLock_Type;
+/* See comment in Python 2.6 "object.h" */
+#ifndef staticforward
+#define staticforward static
+#endif
+#ifndef statichere
+#define statichere static
+#endif
+
+staticforward PyTypeObject DB_Type, DBCursor_Type, DBEnv_Type, DBTxn_Type,
+              DBLock_Type;
+#if (DBVER >= 43)
+staticforward PyTypeObject DBSequence_Type;
+#endif
+
+#ifndef Py_TYPE
+/* for compatibility with Python 2.5 and earlier */
+#define Py_TYPE(ob)              (((PyObject*)(ob))->ob_type)
+#endif
 
 #define DBObject_Check(v)           (Py_TYPE(v) == &DB_Type)
 #define DBCursorObject_Check(v)     (Py_TYPE(v) == &DBCursor_Type)
@@ -212,10 +257,77 @@
 #define DBSequenceObject_Check(v)   (Py_TYPE(v) == &DBSequence_Type)
 #endif
 
+#if (DBVER < 46)
+  #define _DBC_close(dbc)           dbc->c_close(dbc)
+  #define _DBC_count(dbc,a,b)       dbc->c_count(dbc,a,b)
+  #define _DBC_del(dbc,a)           dbc->c_del(dbc,a)
+  #define _DBC_dup(dbc,a,b)         dbc->c_dup(dbc,a,b)
+  #define _DBC_get(dbc,a,b,c)       dbc->c_get(dbc,a,b,c)
+  #define _DBC_pget(dbc,a,b,c,d)    dbc->c_pget(dbc,a,b,c,d)
+  #define _DBC_put(dbc,a,b,c)       dbc->c_put(dbc,a,b,c)
+#else
+  #define _DBC_close(dbc)           dbc->close(dbc)
+  #define _DBC_count(dbc,a,b)       dbc->count(dbc,a,b)
+  #define _DBC_del(dbc,a)           dbc->del(dbc,a)
+  #define _DBC_dup(dbc,a,b)         dbc->dup(dbc,a,b)
+  #define _DBC_get(dbc,a,b,c)       dbc->get(dbc,a,b,c)
+  #define _DBC_pget(dbc,a,b,c,d)    dbc->pget(dbc,a,b,c,d)
+  #define _DBC_put(dbc,a,b,c)       dbc->put(dbc,a,b,c)
+#endif
+
 
 /* --------------------------------------------------------------------- */
 /* Utility macros and functions */
 
+#define INSERT_IN_DOUBLE_LINKED_LIST(backlink,object)                   \
+    {                                                                   \
+        object->sibling_next=backlink;                                  \
+        object->sibling_prev_p=&(backlink);                             \
+        backlink=object;                                                \
+        if (object->sibling_next) {                                     \
+          object->sibling_next->sibling_prev_p=&(object->sibling_next); \
+        }                                                               \
+    }
+
+#define EXTRACT_FROM_DOUBLE_LINKED_LIST(object)                          \
+    {                                                                    \
+        if (object->sibling_next) {                                      \
+            object->sibling_next->sibling_prev_p=object->sibling_prev_p; \
+        }                                                                \
+        *(object->sibling_prev_p)=object->sibling_next;                  \
+    }
+
+#define EXTRACT_FROM_DOUBLE_LINKED_LIST_MAYBE_NULL(object)               \
+    {                                                                    \
+        if (object->sibling_next) {                                      \
+            object->sibling_next->sibling_prev_p=object->sibling_prev_p; \
+        }                                                                \
+        if (object->sibling_prev_p) {                                    \
+            *(object->sibling_prev_p)=object->sibling_next;              \
+        }                                                                \
+    }
+
+#define INSERT_IN_DOUBLE_LINKED_LIST_TXN(backlink,object)  \
+    {                                                      \
+        object->sibling_next_txn=backlink;                 \
+        object->sibling_prev_p_txn=&(backlink);            \
+        backlink=object;                                   \
+        if (object->sibling_next_txn) {                    \
+            object->sibling_next_txn->sibling_prev_p_txn=  \
+                &(object->sibling_next_txn);               \
+        }                                                  \
+    }
+
+#define EXTRACT_FROM_DOUBLE_LINKED_LIST_TXN(object)             \
+    {                                                           \
+        if (object->sibling_next_txn) {                         \
+            object->sibling_next_txn->sibling_prev_p_txn=       \
+                object->sibling_prev_p_txn;                     \
+        }                                                       \
+        *(object->sibling_prev_p_txn)=object->sibling_next_txn; \
+    }
+
+
 #define RETURN_IF_ERR()          \
     if (makeDBError(err)) {      \
         return NULL;             \
@@ -227,8 +339,10 @@
     if ((nonNull) == NULL) {          \
         PyObject *errTuple = NULL;    \
         errTuple = Py_BuildValue("(is)", 0, #name " object has been closed"); \
-        PyErr_SetObject((pyErrObj), errTuple);  \
-	Py_DECREF(errTuple);          \
+        if (errTuple) { \
+            PyErr_SetObject((pyErrObj), errTuple);  \
+            Py_DECREF(errTuple);          \
+        } \
         return NULL;                  \
     }
 
@@ -251,6 +365,9 @@
 
 #define CLEAR_DBT(dbt)              (memset(&(dbt), 0, sizeof(dbt)))
 
+#define FREE_DBT(dbt)               if ((dbt.flags & (DB_DBT_MALLOC|DB_DBT_REALLOC)) && \
+                                         dbt.data != NULL) { free(dbt.data); dbt.data = NULL; }
+
 
 static int makeDBError(int err);
 
@@ -258,104 +375,34 @@
 /* Return the access method type of the DBObject */
 static int _DB_get_type(DBObject* self)
 {
-#if (DBVER >= 33)
     DBTYPE type;
     int err;
+
     err = self->db->get_type(self->db, &type);
     if (makeDBError(err)) {
         return -1;
     }
     return type;
-#else
-    return self->db->get_type(self->db);
-#endif
-}
-
-
-/* Handy function to free a DBT and any self-allocated data within.
-   To be used on self created DBTs.  The make_dbt and make_key_dbt
-   functions have their own free routines that do more that this. */
-static void free_dbt(DBT *dbt)
-{
-    if ((dbt->flags & (DB_DBT_MALLOC|DB_DBT_REALLOC)) && dbt->data != NULL) {
-         free(dbt->data);
-         dbt->data = NULL;
-    }
-}
-
-
-/* Cleanup a Python buffer API view created by make_dbt() */
-static void free_buf_view(PyObject *obj, Py_buffer *view)
-{
-    if (view) {
-        PyBuffer_Release(view);
-        PyMem_Free(view);
-    }
-}
-
-
-/* Cleanup a DBT and an associated Python buffer API view
-   created by make_key_dbt() */
-#define FREE_DBT_VIEW(dbt, obj, view)    \
-            do { \
-                free_dbt(&(dbt)); \
-                free_buf_view((obj), (view)); \
-            } while(0);
-
-
-static Py_buffer * _malloc_view(PyObject *obj)
-{
-    Py_buffer *view;
-
-    if (!(view = PyMem_Malloc(sizeof(Py_buffer)))) {
-        PyErr_SetString(PyExc_MemoryError,
-                        "Py_buffer malloc failed");
-        return NULL;
-    }
-
-    if (PyObject_GetBuffer(obj, view, PyBUF_SIMPLE))
-        return NULL;
-
-    if (view->ndim > 1) {
-        PyErr_SetString(PyExc_BufferError,
-                        "buffers must be single dimension");
-        PyBuffer_Release(view);
-        PyMem_Free(view);
-        return NULL;
-    }
-    return view;
 }
 
 
 /* Create a DBT structure (containing key and data values) from Python
-   strings.  Returns >= 1 on success, 0 on an error.  The returned_view_p
-   may be filled with a newly allocated Py_buffer view on success.
-   The caller MUST call free_buf_view() on any returned Py_buffer. */
-static int make_dbt(PyObject* obj, DBT* dbt, Py_buffer** returned_view_p)
-{
-    Py_buffer *view;
-
-    /* simple way to ensure the caller can detect if we've returned a
-       new buffer view or not: require their pointer to start out NULL. */
-    assert(*returned_view_p == NULL);
-
+   strings.  Returns 1 on success, 0 on an error. */
+static int make_dbt(PyObject* obj, DBT* dbt)
+{
     CLEAR_DBT(*dbt);
     if (obj == Py_None) {
         /* no need to do anything, the structure has already been zeroed */
-        return 1;
     }
-    if (!PyObject_CheckBuffer(obj)) {
+    else if (!PyArg_Parse(obj, "s#", &dbt->data, &dbt->size)) {
         PyErr_SetString(PyExc_TypeError,
-                        "Data values must support the buffer API or be None.");
+#if (PY_VERSION_HEX < 0x03000000)
+                        "Data values must be of type string or None.");
+#else
+                        "Data values must be of type bytes or None.");
+#endif
         return 0;
     }
-
-    if ( !(view = _malloc_view(obj)) )
-        return 0;
-
-    dbt->data = view->buf;
-    dbt->size = Py_SAFE_DOWNCAST(view->len, Py_ssize_t, u_int32_t);
-    *returned_view_p = view;
     return 1;
 }
 
@@ -363,19 +410,12 @@
 /* Recno and Queue DBs can have integer keys.  This function figures out
    what's been given, verifies that it's allowed, and then makes the DBT.
 
-   Caller MUST call FREE_DBT_VIEW(keydbt, keyobj, key_view) with all
-   returned DBT and Py_buffer values when done. */
+   Caller MUST call FREE_DBT(key) when done. */
 static int
-make_key_dbt(DBObject* self, PyObject* keyobj, DBT* key, int* pflags,
-             Py_buffer** returned_view_p)
+make_key_dbt(DBObject* self, PyObject* keyobj, DBT* key, int* pflags)
 {
     db_recno_t recno;
     int type;
-    Py_buffer *view;
-
-    /* simple way to ensure the caller can detect if we've returned a
-       new buffer view or not: require their pointer to start out NULL. */
-    assert(*returned_view_p == NULL);
 
     CLEAR_DBT(*key);
     if (keyobj == Py_None) {
@@ -391,73 +431,76 @@
         /* no need to do anything, the structure has already been zeroed */
     }
 
-    else if (PyLong_Check(keyobj)) {
+    else if (PyBytes_Check(keyobj)) {
         /* verify access method type */
         type = _DB_get_type(self);
         if (type == -1)
             return 0;
-        if (type == DB_BTREE && pflags != NULL) {
-            /* if BTREE then an Integer key is allowed with the
-             * DB_SET_RECNO flag */
-            *pflags |= DB_SET_RECNO;
-        }
-        else if (type != DB_RECNO && type != DB_QUEUE) {
+        if (type == DB_RECNO || type == DB_QUEUE) {
             PyErr_SetString(
                 PyExc_TypeError,
-                "Integer keys only allowed for Recno and Queue DB's");
+#if (PY_VERSION_HEX < 0x03000000)
+                "String keys not allowed for Recno and Queue DB's");
+#else
+                "Bytes keys not allowed for Recno and Queue DB's");
+#endif
             return 0;
         }
 
-        /* Make a key out of the requested recno, use allocated space so DB
-         * will be able to realloc room for the real key if needed. */
-        recno = PyLong_AS_LONG(keyobj);
-        key->data = malloc(sizeof(db_recno_t));
+        /*
+         * NOTE(gps): I don't like doing a data copy here, it seems
+         * wasteful.  But without a clean way to tell FREE_DBT if it
+         * should free key->data or not we have to.  Other places in
+         * the code check for DB_THREAD and forcibly set DBT_MALLOC
+         * when we otherwise would leave flags 0 to indicate that.
+         */
+        key->data = malloc(PyBytes_GET_SIZE(keyobj));
         if (key->data == NULL) {
             PyErr_SetString(PyExc_MemoryError, "Key memory allocation failed");
             return 0;
         }
-        key->ulen = key->size = sizeof(db_recno_t);
-        memcpy(key->data, &recno, sizeof(db_recno_t));
+        memcpy(key->data, PyBytes_AS_STRING(keyobj),
+               PyBytes_GET_SIZE(keyobj));
         key->flags = DB_DBT_REALLOC;
+        key->size = PyBytes_GET_SIZE(keyobj);
     }
 
-    else if (PyObject_CheckBuffer(keyobj)) {
+    else if (NUMBER_Check(keyobj)) {
         /* verify access method type */
         type = _DB_get_type(self);
         if (type == -1)
             return 0;
-        if (type == DB_RECNO || type == DB_QUEUE) {
+        if (type == DB_BTREE && pflags != NULL) {
+            /* if BTREE then an Integer key is allowed with the
+             * DB_SET_RECNO flag */
+            *pflags |= DB_SET_RECNO;
+        }
+        else if (type != DB_RECNO && type != DB_QUEUE) {
             PyErr_SetString(
                 PyExc_TypeError,
-                "Non-integer keys not allowed for Recno and Queue DB's");
+                "Integer keys only allowed for Recno and Queue DB's");
             return 0;
         }
 
-        if ( !(view = _malloc_view(keyobj)) )
-            return 0;
-
-        /*
-         * NOTE(gps): I don't like doing a data copy here, it seems
-         * wasteful.  But without a clean way to tell FREE_DBT if it
-         * should free key->data or not we have to.  Other places in
-         * the code check for DB_THREAD and forceably set DBT_MALLOC
-         * when we otherwise would leave flags 0 to indicate that.
-         */
-        key->size = Py_SAFE_DOWNCAST(view->len, Py_ssize_t, u_int32_t);
-        key->data = malloc(key->size);
+        /* Make a key out of the requested recno, use allocated space so DB
+         * will be able to realloc room for the real key if needed. */
+        recno = NUMBER_AsLong(keyobj);
+        key->data = malloc(sizeof(db_recno_t));
         if (key->data == NULL) {
             PyErr_SetString(PyExc_MemoryError, "Key memory allocation failed");
-            key->size = 0;
             return 0;
         }
-        memcpy(key->data, view->buf, key->size);
+        key->ulen = key->size = sizeof(db_recno_t);
+        memcpy(key->data, &recno, sizeof(db_recno_t));
         key->flags = DB_DBT_REALLOC;
-        *returned_view_p = view;
     }
-
     else {
         PyErr_Format(PyExc_TypeError,
-                     "buffer or int object expected for key, %s found",
+#if (PY_VERSION_HEX < 0x03000000)
+                     "String or Integer object expected for key, %s found",
+#else
+                     "Bytes or Integer object expected for key, %s found",
+#endif
                      Py_TYPE(keyobj)->tp_name);
         return 0;
     }
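
From Python, the rules this function enforces come down to: bytes/string keys
for BTREE and HASH databases, integer keys for RECNO and QUEUE (an integer key
on a BTREE read is mapped to a DB_SET_RECNO lookup). A rough sketch with
in-memory databases, mirroring the key types the updated tests use:

    from bsddb import db

    bt = db.DB()
    bt.open(None, dbtype=db.DB_BTREE, flags=db.DB_CREATE)
    bt.put('0001', 'data')       # string key: allowed for BTREE/HASH

    rn = db.DB()
    rn.open(None, dbtype=db.DB_RECNO, flags=db.DB_CREATE)
    rn.put(1, 'first record')    # integer key: allowed for RECNO/QUEUE

    # These raise TypeError, per the messages above:
    #   rn.put('0001', 'oops')   -- string key on a Recno/Queue DB
    #   bt.put(1, 'oops')        -- integer key outside the DB_SET_RECNO case
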
@@ -518,6 +561,102 @@
 }
 
 
+/*
+** We need these functions because some results
+** are undefined if the pointer is NULL, and some
+** others would give None instead of "".
+**
+** These functions are static and will be
+** -I hope- inlined.
+*/
+static const char *DummyString = "This string is a simple placeholder";
+static PyObject *Build_PyString(const char *p,int s)
+{
+  if (!p) {
+    p=DummyString;
+    assert(s==0);
+  }
+  return PyBytes_FromStringAndSize(p,s);
+}
+
+static PyObject *BuildValue_S(const void *p,int s)
+{
+  if (!p) {
+    p=DummyString;
+    assert(s==0);
+  }
+  return PyBytes_FromStringAndSize(p, s);
+}
+
+static PyObject *BuildValue_SS(const void *p1,int s1,const void *p2,int s2)
+{
+PyObject *a, *b, *r;
+
+  if (!p1) {
+    p1=DummyString;
+    assert(s1==0);
+  }
+  if (!p2) {
+    p2=DummyString;
+    assert(s2==0);
+  }
+
+  if (!(a = PyBytes_FromStringAndSize(p1, s1))) {
+      return NULL;
+  }
+  if (!(b = PyBytes_FromStringAndSize(p2, s2))) {
+      Py_DECREF(a);
+      return NULL;
+  }
+
+#if (PY_VERSION_HEX >= 0x02040000)
+  r = PyTuple_Pack(2, a, b) ;
+#else
+  r = Py_BuildValue("OO", a, b);
+#endif
+  Py_DECREF(a);
+  Py_DECREF(b);
+  return r;
+}
+
+static PyObject *BuildValue_IS(int i,const void *p,int s)
+{
+  PyObject *a, *r;
+
+  if (!p) {
+    p=DummyString;
+    assert(s==0);
+  }
+
+  if (!(a = PyBytes_FromStringAndSize(p, s))) {
+      return NULL;
+  }
+
+  r = Py_BuildValue("iO", i, a);
+  Py_DECREF(a);
+  return r;
+}
+
+static PyObject *BuildValue_LS(long l,const void *p,int s)
+{
+  PyObject *a, *r;
+
+  if (!p) {
+    p=DummyString;
+    assert(s==0);
+  }
+
+  if (!(a = PyBytes_FromStringAndSize(p, s))) {
+      return NULL;
+  }
+
+  r = Py_BuildValue("lO", l, a);
+  Py_DECREF(a);
+  return r;
+}
+
+
+
 /* make a nice exception object to raise for errors. */
 static int makeDBError(int err)
 {
@@ -542,7 +681,7 @@
                 strncat(errTxt, _db_errmsg, bytes_left);
             }
             _db_errmsg[0] = 0;
-            exceptionRaised = PyErr_WarnEx(PyExc_RuntimeWarning, errTxt, 1);
+            exceptionRaised = PyErr_Warn(PyExc_RuntimeWarning, errTxt);
 
 #else  /* do an exception instead */
         errObj = DBIncompleteError;
@@ -561,10 +700,8 @@
         case DB_NOSERVER:           errObj = DBNoServerError;       break;
         case DB_NOSERVER_HOME:      errObj = DBNoServerHomeError;   break;
         case DB_NOSERVER_ID:        errObj = DBNoServerIDError;     break;
-#if (DBVER >= 33)
         case DB_PAGE_NOTFOUND:      errObj = DBPageNotFoundError;   break;
         case DB_SECONDARY_BAD:      errObj = DBSecondaryBadError;   break;
-#endif
         case DB_BUFFER_SMALL:       errObj = DBNoMemoryError;       break;
 
 #if (DBVER >= 43)
@@ -580,6 +717,12 @@
         case ENOENT:  errObj = DBNoSuchFileError;   break;
         case EPERM :  errObj = DBPermissionsError;  break;
 
+#if (DBVER >= 42)
+        case DB_REP_HANDLE_DEAD : errObj = DBRepHandleDeadError; break;
+#endif
+
+        case DB_REP_UNAVAIL : errObj = DBRepUnavailError; break;
+
         default:      errObj = DBError;             break;
     }
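
A hedged sketch of how a replication client might react to the two new error mappings, assuming the exception names are exported by the module as usual; 'd' is an open DB handle and reopen_handle() is a hypothetical application helper, not part of the patch:

    try:
        data = d.get(b'key')
    except db.DBRepHandleDeadError:
        d.close()                           # handle died after an election; reopen it
        d = reopen_handle()                 # hypothetical helper
    except db.DBRepUnavailError:
        data = None                         # no master reachable yet; retry later
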
 
@@ -594,9 +737,13 @@
         }
         _db_errmsg[0] = 0;
 
-	errTuple = Py_BuildValue("(is)", err, errTxt);
+        errTuple = Py_BuildValue("(is)", err, errTxt);
+        if (errTuple == NULL) {
+            Py_DECREF(errObj);
+            return !0;
+        }
         PyErr_SetObject(errObj, errTuple);
-	Py_DECREF(errTuple);
+        Py_DECREF(errTuple);
     }
 
     return ((errObj != NULL) || exceptionRaised);
@@ -683,16 +830,11 @@
     flags |= extra_flags;
     CLEAR_DBT(key);
     CLEAR_DBT(data);
-    if (CHECK_DBFLAG(self->mydb, DB_THREAD)) {
-        /* Tell BerkeleyDB to malloc the return value (thread safe) */
-        data.flags = DB_DBT_MALLOC;
-        key.flags = DB_DBT_MALLOC;
-    }
     if (!add_partial_dbt(&data, dlen, doff))
         return NULL;
 
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_get(self->dbc, &key, &data, flags);
+    err = _DBC_get(self->dbc, &key, &data, flags);
     MYDB_END_ALLOW_THREADS;
 
     if ((err == DB_NOTFOUND || err == DB_KEYEMPTY)
@@ -713,21 +855,15 @@
 
         case DB_RECNO:
         case DB_QUEUE:
-            retval = Py_BuildValue("iy#", *((db_recno_t*)key.data),
-                                   data.data, data.size);
+            retval = BuildValue_IS(*((db_recno_t*)key.data), data.data, data.size);
             break;
         case DB_HASH:
         case DB_BTREE:
         default:
-            retval = Py_BuildValue("y#y#", key.data, key.size,
-                                   data.data, data.size);
+            retval = BuildValue_SS(key.data, key.size, data.data, data.size);
             break;
         }
     }
-    if (!err) {
-        free_dbt(&key);
-        free_dbt(&data);
-    }
     return retval;
 }
 
@@ -735,7 +871,7 @@
 /* add an integer to a dictionary using the given name as a key */
 static void _addIntToDict(PyObject* dict, char *name, int value)
 {
-    PyObject* v = PyLong_FromLong((long) value);
+    PyObject* v = NUMBER_FromLong((long) value);
     if (!v || PyDict_SetItemString(dict, name, v))
         PyErr_Clear();
 
@@ -747,12 +883,12 @@
 {
     PyObject* v;
 	/* if the value fits in regular int, use that. */
-#ifdef HAVE_LONG_LONG
+#ifdef PY_LONG_LONG
 	if (sizeof(time_t) > sizeof(long))
 		v = PyLong_FromLongLong((PY_LONG_LONG) value);
 	else
 #endif
-		v = PyLong_FromLong((long) value);
+		v = NUMBER_FromLong((long) value);
     if (!v || PyDict_SetItemString(dict, name, v))
         PyErr_Clear();
 
@@ -771,7 +907,14 @@
 }
 #endif
 
+static void _addDB_lsnToDict(PyObject* dict, char *name, DB_LSN value)
+{
+    PyObject *v = Py_BuildValue("(ll)",value.file,value.offset);
+    if (!v || PyDict_SetItemString(dict, name, v))
+        PyErr_Clear();
 
+    Py_XDECREF(v);
+}
 
 /* --------------------------------------------------------------------- */
 /* Allocators and deallocators */
@@ -791,11 +934,16 @@
     self->flags = 0;
     self->setflags = 0;
     self->myenvobj = NULL;
-#if (DBVER >= 33)
+    self->db = NULL;
+    self->children_cursors = NULL;
+#if (DBVER >=43)
+    self->children_sequences = NULL;
+#endif
     self->associateCallback = NULL;
     self->btCompareCallback = NULL;
     self->primaryDBType = 0;
-#endif
+    Py_INCREF(Py_None);
+    self->private_obj = Py_None;
     self->in_weakreflist = NULL;
 
     /* keep a reference to our python DBEnv object */
@@ -803,7 +951,14 @@
         Py_INCREF(arg);
         self->myenvobj = arg;
         db_env = arg->db_env;
+        INSERT_IN_DOUBLE_LINKED_LIST(self->myenvobj->children_dbs,self);
+    } else {
+      self->sibling_prev_p=NULL;
+      self->sibling_next=NULL;
     }
+    self->txn=NULL;
+    self->sibling_prev_p_txn=NULL;
+    self->sibling_next_txn=NULL;
 
     if (self->myenvobj)
         self->moduleFlags = self->myenvobj->moduleFlags;
@@ -815,9 +970,7 @@
     err = db_create(&self->db, db_env, flags);
     if (self->db != NULL) {
         self->db->set_errcall(self->db, _db_errorCallback);
-#if (DBVER >= 33)
         self->db->app_private = (void*)self;
-#endif
     }
     MYDB_END_ALLOW_THREADS;
     /* TODO add a weakref(self) to the self->myenvobj->open_child_weakrefs
@@ -828,32 +981,24 @@
             Py_DECREF(self->myenvobj);
             self->myenvobj = NULL;
         }
-        PyObject_Del(self);
+        Py_DECREF(self);
         self = NULL;
     }
     return self;
 }
 
 
+/* Forward declaration */
+static PyObject *DB_close_internal(DBObject* self, int flags);
+
 static void
 DB_dealloc(DBObject* self)
 {
+  PyObject *dummy;
+
     if (self->db != NULL) {
-        /* avoid closing a DB when its DBEnv has been closed out from under
-         * it */
-        if (!self->myenvobj ||
-            (self->myenvobj && self->myenvobj->db_env))
-        {
-            MYDB_BEGIN_ALLOW_THREADS;
-            self->db->close(self->db, 0);
-            MYDB_END_ALLOW_THREADS;
-        } else {
-            PyErr_WarnEx(PyExc_RuntimeWarning,
-			 "DB could not be closed in destructor:"
-			 " DBEnv already closed",
-			 1);
-        }
-        self->db = NULL;
+      dummy=DB_close_internal(self,0);
+      Py_XDECREF(dummy);
     }
     if (self->in_weakreflist != NULL) {
         PyObject_ClearWeakRefs((PyObject *) self);
@@ -862,7 +1007,6 @@
         Py_DECREF(self->myenvobj);
         self->myenvobj = NULL;
     }
-#if (DBVER >= 33)
     if (self->associateCallback != NULL) {
         Py_DECREF(self->associateCallback);
         self->associateCallback = NULL;
@@ -871,13 +1015,12 @@
         Py_DECREF(self->btCompareCallback);
         self->btCompareCallback = NULL;
     }
-#endif
+    Py_DECREF(self->private_obj);
     PyObject_Del(self);
 }
 
-
 static DBCursorObject*
-newDBCursorObject(DBC* dbc, DBObject* db)
+newDBCursorObject(DBC* dbc, DBTxnObject *txn, DBObject* db)
 {
     DBCursorObject* self = PyObject_New(DBCursorObject, &DBCursor_Type);
     if (self == NULL)
@@ -885,40 +1028,37 @@
 
     self->dbc = dbc;
     self->mydb = db;
+
+    INSERT_IN_DOUBLE_LINKED_LIST(self->mydb->children_cursors,self);
+    if (txn && ((PyObject *)txn!=Py_None)) {
+	    INSERT_IN_DOUBLE_LINKED_LIST_TXN(txn->children_cursors,self);
+	    self->txn=txn;
+    } else {
+	    self->txn=NULL;
+    }
+
     self->in_weakreflist = NULL;
     Py_INCREF(self->mydb);
     return self;
 }
 
 
+/* Forward declaration */
+static PyObject *DBC_close_internal(DBCursorObject* self);
+
 static void
 DBCursor_dealloc(DBCursorObject* self)
 {
-    int err;
+    PyObject *dummy;
 
+    if (self->dbc != NULL) {
+      dummy=DBC_close_internal(self);
+      Py_XDECREF(dummy);
+    }
     if (self->in_weakreflist != NULL) {
         PyObject_ClearWeakRefs((PyObject *) self);
     }
-
-    if (self->dbc != NULL) {
-	/* If the underlying database has been closed, we don't
-	   need to do anything. If the environment has been closed
-	   we need to leak, as BerkeleyDB will crash trying to access
-	   the environment. There was an exception when the 
-	   user closed the environment even though there still was
-	   a database open. */
-	if (self->mydb->db && self->mydb->myenvobj &&
-	    !self->mydb->myenvobj->closed)
-        /* test for: open db + no environment or non-closed environment */
-	if (self->mydb->db && (!self->mydb->myenvobj || (self->mydb->myenvobj &&
-	    !self->mydb->myenvobj->closed))) {
-            MYDB_BEGIN_ALLOW_THREADS;
-            err = self->dbc->c_close(self->dbc);
-            MYDB_END_ALLOW_THREADS;
-        }
-        self->dbc = NULL;
-    }
-    Py_XDECREF( self->mydb );
+    Py_DECREF(self->mydb);
     PyObject_Del(self);
 }
 
@@ -935,88 +1075,134 @@
     self->flags = flags;
     self->moduleFlags.getReturnsNone = DEFAULT_GET_RETURNS_NONE;
     self->moduleFlags.cursorSetReturnsNone = DEFAULT_CURSOR_SET_RETURNS_NONE;
+    self->children_dbs = NULL;
+    self->children_txns = NULL;
+    Py_INCREF(Py_None);
+    self->private_obj = Py_None;
+    Py_INCREF(Py_None);
+    self->rep_transport = Py_None;
     self->in_weakreflist = NULL;
+    self->event_notifyCallback = NULL;
 
     MYDB_BEGIN_ALLOW_THREADS;
     err = db_env_create(&self->db_env, flags);
     MYDB_END_ALLOW_THREADS;
     if (makeDBError(err)) {
-        PyObject_Del(self);
+        Py_DECREF(self);
         self = NULL;
     }
     else {
         self->db_env->set_errcall(self->db_env, _db_errorCallback);
+        self->db_env->app_private = self;
     }
     return self;
 }
 
+/* Forward declaration */
+static PyObject *DBEnv_close_internal(DBEnvObject* self, int flags);
 
 static void
 DBEnv_dealloc(DBEnvObject* self)
 {
-    if (self->in_weakreflist != NULL) {
-        PyObject_ClearWeakRefs((PyObject *) self);
-    }
+  PyObject *dummy;
 
     if (self->db_env && !self->closed) {
-        MYDB_BEGIN_ALLOW_THREADS;
-        self->db_env->close(self->db_env, 0);
-        MYDB_END_ALLOW_THREADS;
+      dummy=DBEnv_close_internal(self,0);
+      Py_XDECREF(dummy);
+    }
+
+    Py_XDECREF(self->event_notifyCallback);
+    self->event_notifyCallback = NULL;
+
+    if (self->in_weakreflist != NULL) {
+        PyObject_ClearWeakRefs((PyObject *) self);
     }
+    Py_DECREF(self->private_obj);
+    Py_DECREF(self->rep_transport);
     PyObject_Del(self);
 }
 
 
 static DBTxnObject*
-newDBTxnObject(DBEnvObject* myenv, DB_TXN *parent, int flags)
+newDBTxnObject(DBEnvObject* myenv, DBTxnObject *parent, DB_TXN *txn, int flags)
 {
     int err;
+    DB_TXN *parent_txn = NULL;
+
     DBTxnObject* self = PyObject_New(DBTxnObject, &DBTxn_Type);
     if (self == NULL)
         return NULL;
-    Py_INCREF(myenv);
-    self->env = (PyObject*)myenv;
+
     self->in_weakreflist = NULL;
+    self->children_txns = NULL;
+    self->children_dbs = NULL;
+    self->children_cursors = NULL;
+    self->children_sequences = NULL;
+    self->flag_prepare = 0;
+    self->parent_txn = NULL;
+    self->env = NULL;
 
-    MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
-    err = myenv->db_env->txn_begin(myenv->db_env, parent, &(self->txn), flags);
-#else
-    err = txn_begin(myenv->db_env, parent, &(self->txn), flags);
-#endif
-    MYDB_END_ALLOW_THREADS;
-    if (makeDBError(err)) {
-        Py_DECREF(self->env);
-        PyObject_Del(self);
-        self = NULL;
+    if (parent && ((PyObject *)parent!=Py_None)) {
+        parent_txn = parent->txn;
     }
+
+    if (txn) {
+        self->txn = txn;
+    } else {
+        MYDB_BEGIN_ALLOW_THREADS;
+        err = myenv->db_env->txn_begin(myenv->db_env, parent_txn, &(self->txn), flags);
+        MYDB_END_ALLOW_THREADS;
+
+        if (makeDBError(err)) {
+            Py_DECREF(self);
+            return NULL;
+        }
+    }
+
+    /* Can't use 'parent' because it could be 'parent==Py_None' */
+    if (parent_txn) {
+        self->parent_txn = parent;
+        Py_INCREF(parent);
+        self->env = NULL;
+        INSERT_IN_DOUBLE_LINKED_LIST(parent->children_txns, self);
+    } else {
+        self->parent_txn = NULL;
+        Py_INCREF(myenv);
+        self->env = myenv;
+        INSERT_IN_DOUBLE_LINKED_LIST(myenv->children_txns, self);
+    }
+
     return self;
 }
 
+/* Forward declaration */
+static PyObject *
+DBTxn_abort_discard_internal(DBTxnObject* self, int discard);
 
 static void
 DBTxn_dealloc(DBTxnObject* self)
 {
+  PyObject *dummy;
+
+    if (self->txn) {
+        int flag_prepare = self->flag_prepare;
+        dummy=DBTxn_abort_discard_internal(self,0);
+        Py_XDECREF(dummy);
+        if (!flag_prepare) {
+            PyErr_Warn(PyExc_RuntimeWarning,
+              "DBTxn aborted in destructor.  No prior commit() or abort().");
+        }
+    }
+
     if (self->in_weakreflist != NULL) {
         PyObject_ClearWeakRefs((PyObject *) self);
     }
 
-    if (self->txn) {
-        /* it hasn't been finalized, abort it! */
-        MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
-        self->txn->abort(self->txn);
-#else
-        txn_abort(self->txn);
-#endif
-        MYDB_END_ALLOW_THREADS;
-        PyErr_WarnEx(PyExc_RuntimeWarning,
-		     "DBTxn aborted in destructor. "
-		     " No prior commit() or abort().",
-		     1);
+    if (self->env) {
+        Py_DECREF(self->env);
+    } else {
+        Py_DECREF(self->parent_txn);
     }
-
-    Py_DECREF(self->env);
     PyObject_Del(self);
 }
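
Since the destructor now aborts a still-live transaction and warns about it, the well-behaved Python-level pattern is to resolve every transaction explicitly. A short sketch, assuming 'env' is an open DBEnv and 'd' a DB opened in it:

    txn = env.txn_begin()
    try:
        d.put(b'key', b'value', txn=txn)
    except:
        txn.abort()
        raise
    else:
        txn.commit()    # otherwise the dealloc above aborts it and
                        # emits the RuntimeWarning
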
 
@@ -1032,15 +1218,11 @@
     self->in_weakreflist = NULL;
 
     MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
     err = myenv->db_env->lock_get(myenv->db_env, locker, flags, obj, lock_mode,
                                   &self->lock);
-#else
-    err = lock_get(myenv->db_env, locker, flags, obj, lock_mode, &self->lock);
-#endif
     MYDB_END_ALLOW_THREADS;
     if (makeDBError(err)) {
-        PyObject_Del(self);
+        Py_DECREF(self);
         self = NULL;
     }
 
@@ -1070,25 +1252,37 @@
         return NULL;
     Py_INCREF(mydb);
     self->mydb = mydb;
-    self->in_weakreflist = NULL;
 
+    INSERT_IN_DOUBLE_LINKED_LIST(self->mydb->children_sequences,self);
+    self->txn = NULL;
+
+    self->in_weakreflist = NULL;
 
     MYDB_BEGIN_ALLOW_THREADS;
     err = db_sequence_create(&self->sequence, self->mydb->db, flags);
     MYDB_END_ALLOW_THREADS;
     if (makeDBError(err)) {
-        Py_DECREF(self->mydb);
-        PyObject_Del(self);
+        Py_DECREF(self);
         self = NULL;
     }
 
     return self;
 }
 
+/* Forward declaration */
+static PyObject
+*DBSequence_close_internal(DBSequenceObject* self, int flags, int do_not_close);
 
 static void
 DBSequence_dealloc(DBSequenceObject* self)
 {
+    PyObject *dummy;
+
+    if (self->sequence != NULL) {
+        dummy=DBSequence_close_internal(self,0,0);
+        Py_XDECREF(dummy);
+    }
+
     if (self->in_weakreflist != NULL) {
         PyObject_ClearWeakRefs((PyObject *) self);
     }
@@ -1102,16 +1296,17 @@
 /* DB methods */
 
 static PyObject*
-DB_append(DBObject* self, PyObject* args)
+DB_append(DBObject* self, PyObject* args, PyObject* kwargs)
 {
     PyObject* txnobj = NULL;
     PyObject* dataobj;
-    Py_buffer* data_buf_view = NULL;
     db_recno_t recno;
     DBT key, data;
     DB_TXN *txn = NULL;
+    static char* kwnames[] = { "data", "txn", NULL };
 
-    if (!PyArg_UnpackTuple(args, "append", 1, 2, &dataobj, &txnobj))
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O:append", kwnames,
+                                     &dataobj, &txnobj))
         return NULL;
 
     CHECK_DB_NOT_CLOSED(self);
@@ -1124,21 +1319,16 @@
     key.ulen = key.size;
     key.flags = DB_DBT_USERMEM;
 
+    if (!make_dbt(dataobj, &data)) return NULL;
     if (!checkTxnObj(txnobj, &txn)) return NULL;
-    if (!make_dbt(dataobj, &data, &data_buf_view)) return NULL;
 
-    if (-1 == _DB_put(self, txn, &key, &data, DB_APPEND)) {
-        free_buf_view(dataobj, data_buf_view);
+    if (-1 == _DB_put(self, txn, &key, &data, DB_APPEND))
         return NULL;
-    }
 
-    free_buf_view(dataobj, data_buf_view);
-    return PyLong_FromLong(recno);
+    return NUMBER_FromLong(recno);
 }
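
DB.append() now goes through PyArg_ParseTupleAndKeywords, so the optional transaction can be passed by name. A usage sketch for a RECNO or QUEUE database 'd' and a transaction 'txn', both assumed from context:

    recno = d.append(b'first record')            # returns the new record number
    recno = d.append(b'second record', txn=txn)  # keyword form now accepted
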
 
 
-#if (DBVER >= 33)
-
 static int
 _db_associateCallback(DB* db, const DBT* priKey, const DBT* priData,
                       DBT* secKey)
@@ -1155,11 +1345,9 @@
         MYDB_BEGIN_BLOCK_THREADS;
 
         if (type == DB_RECNO || type == DB_QUEUE)
-            args = Py_BuildValue("(ly#)", *((db_recno_t*)priKey->data),
-                                 priData->data, priData->size);
+            args = BuildValue_LS(*((db_recno_t*)priKey->data), priData->data, priData->size);
         else
-            args = Py_BuildValue("(y#y#)", priKey->data, priKey->size,
-                                 priData->data, priData->size);
+            args = BuildValue_SS(priKey->data, priKey->size, priData->data, priData->size);
         if (args != NULL) {
                 result = PyEval_CallObject(callback, args);
         }
@@ -1169,19 +1357,15 @@
         else if (result == Py_None) {
             retval = DB_DONOTINDEX;
         }
-        else if (PyLong_Check(result)) {
-            retval = PyLong_AsLong(result);
+        else if (NUMBER_Check(result)) {
+            retval = NUMBER_AsLong(result);
         }
-        else if (PyByteArray_Check(result) || PyBytes_Check(result)) {
+        else if (PyBytes_Check(result)) {
             char* data;
             Py_ssize_t size;
 
             CLEAR_DBT(*secKey);
-            size = Py_SIZE(result);
-            if (PyByteArray_Check(result))
-                data = PyByteArray_AS_STRING(result);
-            else
-                data = PyBytes_AS_STRING(result);
+            PyBytes_AsStringAndSize(result, &data, &size);
             secKey->flags = DB_DBT_APPMALLOC;   /* DB will free */
             secKey->data = malloc(size);        /* TODO, check this */
 	    if (secKey->data) {
@@ -1198,7 +1382,7 @@
         else {
             PyErr_SetString(
                PyExc_TypeError,
-               "DB associate callback should return DB_DONOTINDEX or bytes.");
+               "DB associate callback should return DB_DONOTINDEX or string.");
             PyErr_Print();
         }
 
@@ -1300,25 +1484,51 @@
 }
 
 
-#endif
-
-
 static PyObject*
-DB_close(DBObject* self, PyObject* args)
+DB_close_internal(DBObject* self, int flags)
 {
-    int err, flags=0;
-    if (!PyArg_ParseTuple(args,"|i:close", &flags))
-        return NULL;
+    PyObject *dummy;
+    int err;
+
     if (self->db != NULL) {
-        if (self->myenvobj)
-            CHECK_ENV_NOT_CLOSED(self->myenvobj);
-        err = self->db->close(self->db, flags);
-        self->db = NULL;
-        RETURN_IF_ERR();
+        /* Can be NULL if db is not in an environment */
+        EXTRACT_FROM_DOUBLE_LINKED_LIST_MAYBE_NULL(self);
+
+        if (self->txn) {
+            EXTRACT_FROM_DOUBLE_LINKED_LIST_TXN(self);
+            self->txn=NULL;
+        }
+
+        while(self->children_cursors) {
+          dummy=DBC_close_internal(self->children_cursors);
+          Py_XDECREF(dummy);
+        }
+
+#if (DBVER >= 43)
+        while(self->children_sequences) {
+            dummy=DBSequence_close_internal(self->children_sequences,0,0);
+            Py_XDECREF(dummy);
+        }
+#endif
+
+        MYDB_BEGIN_ALLOW_THREADS;
+        err = self->db->close(self->db, flags);
+        MYDB_END_ALLOW_THREADS;
+        self->db = NULL;
+        RETURN_IF_ERR();
     }
     RETURN_NONE();
 }
 
+static PyObject*
+DB_close(DBObject* self, PyObject* args)
+{
+    int flags=0;
+    if (!PyArg_ParseTuple(args,"|i:close", &flags))
+        return NULL;
+    return DB_close_internal(self,flags);
+}
+
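
A sketch of the Python-level consequence, assuming 'd' is an open DB: close() now walks the handle's child cursors (and, with DB 4.3+, its sequences) before calling DB->close(), instead of relying on the caller to close them first:

    c = d.cursor()
    d.close()          # also closes 'c'; no dangling DBC is left behind
    # any later use of 'c' raises an exception instead of crashing
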
 
 static PyObject*
 _DB_consume(DBObject* self, PyObject* args, PyObject* kwargs, int consume_flag)
@@ -1349,7 +1559,7 @@
     CLEAR_DBT(key);
     CLEAR_DBT(data);
     if (CHECK_DBFLAG(self, DB_THREAD)) {
-        /* Tell BerkeleyDB to malloc the return value (thread safe) */
+        /* Tell Berkeley DB to malloc the return value (thread safe) */
         data.flags = DB_DBT_MALLOC;
         key.flags = DB_DBT_MALLOC;
     }
@@ -1365,10 +1575,9 @@
         retval = Py_None;
     }
     else if (!err) {
-        retval = Py_BuildValue("y#y#", key.data, key.size, data.data,
-                               data.size);
-        free_dbt(&key);
-        free_dbt(&data);
+        retval = BuildValue_SS(key.data, key.size, data.data, data.size);
+        FREE_DBT(key);
+        FREE_DBT(data);
     }
 
     RETURN_IF_ERR();
@@ -1409,7 +1618,7 @@
     err = self->db->cursor(self->db, txn, &dbc, flags);
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
-    return (PyObject*) newDBCursorObject(dbc, self);
+    return (PyObject*) newDBCursorObject(dbc, (DBTxnObject *)txnobj, self);
 }
 
 
@@ -1419,7 +1628,6 @@
     PyObject* txnobj = NULL;
     int flags = 0;
     PyObject* keyobj;
-    Py_buffer* key_buf_view = NULL;
     DBT key;
     DB_TXN *txn = NULL;
     static char* kwnames[] = { "key", "txn", "flags", NULL };
@@ -1428,37 +1636,35 @@
                                      &keyobj, &txnobj, &flags))
         return NULL;
     CHECK_DB_NOT_CLOSED(self);
-    if (!make_key_dbt(self, keyobj, &key, NULL, &key_buf_view))
+    if (!make_key_dbt(self, keyobj, &key, NULL))
         return NULL;
     if (!checkTxnObj(txnobj, &txn)) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
+        FREE_DBT(key);
         return NULL;
     }
 
     if (-1 == _DB_delete(self, txn, &key, 0)) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
+        FREE_DBT(key);
         return NULL;
     }
 
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
+    FREE_DBT(key);
     RETURN_NONE();
 }
 
 
 static PyObject*
-DB_fd(DBObject* self, PyObject* args)
+DB_fd(DBObject* self)
 {
     int err, the_fd;
 
-    if (!PyArg_ParseTuple(args,":fd"))
-        return NULL;
     CHECK_DB_NOT_CLOSED(self);
 
     MYDB_BEGIN_ALLOW_THREADS;
     err = self->db->fd(self->db, &the_fd);
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
-    return PyLong_FromLong(the_fd);
+    return NUMBER_FromLong(the_fd);
 }
 
 
@@ -1470,7 +1676,6 @@
     PyObject* keyobj;
     PyObject* dfltobj = NULL;
     PyObject* retval = NULL;
-    Py_buffer* key_buf_view = NULL;
     int dlen = -1;
     int doff = -1;
     DBT key, data;
@@ -1484,20 +1689,20 @@
         return NULL;
 
     CHECK_DB_NOT_CLOSED(self);
-    if (!make_key_dbt(self, keyobj, &key, &flags, &key_buf_view))
+    if (!make_key_dbt(self, keyobj, &key, &flags))
         return NULL;
     if (!checkTxnObj(txnobj, &txn)) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
+        FREE_DBT(key);
         return NULL;
     }
 
     CLEAR_DBT(data);
     if (CHECK_DBFLAG(self, DB_THREAD)) {
-        /* Tell BerkeleyDB to malloc the return value (thread safe) */
+        /* Tell Berkeley DB to malloc the return value (thread safe) */
         data.flags = DB_DBT_MALLOC;
     }
     if (!add_partial_dbt(&data, dlen, doff)) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
+        FREE_DBT(key);
         return NULL;
     }
 
@@ -1518,19 +1723,17 @@
     }
     else if (!err) {
         if (flags & DB_SET_RECNO) /* return both key and data */
-            retval = Py_BuildValue("y#y#", key.data, key.size, data.data,
-                                   data.size);
+            retval = BuildValue_SS(key.data, key.size, data.data, data.size);
         else /* return just the data */
-            retval = PyBytes_FromStringAndSize((char*)data.data, data.size);
-        free_dbt(&data);
+            retval = Build_PyString(data.data, data.size);
+        FREE_DBT(data);
     }
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
+    FREE_DBT(key);
 
     RETURN_IF_ERR();
     return retval;
 }
 
-#if (DBVER >= 33)
 static PyObject*
 DB_pget(DBObject* self, PyObject* args, PyObject* kwargs)
 {
@@ -1539,7 +1742,6 @@
     PyObject* keyobj;
     PyObject* dfltobj = NULL;
     PyObject* retval = NULL;
-    Py_buffer* key_buf_view = NULL;
     int dlen = -1;
     int doff = -1;
     DBT key, pkey, data;
@@ -1553,26 +1755,26 @@
         return NULL;
 
     CHECK_DB_NOT_CLOSED(self);
-    if (!make_key_dbt(self, keyobj, &key, &flags, &key_buf_view))
+    if (!make_key_dbt(self, keyobj, &key, &flags))
         return NULL;
     if (!checkTxnObj(txnobj, &txn)) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
+        FREE_DBT(key);
         return NULL;
     }
 
     CLEAR_DBT(data);
     if (CHECK_DBFLAG(self, DB_THREAD)) {
-        /* Tell BerkeleyDB to malloc the return value (thread safe) */
+        /* Tell Berkeley DB to malloc the return value (thread safe) */
         data.flags = DB_DBT_MALLOC;
     }
     if (!add_partial_dbt(&data, dlen, doff)) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
+        FREE_DBT(key);
         return NULL;
     }
 
     CLEAR_DBT(pkey);
     pkey.flags = DB_DBT_MALLOC;
-    
+
     MYDB_BEGIN_ALLOW_THREADS;
     err = self->db->pget(self->db, txn, &key, &pkey, &data, flags);
     MYDB_END_ALLOW_THREADS;
@@ -1591,22 +1793,22 @@
     else if (!err) {
         PyObject *pkeyObj;
         PyObject *dataObj;
-        dataObj = PyBytes_FromStringAndSize(data.data, data.size);
+        dataObj = Build_PyString(data.data, data.size);
 
         if (self->primaryDBType == DB_RECNO ||
             self->primaryDBType == DB_QUEUE)
-            pkeyObj = PyLong_FromLong(*(int *)pkey.data);
+            pkeyObj = NUMBER_FromLong(*(int *)pkey.data);
         else
-            pkeyObj = PyBytes_FromStringAndSize(pkey.data, pkey.size);
+            pkeyObj = Build_PyString(pkey.data, pkey.size);
 
         if (flags & DB_SET_RECNO) /* return key , pkey and data */
         {
             PyObject *keyObj;
             int type = _DB_get_type(self);
             if (type == DB_RECNO || type == DB_QUEUE)
-                keyObj = PyLong_FromLong(*(int *)key.data);
+                keyObj = NUMBER_FromLong(*(int *)key.data);
             else
-                keyObj = PyBytes_FromStringAndSize(key.data, key.size);
+                keyObj = Build_PyString(key.data, key.size);
 #if (PY_VERSION_HEX >= 0x02040000)
             retval = PyTuple_Pack(3, keyObj, pkeyObj, dataObj);
 #else
@@ -1624,15 +1826,14 @@
         }
         Py_DECREF(dataObj);
         Py_DECREF(pkeyObj);
-	free_dbt(&pkey);
-        free_dbt(&data);
+        FREE_DBT(pkey);
+        FREE_DBT(data);
     }
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
+    FREE_DBT(key);
 
     RETURN_IF_ERR();
     return retval;
 }
-#endif
 
 
 /* Return size of entry */
@@ -1643,7 +1844,6 @@
     PyObject* txnobj = NULL;
     PyObject* keyobj;
     PyObject* retval = NULL;
-    Py_buffer* key_buf_view = NULL;
     DBT key, data;
     DB_TXN *txn = NULL;
     static char* kwnames[] = { "key", "txn", NULL };
@@ -1652,10 +1852,10 @@
                                      &keyobj, &txnobj))
         return NULL;
     CHECK_DB_NOT_CLOSED(self);
-    if (!make_key_dbt(self, keyobj, &key, &flags, &key_buf_view))
+    if (!make_key_dbt(self, keyobj, &key, &flags))
         return NULL;
     if (!checkTxnObj(txnobj, &txn)) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
+        FREE_DBT(key);
         return NULL;
     }
     CLEAR_DBT(data);
@@ -1668,12 +1868,12 @@
     err = self->db->get(self->db, txn, &key, &data, flags);
     MYDB_END_ALLOW_THREADS;
     if (err == DB_BUFFER_SMALL) {
-        retval = PyLong_FromLong((long)data.size);
+        retval = NUMBER_FromLong((long)data.size);
         err = 0;
     }
 
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
-    free_dbt(&data);
+    FREE_DBT(key);
+    FREE_DBT(data);
     RETURN_IF_ERR();
     return retval;
 }
@@ -1687,25 +1887,22 @@
     PyObject* keyobj;
     PyObject* dataobj;
     PyObject* retval = NULL;
-    Py_buffer* data_buf_view = NULL;
-    Py_buffer* key_buf_view = NULL;
     DBT key, data;
     void *orig_data;
     DB_TXN *txn = NULL;
     static char* kwnames[] = { "key", "data", "txn", "flags", NULL };
 
-
     if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|Oi:get_both", kwnames,
                                      &keyobj, &dataobj, &txnobj, &flags))
         return NULL;
 
     CHECK_DB_NOT_CLOSED(self);
-    if (!make_key_dbt(self, keyobj, &key, NULL, &key_buf_view))
+    if (!make_key_dbt(self, keyobj, &key, NULL))
         return NULL;
-    if ( !checkTxnObj(txnobj, &txn) ||
-         !make_dbt(dataobj, &data, &data_buf_view) )
+    if ( !make_dbt(dataobj, &data) ||
+         !checkTxnObj(txnobj, &txn) )
     {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
+        FREE_DBT(key);
         return NULL;
     }
 
@@ -1713,7 +1910,7 @@
     orig_data = data.data;
 
     if (CHECK_DBFLAG(self, DB_THREAD)) {
-        /* Tell BerkeleyDB to malloc the return value (thread safe) */
+        /* Tell Berkeley DB to malloc the return value (thread safe) */
         /* XXX(nnorwitz): At least 4.4.20 and 4.5.20 require this flag. */
         data.flags = DB_DBT_MALLOC;
     }
@@ -1722,8 +1919,6 @@
     err = self->db->get(self->db, txn, &key, &data, flags);
     MYDB_END_ALLOW_THREADS;
 
-    free_buf_view(dataobj, data_buf_view);
-
     if ((err == DB_NOTFOUND || err == DB_KEYEMPTY)
 	    && self->moduleFlags.getReturnsNone) {
         err = 0;
@@ -1732,61 +1927,47 @@
     }
     else if (!err) {
         /* XXX(nnorwitz): can we do: retval = dataobj; Py_INCREF(retval); */
-        /* XXX(gps) I think not: buffer API input vs. bytes object output. */
-        /* XXX(guido) But what if the input is PyString? */
-        retval = PyBytes_FromStringAndSize((char*)data.data, data.size);
+        retval = Build_PyString(data.data, data.size);
 
         /* Even though the flags require DB_DBT_MALLOC, data is not always
            allocated.  4.4: allocated, 4.5: *not* allocated. :-( */
         if (data.data != orig_data)
-            free_dbt(&data);
+            FREE_DBT(data);
     }
 
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
+    FREE_DBT(key);
     RETURN_IF_ERR();
     return retval;
 }
 
 
 static PyObject*
-DB_get_byteswapped(DBObject* self, PyObject* args)
+DB_get_byteswapped(DBObject* self)
 {
-#if (DBVER >= 33)
     int err = 0;
-#endif
     int retval = -1;
 
-    if (!PyArg_ParseTuple(args,":get_byteswapped"))
-        return NULL;
     CHECK_DB_NOT_CLOSED(self);
 
-#if (DBVER >= 33)
     MYDB_BEGIN_ALLOW_THREADS;
     err = self->db->get_byteswapped(self->db, &retval);
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
-#else
-    MYDB_BEGIN_ALLOW_THREADS;
-    retval = self->db->get_byteswapped(self->db);
-    MYDB_END_ALLOW_THREADS;
-#endif
-    return PyLong_FromLong(retval);
+    return NUMBER_FromLong(retval);
 }
 
 
 static PyObject*
-DB_get_type(DBObject* self, PyObject* args)
+DB_get_type(DBObject* self)
 {
     int type;
 
-    if (!PyArg_ParseTuple(args,":get_type"))
-        return NULL;
     CHECK_DB_NOT_CLOSED(self);
 
     type = _DB_get_type(self);
     if (type == -1)
         return NULL;
-    return PyLong_FromLong(type);
+    return NUMBER_FromLong(type);
 }
 
 
@@ -1845,7 +2026,7 @@
        but does not hold python references to them or prevent
        them from being closed prematurely.  This can cause
        python to crash when things are done in the wrong order. */
-    return (PyObject*) newDBCursorObject(dbc, self);
+    return (PyObject*) newDBCursorObject(dbc, NULL, self);
 }
 
 
@@ -1855,7 +2036,6 @@
     int err, flags=0;
     PyObject* txnobj = NULL;
     PyObject* keyobj;
-    Py_buffer* key_buf_view = NULL;
     DBT key;
     DB_TXN *txn = NULL;
     DB_KEY_RANGE range;
@@ -1865,18 +2045,16 @@
                                      &keyobj, &txnobj, &flags))
         return NULL;
     CHECK_DB_NOT_CLOSED(self);
-    if (!checkTxnObj(txnobj, &txn))
-        return NULL;
-    if (!make_dbt(keyobj, &key, &key_buf_view))
+    if (!make_dbt(keyobj, &key))
         /* BTree only, don't need to allow for an int key */
         return NULL;
+    if (!checkTxnObj(txnobj, &txn))
+        return NULL;
 
     MYDB_BEGIN_ALLOW_THREADS;
     err = self->db->key_range(self->db, txn, &key, &range, flags);
     MYDB_END_ALLOW_THREADS;
 
-    free_buf_view(keyobj, key_buf_view);
-
     RETURN_IF_ERR();
     return Py_BuildValue("ddd", range.less, range.equal, range.greater);
 }
@@ -1940,11 +2118,24 @@
     if (NULL == self->db) {
         PyObject *t = Py_BuildValue("(is)", 0,
                                 "Cannot call open() twice for DB object");
-        PyErr_SetObject(DBError, t);
-        Py_DECREF(t);
+        if (t) {
+            PyErr_SetObject(DBError, t);
+            Py_DECREF(t);
+        }
         return NULL;
     }
 
+#if (DBVER >= 41)
+    if (txn) {  /* Can't use 'txnobj' because it could be 'txnobj==Py_None' */
+        INSERT_IN_DOUBLE_LINKED_LIST_TXN(((DBTxnObject *)txnobj)->children_dbs,self);
+        self->txn=(DBTxnObject *)txnobj;
+    } else {
+        self->txn=NULL;
+    }
+#else
+    self->txn=NULL;
+#endif
+
     MYDB_BEGIN_ALLOW_THREADS;
 #if (DBVER >= 41)
     err = self->db->open(self->db, txn, filename, dbname, type, flags, mode);
@@ -1953,8 +2144,10 @@
 #endif
     MYDB_END_ALLOW_THREADS;
     if (makeDBError(err)) {
-        self->db->close(self->db, 0);
-        self->db = NULL;
+        PyObject *dummy;
+
+        dummy=DB_close_internal(self,0);
+        Py_XDECREF(dummy);
         return NULL;
     }
 
@@ -1963,6 +2156,7 @@
 #endif
 
     self->flags = flags;
+
     RETURN_NONE();
 }
 
@@ -1974,9 +2168,7 @@
     PyObject* txnobj = NULL;
     int dlen = -1;
     int doff = -1;
-    PyObject *keyobj, *dataobj, *retval;
-    Py_buffer *data_buf_view = NULL;
-    Py_buffer *key_buf_view = NULL;
+    PyObject* keyobj, *dataobj, *retval;
     DBT key, data;
     DB_TXN *txn = NULL;
     static char* kwnames[] = { "key", "data", "txn", "flags", "dlen",
@@ -1987,31 +2179,28 @@
         return NULL;
 
     CHECK_DB_NOT_CLOSED(self);
-    if (!make_key_dbt(self, keyobj, &key, NULL, &key_buf_view))
+    if (!make_key_dbt(self, keyobj, &key, NULL))
         return NULL;
-    if ( !make_dbt(dataobj, &data, &data_buf_view) ||
+    if ( !make_dbt(dataobj, &data) ||
          !add_partial_dbt(&data, dlen, doff) ||
          !checkTxnObj(txnobj, &txn) )
     {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
-        free_buf_view(dataobj, data_buf_view);
+        FREE_DBT(key);
         return NULL;
     }
 
     if (-1 == _DB_put(self, txn, &key, &data, flags)) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
-        free_buf_view(dataobj, data_buf_view);
+        FREE_DBT(key);
         return NULL;
     }
 
     if (flags & DB_APPEND)
-        retval = PyLong_FromLong(*((db_recno_t*)key.data));
+        retval = NUMBER_FromLong(*((db_recno_t*)key.data));
     else {
         retval = Py_None;
         Py_INCREF(retval);
     }
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
-    free_buf_view(dataobj, data_buf_view);
+    FREE_DBT(key);
     return retval;
 }
 
@@ -2030,7 +2219,12 @@
         return NULL;
     CHECK_DB_NOT_CLOSED(self);
 
+    EXTRACT_FROM_DOUBLE_LINKED_LIST_MAYBE_NULL(self);
+
+    MYDB_BEGIN_ALLOW_THREADS;
     err = self->db->remove(self->db, filename, database, flags);
+    MYDB_END_ALLOW_THREADS;
+
     self->db = NULL;
     RETURN_IF_ERR();
     RETURN_NONE();
@@ -2060,6 +2254,25 @@
 
 
 static PyObject*
+DB_get_private(DBObject* self)
+{
+    /* We can give out the private field even if db is closed */
+    Py_INCREF(self->private_obj);
+    return self->private_obj;
+}
+
+static PyObject*
+DB_set_private(DBObject* self, PyObject* private_obj)
+{
+    /* We can set the private field even if db is closed */
+    Py_DECREF(self->private_obj);
+    Py_INCREF(private_obj);
+    self->private_obj = private_obj;
+    RETURN_NONE();
+}
+
+
+static PyObject*
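
The new accessors let an application attach an arbitrary Python object to a DB handle, and the field stays usable even after close(). A small sketch, assuming the functions are exposed as get_private()/set_private() in the method table:

    d.set_private({'opened_by': 'worker-3'})     # any Python object is accepted
    info = d.get_private()                       # -> {'opened_by': 'worker-3'}
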
 DB_set_bt_minkey(DBObject* self, PyObject* args)
 {
     int err, minkey;
@@ -2075,17 +2288,16 @@
     RETURN_NONE();
 }
 
-#if (DBVER >= 33)
-static int 
+static int
 _default_cmp(const DBT *leftKey,
 	     const DBT *rightKey)
 {
   int res;
   int lsize = leftKey->size, rsize = rightKey->size;
 
-  res = memcmp(leftKey->data, rightKey->data, 
+  res = memcmp(leftKey->data, rightKey->data,
 	       lsize < rsize ? lsize : rsize);
-  
+
   if (res == 0) {
       if (lsize < rsize) {
 	  res = -1;
@@ -2098,7 +2310,7 @@
 }
 
 static int
-_db_compareCallback(DB* db, 
+_db_compareCallback(DB* db,
 		    const DBT *leftKey,
 		    const DBT *rightKey)
 {
@@ -2120,8 +2332,7 @@
     } else {
 	MYDB_BEGIN_BLOCK_THREADS;
 
-	args = Py_BuildValue("y#y#", leftKey->data, leftKey->size,
-			     rightKey->data, rightKey->size);
+	args = BuildValue_SS(leftKey->data, leftKey->size, rightKey->data, rightKey->size);
 	if (args != NULL) {
 		/* XXX(twouters) I highly doubt this INCREF is correct */
 		Py_INCREF(self);
@@ -2131,8 +2342,8 @@
 	    /* we're in a callback within the DB code, we can't raise */
 	    PyErr_Print();
 	    res = _default_cmp(leftKey, rightKey);
-	} else if (PyLong_Check(result)) {
-	    res = PyLong_AsLong(result);
+	} else if (NUMBER_Check(result)) {
+	    res = NUMBER_AsLong(result);
 	} else {
 	    PyErr_SetString(PyExc_TypeError,
 			    "DB_bt_compare callback MUST return an int.");
@@ -2140,7 +2351,7 @@
 	    PyErr_Print();
 	    res = _default_cmp(leftKey, rightKey);
 	}
-    
+
 	Py_XDECREF(args);
 	Py_XDECREF(result);
 
@@ -2150,15 +2361,11 @@
 }
 
 static PyObject*
-DB_set_bt_compare(DBObject* self, PyObject* args)
+DB_set_bt_compare(DBObject* self, PyObject* comparator)
 {
     int err;
-    PyObject *comparator;
     PyObject *tuple, *result;
 
-    if (!PyArg_ParseTuple(args, "O:set_bt_compare", &comparator))
-	return NULL;
-
     CHECK_DB_NOT_CLOSED(self);
 
     if (!PyCallable_Check(comparator)) {
@@ -2166,7 +2373,7 @@
 	return NULL;
     }
 
-    /* 
+    /*
      * Perform a test call of the comparator function with two empty
      * string objects here.  verify that it returns an int (0).
      * err if not.
@@ -2176,11 +2383,11 @@
     Py_DECREF(tuple);
     if (result == NULL)
         return NULL;
-    if (!PyLong_Check(result)) {
+    if (!NUMBER_Check(result)) {
 	PyErr_SetString(PyExc_TypeError,
 		        "callback MUST return an int");
 	return NULL;
-    } else if (PyLong_AsLong(result) != 0) {
+    } else if (NUMBER_AsLong(result) != 0) {
 	PyErr_SetString(PyExc_TypeError,
 		        "callback failed to return 0 on two empty strings");
 	return NULL;
@@ -2215,7 +2422,6 @@
     RETURN_IF_ERR();
     RETURN_NONE();
 }
-#endif /* DBVER >= 33 */
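
Restating the contract that DB_set_bt_compare() verifies above as a usage sketch: the callback receives two keys, must return an int, must return 0 for two empty keys, and has to be installed before DB.open():

    def reverse_cmp(left, right):
        # lexicographic order, reversed; returns -1, 0 or 1 (0 for equal/empty keys)
        return (left < right) - (left > right)

    d = db.DB()
    d.set_bt_compare(reverse_cmp)                # the callable is now passed directly
    d.open('btree.db', dbtype=db.DB_BTREE, flags=db.DB_CREATE)
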
 
 
 static PyObject*
@@ -2447,10 +2653,8 @@
     MYDB_BEGIN_ALLOW_THREADS;
 #if (DBVER >= 43)
     err = self->db->stat(self->db, txn, &sp, flags);
-#elif (DBVER >= 33)
-    err = self->db->stat(self->db, &sp, flags);
 #else
-    err = self->db->stat(self->db, &sp, NULL, flags);
+    err = self->db->stat(self->db, &sp, flags);
 #endif
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
@@ -2474,6 +2678,9 @@
         MAKE_HASH_ENTRY(version);
         MAKE_HASH_ENTRY(nkeys);
         MAKE_HASH_ENTRY(ndata);
+#if (DBVER >= 46)
+        MAKE_HASH_ENTRY(pagecnt);
+#endif
         MAKE_HASH_ENTRY(pagesize);
 #if (DBVER < 41)
         MAKE_HASH_ENTRY(nelem);
@@ -2496,6 +2703,9 @@
         MAKE_BT_ENTRY(version);
         MAKE_BT_ENTRY(nkeys);
         MAKE_BT_ENTRY(ndata);
+#if (DBVER >= 46)
+        MAKE_BT_ENTRY(pagecnt);
+#endif
         MAKE_BT_ENTRY(pagesize);
         MAKE_BT_ENTRY(minkey);
         MAKE_BT_ENTRY(re_len);
@@ -2505,6 +2715,9 @@
         MAKE_BT_ENTRY(leaf_pg);
         MAKE_BT_ENTRY(dup_pg);
         MAKE_BT_ENTRY(over_pg);
+#if (DBVER >= 43)
+        MAKE_BT_ENTRY(empty_pg);
+#endif
         MAKE_BT_ENTRY(free);
         MAKE_BT_ENTRY(int_pgfree);
         MAKE_BT_ENTRY(leaf_pgfree);
@@ -2518,6 +2731,9 @@
         MAKE_QUEUE_ENTRY(nkeys);
         MAKE_QUEUE_ENTRY(ndata);
         MAKE_QUEUE_ENTRY(pagesize);
+#if (DBVER >= 41)
+        MAKE_QUEUE_ENTRY(extentsize);
+#endif
         MAKE_QUEUE_ENTRY(pages);
         MAKE_QUEUE_ENTRY(re_len);
         MAKE_QUEUE_ENTRY(re_pad);
@@ -2561,7 +2777,6 @@
 }
 
 
-#if (DBVER >= 33)
 static PyObject*
 DB_truncate(DBObject* self, PyObject* args, PyObject* kwargs)
 {
@@ -2582,9 +2797,8 @@
     err = self->db->truncate(self->db, txn, &count, flags);
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
-    return PyLong_FromLong(count);
+    return NUMBER_FromLong(count);
 }
-#endif
 
 
 static PyObject*
@@ -2632,15 +2846,14 @@
     if (outFile)
         fclose(outFile);
 
-    /* DB.verify acts as a DB handle destructor (like close); this was
-     * documented in BerkeleyDB 4.2 but had the undocumented effect
-     * of not being safe in prior versions while still requiring an explicit
-     * DB.close call afterwards.  Lets call close for the user to emulate
-     * the safe 4.2 behaviour. */
-#if (DBVER <= 41)
-    self->db->close(self->db, 0);
-#endif
-    self->db = NULL;
+    {  /* DB.verify acts as a DB handle destructor (like close) */
+        PyObject *error;
+
+        error=DB_close_internal(self,0);
+        if (error) {
+            return error;
+        }
+    }
 
     RETURN_IF_ERR();
     RETURN_NONE();
@@ -2663,7 +2876,7 @@
         ++oldValue;
     self->moduleFlags.getReturnsNone = (flags >= 1);
     self->moduleFlags.cursorSetReturnsNone = (flags >= 2);
-    return PyLong_FromLong(oldValue);
+    return NUMBER_FromLong(oldValue);
 }
 
 #if (DBVER >= 41)
@@ -2703,8 +2916,10 @@
 
     if (self->db == NULL) {
         PyObject *t = Py_BuildValue("(is)", 0, "DB object has been closed");
-        PyErr_SetObject(DBError, t);
-        Py_DECREF(t);
+        if (t) {
+            PyErr_SetObject(DBError, t);
+            Py_DECREF(t);
+        }
         return -1;
     }
 
@@ -2717,17 +2932,15 @@
 redo_stat_for_length:
 #if (DBVER >= 43)
     err = self->db->stat(self->db, /*txnid*/ NULL, &sp, flags);
-#elif (DBVER >= 33)
-    err = self->db->stat(self->db, &sp, flags);
 #else
-    err = self->db->stat(self->db, &sp, NULL, flags);
+    err = self->db->stat(self->db, &sp, flags);
 #endif
 
     /* All the stat structures have matching fields upto the ndata field,
        so we can use any of them for the type cast */
     size = ((DB_BTREE_STAT*)sp)->bt_ndata;
 
-    /* A size of 0 could mean that BerkeleyDB no longer had the stat values cached.
+    /* A size of 0 could mean that Berkeley DB no longer had the stat values cached.
      * redo a full stat to make sure.
      *   Fixes SF python bug 1493322, pybsddb bug 1184012
      */
@@ -2754,17 +2967,16 @@
 {
     int err;
     PyObject* retval;
-    Py_buffer* key_buf_view = NULL;
     DBT key;
     DBT data;
 
     CHECK_DB_NOT_CLOSED(self);
-    if (!make_key_dbt(self, keyobj, &key, NULL, &key_buf_view))
+    if (!make_key_dbt(self, keyobj, &key, NULL))
         return NULL;
 
     CLEAR_DBT(data);
     if (CHECK_DBFLAG(self, DB_THREAD)) {
-        /* Tell BerkeleyDB to malloc the return value (thread safe) */
+        /* Tell Berkeley DB to malloc the return value (thread safe) */
         data.flags = DB_DBT_MALLOC;
     }
     MYDB_BEGIN_ALLOW_THREADS;
@@ -2778,11 +2990,11 @@
         retval = NULL;
     }
     else {
-        retval = PyBytes_FromStringAndSize((char*)data.data, data.size);
-        free_dbt(&data);
+        retval = Build_PyString(data.data, data.size);
+        FREE_DBT(data);
     }
 
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
+    FREE_DBT(key);
     return retval;
 }
 
@@ -2793,21 +3005,21 @@
     DBT key, data;
     int retval;
     int flags = 0;
-    Py_buffer *data_buf_view = NULL;
-    Py_buffer *key_buf_view = NULL;
 
     if (self->db == NULL) {
         PyObject *t = Py_BuildValue("(is)", 0, "DB object has been closed");
-        PyErr_SetObject(DBError, t);
-        Py_DECREF(t);
+        if (t) {
+            PyErr_SetObject(DBError, t);
+            Py_DECREF(t);
+        }
         return -1;
     }
 
-    if (!make_key_dbt(self, keyobj, &key, NULL, &key_buf_view))
+    if (!make_key_dbt(self, keyobj, &key, NULL))
         return -1;
 
     if (dataobj != NULL) {
-        if (!make_dbt(dataobj, &data, &data_buf_view))
+        if (!make_dbt(dataobj, &data))
             retval =  -1;
         else {
             if (self->setflags & (DB_DUP|DB_DUPSORT))
@@ -2828,29 +3040,30 @@
         /* dataobj == NULL, so delete the key */
         retval = _DB_delete(self, NULL, &key, 0);
     }
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
-    free_buf_view(dataobj, data_buf_view);
+    FREE_DBT(key);
     return retval;
 }
 
 
 static PyObject*
-DB_has_key(DBObject* self, PyObject* args)
+DB_has_key(DBObject* self, PyObject* args, PyObject* kwargs)
 {
     int err;
     PyObject* keyobj;
-    Py_buffer* key_buf_view = NULL;
     DBT key, data;
     PyObject* txnobj = NULL;
     DB_TXN *txn = NULL;
+    static char* kwnames[] = {"key","txn", NULL};
 
-    if (!PyArg_ParseTuple(args,"O|O:has_key", &keyobj, &txnobj))
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O:has_key", kwnames,
+                &keyobj, &txnobj))
         return NULL;
+
     CHECK_DB_NOT_CLOSED(self);
-    if (!make_key_dbt(self, keyobj, &key, NULL, &key_buf_view))
+    if (!make_key_dbt(self, keyobj, &key, NULL))
         return NULL;
     if (!checkTxnObj(txnobj, &txn)) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
+        FREE_DBT(key);
         return NULL;
     }
 
@@ -2864,12 +3077,12 @@
     MYDB_BEGIN_ALLOW_THREADS;
     err = self->db->get(self->db, txn, &key, &data, 0);
     MYDB_END_ALLOW_THREADS;
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
+    FREE_DBT(key);
 
     if (err == DB_BUFFER_SMALL || err == 0) {
-        return PyLong_FromLong(1);
+        return NUMBER_FromLong(1);
     } else if (err == DB_NOTFOUND || err == DB_KEYEMPTY) {
-        return PyLong_FromLong(0);
+        return NUMBER_FromLong(0);
     }
 
     makeDBError(err);
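
has_key() gets the same keyword treatment; a short sketch ('process' is a hypothetical consumer, not part of the patch):

    if d.has_key(b'key', txn=txn):               # 'txn' may now be passed by name
        process(d.get(b'key', txn=txn))
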
@@ -2912,14 +3125,9 @@
         return NULL;
     }
 
-    if (CHECK_DBFLAG(self, DB_THREAD)) {
-        key.flags = DB_DBT_REALLOC;
-        data.flags = DB_DBT_REALLOC;
-    }
-
     while (1) { /* use the cursor to traverse the DB, collecting items */
         MYDB_BEGIN_ALLOW_THREADS;
-        err = cursor->c_get(cursor, &key, &data, DB_NEXT);
+        err = _DBC_get(cursor, &key, &data, DB_NEXT);
         MYDB_END_ALLOW_THREADS;
 
         if (err) {
@@ -2933,17 +3141,17 @@
             case DB_BTREE:
             case DB_HASH:
             default:
-                item = PyBytes_FromStringAndSize((char*)key.data, key.size);
+                item = Build_PyString(key.data, key.size);
                 break;
             case DB_RECNO:
             case DB_QUEUE:
-                item = PyLong_FromLong(*((db_recno_t*)key.data));
+                item = NUMBER_FromLong(*((db_recno_t*)key.data));
                 break;
             }
             break;
 
         case _VALUES_LIST:
-            item = PyBytes_FromStringAndSize((char*)data.data, data.size);
+            item = Build_PyString(data.data, data.size);
             break;
 
         case _ITEMS_LIST:
@@ -2951,13 +3159,11 @@
             case DB_BTREE:
             case DB_HASH:
             default:
-                item = Py_BuildValue("y#y#", key.data, key.size, data.data,
-                                     data.size);
+                item = BuildValue_SS(key.data, key.size, data.data, data.size);
                 break;
             case DB_RECNO:
             case DB_QUEUE:
-                item = Py_BuildValue("iy#", *((db_recno_t*)key.data),
-                                     data.data, data.size);
+                item = BuildValue_IS(*((db_recno_t*)key.data), data.data, data.size);
                 break;
             }
             break;
@@ -2971,7 +3177,12 @@
             list = NULL;
             goto done;
         }
-        PyList_Append(list, item);
+        if (PyList_Append(list, item)) {
+            Py_DECREF(list);
+            Py_DECREF(item);
+            list = NULL;
+            goto done;
+        }
         Py_DECREF(item);
     }
 
@@ -2982,10 +3193,8 @@
     }
 
  done:
-    free_dbt(&key);
-    free_dbt(&data);
     MYDB_BEGIN_ALLOW_THREADS;
-    cursor->c_close(cursor);
+    _DBC_close(cursor);
     MYDB_END_ALLOW_THREADS;
     return list;
 }
@@ -3037,23 +3246,32 @@
 
 
 static PyObject*
-DBC_close(DBCursorObject* self, PyObject* args)
+DBC_close_internal(DBCursorObject* self)
 {
     int err = 0;
 
-    if (!PyArg_ParseTuple(args, ":close"))
-        return NULL;
-
     if (self->dbc != NULL) {
+        EXTRACT_FROM_DOUBLE_LINKED_LIST(self);
+        if (self->txn) {
+            EXTRACT_FROM_DOUBLE_LINKED_LIST_TXN(self);
+            self->txn=NULL;
+        }
+
         MYDB_BEGIN_ALLOW_THREADS;
-        err = self->dbc->c_close(self->dbc);
-        self->dbc = NULL;
+        err = _DBC_close(self->dbc);
         MYDB_END_ALLOW_THREADS;
+        self->dbc = NULL;
     }
     RETURN_IF_ERR();
     RETURN_NONE();
 }
 
+static PyObject*
+DBC_close(DBCursorObject* self)
+{
+    return DBC_close_internal(self);
+}
+
 
 static PyObject*
 DBC_count(DBCursorObject* self, PyObject* args)
@@ -3068,11 +3286,11 @@
     CHECK_CURSOR_NOT_CLOSED(self);
 
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_count(self->dbc, &count, flags);
+    err = _DBC_count(self->dbc, &count, flags);
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
 
-    return PyLong_FromLong(count);
+    return NUMBER_FromLong(count);
 }
 
 
@@ -3094,7 +3312,7 @@
     CHECK_CURSOR_NOT_CLOSED(self);
 
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_del(self->dbc, flags);
+    err = _DBC_del(self->dbc, flags);
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
 
@@ -3115,11 +3333,11 @@
     CHECK_CURSOR_NOT_CLOSED(self);
 
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_dup(self->dbc, &dbc, flags);
+    err = _DBC_dup(self->dbc, &dbc, flags);
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
 
-    return (PyObject*) newDBCursorObject(dbc, self->mydb);
+    return (PyObject*) newDBCursorObject(dbc, self->txn, self->mydb);
 }
 
 static PyObject*
@@ -3136,8 +3354,6 @@
     PyObject* keyobj = NULL;
     PyObject* dataobj = NULL;
     PyObject* retval = NULL;
-    Py_buffer* data_buf_view = NULL;
-    Py_buffer* key_buf_view = NULL;
     int dlen = -1;
     int doff = -1;
     DBT key, data;
@@ -3151,7 +3367,7 @@
     {
         PyErr_Clear();
         if (!PyArg_ParseTupleAndKeywords(args, kwargs, "Oi|ii:get",
-                                         &kwnames[1], 
+                                         &kwnames[1],
 					 &keyobj, &flags, &dlen, &doff))
         {
             PyErr_Clear();
@@ -3166,25 +3382,17 @@
 
     CHECK_CURSOR_NOT_CLOSED(self);
 
-    if (keyobj && !make_key_dbt(self->mydb, keyobj, &key, NULL, &key_buf_view))
+    if (keyobj && !make_key_dbt(self->mydb, keyobj, &key, NULL))
         return NULL;
-    if ( (dataobj && !make_dbt(dataobj, &data, &data_buf_view)) ||
+    if ( (dataobj && !make_dbt(dataobj, &data)) ||
          (!add_partial_dbt(&data, dlen, doff)) )
     {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
-        free_buf_view(dataobj, data_buf_view);
+        FREE_DBT(key); /* 'make_key_dbt' could do a 'malloc' */
         return NULL;
     }
 
-    if (CHECK_DBFLAG(self->mydb, DB_THREAD)) {
-        data.flags = DB_DBT_MALLOC;
-        if (!(key.flags & DB_DBT_REALLOC)) {
-            key.flags |= DB_DBT_MALLOC;
-        }
-    }
-
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_get(self->dbc, &key, &data, flags);
+    err = _DBC_get(self->dbc, &key, &data, flags);
     MYDB_END_ALLOW_THREADS;
 
     if ((err == DB_NOTFOUND || err == DB_KEYEMPTY)
@@ -3203,23 +3411,18 @@
         case DB_BTREE:
         case DB_HASH:
         default:
-            retval = Py_BuildValue("y#y#", key.data, key.size,
-                                   data.data, data.size);
+            retval = BuildValue_SS(key.data, key.size, data.data, data.size);
             break;
         case DB_RECNO:
         case DB_QUEUE:
-            retval = Py_BuildValue("iy#", *((db_recno_t*)key.data),
-                                   data.data, data.size);
+            retval = BuildValue_IS(*((db_recno_t*)key.data), data.data, data.size);
             break;
         }
-        free_dbt(&data);
     }
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
-    free_buf_view(dataobj, data_buf_view);
+    FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
     return retval;
 }
 
-#if (DBVER >= 33)
 static PyObject*
 DBC_pget(DBCursorObject* self, PyObject* args, PyObject *kwargs)
 {
@@ -3227,8 +3430,6 @@
     PyObject* keyobj = NULL;
     PyObject* dataobj = NULL;
     PyObject* retval = NULL;
-    Py_buffer* data_buf_view = NULL;
-    Py_buffer* key_buf_view = NULL;
     int dlen = -1;
     int doff = -1;
     DBT key, pkey, data;
@@ -3257,27 +3458,19 @@
 
     CHECK_CURSOR_NOT_CLOSED(self);
 
-    if (keyobj && !make_key_dbt(self->mydb, keyobj, &key, NULL, &key_buf_view))
+    if (keyobj && !make_key_dbt(self->mydb, keyobj, &key, NULL))
         return NULL;
-    if ( (dataobj && !make_dbt(dataobj, &data, &data_buf_view)) ||
+    if ( (dataobj && !make_dbt(dataobj, &data)) ||
          (!add_partial_dbt(&data, dlen, doff)) ) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
-        free_buf_view(dataobj, data_buf_view);
+        FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
         return NULL;
     }
 
-    if (CHECK_DBFLAG(self->mydb, DB_THREAD)) {
-        data.flags = DB_DBT_MALLOC;
-        if (!(key.flags & DB_DBT_REALLOC)) {
-            key.flags |= DB_DBT_MALLOC;
-        }
-    }
-
     CLEAR_DBT(pkey);
     pkey.flags = DB_DBT_MALLOC;
 
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_pget(self->dbc, &key, &pkey, &data, flags);
+    err = _DBC_pget(self->dbc, &key, &pkey, &data, flags);
     MYDB_END_ALLOW_THREADS;
 
     if ((err == DB_NOTFOUND || err == DB_KEYEMPTY)
@@ -3291,76 +3484,71 @@
     else {
         PyObject *pkeyObj;
         PyObject *dataObj;
-        dataObj = PyBytes_FromStringAndSize(data.data, data.size);
+        dataObj = Build_PyString(data.data, data.size);
 
         if (self->mydb->primaryDBType == DB_RECNO ||
             self->mydb->primaryDBType == DB_QUEUE)
-            pkeyObj = PyLong_FromLong(*(int *)pkey.data);
+            pkeyObj = NUMBER_FromLong(*(int *)pkey.data);
         else
-            pkeyObj = PyBytes_FromStringAndSize(pkey.data, pkey.size);
+            pkeyObj = Build_PyString(pkey.data, pkey.size);
 
         if (key.data && key.size) /* return key, pkey and data */
         {
             PyObject *keyObj;
             int type = _DB_get_type(self->mydb);
             if (type == DB_RECNO || type == DB_QUEUE)
-                keyObj = PyLong_FromLong(*(int *)key.data);
+                keyObj = NUMBER_FromLong(*(int *)key.data);
             else
-                keyObj = PyBytes_FromStringAndSize(key.data, key.size);
+                keyObj = Build_PyString(key.data, key.size);
+#if (PY_VERSION_HEX >= 0x02040000)
             retval = PyTuple_Pack(3, keyObj, pkeyObj, dataObj);
+#else
+            retval = Py_BuildValue("OOO", keyObj, pkeyObj, dataObj);
+#endif
             Py_DECREF(keyObj);
+            FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
         }
         else /* return just the pkey and data */
         {
+#if (PY_VERSION_HEX >= 0x02040000)
             retval = PyTuple_Pack(2, pkeyObj, dataObj);
+#else
+            retval = Py_BuildValue("OO", pkeyObj, dataObj);
+#endif
         }
         Py_DECREF(dataObj);
         Py_DECREF(pkeyObj);
-        free_dbt(&pkey);
-        free_dbt(&data);
+        FREE_DBT(pkey);
     }
     /* the only time REALLOC should be set is if we used an integer
      * key that make_key_dbt malloc'd for us.  always free these. */
-    if (key.flags & DB_DBT_REALLOC) {
-        free_dbt(&key);
+    if (key.flags & DB_DBT_REALLOC) {  /* 'make_key_dbt' could do a 'malloc' */
+        FREE_DBT(key);
     }
-    free_buf_view(keyobj, key_buf_view);
-    free_buf_view(dataobj, data_buf_view);
     return retval;
 }
-#endif
 
 
 static PyObject*
-DBC_get_recno(DBCursorObject* self, PyObject* args)
+DBC_get_recno(DBCursorObject* self)
 {
     int err;
     db_recno_t recno;
     DBT key;
     DBT data;
 
-    if (!PyArg_ParseTuple(args, ":get_recno"))
-        return NULL;
-
     CHECK_CURSOR_NOT_CLOSED(self);
 
     CLEAR_DBT(key);
     CLEAR_DBT(data);
-    if (CHECK_DBFLAG(self->mydb, DB_THREAD)) {
-        /* Tell BerkeleyDB to malloc the return value (thread safe) */
-        data.flags = DB_DBT_MALLOC;
-        key.flags = DB_DBT_MALLOC;
-    }
 
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_get(self->dbc, &key, &data, DB_GET_RECNO);
+    err = _DBC_get(self->dbc, &key, &data, DB_GET_RECNO);
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
 
     recno = *((db_recno_t*)data.data);
-    free_dbt(&key);
-    free_dbt(&data);
-    return PyLong_FromLong(recno);
+    return NUMBER_FromLong(recno);
 }
 
 
@@ -3389,9 +3577,7 @@
 DBC_put(DBCursorObject* self, PyObject* args, PyObject* kwargs)
 {
     int err, flags = 0;
-    PyObject *keyobj, *dataobj;
-    Py_buffer *data_buf_view = NULL;
-    Py_buffer *key_buf_view = NULL;
+    PyObject* keyobj, *dataobj;
     DBT key, data;
     static char* kwnames[] = { "key", "data", "flags", "dlen", "doff",
                                      NULL };
@@ -3404,21 +3590,19 @@
 
     CHECK_CURSOR_NOT_CLOSED(self);
 
-    if (!make_key_dbt(self->mydb, keyobj, &key, NULL, &key_buf_view))
+    if (!make_key_dbt(self->mydb, keyobj, &key, NULL))
         return NULL;
-    if (!make_dbt(dataobj, &data, &data_buf_view) ||
+    if (!make_dbt(dataobj, &data) ||
         !add_partial_dbt(&data, dlen, doff) )
     {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
-        free_buf_view(dataobj, data_buf_view);
+        FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
         return NULL;
     }
 
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_put(self->dbc, &key, &data, flags);
+    err = _DBC_put(self->dbc, &key, &data, flags);
     MYDB_END_ALLOW_THREADS;
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
-    free_buf_view(dataobj, data_buf_view);
+    FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
     RETURN_IF_ERR();
     self->mydb->haveStat = 0;
     RETURN_NONE();
@@ -3430,8 +3614,7 @@
 {
     int err, flags = 0;
     DBT key, data;
-    PyObject *retval, *keyobj;
-    Py_buffer *key_buf_view = NULL;
+    PyObject* retval, *keyobj;
     static char* kwnames[] = { "key", "flags", "dlen", "doff", NULL };
     int dlen = -1;
     int doff = -1;
@@ -3442,21 +3625,17 @@
 
     CHECK_CURSOR_NOT_CLOSED(self);
 
-    if (!make_key_dbt(self->mydb, keyobj, &key, NULL, &key_buf_view))
+    if (!make_key_dbt(self->mydb, keyobj, &key, NULL))
         return NULL;
 
     CLEAR_DBT(data);
-    if (CHECK_DBFLAG(self->mydb, DB_THREAD)) {
-        /* Tell BerkeleyDB to malloc the return value (thread safe) */
-        data.flags = DB_DBT_MALLOC;
-    }
     if (!add_partial_dbt(&data, dlen, doff)) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
+        FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
         return NULL;
     }
 
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_get(self->dbc, &key, &data, flags|DB_SET);
+    err = _DBC_get(self->dbc, &key, &data, flags|DB_SET);
     MYDB_END_ALLOW_THREADS;
     if ((err == DB_NOTFOUND || err == DB_KEYEMPTY)
 	    && self->mydb->moduleFlags.cursorSetReturnsNone) {
@@ -3474,24 +3653,20 @@
         case DB_BTREE:
         case DB_HASH:
         default:
-            retval = Py_BuildValue("y#y#", key.data, key.size,
-                                   data.data, data.size);
+            retval = BuildValue_SS(key.data, key.size, data.data, data.size);
             break;
         case DB_RECNO:
         case DB_QUEUE:
-            retval = Py_BuildValue("iy#", *((db_recno_t*)key.data),
-                                   data.data, data.size);
+            retval = BuildValue_IS(*((db_recno_t*)key.data), data.data, data.size);
             break;
         }
-        free_dbt(&data);
-        free_dbt(&key);
+        FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
     }
     /* the only time REALLOC should be set is if we used an integer
      * key that make_key_dbt malloc'd for us.  always free these. */
     if (key.flags & DB_DBT_REALLOC) {
-        free_dbt(&key);
+        FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
     }
-    free_buf_view(keyobj, key_buf_view);
 
     return retval;
 }
@@ -3502,8 +3677,7 @@
 {
     int err, flags = 0;
     DBT key, data;
-    PyObject *retval, *keyobj;
-    Py_buffer *key_buf_view = NULL;
+    PyObject* retval, *keyobj;
     static char* kwnames[] = { "key", "flags", "dlen", "doff", NULL };
     int dlen = -1;
     int doff = -1;
@@ -3514,24 +3688,16 @@
 
     CHECK_CURSOR_NOT_CLOSED(self);
 
-    if (!make_key_dbt(self->mydb, keyobj, &key, NULL, &key_buf_view))
+    if (!make_key_dbt(self->mydb, keyobj, &key, NULL))
         return NULL;
 
     CLEAR_DBT(data);
     if (!add_partial_dbt(&data, dlen, doff)) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
+        FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
         return NULL;
     }
-    if (CHECK_DBFLAG(self->mydb, DB_THREAD)) {
-        /* Tell BerkeleyDB to malloc the return value (thread safe) */
-        data.flags |= DB_DBT_MALLOC;
-        /* only BTREE databases will return anything in the key */
-        if (!(key.flags & DB_DBT_REALLOC) && _DB_get_type(self->mydb) == DB_BTREE) {
-            key.flags |= DB_DBT_MALLOC;
-        }
-    }
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_get(self->dbc, &key, &data, flags|DB_SET_RANGE);
+    err = _DBC_get(self->dbc, &key, &data, flags|DB_SET_RANGE);
     MYDB_END_ALLOW_THREADS;
     if ((err == DB_NOTFOUND || err == DB_KEYEMPTY)
 	    && self->mydb->moduleFlags.cursorSetReturnsNone) {
@@ -3549,24 +3715,20 @@
         case DB_BTREE:
         case DB_HASH:
         default:
-            retval = Py_BuildValue("y#y#", key.data, key.size,
-                                   data.data, data.size);
+            retval = BuildValue_SS(key.data, key.size, data.data, data.size);
             break;
         case DB_RECNO:
         case DB_QUEUE:
-            retval = Py_BuildValue("iy#", *((db_recno_t*)key.data),
-                                   data.data, data.size);
+            retval = BuildValue_IS(*((db_recno_t*)key.data), data.data, data.size);
             break;
         }
-        free_dbt(&key);
-        free_dbt(&data);
+        FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
     }
     /* the only time REALLOC should be set is if we used an integer
      * key that make_key_dbt malloc'd for us.  always free these. */
     if (key.flags & DB_DBT_REALLOC) {
-        free_dbt(&key);
+        FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
     }
-    free_buf_view(keyobj, key_buf_view);
 
     return retval;
 }
@@ -3577,20 +3739,18 @@
 {
     int err;
     DBT key, data;
-    PyObject *retval;
-    Py_buffer *data_buf_view = NULL;
-    Py_buffer *key_buf_view = NULL;
+    PyObject* retval;
 
     /* the caller did this:  CHECK_CURSOR_NOT_CLOSED(self); */
-    if (!make_key_dbt(self->mydb, keyobj, &key, NULL, &key_buf_view))
+    if (!make_key_dbt(self->mydb, keyobj, &key, NULL))
         return NULL;
-    if (!make_dbt(dataobj, &data, &data_buf_view)) {
-        FREE_DBT_VIEW(key, keyobj, key_buf_view);
+    if (!make_dbt(dataobj, &data)) {
+        FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
         return NULL;
     }
 
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_get(self->dbc, &key, &data, flags|DB_GET_BOTH);
+    err = _DBC_get(self->dbc, &key, &data, flags|DB_GET_BOTH);
     MYDB_END_ALLOW_THREADS;
     if ((err == DB_NOTFOUND || err == DB_KEYEMPTY) && returnsNone) {
         Py_INCREF(Py_None);
@@ -3607,19 +3767,16 @@
         case DB_BTREE:
         case DB_HASH:
         default:
-            retval = Py_BuildValue("y#y#", key.data, key.size,
-                                   data.data, data.size);
+            retval = BuildValue_SS(key.data, key.size, data.data, data.size);
             break;
         case DB_RECNO:
         case DB_QUEUE:
-            retval = Py_BuildValue("iy#", *((db_recno_t*)key.data),
-                                   data.data, data.size);
+            retval = BuildValue_IS(*((db_recno_t*)key.data), data.data, data.size);
             break;
         }
     }
 
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
-    free_buf_view(dataobj, data_buf_view);
+    FREE_DBT(key);  /* 'make_key_dbt' could do a 'malloc' */
     return retval;
 }
 
@@ -3641,14 +3798,12 @@
 
 /* Return size of entry */
 static PyObject*
-DBC_get_current_size(DBCursorObject* self, PyObject* args)
+DBC_get_current_size(DBCursorObject* self)
 {
     int err, flags=DB_CURRENT;
     PyObject* retval = NULL;
     DBT key, data;
 
-    if (!PyArg_ParseTuple(args, ":get_current_size"))
-        return NULL;
     CHECK_CURSOR_NOT_CLOSED(self);
     CLEAR_DBT(key);
     CLEAR_DBT(data);
@@ -3658,16 +3813,14 @@
     data.flags = DB_DBT_USERMEM;
     data.ulen = 0;
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_get(self->dbc, &key, &data, flags);
+    err = _DBC_get(self->dbc, &key, &data, flags);
     MYDB_END_ALLOW_THREADS;
     if (err == DB_BUFFER_SMALL || !err) {
         /* DB_BUFFER_SMALL means positive size, !err means zero length value */
-        retval = PyLong_FromLong((long)data.size);
+        retval = NUMBER_FromLong((long)data.size);
         err = 0;
     }
 
-    free_dbt(&key);
-    free_dbt(&data);
     RETURN_IF_ERR();
     return retval;
 }
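
[Note: get_current_size() also becomes argument-free; it probes the current entry with a
zero-length DB_DBT_USERMEM buffer, so DB_BUFFER_SMALL (or plain success for an empty value)
yields the size without copying the data. Continuing the earlier sketch, with d still open:

    c = d.cursor()
    if c.first():                      # position on the first record
        print(c.get_current_size())    # value size in bytes, data not fetched
    c.close()
]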
@@ -3721,17 +3874,13 @@
     key.flags = DB_DBT_REALLOC;
 
     CLEAR_DBT(data);
-    if (CHECK_DBFLAG(self->mydb, DB_THREAD)) {
-        /* Tell BerkeleyDB to malloc the return value (thread safe) */
-        data.flags = DB_DBT_MALLOC;
-    }
     if (!add_partial_dbt(&data, dlen, doff)) {
-        free_dbt(&key);
+        FREE_DBT(key);
         return NULL;
     }
 
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_get(self->dbc, &key, &data, flags|DB_SET_RECNO);
+    err = _DBC_get(self->dbc, &key, &data, flags|DB_SET_RECNO);
     MYDB_END_ALLOW_THREADS;
     if ((err == DB_NOTFOUND || err == DB_KEYEMPTY)
 	    && self->mydb->moduleFlags.cursorSetReturnsNone) {
@@ -3742,11 +3891,9 @@
         retval = NULL;
     }
     else {  /* Can only be used for BTrees, so no need to return int key */
-        retval = Py_BuildValue("y#y#", key.data, key.size,
-                               data.data, data.size);
-        free_dbt(&data);
+        retval = BuildValue_SS(key.data, key.size, data.data, data.size);
     }
-    free_dbt(&key);
+    FREE_DBT(key);
 
     return retval;
 }
@@ -3794,13 +3941,9 @@
 
     CLEAR_DBT(key);
     CLEAR_DBT(data);
-    if (CHECK_DBFLAG(self->mydb, DB_THREAD)) {
-        /* Tell BerkeleyDB to malloc the return value (thread safe) */
-        key.flags = DB_DBT_MALLOC;
-    }
 
     MYDB_BEGIN_ALLOW_THREADS;
-    err = self->dbc->c_get(self->dbc, &key, &data, flags | DB_JOIN_ITEM);
+    err = _DBC_get(self->dbc, &key, &data, flags | DB_JOIN_ITEM);
     MYDB_END_ALLOW_THREADS;
     if ((err == DB_NOTFOUND || err == DB_KEYEMPTY)
 	    && self->mydb->moduleFlags.getReturnsNone) {
@@ -3811,8 +3954,7 @@
         retval = NULL;
     }
     else {
-        retval = Py_BuildValue("y#", key.data, key.size);
-        free_dbt(&key);
+        retval = BuildValue_S(key.data, key.size);
     }
 
     return retval;
@@ -3825,18 +3967,26 @@
 
 
 static PyObject*
-DBEnv_close(DBEnvObject* self, PyObject* args)
+DBEnv_close_internal(DBEnvObject* self, int flags)
 {
-    int err, flags = 0;
+    PyObject *dummy;
+    int err;
 
-    if (!PyArg_ParseTuple(args, "|i:close", &flags))
-        return NULL;
     if (!self->closed) {      /* Don't close more than once */
+        while(self->children_txns) {
+          dummy=DBTxn_abort_discard_internal(self->children_txns,0);
+          Py_XDECREF(dummy);
+        }
+        while(self->children_dbs) {
+          dummy=DB_close_internal(self->children_dbs,0);
+          Py_XDECREF(dummy);
+        }
+
         MYDB_BEGIN_ALLOW_THREADS;
         err = self->db_env->close(self->db_env, flags);
         MYDB_END_ALLOW_THREADS;
         /* after calling DBEnv->close, regardless of error, this DBEnv
-         * may not be accessed again (BerkeleyDB docs). */
+         * may not be accessed again (Berkeley DB docs). */
         self->closed = 1;
         self->db_env = NULL;
         RETURN_IF_ERR();
@@ -3844,6 +3994,16 @@
     RETURN_NONE();
 }
 
+static PyObject*
+DBEnv_close(DBEnvObject* self, PyObject* args)
+{
+    int flags = 0;
+
+    if (!PyArg_ParseTuple(args, "|i:close", &flags))
+        return NULL;
+    return DBEnv_close_internal(self,flags);
+}
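
[Note: DBEnv.close() is now a thin wrapper around DBEnv_close_internal(), which first
aborts or discards any child transactions and closes any child DB handles still recorded
in the new double-linked lists. From Python, tearing an environment down no longer
requires resolving every handle by hand. A sketch, where the home directory is a
hypothetical path that must already exist:

    from bsddb import db

    env = db.DBEnv()
    env.open('/tmp/bsddb-demo',        # hypothetical, pre-created home directory
             db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
             db.DB_INIT_LOG | db.DB_INIT_TXN)
    d = db.DB(env)
    d.open('demo.db', dbtype=db.DB_HASH, flags=db.DB_CREATE)
    txn = env.txn_begin()
    # 'd' and 'txn' are intentionally left open: DBEnv_close_internal()
    # now resolves them before handing the handle back to Berkeley DB.
    env.close()
]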
+
 
 static PyObject*
 DBEnv_open(DBEnvObject* self, PyObject* args)
@@ -3961,7 +4121,6 @@
 }
 #endif /* DBVER >= 41 */
 
-#if (DBVER >= 40)
 static PyObject*
 DBEnv_set_timeout(DBEnvObject* self, PyObject* args, PyObject* kwargs)
 {
@@ -3982,7 +4141,6 @@
     RETURN_IF_ERR();
     RETURN_NONE();
 }
-#endif /* DBVER >= 40 */
 
 static PyObject*
 DBEnv_set_shm_key(DBEnvObject* self, PyObject* args)
@@ -4035,6 +4193,26 @@
 }
 
 
+#if (DBVER >= 47)
+static PyObject*
+DBEnv_log_set_config(DBEnvObject* self, PyObject* args)
+{
+    int err, flags, onoff;
+
+    if (!PyArg_ParseTuple(args, "ii:log_set_config",
+                          &flags, &onoff))
+        return NULL;
+    CHECK_ENV_NOT_CLOSED(self);
+
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->log_set_config(self->db_env, flags, onoff);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+#endif /* DBVER >= 47 */
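
[Note: log_set_config() is new for Berkeley DB >= 4.7 and simply forwards a (flags, onoff)
pair to DB_ENV->log_set_config(). A sketch, with env an open DBEnv handle and assuming the
DB_LOG_AUTO_REMOVE flag constant is exported by the module in 4.7 builds:

    if db.version() >= (4, 7):
        env.log_set_config(db.DB_LOG_AUTO_REMOVE, 1)   # drop log files no longer needed
]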
+
+
 static PyObject*
 DBEnv_set_data_dir(DBEnvObject* self, PyObject* args)
 {
@@ -4103,8 +4281,24 @@
     RETURN_NONE();
 }
 
+#if (DBVER >= 42)
+static PyObject*
+DBEnv_get_lg_max(DBEnvObject* self)
+{
+    int err;
+    u_int32_t lg_max;
+
+    CHECK_ENV_NOT_CLOSED(self);
+
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->get_lg_max(self->db_env, &lg_max);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    return NUMBER_FromLong(lg_max);
+}
+#endif
+
 
-#if (DBVER >= 33)
 static PyObject*
 DBEnv_set_lg_regionmax(DBEnvObject* self, PyObject* args)
 {
@@ -4120,7 +4314,6 @@
     RETURN_IF_ERR();
     RETURN_NONE();
 }
-#endif
 
 
 static PyObject*
@@ -4247,6 +4440,79 @@
 
 
 static PyObject*
+DBEnv_txn_recover(DBEnvObject* self)
+{
+    int flags = DB_FIRST;
+    int err, i;
+    PyObject *list, *tuple, *gid;
+    DBTxnObject *txn;
+#define PREPLIST_LEN 16
+    DB_PREPLIST preplist[PREPLIST_LEN];
+    long retp;
+
+    CHECK_ENV_NOT_CLOSED(self);
+
+    list=PyList_New(0);
+    if (!list)
+        return NULL;
+    while (!0) {
+        MYDB_BEGIN_ALLOW_THREADS
+        err=self->db_env->txn_recover(self->db_env,
+                        preplist, PREPLIST_LEN, &retp, flags);
+#undef PREPLIST_LEN
+        MYDB_END_ALLOW_THREADS
+        if (err) {
+            Py_DECREF(list);
+            RETURN_IF_ERR();
+        }
+        if (!retp) break;
+        flags=DB_NEXT;  /* Prepare for next loop pass */
+        for (i=0; i<retp; i++) {
+            gid=PyBytes_FromStringAndSize((char *)(preplist[i].gid),
+                                DB_XIDDATASIZE);
+            if (!gid) {
+                Py_DECREF(list);
+                return NULL;
+            }
+            txn=newDBTxnObject(self, NULL, preplist[i].txn, flags);
+            if (!txn) {
+                Py_DECREF(list);
+                Py_DECREF(gid);
+                return NULL;
+            }
+            txn->flag_prepare=1;  /* Recover state */
+            tuple=PyTuple_New(2);
+            if (!tuple) {
+                Py_DECREF(list);
+                Py_DECREF(gid);
+                Py_DECREF(txn);
+                return NULL;
+            }
+            if (PyTuple_SetItem(tuple, 0, gid)) {
+                Py_DECREF(list);
+                Py_DECREF(gid);
+                Py_DECREF(txn);
+                Py_DECREF(tuple);
+                return NULL;
+            }
+            if (PyTuple_SetItem(tuple, 1, (PyObject *)txn)) {
+                Py_DECREF(list);
+                Py_DECREF(txn);
+                Py_DECREF(tuple); /* This also deletes the "gid" */
+                return NULL;
+            }
+            if (PyList_Append(list, tuple)) {
+                Py_DECREF(list);
+                Py_DECREF(tuple); /* This also deletes the "gid" and the "txn" */
+                return NULL;
+            }
+            Py_DECREF(tuple);
+        }
+    }
+    return list;
+}
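
[Note: txn_recover() backs the new distributed-transaction support (see the added
test_distributed_transactions.py). It iterates DB_FIRST/DB_NEXT batches of DB_PREPLIST
entries and returns a list of (gid, txn) tuples, each recovered transaction flagged so it
is never aborted implicitly. On the Python side, with env an open (recovered) DBEnv, the
recovery pass looks roughly like this; committed_gids stands in for whatever bookkeeping
the application keeps:

    # after reopening the environment with db.DB_RECOVER
    for gid, txn in env.txn_recover():
        if gid in committed_gids:      # hypothetical application bookkeeping
            txn.commit()
        else:
            txn.abort()
]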
+
+static PyObject*
 DBEnv_txn_begin(DBEnvObject* self, PyObject* args, PyObject* kwargs)
 {
     int flags = 0;
@@ -4262,7 +4528,7 @@
         return NULL;
     CHECK_ENV_NOT_CLOSED(self);
 
-    return (PyObject*)newDBTxnObject(self, txn, flags);
+    return (PyObject*)newDBTxnObject(self, (DBTxnObject *)txnobj, NULL, flags);
 }
 
 
@@ -4276,11 +4542,7 @@
     CHECK_ENV_NOT_CLOSED(self);
 
     MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
     err = self->db_env->txn_checkpoint(self->db_env, kbyte, min, flags);
-#else
-    err = txn_checkpoint(self->db_env, kbyte, min, flags);
-#endif
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
     RETURN_NONE();
@@ -4330,14 +4592,10 @@
     CHECK_ENV_NOT_CLOSED(self);
 
     MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
     err = self->db_env->lock_detect(self->db_env, flags, atype, &aborted);
-#else
-    err = lock_detect(self->db_env, flags, atype, &aborted);
-#endif
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
-    return PyLong_FromLong(aborted);
+    return NUMBER_FromLong(aborted);
 }
 
 
@@ -4347,44 +4605,34 @@
     int flags=0;
     int locker, lock_mode;
     DBT obj;
-    PyObject *objobj, *retval;
-    Py_buffer *obj_buf_view = NULL;
+    PyObject* objobj;
 
     if (!PyArg_ParseTuple(args, "iOi|i:lock_get", &locker, &objobj, &lock_mode, &flags))
         return NULL;
 
-    if (!make_dbt(objobj, &obj, &obj_buf_view))
+
+    if (!make_dbt(objobj, &obj))
         return NULL;
 
-    retval = (PyObject*)newDBLockObject(self, locker, &obj, lock_mode, flags);
-    free_buf_view(objobj, obj_buf_view);
-    return retval;
+    return (PyObject*)newDBLockObject(self, locker, &obj, lock_mode, flags);
 }
 
 
 static PyObject*
-DBEnv_lock_id(DBEnvObject* self, PyObject* args)
+DBEnv_lock_id(DBEnvObject* self)
 {
     int err;
     u_int32_t theID;
 
-    if (!PyArg_ParseTuple(args, ":lock_id"))
-        return NULL;
-
     CHECK_ENV_NOT_CLOSED(self);
     MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
     err = self->db_env->lock_id(self->db_env, &theID);
-#else
-    err = lock_id(self->db_env, &theID);
-#endif
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
 
-    return PyLong_FromLong((long)theID);
+    return NUMBER_FromLong((long)theID);
 }
 
-#if (DBVER >= 40)
 static PyObject*
 DBEnv_lock_id_free(DBEnvObject* self, PyObject* args)
 {
@@ -4401,7 +4649,6 @@
     RETURN_IF_ERR();
     RETURN_NONE();
 }
-#endif
 
 static PyObject*
 DBEnv_lock_put(DBEnvObject* self, PyObject* args)
@@ -4414,11 +4661,7 @@
 
     CHECK_ENV_NOT_CLOSED(self);
     MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
     err = self->db_env->lock_put(self->db_env, &dblockobj->lock);
-#else
-    err = lock_put(self->db_env, &dblockobj->lock);
-#endif
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
     RETURN_NONE();
@@ -4446,7 +4689,6 @@
 }
 #endif /* DBVER >= 4.4 */
 
-#if (DBVER >= 40)
 static PyObject*
 DBEnv_log_stat(DBEnvObject* self, PyObject* args)
 {
@@ -4482,7 +4724,7 @@
     MAKE_ENTRY(lg_size);
     MAKE_ENTRY(record);
 #endif
-#if (DBVER <= 40)
+#if (DBVER < 41)
     MAKE_ENTRY(lg_max);
 #endif
     MAKE_ENTRY(w_mbytes);
@@ -4509,7 +4751,6 @@
     free(statp);
     return d;
 } /* DBEnv_log_stat */
-#endif /* DBVER >= 4.0 for log_stat method */
 
 
 static PyObject*
@@ -4525,15 +4766,7 @@
     CHECK_ENV_NOT_CLOSED(self);
 
     MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
     err = self->db_env->lock_stat(self->db_env, &sp, flags);
-#else
-#if (DBVER >= 33)
-    err = lock_stat(self->db_env, &sp);
-#else
-    err = lock_stat(self->db_env, &sp, NULL);
-#endif
-#endif
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
 
@@ -4549,6 +4782,10 @@
 #if (DBVER < 41)
     MAKE_ENTRY(lastid);
 #endif
+#if (DBVER >=41)
+    MAKE_ENTRY(id);
+    MAKE_ENTRY(cur_maxid);
+#endif
     MAKE_ENTRY(nmodes);
     MAKE_ENTRY(maxlocks);
     MAKE_ENTRY(maxlockers);
@@ -4561,6 +4798,10 @@
     MAKE_ENTRY(maxnobjects);
     MAKE_ENTRY(nrequests);
     MAKE_ENTRY(nreleases);
+#if (DBVER >= 44)
+    MAKE_ENTRY(nupgrade);
+    MAKE_ENTRY(ndowngrade);
+#endif
 #if (DBVER < 44)
     MAKE_ENTRY(nnowaits);       /* these were renamed in 4.4 */
     MAKE_ENTRY(nconflicts);
@@ -4569,6 +4810,26 @@
     MAKE_ENTRY(lock_wait);
 #endif
     MAKE_ENTRY(ndeadlocks);
+#if (DBVER >= 41)
+    MAKE_ENTRY(locktimeout);
+    MAKE_ENTRY(txntimeout);
+#endif
+    MAKE_ENTRY(nlocktimeouts);
+    MAKE_ENTRY(ntxntimeouts);
+#if (DBVER >= 46)
+    MAKE_ENTRY(objs_wait);
+    MAKE_ENTRY(objs_nowait);
+    MAKE_ENTRY(lockers_wait);
+    MAKE_ENTRY(lockers_nowait);
+#if (DBVER >= 47)
+    MAKE_ENTRY(lock_wait);
+    MAKE_ENTRY(lock_nowait);
+#else
+    MAKE_ENTRY(locks_wait);
+    MAKE_ENTRY(locks_nowait);
+#endif
+    MAKE_ENTRY(hash_len);
+#endif
     MAKE_ENTRY(regsize);
     MAKE_ENTRY(region_wait);
     MAKE_ENTRY(region_nowait);
@@ -4578,28 +4839,36 @@
     return d;
 }
 
-
 static PyObject*
-DBEnv_log_archive(DBEnvObject* self, PyObject* args)
+DBEnv_log_flush(DBEnvObject* self)
 {
-    int flags=0;
     int err;
-    char **log_list = NULL;
-    PyObject* list;
-    PyObject* item = NULL;
-
-    if (!PyArg_ParseTuple(args, "|i:log_archive", &flags))
-        return NULL;
 
     CHECK_ENV_NOT_CLOSED(self);
-    MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
-    err = self->db_env->log_archive(self->db_env, &log_list, flags);
-#elif (DBVER == 33)
-    err = log_archive(self->db_env, &log_list, flags);
-#else
-    err = log_archive(self->db_env, &log_list, flags, NULL);
-#endif
+
+    MYDB_BEGIN_ALLOW_THREADS
+    err = self->db_env->log_flush(self->db_env, NULL);
+    MYDB_END_ALLOW_THREADS
+
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_log_archive(DBEnvObject* self, PyObject* args)
+{
+    int flags=0;
+    int err;
+    char **log_list = NULL;
+    PyObject* list;
+    PyObject* item = NULL;
+
+    if (!PyArg_ParseTuple(args, "|i:log_archive", &flags))
+        return NULL;
+
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->log_archive(self->db_env, &log_list, flags);
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
 
@@ -4613,13 +4882,18 @@
     if (log_list) {
         char **log_list_start;
         for (log_list_start = log_list; *log_list != NULL; ++log_list) {
-            item = PyUnicode_FromString (*log_list);
+            item = PyBytes_FromString (*log_list);
             if (item == NULL) {
                 Py_DECREF(list);
                 list = NULL;
                 break;
             }
-            PyList_Append(list, item);
+            if (PyList_Append(list, item)) {
+                Py_DECREF(list);
+                list = NULL;
+                Py_DECREF(item);
+                break;
+            }
             Py_DECREF(item);
         }
         free(log_list_start);
@@ -4641,13 +4915,7 @@
     CHECK_ENV_NOT_CLOSED(self);
 
     MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
     err = self->db_env->txn_stat(self->db_env, &sp, flags);
-#elif (DBVER == 33)
-    err = txn_stat(self->db_env, &sp);
-#else
-    err = txn_stat(self->db_env, &sp, NULL);
-#endif
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
 
@@ -4658,21 +4926,29 @@
         return NULL;
     }
 
-#define MAKE_ENTRY(name)  _addIntToDict(d, #name, sp->st_##name)
-#define MAKE_TIME_T_ENTRY(name)_addTimeTToDict(d, #name, sp->st_##name)
+#define MAKE_ENTRY(name)        _addIntToDict(d, #name, sp->st_##name)
+#define MAKE_TIME_T_ENTRY(name) _addTimeTToDict(d, #name, sp->st_##name)
+#define MAKE_DB_LSN_ENTRY(name) _addDB_lsnToDict(d, #name, sp->st_##name)
 
+    MAKE_DB_LSN_ENTRY(last_ckp);
     MAKE_TIME_T_ENTRY(time_ckp);
     MAKE_ENTRY(last_txnid);
     MAKE_ENTRY(maxtxns);
     MAKE_ENTRY(nactive);
     MAKE_ENTRY(maxnactive);
+#if (DBVER >= 45)
+    MAKE_ENTRY(nsnapshot);
+    MAKE_ENTRY(maxnsnapshot);
+#endif
     MAKE_ENTRY(nbegins);
     MAKE_ENTRY(naborts);
     MAKE_ENTRY(ncommits);
+    MAKE_ENTRY(nrestores);
     MAKE_ENTRY(regsize);
     MAKE_ENTRY(region_wait);
     MAKE_ENTRY(region_nowait);
 
+#undef MAKE_DB_LSN_ENTRY
 #undef MAKE_ENTRY
 #undef MAKE_TIME_T_ENTRY
     free(sp);
@@ -4696,151 +4972,1043 @@
         ++oldValue;
     self->moduleFlags.getReturnsNone = (flags >= 1);
     self->moduleFlags.cursorSetReturnsNone = (flags >= 2);
-    return PyLong_FromLong(oldValue);
+    return NUMBER_FromLong(oldValue);
 }
 
+static PyObject*
+DBEnv_get_private(DBEnvObject* self)
+{
+    /* We can give out the private field even if dbenv is closed */
+    Py_INCREF(self->private_obj);
+    return self->private_obj;
+}
 
-/* --------------------------------------------------------------------- */
-/* DBTxn methods */
+static PyObject*
+DBEnv_set_private(DBEnvObject* self, PyObject* private_obj)
+{
+    /* We can set the private field even if dbenv is closed */
+    Py_DECREF(self->private_obj);
+    Py_INCREF(private_obj);
+    self->private_obj = private_obj;
+    RETURN_NONE();
+}
 
 
 static PyObject*
-DBTxn_commit(DBTxnObject* self, PyObject* args)
+DBEnv_set_rpc_server(DBEnvObject* self, PyObject* args, PyObject* kwargs)
 {
-    int flags=0, err;
-    DB_TXN *txn;
+    int err;
+    char *host;
+    long cl_timeout=0, sv_timeout=0;
 
-    if (!PyArg_ParseTuple(args, "|i:commit", &flags))
+    static char* kwnames[] = { "host", "cl_timeout", "sv_timeout", NULL};
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|ll:set_rpc_server", kwnames,
+                                     &host, &cl_timeout, &sv_timeout))
         return NULL;
+    CHECK_ENV_NOT_CLOSED(self);
 
-    if (!self->txn) {
-        PyObject *t =  Py_BuildValue("(is)", 0, "DBTxn must not be used "
-                                     "after txn_commit or txn_abort");
-        PyErr_SetObject(DBError, t);
-        Py_DECREF(t);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->set_rpc_server(self->db_env, NULL, host, cl_timeout,
+            sv_timeout, 0);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_set_verbose(DBEnvObject* self, PyObject* args)
+{
+    int err;
+    int which, onoff;
+
+    if (!PyArg_ParseTuple(args, "ii:set_verbose", &which, &onoff)) {
         return NULL;
     }
-    txn = self->txn;
-    self->txn = NULL;   /* this DB_TXN is no longer valid after this call */
+    CHECK_ENV_NOT_CLOSED(self);
     MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
-    err = txn->commit(txn, flags);
-#else
-    err = txn_commit(txn, flags);
+    err = self->db_env->set_verbose(self->db_env, which, onoff);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+#if (DBVER >= 42)
+static PyObject*
+DBEnv_get_verbose(DBEnvObject* self, PyObject* args)
+{
+    int err;
+    int which;
+    int verbose;
+
+    if (!PyArg_ParseTuple(args, "i:get_verbose", &which)) {
+        return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->get_verbose(self->db_env, which, &verbose);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    return PyBool_FromLong(verbose);
+}
+#endif
+
+#if (DBVER >= 45)
+static void
+_dbenv_event_notifyCallback(DB_ENV* db_env, u_int32_t event, void *event_info)
+{
+    DBEnvObject *dbenv;
+    PyObject* callback;
+    PyObject* args;
+    PyObject* result = NULL;
+
+    MYDB_BEGIN_BLOCK_THREADS;
+    dbenv = (DBEnvObject *)db_env->app_private;
+    callback = dbenv->event_notifyCallback;
+    if (callback) {
+        if (event == DB_EVENT_REP_NEWMASTER) {
+            args = Py_BuildValue("(Oii)", dbenv, event, *((int *)event_info));
+        } else {
+            args = Py_BuildValue("(OiO)", dbenv, event, Py_None);
+        }
+        if (args) {
+            result = PyEval_CallObject(callback, args);
+        }
+        if ((!args) || (!result)) {
+            PyErr_Print();
+        }
+        Py_XDECREF(args);
+        Py_XDECREF(result);
+    }
+    MYDB_END_BLOCK_THREADS;
+}
+#endif
+
+#if (DBVER >= 45)
+static PyObject*
+DBEnv_set_event_notify(DBEnvObject* self, PyObject* notifyFunc)
+{
+    int err;
+
+    CHECK_ENV_NOT_CLOSED(self);
+
+    if (!PyCallable_Check(notifyFunc)) {
+	    makeTypeError("Callable", notifyFunc);
+	    return NULL;
+    }
+
+    Py_XDECREF(self->event_notifyCallback);
+    Py_INCREF(notifyFunc);
+    self->event_notifyCallback = notifyFunc;
+
+    /* This is to work around a problem with uninitialized threads (see
+       comment in DB_associate) */
+#ifdef WITH_THREAD
+    PyEval_InitThreads();
 #endif
+
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->set_event_notify(self->db_env, _dbenv_event_notifyCallback);
     MYDB_END_ALLOW_THREADS;
+
+    if (err) {
+	    Py_DECREF(notifyFunc);
+	    self->event_notifyCallback = NULL;
+    }
+
     RETURN_IF_ERR();
     RETURN_NONE();
 }
+#endif
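
[Note: set_event_notify() (4.5+) keeps a reference to the Python callable and installs
_dbenv_event_notifyCallback, which invokes it as callback(dbenv, event, event_info);
event_info is the new master's envid for DB_EVENT_REP_NEWMASTER and None for everything
else. A sketch, with env an open DBEnv and assuming the DB_EVENT_* constants are exported
for 4.5+ builds:

    def on_event(dbenv, event, info):
        if event == db.DB_EVENT_REP_NEWMASTER:
            print('new master elected, envid', info)

    env.set_event_notify(on_event)
]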
+
+
+/* --------------------------------------------------------------------- */
+/* REPLICATION METHODS: Base Replication */
+
 
 static PyObject*
-DBTxn_prepare(DBTxnObject* self, PyObject* args)
+DBEnv_rep_process_message(DBEnvObject* self, PyObject* args)
 {
-#if (DBVER >= 33)
     int err;
-    char* gid=NULL;
-    int   gid_size=0;
+    PyObject *control_py, *rec_py;
+    DBT control, rec;
+    int envid;
+#if (DBVER >= 42)
+    DB_LSN lsn;
+#endif
 
-    if (!PyArg_ParseTuple(args, "y#:prepare", &gid, &gid_size))
+    if (!PyArg_ParseTuple(args, "OOi:rep_process_message", &control_py,
+                &rec_py, &envid))
         return NULL;
+    CHECK_ENV_NOT_CLOSED(self);
 
-    if (gid_size != DB_XIDDATASIZE) {
-        PyErr_SetString(PyExc_TypeError,
-                        "gid must be DB_XIDDATASIZE bytes long");
+    if (!make_dbt(control_py, &control))
         return NULL;
+    if (!make_dbt(rec_py, &rec))
+        return NULL;
+
+    MYDB_BEGIN_ALLOW_THREADS;
+#if (DBVER >= 46)
+    err = self->db_env->rep_process_message(self->db_env, &control, &rec,
+            envid, &lsn);
+#else
+#if (DBVER >= 42)
+    err = self->db_env->rep_process_message(self->db_env, &control, &rec,
+            &envid, &lsn);
+#else
+    err = self->db_env->rep_process_message(self->db_env, &control, &rec,
+            &envid);
+#endif
+#endif
+    MYDB_END_ALLOW_THREADS;
+    switch (err) {
+        case DB_REP_NEWMASTER :
+          return Py_BuildValue("(iO)", envid, Py_None);
+          break;
+
+        case DB_REP_DUPMASTER :
+        case DB_REP_HOLDELECTION :
+#if (DBVER >= 44)
+        case DB_REP_IGNORE :
+        case DB_REP_JOIN_FAILURE :
+#endif
+            return Py_BuildValue("(iO)", err, Py_None);
+            break;
+        case DB_REP_NEWSITE :
+            {
+                PyObject *tmp, *r;
+
+                if (!(tmp = PyBytes_FromStringAndSize(rec.data, rec.size))) {
+                    return NULL;
+                }
+
+                r = Py_BuildValue("(iO)", err, tmp);
+                Py_DECREF(tmp);
+                return r;
+                break;
+            }
+#if (DBVER >= 42)
+        case DB_REP_NOTPERM :
+        case DB_REP_ISPERM :
+            return Py_BuildValue("(i(ll))", err, lsn.file, lsn.offset);
+            break;
+#endif
     }
+    RETURN_IF_ERR();
+    return Py_BuildValue("(OO)", Py_None, Py_None);
+}
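
[Note: rep_process_message() maps the DB_REP_* return codes onto 2-tuples: (envid, None)
for a new master, (err, None) for DUPMASTER/HOLDELECTION and friends, (err, bytes) for
NEWSITE, (err, (lsn_file, lsn_offset)) for NOTPERM/ISPERM, and (None, None) otherwise.
A sketch of the receive loop a base-replication application might run, with env an open
DBEnv; get_next_message() is a purely hypothetical transport helper:

    while True:
        control, rec, envid = get_next_message()     # hypothetical receive
        status, detail = env.rep_process_message(control, rec, envid)
        if status == db.DB_REP_HOLDELECTION:
            env.rep_elect(3, 2)                      # nsites=3, nvotes=2 (4.6+ signature)
]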
 
-    if (!self->txn) {
-        PyObject *t = Py_BuildValue("(is)", 0,"DBTxn must not be used "
-                                    "after txn_commit or txn_abort");
-        PyErr_SetObject(DBError, t);
-        Py_DECREF(t);
+static int
+_DBEnv_rep_transportCallback(DB_ENV* db_env, const DBT* control, const DBT* rec,
+        const DB_LSN *lsn, int envid, u_int32_t flags)
+{
+    DBEnvObject *dbenv;
+    PyObject* rep_transport;
+    PyObject* args;
+    PyObject *a, *b;
+    PyObject* result = NULL;
+    int ret=0;
+
+    MYDB_BEGIN_BLOCK_THREADS;
+    dbenv = (DBEnvObject *)db_env->app_private;
+    rep_transport = dbenv->rep_transport;
+
+    /*
+    ** The errors in 'a' or 'b' are detected in "Py_BuildValue".
+    */
+    a = PyBytes_FromStringAndSize(control->data, control->size);
+    b = PyBytes_FromStringAndSize(rec->data, rec->size);
+
+    args = Py_BuildValue(
+#if (PY_VERSION_HEX >= 0x02040000)
+            "(OOO(ll)iI)",
+#else
+            "(OOO(ll)ii)",
+#endif
+            dbenv,
+            a, b,
+            lsn->file, lsn->offset, envid, flags);
+    if (args) {
+        result = PyEval_CallObject(rep_transport, args);
+    }
+
+    if ((!args) || (!result)) {
+        PyErr_Print();
+        ret = -1;
+    }
+    Py_XDECREF(a);
+    Py_XDECREF(b);
+    Py_XDECREF(args);
+    Py_XDECREF(result);
+    MYDB_END_BLOCK_THREADS;
+    return ret;
+}
+
+#if (DBVER <= 41)
+static int
+_DBEnv_rep_transportCallbackOLD(DB_ENV* db_env, const DBT* control, const DBT* rec,
+        int envid, u_int32_t flags)
+{
+    DB_LSN lsn;
+
+    lsn.file = -1;  /* Dummy values */
+    lsn.offset = -1;
+    return _DBEnv_rep_transportCallback(db_env, control, rec, &lsn, envid,
+            flags);
+}
+#endif
+
+static PyObject*
+DBEnv_rep_set_transport(DBEnvObject* self, PyObject* args)
+{
+    int err;
+    int envid;
+    PyObject *rep_transport;
+
+    if (!PyArg_ParseTuple(args, "iO:rep_set_transport", &envid, &rep_transport))
+        return NULL;
+    CHECK_ENV_NOT_CLOSED(self);
+    if (!PyCallable_Check(rep_transport)) {
+        makeTypeError("Callable", rep_transport);
         return NULL;
     }
+
     MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
-    err = self->txn->prepare(self->txn, (u_int8_t*)gid);
+#if (DBVER >=45)
+    err = self->db_env->rep_set_transport(self->db_env, envid,
+            &_DBEnv_rep_transportCallback);
+#else
+#if (DBVER >= 42)
+    err = self->db_env->set_rep_transport(self->db_env, envid,
+            &_DBEnv_rep_transportCallback);
 #else
-    err = txn_prepare(self->txn, (u_int8_t*)gid);
+    err = self->db_env->set_rep_transport(self->db_env, envid,
+            &_DBEnv_rep_transportCallbackOLD);
+#endif
 #endif
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
+
+    Py_DECREF(self->rep_transport);
+    Py_INCREF(rep_transport);
+    self->rep_transport = rep_transport;
+    RETURN_NONE();
+}
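
[Note: rep_set_transport() stores the callable and installs the C trampoline above, so the
Python send function is called as send(dbenv, control, rec, (lsn_file, lsn_offset), envid,
flags), with control and rec delivered as bytes. A sketch; transmit() and local_eid are
assumptions standing in for the application's networking layer and its own site id:

    def send(dbenv, control, rec, lsn, envid, flags):
        # envid may be db.DB_EID_BROADCAST for messages addressed to every site
        transmit(envid, control, rec)                # hypothetical network send
        return 0

    env.rep_set_transport(local_eid, send)           # local_eid: this site's id
]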
+
+#if (DBVER >= 47)
+static PyObject*
+DBEnv_rep_set_request(DBEnvObject* self, PyObject* args)
+{
+    int err;
+    unsigned int minimum, maximum;
+
+    if (!PyArg_ParseTuple(args,"II:rep_set_request", &minimum, &maximum))
+        return NULL;
+    CHECK_ENV_NOT_CLOSED(self);
+
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_set_request(self->db_env, minimum, maximum);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
     RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_rep_get_request(DBEnvObject* self)
+{
+    int err;
+    u_int32_t minimum, maximum;
+
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_get_request(self->db_env, &minimum, &maximum);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+#if (PY_VERSION_HEX >= 0x02040000)
+    return Py_BuildValue("II", minimum, maximum);
 #else
+    return Py_BuildValue("ii", minimum, maximum);
+#endif
+}
+#endif
+
+#if (DBVER >= 45)
+static PyObject*
+DBEnv_rep_set_limit(DBEnvObject* self, PyObject* args)
+{
     int err;
+    int limit;
 
-    if (!PyArg_ParseTuple(args, ":prepare"))
+    if (!PyArg_ParseTuple(args,"i:rep_set_limit", &limit))
         return NULL;
+    CHECK_ENV_NOT_CLOSED(self);
 
-    if (!self->txn) {
-        PyObject *t = Py_BuildValue("(is)", 0, "DBTxn must not be used "
-                                    "after txn_commit or txn_abort");
-        PyErr_SetObject(DBError, t);
-        Py_DECREF(t);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_set_limit(self->db_env, 0, limit);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_rep_get_limit(DBEnvObject* self)
+{
+    int err;
+    u_int32_t gbytes, bytes;
+
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_get_limit(self->db_env, &gbytes, &bytes);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    return NUMBER_FromLong(bytes);
+}
+#endif
+
+#if (DBVER >= 44)
+static PyObject*
+DBEnv_rep_set_config(DBEnvObject* self, PyObject* args)
+{
+    int err;
+    int which;
+    int onoff;
+
+    if (!PyArg_ParseTuple(args,"ii:rep_set_config", &which, &onoff))
+        return NULL;
+    CHECK_ENV_NOT_CLOSED(self);
+
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_set_config(self->db_env, which, onoff);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_rep_get_config(DBEnvObject* self, PyObject* args)
+{
+    int err;
+    int which;
+    int onoff;
+
+    if (!PyArg_ParseTuple(args, "i:rep_get_config", &which)) {
+        return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_get_config(self->db_env, which, &onoff);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    return PyBool_FromLong(onoff);
+}
+#endif
+
+#if (DBVER >= 46)
+static PyObject*
+DBEnv_rep_elect(DBEnvObject* self, PyObject* args)
+{
+    int err;
+    u_int32_t nsites, nvotes;
+
+    if (!PyArg_ParseTuple(args, "II:rep_elect", &nsites, &nvotes)) {
+        return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_elect(self->db_env, nvotes, nvotes, 0);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+#endif
+
+static PyObject*
+DBEnv_rep_start(DBEnvObject* self, PyObject* args, PyObject* kwargs)
+{
+    int err;
+    PyObject *cdata_py = Py_None;
+    DBT cdata;
+    int flags;
+    static char* kwnames[] = {"flags","cdata", NULL};
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs,
+                "i|O:rep_start", kwnames, &flags, &cdata_py))
+    {
+	    return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+
+    if (!make_dbt(cdata_py, &cdata))
+        return NULL;
+
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_start(self->db_env, cdata.size ? &cdata : NULL,
+            flags);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+#if (DBVER >= 44)
+static PyObject*
+DBEnv_rep_sync(DBEnvObject* self)
+{
+    int err;
+
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_sync(self->db_env, 0);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+#endif
+
+
+#if (DBVER >= 45)
+static PyObject*
+DBEnv_rep_set_nsites(DBEnvObject* self, PyObject* args)
+{
+    int err;
+    int nsites;
+
+    if (!PyArg_ParseTuple(args, "i:rep_set_nsites", &nsites)) {
+        return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_set_nsites(self->db_env, nsites);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_rep_get_nsites(DBEnvObject* self)
+{
+    int err;
+#if (DBVER >= 47)
+    u_int32_t nsites;
+#else
+    int nsites;
+#endif
+
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_get_nsites(self->db_env, &nsites);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    return NUMBER_FromLong(nsites);
+}
+
+static PyObject*
+DBEnv_rep_set_priority(DBEnvObject* self, PyObject* args)
+{
+    int err;
+    int priority;
+
+    if (!PyArg_ParseTuple(args, "i:rep_set_priority", &priority)) {
+        return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_set_priority(self->db_env, priority);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_rep_get_priority(DBEnvObject* self)
+{
+    int err;
+#if (DBVER >= 47)
+    u_int32_t priority;
+#else
+    int priority;
+#endif
+
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_get_priority(self->db_env, &priority);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    return NUMBER_FromLong(priority);
+}
+
+static PyObject*
+DBEnv_rep_set_timeout(DBEnvObject* self, PyObject* args)
+{
+    int err;
+    int which, timeout;
+
+    if (!PyArg_ParseTuple(args, "ii:rep_set_timeout", &which, &timeout)) {
+        return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_set_timeout(self->db_env, which, timeout);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_rep_get_timeout(DBEnvObject* self, PyObject* args)
+{
+    int err;
+    int which;
+    u_int32_t timeout;
+
+    if (!PyArg_ParseTuple(args, "i:rep_get_timeout", &which)) {
+        return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->rep_get_timeout(self->db_env, which, &timeout);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    return NUMBER_FromLong(timeout);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+/* REPLICATION METHODS: Replication Manager */
+
+#if (DBVER >= 45)
+static PyObject*
+DBEnv_repmgr_start(DBEnvObject* self, PyObject* args, PyObject*
+        kwargs)
+{
+    int err;
+    int nthreads, flags;
+    static char* kwnames[] = {"nthreads","flags", NULL};
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs,
+                "ii:repmgr_start", kwnames, &nthreads, &flags))
+    {
+	    return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->repmgr_start(self->db_env, nthreads, flags);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_repmgr_set_local_site(DBEnvObject* self, PyObject* args, PyObject*
+        kwargs)
+{
+    int err;
+    char *host;
+    int port;
+    int flags = 0;
+    static char* kwnames[] = {"host", "port", "flags", NULL};
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs,
+                "si|i:repmgr_set_local_site", kwnames, &host, &port, &flags))
+    {
+	    return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->repmgr_set_local_site(self->db_env, host, port, flags);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_repmgr_add_remote_site(DBEnvObject* self, PyObject* args, PyObject*
+        kwargs)
+{
+    int err;
+    char *host;
+    int port;
+    int flags = 0;
+    int eidp;
+    static char* kwnames[] = {"host", "port", "flags", NULL};
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs,
+                "si|i:repmgr_add_remote_site", kwnames, &host, &port, &flags))
+    {
+	    return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->repmgr_add_remote_site(self->db_env, host, port, &eidp, flags);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    return NUMBER_FromLong(eidp);
+}
+
+static PyObject*
+DBEnv_repmgr_set_ack_policy(DBEnvObject* self, PyObject* args)
+{
+    int err;
+    int ack_policy;
+
+    if (!PyArg_ParseTuple(args, "i:repmgr_set_ack_policy", &ack_policy))
+    {
+	    return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->repmgr_set_ack_policy(self->db_env, ack_policy);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_repmgr_get_ack_policy(DBEnvObject* self)
+{
+    int err;
+    int ack_policy;
+
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->repmgr_get_ack_policy(self->db_env, &ack_policy);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    return NUMBER_FromLong(ack_policy);
+}
+
+static PyObject*
+DBEnv_repmgr_site_list(DBEnvObject* self)
+{
+    int err;
+    unsigned int countp;
+    DB_REPMGR_SITE *listp;
+    PyObject *stats, *key, *tuple;
+
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->repmgr_site_list(self->db_env, &countp, &listp);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+
+    stats=PyDict_New();
+    if (stats == NULL) {
+        free(listp);
+        return NULL;
+    }
+
+    for(;countp--;) {
+        key=NUMBER_FromLong(listp[countp].eid);
+        if(!key) {
+            Py_DECREF(stats);
+            free(listp);
+            return NULL;
+        }
+#if (PY_VERSION_HEX >= 0x02040000)
+        tuple=Py_BuildValue("(sII)", listp[countp].host,
+                listp[countp].port, listp[countp].status);
+#else
+        tuple=Py_BuildValue("(sii)", listp[countp].host,
+                listp[countp].port, listp[countp].status);
+#endif
+        if(!tuple) {
+            Py_DECREF(key);
+            Py_DECREF(stats);
+            free(listp);
+            return NULL;
+        }
+        if(PyDict_SetItem(stats, key, tuple)) {
+            Py_DECREF(key);
+            Py_DECREF(tuple);
+            Py_DECREF(stats);
+            free(listp);
+            return NULL;
+        }
+    }
+    free(listp);
+    return stats;
+}
+#endif
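
[Note: repmgr_site_list() (4.5+) builds a dict keyed by environment id, each value a
(host, port, status) tuple. Sketch, with env an open DBEnv running the replication manager:

    for eid, (host, port, status) in env.repmgr_site_list().items():
        print(eid, host, port, status)
]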
+
+#if (DBVER >= 46)
+static PyObject*
+DBEnv_repmgr_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
+{
+    int err;
+    int flags=0;
+    static char* kwnames[] = { "flags", NULL };
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i:repmgr_stat_print",
+                kwnames, &flags))
+    {
+        return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->repmgr_stat_print(self->db_env, flags);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_repmgr_stat(DBEnvObject* self, PyObject* args, PyObject *kwargs)
+{
+    int err;
+    int flags=0;
+    DB_REPMGR_STAT *statp;
+    PyObject *stats;
+    static char* kwnames[] = { "flags", NULL };
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i:repmgr_stat",
+                kwnames, &flags))
+    {
+        return NULL;
+    }
+    CHECK_ENV_NOT_CLOSED(self);
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = self->db_env->repmgr_stat(self->db_env, &statp, flags);
+    MYDB_END_ALLOW_THREADS;
+    RETURN_IF_ERR();
+
+    stats=PyDict_New();
+    if (stats == NULL) {
+        free(statp);
+        return NULL;
+    }
+
+#define MAKE_ENTRY(name)  _addIntToDict(stats, #name, statp->st_##name)
+
+    MAKE_ENTRY(perm_failed);
+    MAKE_ENTRY(msgs_queued);
+    MAKE_ENTRY(msgs_dropped);
+    MAKE_ENTRY(connection_drop);
+    MAKE_ENTRY(connect_fail);
+
+#undef MAKE_ENTRY
+
+    free(statp);
+    return stats;
+}
+#endif
+
+
+/* --------------------------------------------------------------------- */
+/* DBTxn methods */
+
+
+static void _close_transaction_cursors(DBTxnObject* txn)
+{
+    PyObject *dummy;
+
+    while(txn->children_cursors) {
+        PyErr_Warn(PyExc_RuntimeWarning,
+            "Must close cursors before resolving a transaction.");
+        dummy=DBC_close_internal(txn->children_cursors);
+        Py_XDECREF(dummy);
+    }
+}
+
+static void _promote_transaction_dbs_and_sequences(DBTxnObject *txn)
+{
+    DBObject *db;
+#if (DBVER >= 43)
+    DBSequenceObject *dbs;
+#endif
+
+    while (txn->children_dbs) {
+        db=txn->children_dbs;
+        EXTRACT_FROM_DOUBLE_LINKED_LIST_TXN(db);
+        if (txn->parent_txn) {
+            INSERT_IN_DOUBLE_LINKED_LIST_TXN(txn->parent_txn->children_dbs,db);
+            db->txn=txn->parent_txn;
+        } else {
+            /* The db is already linked to its environment,
+            ** so nothing to do.
+            */
+            db->txn=NULL; 
+        }
+    }
+
+#if (DBVER >= 43)
+    while (txn->children_sequences) {
+        dbs=txn->children_sequences;
+        EXTRACT_FROM_DOUBLE_LINKED_LIST_TXN(dbs);
+        if (txn->parent_txn) {
+            INSERT_IN_DOUBLE_LINKED_LIST_TXN(txn->parent_txn->children_sequences,dbs);
+            dbs->txn=txn->parent_txn;
+        } else {
+            /* The sequence is already linked to its
+            ** parent db. Nothing to do.
+            */
+            dbs->txn=NULL;
+        }
+    }
+#endif
+}
+
+
+static PyObject*
+DBTxn_commit(DBTxnObject* self, PyObject* args)
+{
+    int flags=0, err;
+    DB_TXN *txn;
+
+    if (!PyArg_ParseTuple(args, "|i:commit", &flags))
+        return NULL;
+
+    _close_transaction_cursors(self);
+
+    if (!self->txn) {
+        PyObject *t =  Py_BuildValue("(is)", 0, "DBTxn must not be used "
+                                     "after txn_commit, txn_abort "
+                                     "or txn_discard");
+        if (t) {
+            PyErr_SetObject(DBError, t);
+            Py_DECREF(t);
+        }
+        return NULL;
+    }
+    self->flag_prepare=0;
+    txn = self->txn;
+    self->txn = NULL;   /* this DB_TXN is no longer valid after this call */
+
+    EXTRACT_FROM_DOUBLE_LINKED_LIST(self);
+
+    MYDB_BEGIN_ALLOW_THREADS;
+    err = txn->commit(txn, flags);
+    MYDB_END_ALLOW_THREADS;
+
+    _promote_transaction_dbs_and_sequences(self);
+
+    RETURN_IF_ERR();
+    RETURN_NONE();
+}
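
[Note: committing now first closes any cursors still open inside the transaction (emitting
a RuntimeWarning) and then re-parents child DB and sequence handles onto the parent
transaction or the environment. A sketch of the newly warned-about pattern, assuming env
and a transactionally opened d as in the earlier sketches:

    txn = env.txn_begin()
    c = d.cursor(txn)
    # forgetting c.close() here now emits
    #   RuntimeWarning: Must close cursors before resolving a transaction.
    # and the cursor is closed automatically before the commit proceeds
    txn.commit()
]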
+
+static PyObject*
+DBTxn_prepare(DBTxnObject* self, PyObject* args)
+{
+    int err;
+    char* gid=NULL;
+    int   gid_size=0;
+
+    if (!PyArg_ParseTuple(args, "s#:prepare", &gid, &gid_size))
+        return NULL;
+
+    if (gid_size != DB_XIDDATASIZE) {
+        PyErr_SetString(PyExc_TypeError,
+                        "gid must be DB_XIDDATASIZE bytes long");
+        return NULL;
+    }
+
+    if (!self->txn) {
+        PyObject *t = Py_BuildValue("(is)", 0,"DBTxn must not be used "
+                                    "after txn_commit, txn_abort "
+                                    "or txn_discard");
+        if (t) {
+            PyErr_SetObject(DBError, t);
+            Py_DECREF(t);
+        }
         return NULL;
     }
+    self->flag_prepare=1;  /* Prepare state */
     MYDB_BEGIN_ALLOW_THREADS;
-    err = txn_prepare(self->txn);
+    err = self->txn->prepare(self->txn, (u_int8_t*)gid);
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
     RETURN_NONE();
-#endif
 }
 
 
 static PyObject*
-DBTxn_abort(DBTxnObject* self, PyObject* args)
+DBTxn_abort_discard_internal(DBTxnObject* self, int discard)
 {
-    int err;
+    PyObject *dummy;
+    int err=0;
     DB_TXN *txn;
 
-    if (!PyArg_ParseTuple(args, ":abort"))
-        return NULL;
-
     if (!self->txn) {
         PyObject *t = Py_BuildValue("(is)", 0, "DBTxn must not be used "
-                                    "after txn_commit or txn_abort");
-        PyErr_SetObject(DBError, t);
-        Py_DECREF(t);
+                                    "after txn_commit, txn_abort "
+                                    "or txn_discard");
+        if (t) {
+            PyErr_SetObject(DBError, t);
+            Py_DECREF(t);
+        }
         return NULL;
     }
     txn = self->txn;
     self->txn = NULL;   /* this DB_TXN is no longer valid after this call */
-    MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
-    err = txn->abort(txn);
-#else
-    err = txn_abort(txn);
+
+    _close_transaction_cursors(self);
+#if (DBVER >= 43)
+    while (self->children_sequences) {
+        dummy=DBSequence_close_internal(self->children_sequences,0,0);
+        Py_XDECREF(dummy);
+    }
 #endif
+    while (self->children_dbs) {
+        dummy=DB_close_internal(self->children_dbs,0);
+        Py_XDECREF(dummy);
+    }
+
+    EXTRACT_FROM_DOUBLE_LINKED_LIST(self);
+
+    MYDB_BEGIN_ALLOW_THREADS;
+    if (discard) {
+        assert(!self->flag_prepare);
+        err = txn->discard(txn,0);
+    } else {
+        /*
+        ** If the transaction is in the "prepare" or "recover" state,
+        ** we had better not abort it implicitly.
+        */
+        if (!self->flag_prepare) {
+            err = txn->abort(txn);
+        }
+    }
     MYDB_END_ALLOW_THREADS;
     RETURN_IF_ERR();
     RETURN_NONE();
 }
 
+static PyObject*
+DBTxn_abort(DBTxnObject* self)
+{
+    self->flag_prepare=0;
+    _close_transaction_cursors(self);
+
+    return DBTxn_abort_discard_internal(self,0);
+}
+
+static PyObject*
+DBTxn_discard(DBTxnObject* self)
+{
+    self->flag_prepare=0;
+    _close_transaction_cursors(self);
+
+    return DBTxn_abort_discard_internal(self,1);
+}
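
[Note: abort() and the new discard() both funnel into DBTxn_abort_discard_internal(); a
transaction in the "prepare" or "recover" state is deliberately not aborted behind the
caller's back. Together with prepare(), which takes a GID of exactly DB_XIDDATASIZE (128)
bytes, that is enough for a two-phase-commit sketch, reusing env and d from above:

    txn = env.txn_begin()
    d.put(b'key', b'value', txn=txn)
    gid = b'my-global-id'.ljust(128, b'\0')    # pad to DB_XIDDATASIZE bytes
    txn.prepare(gid)
    # ... after a crash, env.txn_recover() hands this transaction back ...
    txn.commit()                               # or txn.abort() / txn.discard()
]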
+
 
 static PyObject*
-DBTxn_id(DBTxnObject* self, PyObject* args)
+DBTxn_id(DBTxnObject* self)
 {
     int id;
 
-    if (!PyArg_ParseTuple(args, ":id"))
-        return NULL;
-
     if (!self->txn) {
         PyObject *t = Py_BuildValue("(is)", 0, "DBTxn must not be used "
-                                    "after txn_commit or txn_abort");
-        PyErr_SetObject(DBError, t);
-        Py_DECREF(t);
+                                    "after txn_commit, txn_abort "
+                                    "or txn_discard");
+        if (t) {
+            PyErr_SetObject(DBError, t);
+            Py_DECREF(t);
+        }
         return NULL;
     }
     MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 40)
     id = self->txn->id(self->txn);
-#else
-    id = txn_id(self->txn);
-#endif
     MYDB_END_ALLOW_THREADS;
-    return PyLong_FromLong(id);
+    return NUMBER_FromLong(id);
 }
 
 #if (DBVER >= 43)
@@ -4849,24 +6017,41 @@
 
 
 static PyObject*
-DBSequence_close(DBSequenceObject* self, PyObject* args)
+DBSequence_close_internal(DBSequenceObject* self, int flags, int do_not_close)
 {
-    int err, flags=0;
-    if (!PyArg_ParseTuple(args,"|i:close", &flags))
-        return NULL;
-    CHECK_SEQUENCE_NOT_CLOSED(self)
+    int err=0;
 
-    MYDB_BEGIN_ALLOW_THREADS
-    err = self->sequence->close(self->sequence, flags);
-    self->sequence = NULL;
-    MYDB_END_ALLOW_THREADS
+    if (self->sequence!=NULL) {
+        EXTRACT_FROM_DOUBLE_LINKED_LIST(self);
+        if (self->txn) {
+            EXTRACT_FROM_DOUBLE_LINKED_LIST_TXN(self);
+            self->txn=NULL;
+        }
 
-    RETURN_IF_ERR();
+        if (!do_not_close) {
+            MYDB_BEGIN_ALLOW_THREADS
+            err = self->sequence->close(self->sequence, flags);
+            MYDB_END_ALLOW_THREADS
+        }
+        self->sequence = NULL;
+
+        RETURN_IF_ERR();
+    }
 
     RETURN_NONE();
 }
 
 static PyObject*
+DBSequence_close(DBSequenceObject* self, PyObject* args)
+{
+    int flags=0;
+    if (!PyArg_ParseTuple(args,"|i:close", &flags))
+        return NULL;
+
+    return DBSequence_close_internal(self,flags,0);
+}
+
+static PyObject*
 DBSequence_get(DBSequenceObject* self, PyObject* args, PyObject* kwargs)
 {
     int err, flags = 0;
@@ -4888,25 +6073,23 @@
 
     RETURN_IF_ERR();
     return PyLong_FromLongLong(value);
-
 }
 
 static PyObject*
-DBSequence_get_dbp(DBSequenceObject* self, PyObject* args)
+DBSequence_get_dbp(DBSequenceObject* self)
 {
-    if (!PyArg_ParseTuple(args,":get_dbp"))
-        return NULL;
     CHECK_SEQUENCE_NOT_CLOSED(self)
     Py_INCREF(self->mydb);
     return (PyObject* )self->mydb;
 }
 
 static PyObject*
-DBSequence_get_key(DBSequenceObject* self, PyObject* args)
+DBSequence_get_key(DBSequenceObject* self)
 {
     int err;
     DBT key;
     PyObject *retval = NULL;
+
     key.flags = DB_DBT_MALLOC;
     CHECK_SEQUENCE_NOT_CLOSED(self)
     MYDB_BEGIN_ALLOW_THREADS
@@ -4914,9 +6097,9 @@
     MYDB_END_ALLOW_THREADS
 
     if (!err)
-        retval = PyBytes_FromStringAndSize(key.data, key.size);
+        retval = Build_PyString(key.data, key.size);
 
-    free_dbt(&key);
+    FREE_DBT(key);
     RETURN_IF_ERR();
 
     return retval;
@@ -4926,13 +6109,15 @@
 DBSequence_init_value(DBSequenceObject* self, PyObject* args)
 {
     int err;
-    db_seq_t value;
+    PY_LONG_LONG value;
+    db_seq_t value2;
     if (!PyArg_ParseTuple(args,"L:init_value", &value))
         return NULL;
     CHECK_SEQUENCE_NOT_CLOSED(self)
 
+    value2=value; /* If truncation, compiler should show a warning */
     MYDB_BEGIN_ALLOW_THREADS
-    err = self->sequence->initial_value(self->sequence, value);
+    err = self->sequence->initial_value(self->sequence, value2);
     MYDB_END_ALLOW_THREADS
 
     RETURN_IF_ERR();
@@ -4944,8 +6129,7 @@
 DBSequence_open(DBSequenceObject* self, PyObject* args, PyObject* kwargs)
 {
     int err, flags = 0;
-    PyObject *keyobj;
-    Py_buffer *key_buf_view = NULL;
+    PyObject* keyobj;
     PyObject *txnobj = NULL;
     DB_TXN *txn = NULL;
     DBT key;
@@ -4957,22 +6141,28 @@
     if (!checkTxnObj(txnobj, &txn))
         return NULL;
 
-    if (!make_key_dbt(self->mydb, keyobj, &key, NULL, &key_buf_view))
+    if (!make_key_dbt(self->mydb, keyobj, &key, NULL))
         return NULL;
 
     MYDB_BEGIN_ALLOW_THREADS
     err = self->sequence->open(self->sequence, txn, &key, flags);
     MYDB_END_ALLOW_THREADS
 
-    FREE_DBT_VIEW(key, keyobj, key_buf_view);
+    CLEAR_DBT(key);
     RETURN_IF_ERR();
 
+    if (txn) {
+        INSERT_IN_DOUBLE_LINKED_LIST_TXN(((DBTxnObject *)txnobj)->children_sequences,self);
+        self->txn=(DBTxnObject *)txnobj;
+    }
+
     RETURN_NONE();
 }
 
 static PyObject*
 DBSequence_remove(DBSequenceObject* self, PyObject* args, PyObject* kwargs)
 {
+    PyObject *dummy;
     int err, flags = 0;
     PyObject *txnobj = NULL;
     DB_TXN *txn = NULL;
@@ -4990,6 +6180,9 @@
     err = self->sequence->remove(self->sequence, txn, flags);
     MYDB_END_ALLOW_THREADS
 
+    dummy=DBSequence_close_internal(self,flags,1);
+    Py_XDECREF(dummy);
+
     RETURN_IF_ERR();
     RETURN_NONE();
 }
@@ -5011,11 +6204,10 @@
 }
 
 static PyObject*
-DBSequence_get_cachesize(DBSequenceObject* self, PyObject* args)
+DBSequence_get_cachesize(DBSequenceObject* self)
 {
     int err, size;
-    if (!PyArg_ParseTuple(args,":get_cachesize"))
-        return NULL;
+
     CHECK_SEQUENCE_NOT_CLOSED(self)
 
     MYDB_BEGIN_ALLOW_THREADS
@@ -5023,7 +6215,7 @@
     MYDB_END_ALLOW_THREADS
 
     RETURN_IF_ERR();
-    return PyLong_FromLong(size);
+    return NUMBER_FromLong(size);
 }
 
 static PyObject*
@@ -5040,16 +6232,14 @@
 
     RETURN_IF_ERR();
     RETURN_NONE();
-
 }
 
 static PyObject*
-DBSequence_get_flags(DBSequenceObject* self, PyObject* args)
+DBSequence_get_flags(DBSequenceObject* self)
 {
     unsigned int flags;
     int err;
-    if (!PyArg_ParseTuple(args,":get_flags"))
-        return NULL;
+
     CHECK_SEQUENCE_NOT_CLOSED(self)
 
     MYDB_BEGIN_ALLOW_THREADS
@@ -5057,20 +6247,23 @@
     MYDB_END_ALLOW_THREADS
 
     RETURN_IF_ERR();
-    return PyLong_FromLong((int)flags);
+    return NUMBER_FromLong((int)flags);
 }
 
 static PyObject*
 DBSequence_set_range(DBSequenceObject* self, PyObject* args)
 {
     int err;
-    db_seq_t min, max;
+    PY_LONG_LONG min, max;
+    db_seq_t min2, max2;
     if (!PyArg_ParseTuple(args,"(LL):set_range", &min, &max))
         return NULL;
     CHECK_SEQUENCE_NOT_CLOSED(self)
 
+    min2=min;  /* If truncation, compiler should show a warning */
+    max2=max;
     MYDB_BEGIN_ALLOW_THREADS
-    err = self->sequence->set_range(self->sequence, min, max);
+    err = self->sequence->set_range(self->sequence, min2, max2);
     MYDB_END_ALLOW_THREADS
 
     RETURN_IF_ERR();
@@ -5078,19 +6271,21 @@
 }
 
 static PyObject*
-DBSequence_get_range(DBSequenceObject* self, PyObject* args)
+DBSequence_get_range(DBSequenceObject* self)
 {
     int err;
-    db_seq_t min, max;
-    if (!PyArg_ParseTuple(args,":get_range"))
-        return NULL;
+    PY_LONG_LONG min, max;
+    db_seq_t min2, max2;
+
     CHECK_SEQUENCE_NOT_CLOSED(self)
 
     MYDB_BEGIN_ALLOW_THREADS
-    err = self->sequence->get_range(self->sequence, &min, &max);
+    err = self->sequence->get_range(self->sequence, &min2, &max2);
     MYDB_END_ALLOW_THREADS
 
     RETURN_IF_ERR();
+    min=min2;  /* If truncation, compiler should show a warning */
+    max=max2;
     return Py_BuildValue("(LL)", min, max);
 }
 
@@ -5142,27 +6337,23 @@
 /* Method definition tables and type objects */
 
 static PyMethodDef DB_methods[] = {
-    {"append",          (PyCFunction)DB_append,         METH_VARARGS},
-#if (DBVER >= 33)
+    {"append",          (PyCFunction)DB_append,         METH_VARARGS|METH_KEYWORDS},
     {"associate",       (PyCFunction)DB_associate,      METH_VARARGS|METH_KEYWORDS},
-#endif
     {"close",           (PyCFunction)DB_close,          METH_VARARGS},
     {"consume",         (PyCFunction)DB_consume,        METH_VARARGS|METH_KEYWORDS},
     {"consume_wait",    (PyCFunction)DB_consume_wait,   METH_VARARGS|METH_KEYWORDS},
     {"cursor",          (PyCFunction)DB_cursor,         METH_VARARGS|METH_KEYWORDS},
     {"delete",          (PyCFunction)DB_delete,         METH_VARARGS|METH_KEYWORDS},
-    {"fd",              (PyCFunction)DB_fd,             METH_VARARGS},
+    {"fd",              (PyCFunction)DB_fd,             METH_NOARGS},
     {"get",             (PyCFunction)DB_get,            METH_VARARGS|METH_KEYWORDS},
-#if (DBVER >= 33)
     {"pget",            (PyCFunction)DB_pget,           METH_VARARGS|METH_KEYWORDS},
-#endif
     {"get_both",        (PyCFunction)DB_get_both,       METH_VARARGS|METH_KEYWORDS},
-    {"get_byteswapped", (PyCFunction)DB_get_byteswapped,METH_VARARGS},
+    {"get_byteswapped", (PyCFunction)DB_get_byteswapped,METH_NOARGS},
     {"get_size",        (PyCFunction)DB_get_size,       METH_VARARGS|METH_KEYWORDS},
-    {"get_type",        (PyCFunction)DB_get_type,       METH_VARARGS},
+    {"get_type",        (PyCFunction)DB_get_type,       METH_NOARGS},
     {"join",            (PyCFunction)DB_join,           METH_VARARGS},
     {"key_range",       (PyCFunction)DB_key_range,      METH_VARARGS|METH_KEYWORDS},
-    {"has_key",         (PyCFunction)DB_has_key,        METH_VARARGS},
+    {"has_key",         (PyCFunction)DB_has_key,        METH_VARARGS|METH_KEYWORDS},
     {"items",           (PyCFunction)DB_items,          METH_VARARGS},
     {"keys",            (PyCFunction)DB_keys,           METH_VARARGS},
     {"open",            (PyCFunction)DB_open,           METH_VARARGS|METH_KEYWORDS},
@@ -5170,9 +6361,7 @@
     {"remove",          (PyCFunction)DB_remove,         METH_VARARGS|METH_KEYWORDS},
     {"rename",          (PyCFunction)DB_rename,         METH_VARARGS},
     {"set_bt_minkey",   (PyCFunction)DB_set_bt_minkey,  METH_VARARGS},
-#if (DBVER >= 33)
-    {"set_bt_compare",  (PyCFunction)DB_set_bt_compare, METH_VARARGS},
-#endif
+    {"set_bt_compare",  (PyCFunction)DB_set_bt_compare, METH_O},
     {"set_cachesize",   (PyCFunction)DB_set_cachesize,  METH_VARARGS},
 #if (DBVER >= 41)
     {"set_encrypt",     (PyCFunction)DB_set_encrypt,    METH_VARARGS|METH_KEYWORDS},
@@ -5186,13 +6375,13 @@
     {"set_re_len",      (PyCFunction)DB_set_re_len,     METH_VARARGS},
     {"set_re_pad",      (PyCFunction)DB_set_re_pad,     METH_VARARGS},
     {"set_re_source",   (PyCFunction)DB_set_re_source,  METH_VARARGS},
-    {"set_q_extentsize",(PyCFunction)DB_set_q_extentsize,METH_VARARGS},
+    {"set_q_extentsize",(PyCFunction)DB_set_q_extentsize, METH_VARARGS},
+    {"set_private",     (PyCFunction)DB_set_private,    METH_O},
+    {"get_private",     (PyCFunction)DB_get_private,    METH_NOARGS},
     {"stat",            (PyCFunction)DB_stat,           METH_VARARGS|METH_KEYWORDS},
     {"sync",            (PyCFunction)DB_sync,           METH_VARARGS},
-#if (DBVER >= 33)
     {"truncate",        (PyCFunction)DB_truncate,       METH_VARARGS|METH_KEYWORDS},
-#endif
-    {"type",            (PyCFunction)DB_get_type,       METH_VARARGS},
+    {"type",            (PyCFunction)DB_get_type,       METH_NOARGS},
     {"upgrade",         (PyCFunction)DB_upgrade,        METH_VARARGS},
     {"values",          (PyCFunction)DB_values,         METH_VARARGS},
     {"verify",          (PyCFunction)DB_verify,         METH_VARARGS|METH_KEYWORDS},
@@ -5209,17 +6398,15 @@
 
 
 static PyMethodDef DBCursor_methods[] = {
-    {"close",           (PyCFunction)DBC_close,         METH_VARARGS},
+    {"close",           (PyCFunction)DBC_close,         METH_NOARGS},
     {"count",           (PyCFunction)DBC_count,         METH_VARARGS},
     {"current",         (PyCFunction)DBC_current,       METH_VARARGS|METH_KEYWORDS},
     {"delete",          (PyCFunction)DBC_delete,        METH_VARARGS},
     {"dup",             (PyCFunction)DBC_dup,           METH_VARARGS},
     {"first",           (PyCFunction)DBC_first,         METH_VARARGS|METH_KEYWORDS},
     {"get",             (PyCFunction)DBC_get,           METH_VARARGS|METH_KEYWORDS},
-#if (DBVER >= 33)
     {"pget",            (PyCFunction)DBC_pget,          METH_VARARGS|METH_KEYWORDS},
-#endif
-    {"get_recno",       (PyCFunction)DBC_get_recno,     METH_VARARGS},
+    {"get_recno",       (PyCFunction)DBC_get_recno,     METH_NOARGS},
     {"last",            (PyCFunction)DBC_last,          METH_VARARGS|METH_KEYWORDS},
     {"next",            (PyCFunction)DBC_next,          METH_VARARGS|METH_KEYWORDS},
     {"prev",            (PyCFunction)DBC_prev,          METH_VARARGS|METH_KEYWORDS},
@@ -5227,7 +6414,7 @@
     {"set",             (PyCFunction)DBC_set,           METH_VARARGS|METH_KEYWORDS},
     {"set_range",       (PyCFunction)DBC_set_range,     METH_VARARGS|METH_KEYWORDS},
     {"get_both",        (PyCFunction)DBC_get_both,      METH_VARARGS},
-    {"get_current_size",(PyCFunction)DBC_get_current_size, METH_VARARGS},
+    {"get_current_size",(PyCFunction)DBC_get_current_size, METH_NOARGS},
     {"set_both",        (PyCFunction)DBC_set_both,      METH_VARARGS},
     {"set_recno",       (PyCFunction)DBC_set_recno,     METH_VARARGS|METH_KEYWORDS},
     {"consume",         (PyCFunction)DBC_consume,       METH_VARARGS|METH_KEYWORDS},
@@ -5248,19 +6435,21 @@
     {"dbrename",        (PyCFunction)DBEnv_dbrename,         METH_VARARGS|METH_KEYWORDS},
     {"set_encrypt",     (PyCFunction)DBEnv_set_encrypt,      METH_VARARGS|METH_KEYWORDS},
 #endif
-#if (DBVER >= 40)
     {"set_timeout",     (PyCFunction)DBEnv_set_timeout,      METH_VARARGS|METH_KEYWORDS},
-#endif
     {"set_shm_key",     (PyCFunction)DBEnv_set_shm_key,      METH_VARARGS},
     {"set_cachesize",   (PyCFunction)DBEnv_set_cachesize,    METH_VARARGS},
     {"set_data_dir",    (PyCFunction)DBEnv_set_data_dir,     METH_VARARGS},
     {"set_flags",       (PyCFunction)DBEnv_set_flags,        METH_VARARGS},
+#if (DBVER >= 47)
+    {"log_set_config",  (PyCFunction)DBEnv_log_set_config,   METH_VARARGS},
+#endif
     {"set_lg_bsize",    (PyCFunction)DBEnv_set_lg_bsize,     METH_VARARGS},
     {"set_lg_dir",      (PyCFunction)DBEnv_set_lg_dir,       METH_VARARGS},
     {"set_lg_max",      (PyCFunction)DBEnv_set_lg_max,       METH_VARARGS},
-#if (DBVER >= 33)
-    {"set_lg_regionmax",(PyCFunction)DBEnv_set_lg_regionmax, METH_VARARGS},
+#if (DBVER >= 42)
+    {"get_lg_max",      (PyCFunction)DBEnv_get_lg_max,       METH_NOARGS},
 #endif
+    {"set_lg_regionmax",(PyCFunction)DBEnv_set_lg_regionmax, METH_VARARGS},
     {"set_lk_detect",   (PyCFunction)DBEnv_set_lk_detect,    METH_VARARGS},
 #if (DBVER < 45)
     {"set_lk_max",      (PyCFunction)DBEnv_set_lk_max,       METH_VARARGS},
@@ -5277,20 +6466,78 @@
     {"set_tx_timestamp", (PyCFunction)DBEnv_set_tx_timestamp, METH_VARARGS},
     {"lock_detect",     (PyCFunction)DBEnv_lock_detect,      METH_VARARGS},
     {"lock_get",        (PyCFunction)DBEnv_lock_get,         METH_VARARGS},
-    {"lock_id",         (PyCFunction)DBEnv_lock_id,          METH_VARARGS},
-#if (DBVER >= 40)
+    {"lock_id",         (PyCFunction)DBEnv_lock_id,          METH_NOARGS},
     {"lock_id_free",    (PyCFunction)DBEnv_lock_id_free,     METH_VARARGS},
-#endif
     {"lock_put",        (PyCFunction)DBEnv_lock_put,         METH_VARARGS},
     {"lock_stat",       (PyCFunction)DBEnv_lock_stat,        METH_VARARGS},
     {"log_archive",     (PyCFunction)DBEnv_log_archive,      METH_VARARGS},
-#if (DBVER >= 40)
+    {"log_flush",       (PyCFunction)DBEnv_log_flush,        METH_NOARGS},
     {"log_stat",        (PyCFunction)DBEnv_log_stat,         METH_VARARGS},
-#endif
 #if (DBVER >= 44)
     {"lsn_reset",       (PyCFunction)DBEnv_lsn_reset,        METH_VARARGS|METH_KEYWORDS},
 #endif
     {"set_get_returns_none",(PyCFunction)DBEnv_set_get_returns_none, METH_VARARGS},
+    {"txn_recover",     (PyCFunction)DBEnv_txn_recover,       METH_NOARGS},
+    {"set_rpc_server",  (PyCFunction)DBEnv_set_rpc_server,
+        METH_VARARGS|METH_KEYWORDS},
+    {"set_verbose",     (PyCFunction)DBEnv_set_verbose,       METH_VARARGS},
+#if (DBVER >= 42)
+    {"get_verbose",     (PyCFunction)DBEnv_get_verbose,       METH_VARARGS},
+#endif
+    {"set_private",     (PyCFunction)DBEnv_set_private,       METH_O},
+    {"get_private",     (PyCFunction)DBEnv_get_private,       METH_NOARGS},
+    {"rep_start",       (PyCFunction)DBEnv_rep_start,
+        METH_VARARGS|METH_KEYWORDS},
+    {"rep_set_transport", (PyCFunction)DBEnv_rep_set_transport, METH_VARARGS},
+    {"rep_process_message", (PyCFunction)DBEnv_rep_process_message,
+        METH_VARARGS},
+#if (DBVER >= 46)
+    {"rep_elect",       (PyCFunction)DBEnv_rep_elect,         METH_VARARGS},
+#endif
+#if (DBVER >= 44)
+    {"rep_set_config",  (PyCFunction)DBEnv_rep_set_config,    METH_VARARGS},
+    {"rep_get_config",  (PyCFunction)DBEnv_rep_get_config,    METH_VARARGS},
+    {"rep_sync",        (PyCFunction)DBEnv_rep_sync,          METH_NOARGS},
+#endif
+#if (DBVER >= 45)
+    {"rep_set_limit",   (PyCFunction)DBEnv_rep_set_limit,     METH_VARARGS},
+    {"rep_get_limit",   (PyCFunction)DBEnv_rep_get_limit,     METH_NOARGS},
+#endif
+#if (DBVER >= 47)
+    {"rep_set_request", (PyCFunction)DBEnv_rep_set_request,   METH_VARARGS},
+    {"rep_get_request", (PyCFunction)DBEnv_rep_get_request,   METH_NOARGS},
+#endif
+#if (DBVER >= 45)
+    {"set_event_notify", (PyCFunction)DBEnv_set_event_notify, METH_O},
+#endif
+#if (DBVER >= 45)
+    {"rep_set_nsites", (PyCFunction)DBEnv_rep_set_nsites, METH_VARARGS},
+    {"rep_get_nsites", (PyCFunction)DBEnv_rep_get_nsites, METH_NOARGS},
+    {"rep_set_priority", (PyCFunction)DBEnv_rep_set_priority, METH_VARARGS},
+    {"rep_get_priority", (PyCFunction)DBEnv_rep_get_priority, METH_NOARGS},
+    {"rep_set_timeout", (PyCFunction)DBEnv_rep_set_timeout, METH_VARARGS},
+    {"rep_get_timeout", (PyCFunction)DBEnv_rep_get_timeout, METH_VARARGS},
+#endif
+#if (DBVER >= 45)
+    {"repmgr_start", (PyCFunction)DBEnv_repmgr_start,
+        METH_VARARGS|METH_KEYWORDS},
+    {"repmgr_set_local_site", (PyCFunction)DBEnv_repmgr_set_local_site,
+        METH_VARARGS|METH_KEYWORDS},
+    {"repmgr_add_remote_site", (PyCFunction)DBEnv_repmgr_add_remote_site,
+        METH_VARARGS|METH_KEYWORDS},
+    {"repmgr_set_ack_policy", (PyCFunction)DBEnv_repmgr_set_ack_policy,
+        METH_VARARGS},
+    {"repmgr_get_ack_policy", (PyCFunction)DBEnv_repmgr_get_ack_policy,
+        METH_NOARGS},
+    {"repmgr_site_list", (PyCFunction)DBEnv_repmgr_site_list,
+        METH_NOARGS},
+#endif
+#if (DBVER >= 46)
+    {"repmgr_stat", (PyCFunction)DBEnv_repmgr_stat,
+        METH_VARARGS|METH_KEYWORDS},
+    {"repmgr_stat_print", (PyCFunction)DBEnv_repmgr_stat_print,
+        METH_VARARGS|METH_KEYWORDS},
+#endif
     {NULL,      NULL}       /* sentinel */
 };
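
A few entries here and in DB_methods (set_bt_compare, set_private, set_event_notify) switch to METH_O, where the interpreter passes the single positional argument object directly instead of a packed tuple. A minimal sketch with hypothetical names (example_set_private and the module-level pointer stand in for the real per-object private_obj field):

    #include <Python.h>

    static PyObject *stored_private = NULL;    /* stand-in for self->private_obj */

    /* METH_O: the second parameter is the argument itself, nothing to unpack. */
    static PyObject *
    example_set_private(PyObject *self, PyObject *obj)
    {
        Py_INCREF(obj);
        Py_XDECREF(stored_private);
        stored_private = obj;
        Py_RETURN_NONE;
    }

    static PyMethodDef ExampleEnv_methods[] = {
        {"set_private", (PyCFunction)example_set_private, METH_O},
        {NULL, NULL}    /* sentinel */
    };
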
 
@@ -5298,8 +6545,9 @@
 static PyMethodDef DBTxn_methods[] = {
     {"commit",          (PyCFunction)DBTxn_commit,      METH_VARARGS},
     {"prepare",         (PyCFunction)DBTxn_prepare,     METH_VARARGS},
-    {"abort",           (PyCFunction)DBTxn_abort,       METH_VARARGS},
-    {"id",              (PyCFunction)DBTxn_id,          METH_VARARGS},
+    {"discard",         (PyCFunction)DBTxn_discard,     METH_NOARGS},
+    {"abort",           (PyCFunction)DBTxn_abort,       METH_NOARGS},
+    {"id",              (PyCFunction)DBTxn_id,          METH_NOARGS},
     {NULL,      NULL}       /* sentinel */
 };
 
@@ -5308,30 +6556,40 @@
 static PyMethodDef DBSequence_methods[] = {
     {"close",           (PyCFunction)DBSequence_close,          METH_VARARGS},
     {"get",             (PyCFunction)DBSequence_get,            METH_VARARGS|METH_KEYWORDS},
-    {"get_dbp",         (PyCFunction)DBSequence_get_dbp,        METH_VARARGS},
-    {"get_key",         (PyCFunction)DBSequence_get_key,        METH_VARARGS},
+    {"get_dbp",         (PyCFunction)DBSequence_get_dbp,        METH_NOARGS},
+    {"get_key",         (PyCFunction)DBSequence_get_key,        METH_NOARGS},
     {"init_value",      (PyCFunction)DBSequence_init_value,     METH_VARARGS},
     {"open",            (PyCFunction)DBSequence_open,           METH_VARARGS|METH_KEYWORDS},
     {"remove",          (PyCFunction)DBSequence_remove,         METH_VARARGS|METH_KEYWORDS},
     {"set_cachesize",   (PyCFunction)DBSequence_set_cachesize,  METH_VARARGS},
-    {"get_cachesize",   (PyCFunction)DBSequence_get_cachesize,  METH_VARARGS},
+    {"get_cachesize",   (PyCFunction)DBSequence_get_cachesize,  METH_NOARGS},
     {"set_flags",       (PyCFunction)DBSequence_set_flags,      METH_VARARGS},
-    {"get_flags",       (PyCFunction)DBSequence_get_flags,      METH_VARARGS},
+    {"get_flags",       (PyCFunction)DBSequence_get_flags,      METH_NOARGS},
     {"set_range",       (PyCFunction)DBSequence_set_range,      METH_VARARGS},
-    {"get_range",       (PyCFunction)DBSequence_get_range,      METH_VARARGS},
+    {"get_range",       (PyCFunction)DBSequence_get_range,      METH_NOARGS},
     {"stat",            (PyCFunction)DBSequence_stat,           METH_VARARGS|METH_KEYWORDS},
     {NULL,      NULL}       /* sentinel */
 };
 #endif
 
+
 static PyObject*
 DBEnv_db_home_get(DBEnvObject* self)
 {
+    const char *home = NULL;
+
     CHECK_ENV_NOT_CLOSED(self);
-    if (self->db_env->db_home == NULL) {
+
+#if (DBVER >= 42)
+    self->db_env->get_home(self->db_env, &home);
+#else
+    home=self->db_env->db_home;
+#endif
+
+    if (home == NULL) {
         RETURN_NONE();
     }
-    return PyUnicode_FromString(self->db_env->db_home);
+    return PyBytes_FromString(home);
 }
 
 static PyGetSetDef DBEnv_getsets[] = {
@@ -5340,16 +6598,21 @@
 };
 
 
-static PyTypeObject DB_Type = {
+statichere PyTypeObject DB_Type = {
+#if (PY_VERSION_HEX < 0x03000000)
+    PyObject_HEAD_INIT(NULL)
+    0,                  /*ob_size*/
+#else
     PyVarObject_HEAD_INIT(NULL, 0)
+#endif
     "DB",               /*tp_name*/
     sizeof(DBObject),   /*tp_basicsize*/
     0,                  /*tp_itemsize*/
     /* methods */
     (destructor)DB_dealloc, /*tp_dealloc*/
-    0,                  /*tp_print*/
-    0,                  /*tp_getattr*/
-    0,                  /*tp_setattr*/
+    0,          /*tp_print*/
+    0,          /*tp_getattr*/
+    0,          /*tp_setattr*/
     0,          /*tp_compare*/
     0,          /*tp_repr*/
     0,          /*tp_as_number*/
@@ -5359,55 +6622,75 @@
     0,			/* tp_call */
     0,			/* tp_str */
     0,  		/* tp_getattro */
-    0,                  /* tp_setattro */
+    0,          /* tp_setattro */
     0,			/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT, /* tp_flags */
-    0,                  /* tp_doc */
-    0,		        /* tp_traverse */
+#if (PY_VERSION_HEX < 0x03000000)
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_WEAKREFS,      /* tp_flags */
+#else
+    Py_TPFLAGS_DEFAULT,      /* tp_flags */
+#endif
+    0,          /* tp_doc */
+    0,		    /* tp_traverse */
     0,			/* tp_clear */
     0,			/* tp_richcompare */
     offsetof(DBObject, in_weakreflist),   /* tp_weaklistoffset */
-    0,			/* tp_iter */
-    0,			/* tp_iternext */
-    DB_methods,		/* tp_methods */
+    0,          /*tp_iter*/
+    0,          /*tp_iternext*/
+    DB_methods, /*tp_methods*/
+    0, /*tp_members*/
 };
 
 
-static PyTypeObject DBCursor_Type = {
+statichere PyTypeObject DBCursor_Type = {
+#if (PY_VERSION_HEX < 0x03000000)
+    PyObject_HEAD_INIT(NULL)
+    0,                  /*ob_size*/
+#else
     PyVarObject_HEAD_INIT(NULL, 0)
+#endif
     "DBCursor",         /*tp_name*/
     sizeof(DBCursorObject),  /*tp_basicsize*/
-    0,                  /*tp_itemsize*/
+    0,          /*tp_itemsize*/
     /* methods */
     (destructor)DBCursor_dealloc,/*tp_dealloc*/
-    0,                  /*tp_print*/
-    0,                  /*tp_getattr*/
-    0,                  /*tp_setattr*/
-    0,                  /*tp_compare*/
-    0,                  /*tp_repr*/
-    0,                  /*tp_as_number*/
-    0,                  /*tp_as_sequence*/
-    0,                  /*tp_as_mapping*/
-    0,                  /*tp_hash*/
-    0,			/* tp_call */
-    0,			/* tp_str */
-    0,  		/* tp_getattro */
-    0,                  /* tp_setattro */
-    0,			/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT, /* tp_flags */
-    0,                  /* tp_doc */
-    0,		        /* tp_traverse */
-    0,			/* tp_clear */
-    0,			/* tp_richcompare */
+    0,          /*tp_print*/
+    0,          /*tp_getattr*/
+    0,          /*tp_setattr*/
+    0,          /*tp_compare*/
+    0,          /*tp_repr*/
+    0,          /*tp_as_number*/
+    0,          /*tp_as_sequence*/
+    0,          /*tp_as_mapping*/
+    0,          /*tp_hash*/
+    0,          /*tp_call*/
+    0,          /*tp_str*/
+    0,          /*tp_getattro*/
+    0,          /*tp_setattro*/
+    0,          /*tp_as_buffer*/
+#if (PY_VERSION_HEX < 0x03000000)
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_WEAKREFS,      /* tp_flags */
+#else
+    Py_TPFLAGS_DEFAULT,      /* tp_flags */
+#endif
+    0,          /* tp_doc */
+    0,          /* tp_traverse */
+    0,          /* tp_clear */
+    0,          /* tp_richcompare */
     offsetof(DBCursorObject, in_weakreflist),   /* tp_weaklistoffset */
-    0,			/* tp_iter */
-    0,			/* tp_iternext */
-    DBCursor_methods,	/* tp_methods */
+    0,          /*tp_iter*/
+    0,          /*tp_iternext*/
+    DBCursor_methods, /*tp_methods*/
+    0,          /*tp_members*/
 };
 
 
-static PyTypeObject DBEnv_Type = {
+statichere PyTypeObject DBEnv_Type = {
+#if (PY_VERSION_HEX < 0x03000000)
+    PyObject_HEAD_INIT(NULL)
+    0,                  /*ob_size*/
+#else
     PyVarObject_HEAD_INIT(NULL, 0)
+#endif
     "DBEnv",            /*tp_name*/
     sizeof(DBEnvObject),    /*tp_basicsize*/
     0,          /*tp_itemsize*/
@@ -5425,23 +6708,32 @@
     0,			/* tp_call */
     0,			/* tp_str */
     0,  		/* tp_getattro */
-    0,                  /* tp_setattro */
+    0,          /* tp_setattro */
     0,			/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT, /* tp_flags */
-    0,                  /* tp_doc */
-    0,		        /* tp_traverse */
+#if (PY_VERSION_HEX < 0x03000000)
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_WEAKREFS,      /* tp_flags */
+#else
+    Py_TPFLAGS_DEFAULT,      /* tp_flags */
+#endif
+    0,          /* tp_doc */
+    0,		    /* tp_traverse */
     0,			/* tp_clear */
     0,			/* tp_richcompare */
     offsetof(DBEnvObject, in_weakreflist),   /* tp_weaklistoffset */
-    0,                  /* tp_iter */
-    0,                  /* tp_iternext */
+    0,          /* tp_iter */
+    0,          /* tp_iternext */
     DBEnv_methods,      /* tp_methods */
-    0,                  /* tp_members */
+    0,          /* tp_members */
     DBEnv_getsets,      /* tp_getsets */
 };
 
-static PyTypeObject DBTxn_Type = {
+statichere PyTypeObject DBTxn_Type = {
+#if (PY_VERSION_HEX < 0x03000000)
+    PyObject_HEAD_INIT(NULL)
+    0,                  /*ob_size*/
+#else
     PyVarObject_HEAD_INIT(NULL, 0)
+#endif
     "DBTxn",    /*tp_name*/
     sizeof(DBTxnObject),  /*tp_basicsize*/
     0,          /*tp_itemsize*/
@@ -5459,22 +6751,32 @@
     0,			/* tp_call */
     0,			/* tp_str */
     0,  		/* tp_getattro */
-    0,                  /* tp_setattro */
+    0,          /* tp_setattro */
     0,			/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT, /* tp_flags */
-    0,                  /* tp_doc */
-    0,		        /* tp_traverse */
+#if (PY_VERSION_HEX < 0x03000000)
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_WEAKREFS,      /* tp_flags */
+#else
+    Py_TPFLAGS_DEFAULT,      /* tp_flags */
+#endif
+    0,          /* tp_doc */
+    0,	        /* tp_traverse */
     0,			/* tp_clear */
     0,			/* tp_richcompare */
     offsetof(DBTxnObject, in_weakreflist),   /* tp_weaklistoffset */
-    0,			/* tp_iter */
-    0,			/* tp_iternext */
-    DBTxn_methods,	/* tp_methods */
+    0,          /*tp_iter*/
+    0,          /*tp_iternext*/
+    DBTxn_methods, /*tp_methods*/
+    0,          /*tp_members*/
 };
 
 
-static PyTypeObject DBLock_Type = {
+statichere PyTypeObject DBLock_Type = {
+#if (PY_VERSION_HEX < 0x03000000)
+    PyObject_HEAD_INIT(NULL)
+    0,                  /*ob_size*/
+#else
     PyVarObject_HEAD_INIT(NULL, 0)
+#endif
     "DBLock",   /*tp_name*/
     sizeof(DBLockObject),  /*tp_basicsize*/
     0,          /*tp_itemsize*/
@@ -5492,19 +6794,28 @@
     0,			/* tp_call */
     0,			/* tp_str */
     0,  		/* tp_getattro */
-    0,                  /* tp_setattro */
+    0,          /* tp_setattro */
     0,			/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT, /* tp_flags */
-    0,                  /* tp_doc */
-    0,		        /* tp_traverse */
+#if (PY_VERSION_HEX < 0x03000000)
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_WEAKREFS,      /* tp_flags */
+#else
+    Py_TPFLAGS_DEFAULT,      /* tp_flags */
+#endif
+    0,          /* tp_doc */
+    0,		    /* tp_traverse */
     0,			/* tp_clear */
     0,			/* tp_richcompare */
     offsetof(DBLockObject, in_weakreflist),   /* tp_weaklistoffset */
 };
 
 #if (DBVER >= 43)
-static PyTypeObject DBSequence_Type = {
+statichere PyTypeObject DBSequence_Type = {
+#if (PY_VERSION_HEX < 0x03000000)
+    PyObject_HEAD_INIT(NULL)
+    0,                  /*ob_size*/
+#else
     PyVarObject_HEAD_INIT(NULL, 0)
+#endif
     "DBSequence",                   /*tp_name*/
     sizeof(DBSequenceObject),       /*tp_basicsize*/
     0,          /*tp_itemsize*/
@@ -5524,15 +6835,20 @@
     0,  		/* tp_getattro */
     0,          /* tp_setattro */
     0,			/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT, /* tp_flags */
+#if (PY_VERSION_HEX < 0x03000000)
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_WEAKREFS,      /* tp_flags */
+#else
+    Py_TPFLAGS_DEFAULT,      /* tp_flags */
+#endif
     0,          /* tp_doc */
     0,		    /* tp_traverse */
     0,			/* tp_clear */
     0,			/* tp_richcompare */
     offsetof(DBSequenceObject, in_weakreflist),   /* tp_weaklistoffset */
-    0,			/* tp_iter */
-    0,			/* tp_iternext */
-    DBSequence_methods,	/* tp_methods */
+    0,          /*tp_iter*/
+    0,          /*tp_iternext*/
+    DBSequence_methods, /*tp_methods*/
+    0,          /*tp_members*/
 };
 #endif
 
@@ -5591,29 +6907,27 @@
 underlying DB library.";
 
 static PyObject*
-bsddb_version(PyObject* self, PyObject* args)
+bsddb_version(PyObject* self)
 {
     int major, minor, patch;
 
-        if (!PyArg_ParseTuple(args, ":version"))
-        return NULL;
-        db_version(&major, &minor, &patch);
-        return Py_BuildValue("(iii)", major, minor, patch);
+    db_version(&major, &minor, &patch);
+    return Py_BuildValue("(iii)", major, minor, patch);
 }
 
 
 /* List of functions defined in the module */
-
 static PyMethodDef bsddb_methods[] = {
     {"DB",          (PyCFunction)DB_construct,          METH_VARARGS | METH_KEYWORDS },
     {"DBEnv",       (PyCFunction)DBEnv_construct,       METH_VARARGS},
-#if (DBVER >= 43)    
+#if (DBVER >= 43)
     {"DBSequence",  (PyCFunction)DBSequence_construct,  METH_VARARGS | METH_KEYWORDS },
-#endif    
-    {"version",     (PyCFunction)bsddb_version,         METH_VARARGS, bsddb_version_doc},
+#endif
+    {"version",     (PyCFunction)bsddb_version,         METH_NOARGS, bsddb_version_doc},
     {NULL,      NULL}       /* sentinel */
 };
 
+
 /* API structure */
 static BSDDB_api bsddb_api;
 
@@ -5630,44 +6944,51 @@
 #define MODULE_NAME_MAX_LEN     11
 static char _bsddbModuleName[MODULE_NAME_MAX_LEN+1] = "_bsddb";
 
-
-static struct PyModuleDef _bsddbmodule = {
-	PyModuleDef_HEAD_INIT,
-	_bsddbModuleName,
-	NULL,
-	-1,
-	bsddb_methods,
-	NULL,
-	NULL,
-	NULL,
-	NULL
+#if (PY_VERSION_HEX >= 0x03000000)
+static struct PyModuleDef bsddbmodule = {
+    PyModuleDef_HEAD_INIT,
+    _bsddbModuleName,   /* Name of module */
+    NULL,               /* module documentation, may be NULL */
+    -1,                 /* size of per-interpreter state of the module,
+                            or -1 if the module keeps state in global variables. */
+    bsddb_methods,
+    NULL,   /* Reload */
+    NULL,   /* Traverse */
+    NULL,   /* Clear */
+    NULL    /* Free */
 };
+#endif
 
-PyMODINIT_FUNC PyInit__bsddb(void)
+
+#if (PY_VERSION_HEX < 0x03000000)
+DL_EXPORT(void) init_bsddb(void)
+#else
+PyMODINIT_FUNC  PyInit__bsddb(void)    /* Note the two underscores */
+#endif
 {
     PyObject* m;
     PyObject* d;
-    PyObject* pybsddb_version_s = PyUnicode_FromString(PY_BSDDB_VERSION);
-    PyObject* db_version_s = PyUnicode_FromString(DB_VERSION_STRING);
-    PyObject* svnid_s = PyUnicode_FromString(svn_id);
+    PyObject* pybsddb_version_s = PyBytes_FromString( PY_BSDDB_VERSION );
+    PyObject* db_version_s = PyBytes_FromString( DB_VERSION_STRING );
+    PyObject* cvsid_s = PyBytes_FromString( rcs_id );
     PyObject* py_api;
 
     /* Initialize object types */
-    if (PyType_Ready(&DB_Type) < 0)
-        return NULL;
-    if (PyType_Ready(&DBCursor_Type) < 0)
-        return NULL;
-    if (PyType_Ready(&DBEnv_Type) < 0)
-        return NULL;
-    if (PyType_Ready(&DBTxn_Type) < 0)
-        return NULL;
-    if (PyType_Ready(&DBLock_Type) < 0)
-        return NULL;
+    if ((PyType_Ready(&DB_Type) < 0)
+        || (PyType_Ready(&DBCursor_Type) < 0)
+        || (PyType_Ready(&DBEnv_Type) < 0)
+        || (PyType_Ready(&DBTxn_Type) < 0)
+        || (PyType_Ready(&DBLock_Type) < 0)
 #if (DBVER >= 43)
-    if (PyType_Ready(&DBSequence_Type) < 0)
+        || (PyType_Ready(&DBSequence_Type) < 0)
+#endif
+        ) {
+#if (PY_VERSION_HEX < 0x03000000)
+        return;
+#else
         return NULL;
 #endif
-
+    }
 
 #if defined(WITH_THREAD) && !defined(MYDB_USE_GILSTATE)
     /* Save the current interpreter, so callbacks can do the right thing. */
@@ -5675,19 +6996,28 @@
 #endif
 
     /* Create the module and add the functions */
-    m = PyModule_Create(&_bsddbmodule);
-    if (m == NULL)
+#if (PY_VERSION_HEX < 0x03000000)
+    m = Py_InitModule(_bsddbModuleName, bsddb_methods);
+#else
+    m=PyModule_Create(&bsddbmodule);
+#endif
+    if (m == NULL) {
+#if (PY_VERSION_HEX < 0x03000000)
+        return;
+#else
     	return NULL;
+#endif
+    }
 
     /* Add some symbolic constants to the module */
     d = PyModule_GetDict(m);
     PyDict_SetItemString(d, "__version__", pybsddb_version_s);
-    PyDict_SetItemString(d, "cvsid", svnid_s);
+    PyDict_SetItemString(d, "cvsid", cvsid_s);
     PyDict_SetItemString(d, "DB_VERSION_STRING", db_version_s);
     Py_DECREF(pybsddb_version_s);
     pybsddb_version_s = NULL;
-    Py_DECREF(svnid_s);
-    svnid_s = NULL;
+    Py_DECREF(cvsid_s);
+    cvsid_s = NULL;
     Py_DECREF(db_version_s);
     db_version_s = NULL;
 
@@ -5702,7 +7032,7 @@
     ADD_INT(d, DB_RPCCLIENT);
 #else
     ADD_INT(d, DB_CLIENT);
-    /* allow apps to be written using DB_RPCCLIENT on older BerkeleyDB */
+    /* allow apps to be written using DB_RPCCLIENT on older Berkeley DB */
     _addIntToDict(d, "DB_RPCCLIENT", DB_CLIENT);
 #endif
     ADD_INT(d, DB_XA_CREATE);
@@ -5710,6 +7040,9 @@
     ADD_INT(d, DB_CREATE);
     ADD_INT(d, DB_NOMMAP);
     ADD_INT(d, DB_THREAD);
+#if (DBVER >= 45)
+    ADD_INT(d, DB_MULTIVERSION);
+#endif
 
     ADD_INT(d, DB_FORCE);
     ADD_INT(d, DB_INIT_CDB);
@@ -5719,6 +7052,8 @@
     ADD_INT(d, DB_INIT_TXN);
     ADD_INT(d, DB_JOINENV);
 
+    ADD_INT(d, DB_XIDDATASIZE);
+
     ADD_INT(d, DB_RECOVER);
     ADD_INT(d, DB_RECOVER_FATAL);
     ADD_INT(d, DB_TXN_NOSYNC);
@@ -5747,10 +7082,7 @@
     ADD_INT(d, DB_NOORDERCHK);
     ADD_INT(d, DB_ORDERCHKONLY);
     ADD_INT(d, DB_PR_PAGE);
-#if ! (DBVER >= 33)
-    ADD_INT(d, DB_VRFY_FLAGMASK);
-    ADD_INT(d, DB_PR_HEADERS);
-#endif
+
     ADD_INT(d, DB_PR_RECOVERYTEST);
     ADD_INT(d, DB_SALVAGE);
 
@@ -5759,19 +7091,16 @@
     ADD_INT(d, DB_LOCK_OLDEST);
     ADD_INT(d, DB_LOCK_RANDOM);
     ADD_INT(d, DB_LOCK_YOUNGEST);
-#if (DBVER >= 33)
     ADD_INT(d, DB_LOCK_MAXLOCKS);
     ADD_INT(d, DB_LOCK_MINLOCKS);
     ADD_INT(d, DB_LOCK_MINWRITE);
-#endif
 
+    ADD_INT(d, DB_LOCK_EXPIRE);
+#if (DBVER >= 43)
+    ADD_INT(d, DB_LOCK_MAXWRITE);
+#endif
 
-#if (DBVER >= 33)
-    /* docs say to use zero instead */
     _addIntToDict(d, "DB_LOCK_CONFLICT", 0);
-#else
-    ADD_INT(d, DB_LOCK_CONFLICT);
-#endif
 
     ADD_INT(d, DB_LOCK_DUMP);
     ADD_INT(d, DB_LOCK_GET);
@@ -5788,39 +7117,31 @@
     ADD_INT(d, DB_LOCK_IWRITE);
     ADD_INT(d, DB_LOCK_IREAD);
     ADD_INT(d, DB_LOCK_IWR);
-#if (DBVER >= 33)
 #if (DBVER < 44)
     ADD_INT(d, DB_LOCK_DIRTY);
 #else
     ADD_INT(d, DB_LOCK_READ_UNCOMMITTED);  /* renamed in 4.4 */
 #endif
     ADD_INT(d, DB_LOCK_WWRITE);
-#endif
 
     ADD_INT(d, DB_LOCK_RECORD);
     ADD_INT(d, DB_LOCK_UPGRADE);
     ADD_INT(d, DB_LOCK_SWITCH);
-#if (DBVER >= 33)
     ADD_INT(d, DB_LOCK_UPGRADE_WRITE);
-#endif
 
     ADD_INT(d, DB_LOCK_NOWAIT);
     ADD_INT(d, DB_LOCK_RECORD);
     ADD_INT(d, DB_LOCK_UPGRADE);
 
-#if (DBVER >= 33)
     ADD_INT(d, DB_LSTAT_ABORTED);
 #if (DBVER < 43)
     ADD_INT(d, DB_LSTAT_ERR);
 #endif
     ADD_INT(d, DB_LSTAT_FREE);
     ADD_INT(d, DB_LSTAT_HELD);
-#if (DBVER == 33)
-    ADD_INT(d, DB_LSTAT_NOGRANT);
-#endif
+
     ADD_INT(d, DB_LSTAT_PENDING);
     ADD_INT(d, DB_LSTAT_WAITING);
-#endif
 
     ADD_INT(d, DB_ARCH_ABS);
     ADD_INT(d, DB_ARCH_DATA);
@@ -5850,21 +7171,20 @@
 #if (DBVER < 45)
     ADD_INT(d, DB_CACHED_COUNTS);
 #endif
+
 #if (DBVER >= 41)
     _addIntToDict(d, "DB_CHECKPOINT", 0);
 #else
     ADD_INT(d, DB_CHECKPOINT);
     ADD_INT(d, DB_CURLSN);
 #endif
-#if ((DBVER >= 33) && (DBVER <= 41))
+#if (DBVER <= 41)
     ADD_INT(d, DB_COMMIT);
 #endif
     ADD_INT(d, DB_CONSUME);
     ADD_INT(d, DB_CONSUME_WAIT);
     ADD_INT(d, DB_CURRENT);
-#if (DBVER >= 33)
     ADD_INT(d, DB_FAST_STAT);
-#endif
     ADD_INT(d, DB_FIRST);
     ADD_INT(d, DB_FLUSH);
     ADD_INT(d, DB_GET_BOTH);
@@ -5892,21 +7212,16 @@
 
     ADD_INT(d, DB_OPFLAGS_MASK);
     ADD_INT(d, DB_RMW);
-#if (DBVER >= 33)
     ADD_INT(d, DB_DIRTY_READ);
     ADD_INT(d, DB_MULTIPLE);
     ADD_INT(d, DB_MULTIPLE_KEY);
-#endif
 
 #if (DBVER >= 44)
     ADD_INT(d, DB_READ_UNCOMMITTED);    /* replaces DB_DIRTY_READ in 4.4 */
     ADD_INT(d, DB_READ_COMMITTED);
 #endif
 
-#if (DBVER >= 33)
     ADD_INT(d, DB_DONOTINDEX);
-    ADD_INT(d, DB_XIDDATASIZE);
-#endif
 
 #if (DBVER >= 41)
     _addIntToDict(d, "DB_INCOMPLETE", 0);
@@ -5924,17 +7239,17 @@
     ADD_INT(d, DB_OLD_VERSION);
     ADD_INT(d, DB_RUNRECOVERY);
     ADD_INT(d, DB_VERIFY_BAD);
-#if (DBVER >= 33)
     ADD_INT(d, DB_PAGE_NOTFOUND);
     ADD_INT(d, DB_SECONDARY_BAD);
-#endif
-#if (DBVER >= 40)
     ADD_INT(d, DB_STAT_CLEAR);
     ADD_INT(d, DB_REGION_INIT);
     ADD_INT(d, DB_NOLOCKING);
     ADD_INT(d, DB_YIELDCPU);
     ADD_INT(d, DB_PANIC_ENVIRONMENT);
     ADD_INT(d, DB_NOPANIC);
+
+#if (DBVER >= 41)
+    ADD_INT(d, DB_OVERWRITE);
 #endif
 
 #ifdef DB_REGISTER
@@ -5945,27 +7260,118 @@
     ADD_INT(d, DB_TIME_NOTGRANTED);
     ADD_INT(d, DB_TXN_NOT_DURABLE);
     ADD_INT(d, DB_TXN_WRITE_NOSYNC);
-    ADD_INT(d, DB_LOG_AUTOREMOVE);
-    ADD_INT(d, DB_DIRECT_LOG);
     ADD_INT(d, DB_DIRECT_DB);
     ADD_INT(d, DB_INIT_REP);
     ADD_INT(d, DB_ENCRYPT);
     ADD_INT(d, DB_CHKSUM);
 #endif
 
+#if (DBVER >= 42) && (DBVER < 47)
+    ADD_INT(d, DB_LOG_AUTOREMOVE);
+    ADD_INT(d, DB_DIRECT_LOG);
+#endif
+
+#if (DBVER >= 47)
+    ADD_INT(d, DB_LOG_DIRECT);
+    ADD_INT(d, DB_LOG_DSYNC);
+    ADD_INT(d, DB_LOG_IN_MEMORY);
+    ADD_INT(d, DB_LOG_AUTO_REMOVE);
+    ADD_INT(d, DB_LOG_ZERO);
+#endif
+
+#if (DBVER >= 44)
+    ADD_INT(d, DB_DSYNC_DB);
+#endif
+
+#if (DBVER >= 45)
+    ADD_INT(d, DB_TXN_SNAPSHOT);
+#endif
+
+    ADD_INT(d, DB_VERB_DEADLOCK);
+#if (DBVER >= 46)
+    ADD_INT(d, DB_VERB_FILEOPS);
+    ADD_INT(d, DB_VERB_FILEOPS_ALL);
+#endif
+    ADD_INT(d, DB_VERB_RECOVERY);
+#if (DBVER >= 44)
+    ADD_INT(d, DB_VERB_REGISTER);
+#endif
+    ADD_INT(d, DB_VERB_REPLICATION);
+    ADD_INT(d, DB_VERB_WAITSFOR);
+
+#if (DBVER >= 45)
+    ADD_INT(d, DB_EVENT_PANIC);
+    ADD_INT(d, DB_EVENT_REP_CLIENT);
+#if (DBVER >= 46)
+    ADD_INT(d, DB_EVENT_REP_ELECTED);
+#endif
+    ADD_INT(d, DB_EVENT_REP_MASTER);
+    ADD_INT(d, DB_EVENT_REP_NEWMASTER);
+#if (DBVER >= 46)
+    ADD_INT(d, DB_EVENT_REP_PERM_FAILED);
+#endif
+    ADD_INT(d, DB_EVENT_REP_STARTUPDONE);
+    ADD_INT(d, DB_EVENT_WRITE_FAILED);
+#endif
+
+    ADD_INT(d, DB_REP_DUPMASTER);
+    ADD_INT(d, DB_REP_HOLDELECTION);
+#if (DBVER >= 44)
+    ADD_INT(d, DB_REP_IGNORE);
+    ADD_INT(d, DB_REP_JOIN_FAILURE);
+#endif
+#if (DBVER >= 42)
+    ADD_INT(d, DB_REP_ISPERM);
+    ADD_INT(d, DB_REP_NOTPERM);
+#endif
+    ADD_INT(d, DB_REP_NEWSITE);
+
+    ADD_INT(d, DB_REP_MASTER);
+    ADD_INT(d, DB_REP_CLIENT);
+#if (DBVER >= 45)
+    ADD_INT(d, DB_REP_ELECTION);
+
+    ADD_INT(d, DB_REP_ACK_TIMEOUT);
+    ADD_INT(d, DB_REP_CONNECTION_RETRY);
+    ADD_INT(d, DB_REP_ELECTION_TIMEOUT);
+    ADD_INT(d, DB_REP_ELECTION_RETRY);
+#endif
+#if (DBVER >= 46)
+    ADD_INT(d, DB_REP_CHECKPOINT_DELAY);
+    ADD_INT(d, DB_REP_FULL_ELECTION_TIMEOUT);
+#endif
+
+#if (DBVER >= 45)
+    ADD_INT(d, DB_REPMGR_PEER);
+    ADD_INT(d, DB_REPMGR_ACKS_ALL);
+    ADD_INT(d, DB_REPMGR_ACKS_ALL_PEERS);
+    ADD_INT(d, DB_REPMGR_ACKS_NONE);
+    ADD_INT(d, DB_REPMGR_ACKS_ONE);
+    ADD_INT(d, DB_REPMGR_ACKS_ONE_PEER);
+    ADD_INT(d, DB_REPMGR_ACKS_QUORUM);
+    ADD_INT(d, DB_REPMGR_CONNECTED);
+    ADD_INT(d, DB_REPMGR_DISCONNECTED);
+    ADD_INT(d, DB_STAT_CLEAR);
+    ADD_INT(d, DB_STAT_ALL);
+#endif
+
 #if (DBVER >= 43)
-    ADD_INT(d, DB_LOG_INMEMORY);
     ADD_INT(d, DB_BUFFER_SMALL);
     ADD_INT(d, DB_SEQ_DEC);
     ADD_INT(d, DB_SEQ_INC);
     ADD_INT(d, DB_SEQ_WRAP);
 #endif
 
+#if (DBVER >= 43) && (DBVER < 47)
+    ADD_INT(d, DB_LOG_INMEMORY);
+    ADD_INT(d, DB_DSYNC_LOG);
+#endif
+
 #if (DBVER >= 41)
     ADD_INT(d, DB_ENCRYPT_AES);
     ADD_INT(d, DB_AUTO_COMMIT);
 #else
-    /* allow berkeleydb 4.1 aware apps to run on older versions */
+    /* allow Berkeley DB 4.1 aware apps to run on older versions */
     _addIntToDict(d, "DB_AUTO_COMMIT", 0);
 #endif
 
@@ -5979,10 +7385,8 @@
     ADD_INT(d, ENOENT);
     ADD_INT(d, EPERM);
 
-#if (DBVER >= 40)
     ADD_INT(d, DB_SET_LOCK_TIMEOUT);
     ADD_INT(d, DB_SET_TXN_TIMEOUT);
-#endif
 
     /* The exception name must be correct for pickled exception *
      * objects to unpickle properly.                            */
@@ -6000,20 +7404,37 @@
     DBError = NULL;     /* used in MAKE_EX so that it derives from nothing */
     MAKE_EX(DBError);
 
+#if (PY_VERSION_HEX < 0x03000000)
     /* Some magic to make DBNotFoundError and DBKeyEmptyError derive
      * from both DBError and KeyError, since the API only supports
      * using one base class. */
     PyDict_SetItemString(d, "KeyError", PyExc_KeyError);
-    { 
-	    PyObject *builtin_mod = PyImport_ImportModule("builtins");
-	    PyDict_SetItemString(d, "__builtins__", builtin_mod);
-    }
     PyRun_String("class DBNotFoundError(DBError, KeyError): pass\n"
 	         "class DBKeyEmptyError(DBError, KeyError): pass",
                  Py_file_input, d, d);
     DBNotFoundError = PyDict_GetItemString(d, "DBNotFoundError");
     DBKeyEmptyError = PyDict_GetItemString(d, "DBKeyEmptyError");
     PyDict_DelItemString(d, "KeyError");
+#else
+    /* Since Python 2.5, PyErr_NewException() accepts a tuple, to be able to
+    ** derive from several classes. We use this new API only for Python 3.0,
+    ** though.
+    */
+    {
+        PyObject* bases;
+
+        bases = PyTuple_Pack(2, DBError, PyExc_KeyError);
+
+#define MAKE_EX2(name)   name = PyErr_NewException(PYBSDDB_EXCEPTION_BASE #name, bases, NULL); \
+                         PyDict_SetItemString(d, #name, name)
+        MAKE_EX2(DBNotFoundError);
+        MAKE_EX2(DBKeyEmptyError);
+
+#undef MAKE_EX2
+
+        Py_XDECREF(bases);
+    }
+#endif
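
The MAKE_EX2 branch leans on the fact that the base argument of PyErr_NewException() may be either a single class or a tuple of classes. A standalone sketch of creating one exception with two bases; make_dual_base_exception and the dotted name are illustrative only:

    #include <Python.h>

    static PyObject *
    make_dual_base_exception(PyObject *module_dict, PyObject *db_error)
    {
        PyObject *bases, *exc;

        bases = PyTuple_Pack(2, db_error, PyExc_KeyError);
        if (bases == NULL)
            return NULL;

        /* The new class is a subclass of both DBError and KeyError. */
        exc = PyErr_NewException("example._bsddb.DBNotFoundError", bases, NULL);
        Py_DECREF(bases);
        if (exc == NULL)
            return NULL;

        if (PyDict_SetItemString(module_dict, "DBNotFoundError", exc) < 0) {
            Py_DECREF(exc);
            return NULL;
        }
        return exc;
    }
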
 
 
 #if !INCOMPLETE_IS_WARNING
@@ -6030,10 +7451,8 @@
     MAKE_EX(DBNoServerError);
     MAKE_EX(DBNoServerHomeError);
     MAKE_EX(DBNoServerIDError);
-#if (DBVER >= 33)
     MAKE_EX(DBPageNotFoundError);
     MAKE_EX(DBSecondaryBadError);
-#endif
 
     MAKE_EX(DBInvalidArgError);
     MAKE_EX(DBAccessError);
@@ -6045,6 +7464,12 @@
     MAKE_EX(DBNoSuchFileError);
     MAKE_EX(DBPermissionsError);
 
+#if (DBVER >= 42)
+    MAKE_EX(DBRepHandleDeadError);
+#endif
+
+    MAKE_EX(DBRepUnavailError);
+
 #undef MAKE_EX
 
     /* Initialise the C API structure and add it to the module */
@@ -6065,18 +7490,31 @@
     /* Check for errors */
     if (PyErr_Occurred()) {
         PyErr_Print();
-        Py_FatalError("can't initialize module _bsddb");
-	Py_DECREF(m);
-	m = NULL;
+        Py_FatalError("can't initialize module _bsddb/_pybsddb");
+        Py_DECREF(m);
+        m = NULL;
     }
+#if (PY_VERSION_HEX < 0x03000000)
+    return;
+#else
     return m;
+#endif
 }
 
 /* allow this module to be named _pybsddb so that it can be installed
  * and imported on top of python >= 2.3 that includes its own older
  * copy of the library named _bsddb without importing the old version. */
-PyMODINIT_FUNC init_pybsddb(void)
+#if (PY_VERSION_HEX < 0x03000000)
+DL_EXPORT(void) init_pybsddb(void)
+#else
+PyMODINIT_FUNC PyInit__pybsddb(void)  /* Note the two underscores */
+#endif
 {
     strncpy(_bsddbModuleName, "_pybsddb", MODULE_NAME_MAX_LEN);
-    return PyInit__bsddb();
+#if (PY_VERSION_HEX < 0x03000000)
+    init_bsddb();
+#else
+    return PyInit__bsddb();   /* Note the two underscores */
+#endif
 }
+
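
Stripped of the bsddb specifics, the module initialisation above follows the usual dual 2.x/3.x skeleton: Py_InitModule() and a void init function on 2.x, PyModule_Create() plus a PyInit_* function returning the module on 3.x. A minimal sketch with a hypothetical module name:

    #include <Python.h>

    static PyMethodDef example_methods[] = {
        {NULL, NULL}    /* sentinel */
    };

    #if (PY_VERSION_HEX >= 0x03000000)
    static struct PyModuleDef example_module = {
        PyModuleDef_HEAD_INIT,
        "_example",         /* hypothetical module name */
        NULL,               /* no docstring */
        -1,                 /* state kept in C globals */
        example_methods,
        NULL, NULL, NULL, NULL
    };
    #endif

    #if (PY_VERSION_HEX < 0x03000000)
    PyMODINIT_FUNC init_example(void)       /* 2.x: returns nothing */
    #else
    PyMODINIT_FUNC PyInit__example(void)    /* 3.x: returns the module */
    #endif
    {
    #if (PY_VERSION_HEX < 0x03000000)
        Py_InitModule("_example", example_methods);
    #else
        return PyModule_Create(&example_module);
    #endif
    }
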

Modified: python/branches/py3k/Modules/bsddb.h
==============================================================================
--- python/branches/py3k/Modules/bsddb.h	(original)
+++ python/branches/py3k/Modules/bsddb.h	Sun Aug 31 16:12:11 2008
@@ -36,7 +36,7 @@
 /*
  * Handwritten code to wrap version 3.x of the Berkeley DB library,
  * written to replace a SWIG-generated file.  It has since been updated
- * to compile with BerkeleyDB versions 3.2 through 4.2.
+ * to compile with Berkeley DB versions 3.2 through 4.2.
  *
  * This module was started by Andrew Kuchling to remove the dependency
  * on SWIG in a package by Gregory P. Smith who based his work on a
@@ -105,7 +105,7 @@
 #error "eek! DBVER can't handle minor versions > 9"
 #endif
 
-#define PY_BSDDB_VERSION "4.6.0"
+#define PY_BSDDB_VERSION "4.7.3pre2"
 
 /* Python object definitions */
 
@@ -119,17 +119,27 @@
 };
 
 
+
+struct DBObject;          /* Forward declaration */
+struct DBCursorObject;    /* Forward declaration */
+struct DBTxnObject;       /* Forward declaration */
+struct DBSequenceObject;  /* Forward declaration */
+
 typedef struct {
     PyObject_HEAD
     DB_ENV*     db_env;
     u_int32_t   flags;             /* saved flags from open() */
     int         closed;
     struct behaviourFlags moduleFlags;
+    PyObject*       event_notifyCallback;
+    struct DBObject *children_dbs;
+    struct DBTxnObject *children_txns;
+    PyObject        *private_obj;
+    PyObject        *rep_transport;
     PyObject        *in_weakreflist; /* List of weak references */
 } DBEnvObject;
 
-
-typedef struct {
+typedef struct DBObject {
     PyObject_HEAD
     DB*             db;
     DBEnvObject*    myenvobj;  /* PyObject containing the DB_ENV */
@@ -137,27 +147,48 @@
     u_int32_t       setflags;  /* saved flags from set_flags() */
     int             haveStat;
     struct behaviourFlags moduleFlags;
-#if (DBVER >= 33)
+    struct DBTxnObject *txn;
+    struct DBCursorObject *children_cursors;
+#if (DBVER >=43)
+    struct DBSequenceObject *children_sequences;
+#endif
+    struct DBObject **sibling_prev_p;
+    struct DBObject *sibling_next;
+    struct DBObject **sibling_prev_p_txn;
+    struct DBObject *sibling_next_txn;
     PyObject*       associateCallback;
     PyObject*       btCompareCallback;
     int             primaryDBType;
-#endif
+    PyObject        *private_obj;
     PyObject        *in_weakreflist; /* List of weak references */
 } DBObject;
 
 
-typedef struct {
+typedef struct DBCursorObject {
     PyObject_HEAD
     DBC*            dbc;
+    struct DBCursorObject **sibling_prev_p;
+    struct DBCursorObject *sibling_next;
+    struct DBCursorObject **sibling_prev_p_txn;
+    struct DBCursorObject *sibling_next_txn;
     DBObject*       mydb;
+    struct DBTxnObject *txn;
     PyObject        *in_weakreflist; /* List of weak references */
 } DBCursorObject;
 
 
-typedef struct {
+typedef struct DBTxnObject {
     PyObject_HEAD
     DB_TXN*         txn;
-    PyObject        *env;
+    DBEnvObject*    env;
+    int             flag_prepare;
+    struct DBTxnObject *parent_txn;
+    struct DBTxnObject **sibling_prev_p;
+    struct DBTxnObject *sibling_next;
+    struct DBTxnObject *children_txns;
+    struct DBObject *children_dbs;
+    struct DBSequenceObject *children_sequences;
+    struct DBCursorObject *children_cursors;
     PyObject        *in_weakreflist; /* List of weak references */
 } DBTxnObject;
 
@@ -170,13 +201,17 @@
 
 
 #if (DBVER >= 43)
-typedef struct {
+typedef struct DBSequenceObject {
     PyObject_HEAD
     DB_SEQUENCE*     sequence;
     DBObject*        mydb;
+    struct DBTxnObject *txn;
+    struct DBSequenceObject **sibling_prev_p;
+    struct DBSequenceObject *sibling_next;
+    struct DBSequenceObject **sibling_prev_p_txn;
+    struct DBSequenceObject *sibling_next_txn;
     PyObject        *in_weakreflist; /* List of weak references */
 } DBSequenceObject;
-static PyTypeObject DBSequence_Type;
 #endif
 
 

Modified: python/branches/py3k/setup.py
==============================================================================
--- python/branches/py3k/setup.py	(original)
+++ python/branches/py3k/setup.py	Sun Aug 31 16:12:11 2008
@@ -673,12 +673,8 @@
         # a release.  Most open source OSes come with one or more
         # versions of BerkeleyDB already installed.
 
-        max_db_ver = (4, 5)  # XXX(gregory.p.smith): 4.6 "works" but seems to
-                             # have issues on many platforms.  I've temporarily
-                             # disabled 4.6 to see what the odd platform
-                             # buildbots say.
-        max_db_ver = (4, 7)  # XXX(matthias.klose): test with 4.7 on some buildds
-        min_db_ver = (3, 3)
+        max_db_ver = (4, 7)
+        min_db_ver = (4, 0)
         db_setup_debug = False   # verbose debug prints from this script?
 
         # construct a list of paths to look for the header file in on

