[Python-checkins] cpython (3.1): Issue #12352: Fix a deadlock in multiprocessing.Heap when a block is freed by

charles-francois.natali python-checkins at python.org
Sat Jul 2 14:36:24 CEST 2011


http://hg.python.org/cpython/rev/0d4ca1e77205
changeset:   71126:0d4ca1e77205
branch:      3.1
parent:      71040:633597815463
user:        Charles-François Natali <neologix at free.fr>
date:        Sat Jul 02 14:35:49 2011 +0200
summary:
  Issue #12352: Fix a deadlock in multiprocessing.Heap when a block is freed by
the garbage collector while the Heap lock is held.

files:
  Lib/multiprocessing/heap.py      |  39 ++++++++++++++++---
  Lib/test/test_multiprocessing.py |  24 ++++++++++++
  Misc/NEWS                        |   3 +
  3 files changed, 60 insertions(+), 6 deletions(-)
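
For readers following along, the failure mode is easy to reproduce in
miniature on a current CPython (3.4+, where cycles with __del__ are
collected, per PEP 442). The names below are illustrative stand-ins, not
the multiprocessing internals, and a trylock is used so the script reports
the problem instead of hanging:

    import gc
    import threading

    heap_lock = threading.Lock()        # stands in for Heap._lock

    class Block:
        # Stands in for a heap block whose finalizer must call free().
        def __del__(self):
            # The buggy code did a blocking acquire() here, which deadlocks
            # when the collection runs while heap_lock is already held by
            # the same thread.  A trylock makes the situation visible.
            if heap_lock.acquire(False):
                heap_lock.release()
                print("finalizer got the lock; free can run immediately")
            else:
                print("lock already held; a blocking acquire() would deadlock")

    def make_cycle():
        # A reference cycle keeps the object alive until the cyclic
        # collector runs, so the finalizer fires at an arbitrary point
        # rather than at a predictable refcount-zero point.
        a = Block()
        a.self_ref = a

    with heap_lock:                     # e.g. inside Heap.malloc()
        make_cycle()
        gc.collect()                    # finalizer runs while the lock is held

The patch applies the same trylock idea inside Heap.free(), deferring the
actual bookkeeping to a pending list whenever the lock is unavailable.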


diff --git a/Lib/multiprocessing/heap.py b/Lib/multiprocessing/heap.py
--- a/Lib/multiprocessing/heap.py
+++ b/Lib/multiprocessing/heap.py
@@ -101,6 +101,8 @@
         self._stop_to_block = {}
         self._allocated_blocks = set()
         self._arenas = []
+        # list of pending blocks to free - see free() comment below
+        self._pending_free_blocks = []
 
     @staticmethod
     def _roundup(n, alignment):
@@ -175,15 +177,39 @@
 
         return start, stop
 
+    def _free_pending_blocks(self):
+        # Free all the blocks in the pending list - called with the lock held.
+        while True:
+            try:
+                block = self._pending_free_blocks.pop()
+            except IndexError:
+                break
+            self._allocated_blocks.remove(block)
+            self._free(block)
+
     def free(self, block):
         # free a block returned by malloc()
+        # Since free() can be called asynchronously by the GC, it could happen
+        # that it's called while self._lock is held: in that case,
+        # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
+        # trylock is used instead, and if the lock can't be acquired
+        # immediately, the block is added to a list of blocks to be freed
+        # synchronously sometime later from malloc() or free(), by calling
+        # _free_pending_blocks() (appending and retrieving from a list is not
+        # strictly thread-safe but under CPython it's atomic thanks to the GIL).
         assert os.getpid() == self._lastpid
-        self._lock.acquire()
-        try:
-            self._allocated_blocks.remove(block)
-            self._free(block)
-        finally:
-            self._lock.release()
+        if not self._lock.acquire(False):
+            # can't acquire the lock right now, add the block to the list of
+            # pending blocks to free
+            self._pending_free_blocks.append(block)
+        else:
+            # we hold the lock
+            try:
+                self._free_pending_blocks()
+                self._allocated_blocks.remove(block)
+                self._free(block)
+            finally:
+                self._lock.release()
 
     def malloc(self, size):
         # return a block of right size (possibly rounded up)
@@ -191,6 +217,7 @@
         if os.getpid() != self._lastpid:
             self.__init__()                     # reinitialize after fork
         self._lock.acquire()
+        self._free_pending_blocks()
         try:
             size = self._roundup(max(size,1), self._alignment)
             (arena, start, stop) = self._malloc(size)
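
Condensed into a standalone class (illustrative names; the real Heap also
manages arenas, free lists and size rounding), the pattern the hunks above
introduce looks roughly like this: free() never blocks, and whichever
caller next holds the lock drains the backlog.

    import threading

    class DeferredFreeHeap:
        # Sketch of the trylock + pending-list pattern, not the real Heap.

        def __init__(self):
            self._lock = threading.Lock()
            self._allocated = set()
            self._pending_free = []     # append()/pop() are atomic under the GIL

        def _do_free(self, block):
            # Called with self._lock held; stands in for the real bookkeeping.
            self._allocated.remove(block)

        def _drain_pending(self):
            # Called with self._lock held.
            while True:
                try:
                    block = self._pending_free.pop()
                except IndexError:
                    break
                self._do_free(block)

        def free(self, block):
            if not self._lock.acquire(False):
                # Possibly running from a finalizer while this very thread
                # already holds the lock: defer instead of deadlocking.
                self._pending_free.append(block)
            else:
                try:
                    self._drain_pending()
                    self._do_free(block)
                finally:
                    self._lock.release()

        def malloc(self, size):
            with self._lock:
                self._drain_pending()
                block = object()   # opaque token; the real code carves size bytes from an arena
                self._allocated.add(block)
                return block

    heap = DeferredFreeHeap()
    b = heap.malloc(16)
    heap.free(b)                        # frees now, or defers if the lock is busy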
diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py
--- a/Lib/test/test_multiprocessing.py
+++ b/Lib/test/test_multiprocessing.py
@@ -1580,6 +1580,8 @@
         # verify the state of the heap
         all = []
         occupied = 0
+        heap._lock.acquire()
+        self.addCleanup(heap._lock.release)
         for L in list(heap._len_to_seq.values()):
             for arena, start, stop in L:
                 all.append((heap._arenas.index(arena), start, stop,
@@ -1597,6 +1599,28 @@
             self.assertTrue((arena != narena and nstart == 0) or
                             (stop == nstart))
 
+    def test_free_from_gc(self):
+        # Check that freeing of blocks by the garbage collector doesn't deadlock
+        # (issue #12352).
+        # Make sure the GC is enabled, and set lower collection thresholds to
+        # make collections more frequent (and increase the probability of
+        # deadlock).
+        if not gc.isenabled():
+            gc.enable()
+            self.addCleanup(gc.disable)
+        thresholds = gc.get_threshold()
+        self.addCleanup(gc.set_threshold, *thresholds)
+        gc.set_threshold(10)
+
+        # perform numerous block allocations, with cyclic references to make
+        # sure objects are collected asynchronously by the gc
+        for i in range(5000):
+            a = multiprocessing.heap.BufferWrapper(1)
+            b = multiprocessing.heap.BufferWrapper(1)
+            # circular references
+            a.buddy = b
+            b.buddy = a
+
 #
 #
 #
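
The new test relies on making the cyclic collector run as often as possible
while the heap lock happens to be held. Stripped of the unittest
scaffolding, the technique is simply (Wrapper is a stand-in for
multiprocessing.heap.BufferWrapper):

    import gc

    old_thresholds = gc.get_threshold()
    gc.set_threshold(10)              # collect after only ~10 net allocations
    try:
        class Wrapper:                # stand-in for BufferWrapper(1)
            pass

        for _ in range(5000):
            a, b = Wrapper(), Wrapper()
            a.buddy, b.buddy = b, a   # cycle: only the GC can reclaim the pair
    finally:
        gc.set_threshold(*old_thresholds)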
diff --git a/Misc/NEWS b/Misc/NEWS
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -107,6 +107,9 @@
 Library
 -------
 
+- Issue #12352: Fix a deadlock in multiprocessing.Heap when a block is freed by
+  the garbage collector while the Heap lock is held.
+
 - Issue #985064: Make plistlib more resilient to faulty input plists.
   Patch by Mher Movsisyan.
 

-- 
Repository URL: http://hg.python.org/cpython

