[Python-checkins] Use support.sleeping_retry() and support.busy_retry() (#93848)

vstinner webhook-mailer at python.org
Wed Jun 15 08:10:09 EDT 2022


https://github.com/python/cpython/commit/0ba80273f2dba5b70de870a333e65ad025cca640
commit: 0ba80273f2dba5b70de870a333e65ad025cca640
branch: main
author: Victor Stinner <vstinner at python.org>
committer: vstinner <vstinner at python.org>
date: 2022-06-15T14:09:56+02:00
summary:

Use support.sleeping_retry() and support.busy_retry() (#93848)

* Replace time.sleep(0.010) with sleeping_retry() to
  use an exponential sleep.
* support.wait_process(): reuse sleeping_retry().
* _test_eintr: remove unused variables.

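For context, both helpers are generators that yield until a timeout
expires; with error=False they simply stop yielding instead of raising,
so callers can use a for/else block. A minimal sketch of the pattern
(condition_is_met() is a hypothetical predicate, not part of this patch):

    from test import support

    # busy_retry(): poll in a tight loop, no sleep between attempts.
    for _ in support.busy_retry(support.SHORT_TIMEOUT, error=False):
        if condition_is_met():
            break
    else:
        raise AssertionError("timed out waiting for the condition")

    # sleeping_retry(): same idea, but sleep between attempts with an
    # exponentially growing delay instead of a fixed time.sleep(0.010).
    for _ in support.sleeping_retry(support.SHORT_TIMEOUT, error=False):
        if condition_is_met():
            break
    else:
        raise AssertionError("timed out waiting for the condition")
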
files:
M Lib/test/_test_eintr.py
M Lib/test/signalinterproctester.py
M Lib/test/support/__init__.py
M Lib/test/support/threading_helper.py
M Lib/test/test_asyncio/utils.py
M Lib/test/test_asyncore.py
M Lib/test/test_logging.py

diff --git a/Lib/test/_test_eintr.py b/Lib/test/_test_eintr.py
index e43b59d064f55..ca637b2906332 100644
--- a/Lib/test/_test_eintr.py
+++ b/Lib/test/_test_eintr.py
@@ -403,11 +403,9 @@ def check_sigwait(self, wait_func):
         old_mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
         self.addCleanup(signal.pthread_sigmask, signal.SIG_UNBLOCK, [signum])
 
-        t0 = time.monotonic()
         proc = self.subprocess(code)
         with kill_on_error(proc):
             wait_func(signum)
-            dt = time.monotonic() - t0
 
         self.assertEqual(proc.wait(), 0)
 
@@ -497,16 +495,18 @@ def _lock(self, lock_func, lock_name):
         proc = self.subprocess(code)
         with kill_on_error(proc):
             with open(os_helper.TESTFN, 'wb') as f:
-                while True:  # synchronize the subprocess
-                    dt = time.monotonic() - start_time
-                    if dt > 60.0:
-                        raise Exception("failed to sync child in %.1f sec" % dt)
+                # synchronize the subprocess
+                start_time = time.monotonic()
+                for _ in support.sleeping_retry(60.0, error=False):
                     try:
                         lock_func(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
                         lock_func(f, fcntl.LOCK_UN)
-                        time.sleep(0.01)
                     except BlockingIOError:
                         break
+                else:
+                    dt = time.monotonic() - start_time
+                    raise Exception("failed to sync child in %.1f sec" % dt)
+
                 # the child locked the file just a moment ago for 'sleep_time' seconds
                 # that means that the lock below will block for 'sleep_time' minus some
                 # potential context switch delay
diff --git a/Lib/test/signalinterproctester.py b/Lib/test/signalinterproctester.py
index bc60b747f7168..cdcd92a8baace 100644
--- a/Lib/test/signalinterproctester.py
+++ b/Lib/test/signalinterproctester.py
@@ -28,16 +28,15 @@ def wait_signal(self, child, signame):
             # (if set)
             child.wait()
 
-        timeout = support.SHORT_TIMEOUT
-        deadline = time.monotonic() + timeout
-
-        while time.monotonic() < deadline:
+        start_time = time.monotonic()
+        for _ in support.busy_retry(support.SHORT_TIMEOUT, error=False):
             if self.got_signals[signame]:
                 return
             signal.pause()
-
-        self.fail('signal %s not received after %s seconds'
-                  % (signame, timeout))
+        else:
+            dt = time.monotonic() - start_time
+            self.fail('signal %s not received after %.1f seconds'
+                      % (signame, dt))
 
     def subprocess_send_signal(self, pid, signame):
         code = 'import os, signal; os.kill(%s, signal.%s)' % (pid, signame)
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index a62e8b4ec4f6b..a875548018b99 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -2072,31 +2072,26 @@ def wait_process(pid, *, exitcode, timeout=None):
 
         if timeout is None:
             timeout = SHORT_TIMEOUT
-        t0 = time.monotonic()
-        sleep = 0.001
-        max_sleep = 0.1
-        while True:
+
+        start_time = time.monotonic()
+        for _ in sleeping_retry(timeout, error=False):
             pid2, status = os.waitpid(pid, os.WNOHANG)
             if pid2 != 0:
                 break
-            # process is still running
-
-            dt = time.monotonic() - t0
-            if dt > SHORT_TIMEOUT:
-                try:
-                    os.kill(pid, signal.SIGKILL)
-                    os.waitpid(pid, 0)
-                except OSError:
-                    # Ignore errors like ChildProcessError or PermissionError
-                    pass
-
-                raise AssertionError(f"process {pid} is still running "
-                                     f"after {dt:.1f} seconds")
+            # retry: the process is still running
+        else:
+            try:
+                os.kill(pid, signal.SIGKILL)
+                os.waitpid(pid, 0)
+            except OSError:
+                # Ignore errors like ChildProcessError or PermissionError
+                pass
 
-            sleep = min(sleep * 2, max_sleep)
-            time.sleep(sleep)
+            dt = time.monotonic() - start_time
+            raise AssertionError(f"process {pid} is still running "
+                                 f"after {dt:.1f} seconds")
     else:
-        # Windows implementation
+        # Windows implementation: doesn't support timeout :-(
         pid2, status = os.waitpid(pid, 0)
 
     exitcode2 = os.waitstatus_to_exitcode(status)
diff --git a/Lib/test/support/threading_helper.py b/Lib/test/support/threading_helper.py
index 26cbc6f4d2439..b9973c8bf5c91 100644
--- a/Lib/test/support/threading_helper.py
+++ b/Lib/test/support/threading_helper.py
@@ -88,19 +88,17 @@ def wait_threads_exit(timeout=None):
         yield
     finally:
         start_time = time.monotonic()
-        deadline = start_time + timeout
-        while True:
+        for _ in support.sleeping_retry(timeout, error=False):
+            support.gc_collect()
             count = _thread._count()
             if count <= old_count:
                 break
-            if time.monotonic() > deadline:
-                dt = time.monotonic() - start_time
-                msg = (f"wait_threads() failed to cleanup {count - old_count} "
-                       f"threads after {dt:.1f} seconds "
-                       f"(count: {count}, old count: {old_count})")
-                raise AssertionError(msg)
-            time.sleep(0.010)
-            support.gc_collect()
+        else:
+            dt = time.monotonic() - start_time
+            msg = (f"wait_threads() failed to cleanup {count - old_count} "
+                   f"threads after {dt:.1f} seconds "
+                   f"(count: {count}, old count: {old_count})")
+            raise AssertionError(msg)
 
 
 def join_thread(thread, timeout=None):
diff --git a/Lib/test/test_asyncio/utils.py b/Lib/test/test_asyncio/utils.py
index c32494d40ccea..07ef33d3fc244 100644
--- a/Lib/test/test_asyncio/utils.py
+++ b/Lib/test/test_asyncio/utils.py
@@ -109,13 +109,12 @@ async def once():
 
 
 def run_until(loop, pred, timeout=support.SHORT_TIMEOUT):
-    deadline = time.monotonic() + timeout
-    while not pred():
-        if timeout is not None:
-            timeout = deadline - time.monotonic()
-            if timeout <= 0:
-                raise futures.TimeoutError()
+    for _ in support.busy_retry(timeout, error=False):
+        if pred():
+            break
         loop.run_until_complete(tasks.sleep(0.001))
+    else:
+        raise futures.TimeoutError()
 
 
 def run_once(loop):
diff --git a/Lib/test/test_asyncore.py b/Lib/test/test_asyncore.py
index 98ccd3a9304de..1564221f85715 100644
--- a/Lib/test/test_asyncore.py
+++ b/Lib/test/test_asyncore.py
@@ -76,8 +76,7 @@ def capture_server(evt, buf, serv):
         pass
     else:
         n = 200
-        start = time.monotonic()
-        while n > 0 and time.monotonic() - start < 3.0:
+        for _ in support.busy_retry(3.0, error=False):
             r, w, e = select.select([conn], [], [], 0.1)
             if r:
                 n -= 1
@@ -86,6 +85,8 @@ def capture_server(evt, buf, serv):
                 buf.write(data.replace(b'\n', b''))
                 if b'\n' in data:
                     break
+            if n <= 0:
+                break
             time.sleep(0.01)
 
         conn.close()
diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py
index 49545573682d0..d43742ef603f9 100644
--- a/Lib/test/test_logging.py
+++ b/Lib/test/test_logging.py
@@ -3602,7 +3602,6 @@ def do_queuehandler_configuration(self, qspec, lspec):
         if lspec is not None:
             cd['handlers']['ah']['listener'] = lspec
         qh = None
-        delay = 0.01
         try:
             self.apply_config(cd)
             qh = logging.getHandlerByName('ah')
@@ -3612,12 +3611,14 @@ def do_queuehandler_configuration(self, qspec, lspec):
             logging.debug('foo')
             logging.info('bar')
             logging.warning('baz')
+
             # Need to let the listener thread finish its work
-            deadline = time.monotonic() + support.LONG_TIMEOUT
-            while not qh.listener.queue.empty():
-                time.sleep(delay)
-                if time.monotonic() > deadline:
-                    self.fail("queue not empty")
+            for _ in support.sleeping_retry(support.LONG_TIMEOUT, error=False):
+                if qh.listener.queue.empty():
+                    break
+            else:
+                self.fail("queue not empty")
+
             with open(fn, encoding='utf-8') as f:
                 data = f.read().splitlines()
             self.assertEqual(data, ['foo', 'bar', 'baz'])



More information about the Python-checkins mailing list