[pypy-commit] pypy py3.5-eintr-pep475: hg merge py3.5

arigo pypy.commits at gmail.com
Sun Dec 4 05:47:23 EST 2016


Author: Armin Rigo <arigo at tunes.org>
Branch: py3.5-eintr-pep475
Changeset: r88875:1613a0094a88
Date: 2016-12-04 11:39 +0100
http://bitbucket.org/pypy/pypy/changeset/1613a0094a88/

Log:	hg merge py3.5

diff too long, truncating to 2000 out of 28117 lines

diff --git a/lib-python/3/_collections_abc.py b/lib-python/3/_collections_abc.py
--- a/lib-python/3/_collections_abc.py
+++ b/lib-python/3/_collections_abc.py
@@ -156,7 +156,7 @@
     __slots__ = ()
 
     @abstractmethod
-    async def __aiter__(self):
+    def __aiter__(self):
         return AsyncIterator()
 
     @classmethod
@@ -176,7 +176,7 @@
         """Return the next item or raise StopAsyncIteration when exhausted."""
         raise StopAsyncIteration
 
-    async def __aiter__(self):
+    def __aiter__(self):
         return self
 
     @classmethod
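
With this change, __aiter__ becomes a plain method that returns the
iterator directly instead of a coroutine, matching the revised PEP 492
protocol adopted in CPython 3.5.2. A minimal sketch of an async iterator
written against the new protocol (Ticker is a made-up example class):

    class Ticker:
        """Counts from 1 to n asynchronously."""
        def __init__(self, n):
            self.i, self.n = 0, n

        def __aiter__(self):              # plain method: return self directly
            return self

        async def __anext__(self):
            if self.i >= self.n:
                raise StopAsyncIteration  # end of async iteration
            self.i += 1
            return self.i
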
diff --git a/lib-python/3/_compat_pickle.py b/lib-python/3/_compat_pickle.py
--- a/lib-python/3/_compat_pickle.py
+++ b/lib-python/3/_compat_pickle.py
@@ -177,6 +177,13 @@
     'DocXMLRPCServer': 'xmlrpc.server',
     'SimpleHTTPServer': 'http.server',
     'CGIHTTPServer': 'http.server',
+    # For compatibility with broken pickles saved in old Python 3 versions
+    'UserDict': 'collections',
+    'UserList': 'collections',
+    'UserString': 'collections',
+    'whichdb': 'dbm',
+    'StringIO':  'io',
+    'cStringIO': 'io',
 })
 
 REVERSE_IMPORT_MAPPING.update({
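
These new entries extend the import mapping consulted when unpickling
with fix_imports=True, so pickles that reference the long-removed
top-level modules still resolve. A quick sketch of what the table now
contains:

    from _compat_pickle import IMPORT_MAPPING

    # old module names are rewritten to their Python 3 homes
    assert IMPORT_MAPPING['UserDict'] == 'collections'
    assert IMPORT_MAPPING['whichdb'] == 'dbm'
    assert IMPORT_MAPPING['cStringIO'] == 'io'
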
diff --git a/lib-python/3/_osx_support.py b/lib-python/3/_osx_support.py
--- a/lib-python/3/_osx_support.py
+++ b/lib-python/3/_osx_support.py
@@ -151,13 +151,13 @@
     #    can only be found inside Xcode.app if the "Command Line Tools"
     #    are not installed.
     #
-    #    Futhermore, the compiler that can be used varies between
+    #    Furthermore, the compiler that can be used varies between
     #    Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
     #    as the compiler, after that 'clang' should be used because
     #    gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
     #    miscompiles Python.
 
-    # skip checks if the compiler was overriden with a CC env variable
+    # skip checks if the compiler was overridden with a CC env variable
     if 'CC' in os.environ:
         return _config_vars
 
@@ -193,7 +193,7 @@
     if cc != oldcc:
         # Found a replacement compiler.
         # Modify config vars using new compiler, if not already explicitly
-        # overriden by an env variable, preserving additional arguments.
+        # overridden by an env variable, preserving additional arguments.
         for cv in _COMPILER_CONFIG_VARS:
             if cv in _config_vars and cv not in os.environ:
                 cv_split = _config_vars[cv].split()
@@ -207,7 +207,7 @@
     """Remove all universal build arguments from config vars"""
 
     for cv in _UNIVERSAL_CONFIG_VARS:
-        # Do not alter a config var explicitly overriden by env var
+        # Do not alter a config var explicitly overridden by env var
         if cv in _config_vars and cv not in os.environ:
             flags = _config_vars[cv]
             flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII)
@@ -228,7 +228,7 @@
     # build extensions on OSX 10.7 and later with the prebuilt
     # 32-bit installer on the python.org website.
 
-    # skip checks if the compiler was overriden with a CC env variable
+    # skip checks if the compiler was overridden with a CC env variable
     if 'CC' in os.environ:
         return _config_vars
 
@@ -244,7 +244,7 @@
             # across Xcode and compiler versions, there is no reliable way
             # to be sure why it failed.  Assume here it was due to lack of
             # PPC support and remove the related '-arch' flags from each
-            # config variables not explicitly overriden by an environment
+            # config variables not explicitly overridden by an environment
             # variable.  If the error was for some other reason, we hope the
             # failure will show up again when trying to compile an extension
             # module.
@@ -292,7 +292,7 @@
         sdk = m.group(1)
         if not os.path.exists(sdk):
             for cv in _UNIVERSAL_CONFIG_VARS:
-                # Do not alter a config var explicitly overriden by env var
+                # Do not alter a config var explicitly overridden by env var
                 if cv in _config_vars and cv not in os.environ:
                     flags = _config_vars[cv]
                     flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags)
diff --git a/lib-python/3/_pydecimal.py b/lib-python/3/_pydecimal.py
--- a/lib-python/3/_pydecimal.py
+++ b/lib-python/3/_pydecimal.py
@@ -252,7 +252,7 @@
 class ConversionSyntax(InvalidOperation):
     """Trying to convert badly formed string.
 
-    This occurs and signals invalid-operation if an string is being
+    This occurs and signals invalid-operation if a string is being
     converted to a number and it does not conform to the numeric string
     syntax.  The result is [0,qNaN].
     """
@@ -1102,7 +1102,7 @@
     def __pos__(self, context=None):
         """Returns a copy, unless it is a sNaN.
 
-        Rounds the number (if more then precision digits)
+        Rounds the number (if more than precision digits)
         """
         if self._is_special:
             ans = self._check_nans(context=context)
diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py
--- a/lib-python/3/_pyio.py
+++ b/lib-python/3/_pyio.py
@@ -296,8 +296,9 @@
     called.
 
     The basic type used for binary data read from or written to a file is
-    bytes. bytearrays are accepted too, and in some cases (such as
-    readinto) needed. Text I/O classes work with str data.
+    bytes. Other bytes-like objects are accepted as method arguments too. In
+    some cases (such as readinto), a writable object is required. Text I/O
+    classes work with str data.
 
     Note that calling any method (even inquiries) on a closed stream is
     undefined. Implementations may raise OSError in this case.
@@ -390,7 +391,7 @@
     def seekable(self):
         """Return a bool indicating whether object supports random access.
 
-        If False, seek(), tell() and truncate() will raise UnsupportedOperation.
+        If False, seek(), tell() and truncate() will raise OSError.
         This method may need to do a test seek().
         """
         return False
@@ -405,7 +406,7 @@
     def readable(self):
         """Return a bool indicating whether object was opened for reading.
 
-        If False, read() will raise UnsupportedOperation.
+        If False, read() will raise OSError.
         """
         return False
 
@@ -419,7 +420,7 @@
     def writable(self):
         """Return a bool indicating whether object was opened for writing.
 
-        If False, write() and truncate() will raise UnsupportedOperation.
+        If False, write() and truncate() will raise OSError.
         """
         return False
 
@@ -439,7 +440,7 @@
         return self.__closed
 
     def _checkClosed(self, msg=None):
-        """Internal: raise an ValueError if file is closed
+        """Internal: raise a ValueError if file is closed
         """
         if self.closed:
             raise ValueError("I/O operation on closed file."
@@ -596,7 +597,7 @@
             return data
 
     def readinto(self, b):
-        """Read up to len(b) bytes into bytearray b.
+        """Read bytes into a pre-allocated bytes-like object b.
 
         Returns an int representing the number of bytes read (0 for EOF), or
         None if the object is set not to block and has no data to read.
@@ -606,7 +607,8 @@
     def write(self, b):
         """Write the given buffer to the IO stream.
 
-        Returns the number of bytes written, which may be less than len(b).
+        Returns the number of bytes written, which may be less than the
+        length of b in bytes.
         """
         self._unsupported("write")
 
@@ -659,7 +661,7 @@
         self._unsupported("read1")
 
     def readinto(self, b):
-        """Read up to len(b) bytes into bytearray b.
+        """Read bytes into a pre-allocated bytes-like object b.
 
         Like read(), this may issue multiple reads to the underlying raw
         stream, unless the latter is 'interactive'.
@@ -673,7 +675,7 @@
         return self._readinto(b, read1=False)
 
     def readinto1(self, b):
-        """Read up to len(b) bytes into *b*, using at most one system call
+        """Read bytes into buffer *b*, using at most one system call
 
         Returns an int representing the number of bytes read (0 for EOF).
 
@@ -701,8 +703,8 @@
     def write(self, b):
         """Write the given bytes buffer to the IO stream.
 
-        Return the number of bytes written, which is never less than
-        len(b).
+        Return the number of bytes written, which is always the length of b
+        in bytes.
 
         Raises BlockingIOError if the buffer is full and the
         underlying raw stream cannot accept more data at the moment.
@@ -787,12 +789,6 @@
     def seekable(self):
         return self.raw.seekable()
 
-    def readable(self):
-        return self.raw.readable()
-
-    def writable(self):
-        return self.raw.writable()
-
     @property
     def raw(self):
         return self._raw
@@ -890,7 +886,8 @@
             raise ValueError("write to closed file")
         if isinstance(b, str):
             raise TypeError("can't write str to binary stream")
-        n = len(b)
+        with memoryview(b) as view:
+            n = view.nbytes  # Size of any bytes-like object
         if n == 0:
             return 0
         pos = self._pos
@@ -982,6 +979,9 @@
         self._reset_read_buf()
         self._read_lock = Lock()
 
+    def readable(self):
+        return self.raw.readable()
+
     def _reset_read_buf(self):
         self._read_buf = b""
         self._read_pos = 0
@@ -1043,7 +1043,7 @@
                 break
             avail += len(chunk)
             chunks.append(chunk)
-        # n is more then avail only when an EOF occurred or when
+        # n is more than avail only when an EOF occurred or when
         # read() would have blocked.
         n = min(n, avail)
         out = b"".join(chunks)
@@ -1093,14 +1093,13 @@
     def _readinto(self, buf, read1):
         """Read data into *buf* with at most one system call."""
 
-        if len(buf) == 0:
-            return 0
-
         # Need to create a memoryview object of type 'b', otherwise
         # we may not be able to assign bytes to it, and slicing it
         # would create a new object.
         if not isinstance(buf, memoryview):
             buf = memoryview(buf)
+        if buf.nbytes == 0:
+            return 0
         buf = buf.cast('B')
 
         written = 0
@@ -1170,6 +1169,9 @@
         self._write_buf = bytearray()
         self._write_lock = Lock()
 
+    def writable(self):
+        return self.raw.writable()
+
     def write(self, b):
         if self.closed:
             raise ValueError("write to closed file")
diff --git a/lib-python/3/_strptime.py b/lib-python/3/_strptime.py
--- a/lib-python/3/_strptime.py
+++ b/lib-python/3/_strptime.py
@@ -77,6 +77,8 @@
         self.__calc_date_time()
         if _getlang() != self.lang:
             raise ValueError("locale changed during initialization")
+        if time.tzname != self.tzname or time.daylight != self.daylight:
+            raise ValueError("timezone changed during initialization")
 
     def __pad(self, seq, front):
         # Add '' to seq to either the front (is True), else the back.
@@ -161,15 +163,17 @@
 
     def __calc_timezone(self):
         # Set self.timezone by using time.tzname.
-        # Do not worry about possibility of time.tzname[0] == timetzname[1]
-        # and time.daylight; handle that in strptime .
+        # Do not worry about possibility of time.tzname[0] == time.tzname[1]
+        # and time.daylight; handle that in strptime.
         try:
             time.tzset()
         except AttributeError:
             pass
-        no_saving = frozenset({"utc", "gmt", time.tzname[0].lower()})
-        if time.daylight:
-            has_saving = frozenset({time.tzname[1].lower()})
+        self.tzname = time.tzname
+        self.daylight = time.daylight
+        no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()})
+        if self.daylight:
+            has_saving = frozenset({self.tzname[1].lower()})
         else:
             has_saving = frozenset()
         self.timezone = (no_saving, has_saving)
@@ -307,13 +311,15 @@
 
     global _TimeRE_cache, _regex_cache
     with _cache_lock:
-
-        if _getlang() != _TimeRE_cache.locale_time.lang:
+        locale_time = _TimeRE_cache.locale_time
+        if (_getlang() != locale_time.lang or
+            time.tzname != locale_time.tzname or
+            time.daylight != locale_time.daylight):
             _TimeRE_cache = TimeRE()
             _regex_cache.clear()
+            locale_time = _TimeRE_cache.locale_time
         if len(_regex_cache) > _CACHE_MAX_SIZE:
             _regex_cache.clear()
-        locale_time = _TimeRE_cache.locale_time
         format_regex = _regex_cache.get(format)
         if not format_regex:
             try:
@@ -456,6 +462,10 @@
         week_starts_Mon = True if week_of_year_start == 0 else False
         julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
                                             week_starts_Mon)
+        if julian <= 0:
+            year -= 1
+            yday = 366 if calendar.isleap(year) else 365
+            julian += yday
     # Cannot pre-calculate datetime_date() since can change in Julian
     # calculation and thus could have different value for the day of the week
     # calculation.
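
The new julian <= 0 branch handles week-based (%U/%W) dates in week 0
that actually fall in the previous year. A standalone sketch of the
rollover arithmetic (rollover is a made-up helper name):

    import calendar

    def rollover(year, julian):
        # a non-positive day-of-year belongs to the previous year's tail
        if julian <= 0:
            year -= 1
            julian += 366 if calendar.isleap(year) else 365
        return year, julian

    print(rollover(2016, 0))   # (2015, 365), i.e. Dec 31, 2015
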
diff --git a/lib-python/3/asyncio/base_events.py b/lib-python/3/asyncio/base_events.py
--- a/lib-python/3/asyncio/base_events.py
+++ b/lib-python/3/asyncio/base_events.py
@@ -52,6 +52,12 @@
 # before cleanup of cancelled handles is performed.
 _MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
 
+# Exceptions which must not call the exception handler in fatal error
+# methods (_fatal_error())
+_FATAL_ERROR_IGNORE = (BrokenPipeError,
+                       ConnectionResetError, ConnectionAbortedError)
+
+
 def _format_handle(handle):
     cb = handle._callback
     if inspect.ismethod(cb) and isinstance(cb.__self__, tasks.Task):
@@ -70,49 +76,89 @@
         return repr(fd)
 
 
-def _check_resolved_address(sock, address):
-    # Ensure that the address is already resolved to avoid the trap of hanging
-    # the entire event loop when the address requires doing a DNS lookup.
-    #
-    # getaddrinfo() is slow (around 10 us per call): this function should only
-    # be called in debug mode
-    family = sock.family
+# Linux's sock.type is a bitmask that can include extra info about the socket.
+_SOCKET_TYPE_MASK = 0
+if hasattr(socket, 'SOCK_NONBLOCK'):
+    _SOCKET_TYPE_MASK |= socket.SOCK_NONBLOCK
+if hasattr(socket, 'SOCK_CLOEXEC'):
+    _SOCKET_TYPE_MASK |= socket.SOCK_CLOEXEC
 
-    if family == socket.AF_INET:
-        host, port = address
-    elif family == socket.AF_INET6:
-        host, port = address[:2]
-    else:
+
+def _ipaddr_info(host, port, family, type, proto):
+    # Try to skip getaddrinfo if "host" is already an IP. Users might have
+    # handled name resolution in their own code and passed in resolved IPs.
+    if not hasattr(socket, 'inet_pton'):
         return
 
-    # On Windows, socket.inet_pton() is only available since Python 3.4
-    if hasattr(socket, 'inet_pton'):
-        # getaddrinfo() is slow and has known issue: prefer inet_pton()
-        # if available
+    if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
+            host is None:
+        return None
+
+    type &= ~_SOCKET_TYPE_MASK
+    if type == socket.SOCK_STREAM:
+        proto = socket.IPPROTO_TCP
+    elif type == socket.SOCK_DGRAM:
+        proto = socket.IPPROTO_UDP
+    else:
+        return None
+
+    if port is None:
+        port = 0
+    elif isinstance(port, bytes):
+        if port == b'':
+            port = 0
+        else:
+            try:
+                port = int(port)
+            except ValueError:
+                # Might be a service name like b"http".
+                port = socket.getservbyname(port.decode('ascii'))
+    elif isinstance(port, str):
+        if port == '':
+            port = 0
+        else:
+            try:
+                port = int(port)
+            except ValueError:
+                # Might be a service name like "http".
+                port = socket.getservbyname(port)
+
+    if family == socket.AF_UNSPEC:
+        afs = [socket.AF_INET, socket.AF_INET6]
+    else:
+        afs = [family]
+
+    if isinstance(host, bytes):
+        host = host.decode('idna')
+    if '%' in host:
+        # Linux's inet_pton doesn't accept an IPv6 zone index after host,
+        # like '::1%lo0'.
+        return None
+
+    for af in afs:
         try:
-            socket.inet_pton(family, host)
-        except OSError as exc:
-            raise ValueError("address must be resolved (IP address), "
-                             "got host %r: %s"
-                             % (host, exc))
+            socket.inet_pton(af, host)
+            # The host has already been resolved.
+            return af, type, proto, '', (host, port)
+        except OSError:
+            pass
+
+    # "host" is not an IP address.
+    return None
+
+
+def _ensure_resolved(address, *, family=0, type=socket.SOCK_STREAM, proto=0,
+                     flags=0, loop):
+    host, port = address[:2]
+    info = _ipaddr_info(host, port, family, type, proto)
+    if info is not None:
+        # "host" is already a resolved IP.
+        fut = loop.create_future()
+        fut.set_result([info])
+        return fut
     else:
-        # Use getaddrinfo(flags=AI_NUMERICHOST) to ensure that the address is
-        # already resolved.
-        type_mask = 0
-        if hasattr(socket, 'SOCK_NONBLOCK'):
-            type_mask |= socket.SOCK_NONBLOCK
-        if hasattr(socket, 'SOCK_CLOEXEC'):
-            type_mask |= socket.SOCK_CLOEXEC
-        try:
-            socket.getaddrinfo(host, port,
-                               family=family,
-                               type=(sock.type & ~type_mask),
-                               proto=sock.proto,
-                               flags=socket.AI_NUMERICHOST)
-        except socket.gaierror as err:
-            raise ValueError("address must be resolved (IP address), "
-                             "got host %r: %s"
-                             % (host, err))
+        return loop.getaddrinfo(host, port, family=family, type=type,
+                                proto=proto, flags=flags)
 
 
 def _run_until_complete_cb(fut):
@@ -167,7 +213,7 @@
     def wait_closed(self):
         if self.sockets is None or self._waiters is None:
             return
-        waiter = futures.Future(loop=self._loop)
+        waiter = self._loop.create_future()
         self._waiters.append(waiter)
         yield from waiter
 
@@ -201,6 +247,10 @@
                 % (self.__class__.__name__, self.is_running(),
                    self.is_closed(), self.get_debug()))
 
+    def create_future(self):
+        """Create a Future object attached to the loop."""
+        return futures.Future(loop=self)
+
     def create_task(self, coro):
         """Schedule a coroutine object.
 
@@ -494,7 +544,7 @@
             assert not args
             assert not isinstance(func, events.TimerHandle)
             if func._cancelled:
-                f = futures.Future(loop=self)
+                f = self.create_future()
                 f.set_result(None)
                 return f
             func, args = func._callback, func._args
@@ -584,14 +634,14 @@
                 raise ValueError(
                     'host/port and sock can not be specified at the same time')
 
-            f1 = self.getaddrinfo(
-                host, port, family=family,
-                type=socket.SOCK_STREAM, proto=proto, flags=flags)
+            f1 = _ensure_resolved((host, port), family=family,
+                                  type=socket.SOCK_STREAM, proto=proto,
+                                  flags=flags, loop=self)
             fs = [f1]
             if local_addr is not None:
-                f2 = self.getaddrinfo(
-                    *local_addr, family=family,
-                    type=socket.SOCK_STREAM, proto=proto, flags=flags)
+                f2 = _ensure_resolved(local_addr, family=family,
+                                      type=socket.SOCK_STREAM, proto=proto,
+                                      flags=flags, loop=self)
                 fs.append(f2)
             else:
                 f2 = None
@@ -673,7 +723,7 @@
     def _create_connection_transport(self, sock, protocol_factory, ssl,
                                      server_hostname):
         protocol = protocol_factory()
-        waiter = futures.Future(loop=self)
+        waiter = self.create_future()
         if ssl:
             sslcontext = None if isinstance(ssl, bool) else ssl
             transport = self._make_ssl_transport(
@@ -726,9 +776,9 @@
                         assert isinstance(addr, tuple) and len(addr) == 2, (
                             '2-tuple is expected')
 
-                        infos = yield from self.getaddrinfo(
-                            *addr, family=family, type=socket.SOCK_DGRAM,
-                            proto=proto, flags=flags)
+                        infos = yield from _ensure_resolved(
+                            addr, family=family, type=socket.SOCK_DGRAM,
+                            proto=proto, flags=flags, loop=self)
                         if not infos:
                             raise OSError('getaddrinfo() returned empty list')
 
@@ -793,7 +843,7 @@
                 raise exceptions[0]
 
         protocol = protocol_factory()
-        waiter = futures.Future(loop=self)
+        waiter = self.create_future()
         transport = self._make_datagram_transport(
             sock, protocol, r_addr, waiter)
         if self._debug:
@@ -816,9 +866,9 @@
 
     @coroutine
     def _create_server_getaddrinfo(self, host, port, family, flags):
-        infos = yield from self.getaddrinfo(host, port, family=family,
+        infos = yield from _ensure_resolved((host, port), family=family,
                                             type=socket.SOCK_STREAM,
-                                            flags=flags)
+                                            flags=flags, loop=self)
         if not infos:
             raise OSError('getaddrinfo({!r}) returned empty list'.format(host))
         return infos
@@ -839,7 +889,10 @@
         to host and port.
 
         The host parameter can also be a sequence of strings and in that case
-        the TCP server is bound to all hosts of the sequence.
+        the TCP server is bound to all hosts of the sequence. If a host
+        appears multiple times (possibly indirectly, e.g. when hostnames
+        resolve to the same IP address), the server is only bound once to that
+        host.
 
         Return a Server object which can be used to stop the service.
 
@@ -868,7 +921,7 @@
                                                   flags=flags)
                   for host in hosts]
             infos = yield from tasks.gather(*fs, loop=self)
-            infos = itertools.chain.from_iterable(infos)
+            infos = set(itertools.chain.from_iterable(infos))
 
             completed = False
             try:
@@ -929,7 +982,7 @@
     @coroutine
     def connect_read_pipe(self, protocol_factory, pipe):
         protocol = protocol_factory()
-        waiter = futures.Future(loop=self)
+        waiter = self.create_future()
         transport = self._make_read_pipe_transport(pipe, protocol, waiter)
 
         try:
@@ -946,7 +999,7 @@
     @coroutine
     def connect_write_pipe(self, protocol_factory, pipe):
         protocol = protocol_factory()
-        waiter = futures.Future(loop=self)
+        waiter = self.create_future()
         transport = self._make_write_pipe_transport(pipe, protocol, waiter)
 
         try:
@@ -1028,6 +1081,11 @@
             logger.info('%s: %r' % (debug_log, transport))
         return transport, protocol
 
+    def get_exception_handler(self):
+        """Return an exception handler, or None if the default one is in use.
+        """
+        return self._exception_handler
+
     def set_exception_handler(self, handler):
         """Set handler as the new event loop exception handler.
 
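
The new create_future() method gives event loop implementations a hook to
return a faster Future subclass; callers should prefer it over
instantiating futures.Future(loop=loop) directly. A minimal usage sketch:

    import asyncio

    loop = asyncio.new_event_loop()
    fut = loop.create_future()            # instead of asyncio.Future(loop=loop)
    loop.call_soon(fut.set_result, 42)
    print(loop.run_until_complete(fut))   # 42
    loop.close()
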
diff --git a/lib-python/3/asyncio/base_subprocess.py b/lib-python/3/asyncio/base_subprocess.py
--- a/lib-python/3/asyncio/base_subprocess.py
+++ b/lib-python/3/asyncio/base_subprocess.py
@@ -210,6 +210,10 @@
             logger.info('%r exited with return code %r',
                         self, returncode)
         self._returncode = returncode
+        if self._proc.returncode is None:
+            # asyncio uses a child watcher: copy the status into the Popen
+            # object. On Python 3.6, it is required to avoid a ResourceWarning.
+            self._proc.returncode = returncode
         self._call(self._protocol.process_exited)
         self._try_finish()
 
@@ -227,7 +231,7 @@
         if self._returncode is not None:
             return self._returncode
 
-        waiter = futures.Future(loop=self._loop)
+        waiter = self._loop.create_future()
         self._exit_waiters.append(waiter)
         return (yield from waiter)
 
diff --git a/lib-python/3/asyncio/compat.py b/lib-python/3/asyncio/compat.py
--- a/lib-python/3/asyncio/compat.py
+++ b/lib-python/3/asyncio/compat.py
@@ -4,6 +4,7 @@
 
 PY34 = sys.version_info >= (3, 4)
 PY35 = sys.version_info >= (3, 5)
+PY352 = sys.version_info >= (3, 5, 2)
 
 
 def flatten_list_bytes(list_of_data):
diff --git a/lib-python/3/asyncio/coroutines.py b/lib-python/3/asyncio/coroutines.py
--- a/lib-python/3/asyncio/coroutines.py
+++ b/lib-python/3/asyncio/coroutines.py
@@ -27,8 +27,8 @@
 # before you define your coroutines.  A downside of using this feature
 # is that tracebacks show entries for the CoroWrapper.__next__ method
 # when _DEBUG is true.
-_DEBUG = (not sys.flags.ignore_environment
-          and bool(os.environ.get('PYTHONASYNCIODEBUG')))
+_DEBUG = (not sys.flags.ignore_environment and
+          bool(os.environ.get('PYTHONASYNCIODEBUG')))
 
 
 try:
@@ -86,7 +86,7 @@
     def __init__(self, gen, func=None):
         assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
         self.gen = gen
-        self.func = func # Used to unwrap @coroutine decorator
+        self.func = func  # Used to unwrap @coroutine decorator
         self._source_traceback = traceback.extract_stack(sys._getframe(1))
         self.__name__ = getattr(gen, '__name__', None)
         self.__qualname__ = getattr(gen, '__qualname__', None)
@@ -204,7 +204,8 @@
         @functools.wraps(func)
         def coro(*args, **kw):
             res = func(*args, **kw)
-            if isinstance(res, futures.Future) or inspect.isgenerator(res):
+            if isinstance(res, futures.Future) or inspect.isgenerator(res) or \
+                    isinstance(res, CoroWrapper):
                 res = yield from res
             elif _AwaitableABC is not None:
                 # If 'func' returns an Awaitable (new in 3.5) we
@@ -283,10 +284,13 @@
         coro_frame = coro.cr_frame
 
     filename = coro_code.co_filename
-    if (isinstance(coro, CoroWrapper)
-    and not inspect.isgeneratorfunction(coro.func)
-    and coro.func is not None):
-        filename, lineno = events._get_function_source(coro.func)
+    lineno = 0
+    if (isinstance(coro, CoroWrapper) and
+            not inspect.isgeneratorfunction(coro.func) and
+            coro.func is not None):
+        source = events._get_function_source(coro.func)
+        if source is not None:
+            filename, lineno = source
         if coro_frame is None:
             coro_repr = ('%s done, defined at %s:%s'
                          % (coro_name, filename, lineno))
diff --git a/lib-python/3/asyncio/events.py b/lib-python/3/asyncio/events.py
--- a/lib-python/3/asyncio/events.py
+++ b/lib-python/3/asyncio/events.py
@@ -266,6 +266,9 @@
     def time(self):
         raise NotImplementedError
 
+    def create_future(self):
+        raise NotImplementedError
+
     # Method scheduling a coroutine object: create a task.
 
     def create_task(self, coro):
@@ -484,6 +487,9 @@
 
     # Error handlers.
 
+    def get_exception_handler(self):
+        raise NotImplementedError
+
     def set_exception_handler(self, handler):
         raise NotImplementedError
 
diff --git a/lib-python/3/asyncio/futures.py b/lib-python/3/asyncio/futures.py
--- a/lib-python/3/asyncio/futures.py
+++ b/lib-python/3/asyncio/futures.py
@@ -142,7 +142,7 @@
     def __init__(self, *, loop=None):
         """Initialize the future.
 
-        The optional event_loop argument allows to explicitly set the event
+        The optional event_loop argument allows explicitly setting the event
         loop object used by the future. If it's not provided, the future uses
         the default event loop.
         """
@@ -341,6 +341,9 @@
             raise InvalidStateError('{}: {!r}'.format(self._state, self))
         if isinstance(exception, type):
             exception = exception()
+        if type(exception) is StopIteration:
+            raise TypeError("StopIteration interacts badly with generators "
+                            "and cannot be raised into a Future")
         self._exception = exception
         self._state = _FINISHED
         self._schedule_callbacks()
@@ -448,6 +451,8 @@
         return future
     assert isinstance(future, concurrent.futures.Future), \
         'concurrent.futures.Future is expected, got {!r}'.format(future)
-    new_future = Future(loop=loop)
+    if loop is None:
+        loop = events.get_event_loop()
+    new_future = loop.create_future()
     _chain_future(future, new_future)
     return new_future
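
The StopIteration guard exists because a StopIteration raised into a
generator-based coroutine would silently end it rather than propagate. A
sketch of the new behaviour:

    import asyncio

    loop = asyncio.new_event_loop()
    fut = loop.create_future()
    try:
        fut.set_exception(StopIteration())   # rejected instead of stored
    except TypeError as exc:
        print(exc)
    loop.close()
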
diff --git a/lib-python/3/asyncio/locks.py b/lib-python/3/asyncio/locks.py
--- a/lib-python/3/asyncio/locks.py
+++ b/lib-python/3/asyncio/locks.py
@@ -111,7 +111,7 @@
     acquire() is a coroutine and should be called with 'yield from'.
 
     Locks also support the context management protocol.  '(yield from lock)'
-    should be used as context manager expression.
+    should be used as the context manager expression.
 
     Usage:
 
@@ -170,7 +170,7 @@
             self._locked = True
             return True
 
-        fut = futures.Future(loop=self._loop)
+        fut = self._loop.create_future()
         self._waiters.append(fut)
         try:
             yield from fut
@@ -258,7 +258,7 @@
         if self._value:
             return True
 
-        fut = futures.Future(loop=self._loop)
+        fut = self._loop.create_future()
         self._waiters.append(fut)
         try:
             yield from fut
@@ -320,7 +320,7 @@
 
         self.release()
         try:
-            fut = futures.Future(loop=self._loop)
+            fut = self._loop.create_future()
             self._waiters.append(fut)
             try:
                 yield from fut
@@ -329,7 +329,13 @@
                 self._waiters.remove(fut)
 
         finally:
-            yield from self.acquire()
+            # Must reacquire lock even if wait is cancelled
+            while True:
+                try:
+                    yield from self.acquire()
+                    break
+                except futures.CancelledError:
+                    pass
 
     @coroutine
     def wait_for(self, predicate):
@@ -433,7 +439,7 @@
         True.
         """
         while self._value <= 0:
-            fut = futures.Future(loop=self._loop)
+            fut = self._loop.create_future()
             self._waiters.append(fut)
             try:
                 yield from fut
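
The reacquire loop means Condition.wait() now leaves the lock held even
when the wait itself is cancelled, an invariant the standard consumer
pattern depends on. A sketch (cond and items are assumed to be a shared
asyncio.Condition and list):

    import asyncio

    @asyncio.coroutine
    def consumer(cond, items):
        with (yield from cond):           # acquire the condition's lock
            while not items:
                yield from cond.wait()    # lock is reacquired even on cancel
            return items.pop()
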
diff --git a/lib-python/3/asyncio/proactor_events.py b/lib-python/3/asyncio/proactor_events.py
--- a/lib-python/3/asyncio/proactor_events.py
+++ b/lib-python/3/asyncio/proactor_events.py
@@ -90,7 +90,7 @@
                 self.close()
 
     def _fatal_error(self, exc, message='Fatal error on pipe transport'):
-        if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
+        if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
             if self._loop.get_debug():
                 logger.debug("%r: %s", self, message, exc_info=True)
         else:
@@ -440,15 +440,7 @@
         return self._proactor.send(sock, data)
 
     def sock_connect(self, sock, address):
-        try:
-            if self._debug:
-                base_events._check_resolved_address(sock, address)
-        except ValueError as err:
-            fut = futures.Future(loop=self)
-            fut.set_exception(err)
-            return fut
-        else:
-            return self._proactor.connect(sock, address)
+        return self._proactor.connect(sock, address)
 
     def sock_accept(self, sock):
         return self._proactor.accept(sock)
diff --git a/lib-python/3/asyncio/queues.py b/lib-python/3/asyncio/queues.py
--- a/lib-python/3/asyncio/queues.py
+++ b/lib-python/3/asyncio/queues.py
@@ -128,7 +128,7 @@
         This method is a coroutine.
         """
         while self.full():
-            putter = futures.Future(loop=self._loop)
+            putter = self._loop.create_future()
             self._putters.append(putter)
             try:
                 yield from putter
@@ -162,7 +162,7 @@
         This method is a coroutine.
         """
         while self.empty():
-            getter = futures.Future(loop=self._loop)
+            getter = self._loop.create_future()
             self._getters.append(getter)
             try:
                 yield from getter
diff --git a/lib-python/3/asyncio/selector_events.py b/lib-python/3/asyncio/selector_events.py
--- a/lib-python/3/asyncio/selector_events.py
+++ b/lib-python/3/asyncio/selector_events.py
@@ -196,7 +196,7 @@
         transport = None
         try:
             protocol = protocol_factory()
-            waiter = futures.Future(loop=self)
+            waiter = self.create_future()
             if sslcontext:
                 transport = self._make_ssl_transport(
                     conn, protocol, sslcontext, waiter=waiter,
@@ -314,7 +314,7 @@
         """
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
-        fut = futures.Future(loop=self)
+        fut = self.create_future()
         self._sock_recv(fut, False, sock, n)
         return fut
 
@@ -352,7 +352,7 @@
         """
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
-        fut = futures.Future(loop=self)
+        fut = self.create_future()
         if data:
             self._sock_sendall(fut, False, sock, data)
         else:
@@ -385,25 +385,28 @@
     def sock_connect(self, sock, address):
         """Connect to a remote socket at address.
 
-        The address must be already resolved to avoid the trap of hanging the
-        entire event loop when the address requires doing a DNS lookup. For
-        example, it must be an IP address, not an hostname, for AF_INET and
-        AF_INET6 address families. Use getaddrinfo() to resolve the hostname
-        asynchronously.
-
         This method is a coroutine.
         """
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
-        fut = futures.Future(loop=self)
+
+        fut = self.create_future()
+        if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX:
+            self._sock_connect(fut, sock, address)
+        else:
+            resolved = base_events._ensure_resolved(address, loop=self)
+            resolved.add_done_callback(
+                lambda resolved: self._on_resolved(fut, sock, resolved))
+
+        return fut
+
+    def _on_resolved(self, fut, sock, resolved):
         try:
-            if self._debug:
-                base_events._check_resolved_address(sock, address)
-        except ValueError as err:
-            fut.set_exception(err)
+            _, _, _, _, address = resolved.result()[0]
+        except Exception as exc:
+            fut.set_exception(exc)
         else:
             self._sock_connect(fut, sock, address)
-        return fut
 
     def _sock_connect(self, fut, sock, address):
         fd = sock.fileno()
@@ -454,7 +457,7 @@
         """
         if self._debug and sock.gettimeout() != 0:
             raise ValueError("the socket must be non-blocking")
-        fut = futures.Future(loop=self)
+        fut = self.create_future()
         self._sock_accept(fut, False, sock)
         return fut
 
@@ -566,6 +569,7 @@
         self._loop.remove_reader(self._sock_fd)
         if not self._buffer:
             self._conn_lost += 1
+            self._loop.remove_writer(self._sock_fd)
             self._loop.call_soon(self._call_connection_lost, None)
 
     # On Python 3.3 and older, objects with a destructor part of a reference
@@ -579,8 +583,7 @@
 
     def _fatal_error(self, exc, message='Fatal error on transport'):
         # Should be called from exception handler only.
-        if isinstance(exc, (BrokenPipeError,
-                            ConnectionResetError, ConnectionAbortedError)):
+        if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
             if self._loop.get_debug():
                 logger.debug("%r: %s", self, message, exc_info=True)
         else:
@@ -660,6 +663,8 @@
             logger.debug("%r resumes reading", self)
 
     def _read_ready(self):
+        if self._conn_lost:
+            return
         try:
             data = self._sock.recv(self.max_size)
         except (BlockingIOError, InterruptedError):
@@ -683,8 +688,8 @@
 
     def write(self, data):
         if not isinstance(data, (bytes, bytearray, memoryview)):
-            raise TypeError('data argument must be byte-ish (%r)',
-                            type(data))
+            raise TypeError('data argument must be a bytes-like object, '
+                            'not %r' % type(data).__name__)
         if self._eof:
             raise RuntimeError('Cannot call write() after write_eof()')
         if not data:
@@ -719,6 +724,8 @@
     def _write_ready(self):
         assert self._buffer, 'Data should not be empty'
 
+        if self._conn_lost:
+            return
         try:
             n = self._sock.send(self._buffer)
         except (BlockingIOError, InterruptedError):
@@ -889,6 +896,8 @@
             logger.debug("%r resumes reading", self)
 
     def _read_ready(self):
+        if self._conn_lost:
+            return
         if self._write_wants_read:
             self._write_wants_read = False
             self._write_ready()
@@ -921,6 +930,8 @@
                     self.close()
 
     def _write_ready(self):
+        if self._conn_lost:
+            return
         if self._read_wants_write:
             self._read_wants_write = False
             self._read_ready()
@@ -955,8 +966,8 @@
 
     def write(self, data):
         if not isinstance(data, (bytes, bytearray, memoryview)):
-            raise TypeError('data argument must be byte-ish (%r)',
-                            type(data))
+            raise TypeError('data argument must be a bytes-like object, '
+                            'not %r' % type(data).__name__)
         if not data:
             return
 
@@ -998,6 +1009,8 @@
         return sum(len(data) for data, _ in self._buffer)
 
     def _read_ready(self):
+        if self._conn_lost:
+            return
         try:
             data, addr = self._sock.recvfrom(self.max_size)
         except (BlockingIOError, InterruptedError):
@@ -1011,8 +1024,8 @@
 
     def sendto(self, data, addr=None):
         if not isinstance(data, (bytes, bytearray, memoryview)):
-            raise TypeError('data argument must be byte-ish (%r)',
-                            type(data))
+            raise TypeError('data argument must be a bytes-like object, '
+                            'not %r' % type(data).__name__)
         if not data:
             return
 
diff --git a/lib-python/3/asyncio/sslproto.py b/lib-python/3/asyncio/sslproto.py
--- a/lib-python/3/asyncio/sslproto.py
+++ b/lib-python/3/asyncio/sslproto.py
@@ -603,7 +603,7 @@
         self._wakeup_waiter()
         self._session_established = True
         # In case transport.write() was already called. Don't call
-        # immediatly _process_write_backlog(), but schedule it:
+        # immediately _process_write_backlog(), but schedule it:
         # _on_handshake_complete() can be called indirectly from
         # _process_write_backlog(), and _process_write_backlog() is not
         # reentrant.
@@ -655,7 +655,7 @@
 
     def _fatal_error(self, exc, message='Fatal error on transport'):
         # Should be called from exception handler only.
-        if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
+        if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
             if self._loop.get_debug():
                 logger.debug("%r: %s", self, message, exc_info=True)
         else:
diff --git a/lib-python/3/asyncio/streams.py b/lib-python/3/asyncio/streams.py
--- a/lib-python/3/asyncio/streams.py
+++ b/lib-python/3/asyncio/streams.py
@@ -3,6 +3,7 @@
 __all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol',
            'open_connection', 'start_server',
            'IncompleteReadError',
+           'LimitOverrunError',
            ]
 
 import socket
@@ -13,13 +14,12 @@
 from . import coroutines
 from . import compat
 from . import events
-from . import futures
 from . import protocols
 from .coroutines import coroutine
 from .log import logger
 
 
-_DEFAULT_LIMIT = 2**16
+_DEFAULT_LIMIT = 2 ** 16
 
 
 class IncompleteReadError(EOFError):
@@ -27,15 +27,26 @@
     Incomplete read error. Attributes:
 
     - partial: read bytes string before the end of stream was reached
-    - expected: total number of expected bytes
+    - expected: total number of expected bytes (or None if unknown)
     """
     def __init__(self, partial, expected):
-        EOFError.__init__(self, "%s bytes read on a total of %s expected bytes"
-                                % (len(partial), expected))
+        super().__init__("%d bytes read on a total of %r expected bytes"
+                         % (len(partial), expected))
         self.partial = partial
         self.expected = expected
 
 
+class LimitOverrunError(Exception):
+    """Reached the buffer limit while looking for a separator.
+
+    Attributes:
+    - consumed: total number of bytes to be consumed.
+    """
+    def __init__(self, message, consumed):
+        super().__init__(message)
+        self.consumed = consumed
+
+
 @coroutine
 def open_connection(host=None, port=None, *,
                     loop=None, limit=_DEFAULT_LIMIT, **kwds):
@@ -118,7 +129,6 @@
         writer = StreamWriter(transport, protocol, reader, loop)
         return reader, writer
 
-
     @coroutine
     def start_unix_server(client_connected_cb, path=None, *,
                           loop=None, limit=_DEFAULT_LIMIT, **kwds):
@@ -196,7 +206,7 @@
             return
         waiter = self._drain_waiter
         assert waiter is None or waiter.cancelled()
-        waiter = futures.Future(loop=self._loop)
+        waiter = self._loop.create_future()
         self._drain_waiter = waiter
         yield from waiter
 
@@ -215,9 +225,11 @@
         self._stream_reader = stream_reader
         self._stream_writer = None
         self._client_connected_cb = client_connected_cb
+        self._over_ssl = False
 
     def connection_made(self, transport):
         self._stream_reader.set_transport(transport)
+        self._over_ssl = transport.get_extra_info('sslcontext') is not None
         if self._client_connected_cb is not None:
             self._stream_writer = StreamWriter(transport, self,
                                                self._stream_reader,
@@ -228,17 +240,25 @@
                 self._loop.create_task(res)
 
     def connection_lost(self, exc):
-        if exc is None:
-            self._stream_reader.feed_eof()
-        else:
-            self._stream_reader.set_exception(exc)
+        if self._stream_reader is not None:
+            if exc is None:
+                self._stream_reader.feed_eof()
+            else:
+                self._stream_reader.set_exception(exc)
         super().connection_lost(exc)
+        self._stream_reader = None
+        self._stream_writer = None
 
     def data_received(self, data):
         self._stream_reader.feed_data(data)
 
     def eof_received(self):
         self._stream_reader.feed_eof()
+        if self._over_ssl:
+            # Prevent a warning in SSLProtocol.eof_received:
+            # "returning true from eof_received()
+            # has no effect when using ssl"
+            return False
         return True
 
 
@@ -318,6 +338,10 @@
     def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
         # The line length limit is  a security feature;
         # it also doubles as half the buffer limit.
+
+        if limit <= 0:
+            raise ValueError('Limit cannot be <= 0')
+
         self._limit = limit
         if loop is None:
             self._loop = events.get_event_loop()
@@ -361,7 +385,7 @@
                 waiter.set_exception(exc)
 
     def _wakeup_waiter(self):
-        """Wakeup read() or readline() function waiting for data or EOF."""
+        """Wakeup read*() functions waiting for data or EOF."""
         waiter = self._waiter
         if waiter is not None:
             self._waiter = None
@@ -395,8 +419,8 @@
         self._wakeup_waiter()
 
         if (self._transport is not None and
-            not self._paused and
-            len(self._buffer) > 2*self._limit):
+                not self._paused and
+                len(self._buffer) > 2 * self._limit):
             try:
                 self._transport.pause_reading()
             except NotImplementedError:
@@ -409,7 +433,10 @@
 
     @coroutine
     def _wait_for_data(self, func_name):
-        """Wait until feed_data() or feed_eof() is called."""
+        """Wait until feed_data() or feed_eof() is called.
+
+        If the stream was paused, automatically resume it.
+        """
         # StreamReader uses a future to link the protocol feed_data() method
         # to a read coroutine. Running two read coroutines at the same time
         # would have an unexpected behaviour. It would not be possible to know
@@ -418,7 +445,14 @@
             raise RuntimeError('%s() called while another coroutine is '
                                'already waiting for incoming data' % func_name)
 
-        self._waiter = futures.Future(loop=self._loop)
+        assert not self._eof, '_wait_for_data after EOF'
+
+        # Waiting for data while paused would deadlock, so prevent it.
+        if self._paused:
+            self._paused = False
+            self._transport.resume_reading()
+
+        self._waiter = self._loop.create_future()
         try:
             yield from self._waiter
         finally:
@@ -426,43 +460,154 @@
 
     @coroutine
     def readline(self):
+        """Read chunk of data from the stream until newline (b'\n') is found.
+
+        On success, return chunk that ends with newline. If only partial
+        line can be read due to EOF, return incomplete line without
+        terminating newline. When EOF was reached while no bytes read, empty
+        bytes object is returned.
+
+        If limit is reached, ValueError will be raised. In that case, if
+        newline was found, complete line including newline will be removed
+        from internal buffer. Else, internal buffer will be cleared. Limit is
+        compared against part of the line without newline.
+
+        If stream was paused, this function will automatically resume it if
+        needed.
+        """
+        sep = b'\n'
+        seplen = len(sep)
+        try:
+            line = yield from self.readuntil(sep)
+        except IncompleteReadError as e:
+            return e.partial
+        except LimitOverrunError as e:
+            if self._buffer.startswith(sep, e.consumed):
+                del self._buffer[:e.consumed + seplen]
+            else:
+                self._buffer.clear()
+            self._maybe_resume_transport()
+            raise ValueError(e.args[0])
+        return line
+
+    @coroutine
+    def readuntil(self, separator=b'\n'):
+        """Read data from the stream until ``separator`` is found.
+
+        On success, the data and separator will be removed from the
+        internal buffer (consumed). Returned data will include the
+        separator at the end.
+
+        The configured stream limit is used to check the result. The limit
+        sets the maximal length of data that can be returned, not counting
+        the separator.
+
+        If an EOF occurs and the complete separator is still not found,
+        an IncompleteReadError exception will be raised, and the internal
+        buffer will be reset.  The IncompleteReadError.partial attribute
+        may contain a portion of the separator.
+
+        If the data cannot be read because the limit is exceeded, a
+        LimitOverrunError exception will be raised, and the data will
+        be left in the internal buffer, so it can be read again.
+        """
+        seplen = len(separator)
+        if seplen == 0:
+            raise ValueError('Separator should be at least one-byte string')
+
         if self._exception is not None:
             raise self._exception
 
-        line = bytearray()
-        not_enough = True
+        # Consume the whole buffer except the last bytes, whose length
+        # is one less than seplen. Consider the corner cases with
+        # separator='SEPARATOR':
+        # * we have received an almost complete separator (without the
+        #   last byte), i.e. buffer='some textSEPARATO'. In this case we
+        #   can safely consume len(separator) - 1 bytes.
+        # * the last byte of the buffer is the first byte of the
+        #   separator, i.e. buffer='abcdefghijklmnopqrS'. We may safely
+        #   consume everything except that last byte, but this requires
+        #   analyzing the bytes of the buffer that match a partial
+        #   separator. That is slow and/or requires an FSM. For this
+        #   case our implementation is not optimal: it rescans data that
+        #   is known not to belong to the separator. In the real world,
+        #   the separator will not be long enough for this to cause
+        #   noticeable performance problems. Even when reading
+        #   MIME-encoded messages :)
 
-        while not_enough:
-            while self._buffer and not_enough:
-                ichar = self._buffer.find(b'\n')
-                if ichar < 0:
-                    line.extend(self._buffer)
-                    self._buffer.clear()
-                else:
-                    ichar += 1
-                    line.extend(self._buffer[:ichar])
-                    del self._buffer[:ichar]
-                    not_enough = False
+        # `offset` is the number of bytes from the beginning of the buffer
+        # where there is no occurrence of `separator`.
+        offset = 0
 
-                if len(line) > self._limit:
-                    self._maybe_resume_transport()
-                    raise ValueError('Line is too long')
+        # Loop until we find `separator` in the buffer, exceed the limit,
+        # or an EOF has happened.
+        while True:
+            buflen = len(self._buffer)
 
+            # Check if we now have enough data in the buffer for `separator` to
+            # fit.
+            if buflen - offset >= seplen:
+                isep = self._buffer.find(separator, offset)
+
+                if isep != -1:
+                    # `separator` is in the buffer. `isep` will be used later
+                    # to retrieve the data.
+                    break
+
+                # See the comment above for an explanation.
+                offset = buflen + 1 - seplen
+                if offset > self._limit:
+                    raise LimitOverrunError(
+                        'Separator is not found, and chunk exceed the limit',
+                        offset)
+
+            # A complete message (with a full separator) may be present in
+            # the buffer even when the EOF flag is set. This may happen when
+            # the last chunk adds the data that completes the separator.
+            # That's why we check for EOF *after* inspecting the buffer.
             if self._eof:
-                break
+                chunk = bytes(self._buffer)
+                self._buffer.clear()
+                raise IncompleteReadError(chunk, None)
 
-            if not_enough:
-                yield from self._wait_for_data('readline')
+            # _wait_for_data() will resume reading if stream was paused.
+            yield from self._wait_for_data('readuntil')
 
+        if isep > self._limit:
+            raise LimitOverrunError(
+                'Separator is found, but chunk is longer than limit', isep)
+
+        chunk = self._buffer[:isep + seplen]
+        del self._buffer[:isep + seplen]
         self._maybe_resume_transport()
-        return bytes(line)
+        return bytes(chunk)
 
     @coroutine
     def read(self, n=-1):
+        """Read up to `n` bytes from the stream.
+
+        If n is not provided, or set to -1, read until EOF and return all
+        read bytes. If EOF was received and the internal buffer is empty,
+        return an empty bytes object.
+
+        If n is zero, return an empty bytes object immediately.
+
+        If n is positive, this function tries to read `n` bytes, and may
+        return fewer bytes than requested, but at least one byte. If EOF
+        was received before any byte is read, this function returns an
+        empty bytes object.
+
+        The returned value is not limited by the limit configured at
+        stream creation.
+
+        If the stream was paused, this function will automatically resume
+        it if needed.
+        """
+
         if self._exception is not None:
             raise self._exception
 
-        if not n:
+        if n == 0:
             return b''
 
         if n < 0:
@@ -477,26 +622,42 @@
                     break
                 blocks.append(block)
             return b''.join(blocks)
-        else:
-            if not self._buffer and not self._eof:
-                yield from self._wait_for_data('read')
 
-        if n < 0 or len(self._buffer) <= n:
-            data = bytes(self._buffer)
-            self._buffer.clear()
-        else:
-            # n > 0 and len(self._buffer) > n
-            data = bytes(self._buffer[:n])
-            del self._buffer[:n]
+        if not self._buffer and not self._eof:
+            yield from self._wait_for_data('read')
+
+        # This will work right even if buffer is less than n bytes
+        data = bytes(self._buffer[:n])
+        del self._buffer[:n]
 
         self._maybe_resume_transport()
         return data
 
     @coroutine
     def readexactly(self, n):
+        """Read exactly `n` bytes.
+
+        Raise an IncompleteReadError if EOF is reached before `n` bytes can be
+        read. The IncompleteReadError.partial attribute of the exception will
+        contain the partially read bytes.
+
+        If n is zero, return an empty bytes object.
+
+        The returned value is not limited by the limit configured at
+        stream creation.
+
+        If the stream was paused, this function will automatically resume
+        it if needed.
+        """
+        if n < 0:
+            raise ValueError('readexactly size can not be less than zero')
+
         if self._exception is not None:
             raise self._exception
 
+        if n == 0:
+            return b''
+
         # There used to be "optimized" code here.  It created its own
         # Future and waited until self._buffer had at least the n
         # bytes, then called read(n).  Unfortunately, this could pause
@@ -513,6 +674,8 @@
             blocks.append(block)
             n -= len(block)
 
+        assert n == 0
+
         return b''.join(blocks)
 
     if compat.PY35:
@@ -526,3 +689,9 @@
             if val == b'':
                 raise StopAsyncIteration
             return val
+
+    if compat.PY352:
+        # In Python 3.5.2 and greater, __aiter__ should return
+        # the asynchronous iterator directly.
+        def __aiter__(self):
+            return self
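
The new readuntil() generalizes readline() to arbitrary separators, with
IncompleteReadError on EOF and LimitOverrunError when the buffer limit is
hit. A sketch of typical use (read_headers is a made-up helper):

    import asyncio

    @asyncio.coroutine
    def read_headers(reader):
        try:
            # read up to and including the blank-line separator
            return (yield from reader.readuntil(b'\r\n\r\n'))
        except asyncio.IncompleteReadError as exc:
            return exc.partial            # EOF arrived before the separator
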
diff --git a/lib-python/3/asyncio/subprocess.py b/lib-python/3/asyncio/subprocess.py
--- a/lib-python/3/asyncio/subprocess.py
+++ b/lib-python/3/asyncio/subprocess.py
@@ -166,7 +166,7 @@
 
     @coroutine
     def communicate(self, input=None):
-        if input:
+        if input is not None:
             stdin = self._feed_stdin(input)
         else:
             stdin = self._noop()
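The `input is not None` fix above is about empty input: communicate(b'') should still write nothing to stdin and then close it, rather than skipping the feed entirely and leaving the child waiting. A sketch of the user-visible effect, assuming a POSIX `cat` is available:

    import asyncio

    async def echo_empty():
        proc = await asyncio.create_subprocess_exec(
            'cat',
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE)
        # With the fix, stdin is fed b'' and closed, so cat sees EOF
        # and terminates instead of blocking forever.
        out, _ = await proc.communicate(input=b'')
        return out

    print(asyncio.get_event_loop().run_until_complete(echo_empty()))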
diff --git a/lib-python/3/asyncio/tasks.py b/lib-python/3/asyncio/tasks.py
--- a/lib-python/3/asyncio/tasks.py
+++ b/lib-python/3/asyncio/tasks.py
@@ -251,7 +251,13 @@
         else:
             if isinstance(result, futures.Future):
                 # Yielded Future must come from Future.__iter__().
-                if result._blocking:
+                if result._loop is not self._loop:
+                    self._loop.call_soon(
+                        self._step,
+                        RuntimeError(
+                            'Task {!r} got Future {!r} attached to a '
+                            'different loop'.format(self, result)))
+                elif result._blocking:
                     result._blocking = False
                     result.add_done_callback(self._wakeup)
                     self._fut_waiter = result
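The new `_loop` check above turns a silent hang into an explicit failure when a task awaits a Future that belongs to another event loop. A minimal sketch of the failure mode it diagnoses:

    import asyncio

    loop_a = asyncio.new_event_loop()
    loop_b = asyncio.new_event_loop()

    async def await_foreign():
        fut = loop_b.create_future()   # bound to loop_b...
        await fut                      # ...awaited from a task on loop_a

    # With the hunk above this raises RuntimeError
    # ("Task ... got Future ... attached to a different loop") promptly.
    try:
        loop_a.run_until_complete(await_foreign())
    except RuntimeError as exc:
        print(exc)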
@@ -366,7 +372,7 @@
     if timeout is None:
         return (yield from fut)
 
-    waiter = futures.Future(loop=loop)
+    waiter = loop.create_future()
     timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
     cb = functools.partial(_release_waiter, waiter)
 
@@ -394,12 +400,12 @@
 
 @coroutine
 def _wait(fs, timeout, return_when, loop):
-    """Internal helper for wait() and _wait_for().
+    """Internal helper for wait() and wait_for().
 
     The fs argument must be a collection of Futures.
     """
     assert fs, 'Set of Futures is empty.'
-    waiter = futures.Future(loop=loop)
+    waiter = loop.create_future()
     timeout_handle = None
     if timeout is not None:
         timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
@@ -500,7 +506,9 @@
         yield
         return result
 
-    future = futures.Future(loop=loop)
+    if loop is None:
+        loop = events.get_event_loop()
+    future = loop.create_future()
     h = future._loop.call_later(delay,
                                 futures._set_result_unless_cancelled,
                                 future, result)
@@ -597,7 +605,9 @@
     be cancelled.)
     """
     if not coros_or_futures:
-        outer = futures.Future(loop=loop)
+        if loop is None:
+            loop = events.get_event_loop()
+        outer = loop.create_future()
         outer.set_result([])
         return outer
 
@@ -685,7 +695,7 @@
         # Shortcut.
         return inner
     loop = inner._loop
-    outer = futures.Future(loop=loop)
+    outer = loop.create_future()
 
     def _done_callback(inner):
         if outer.cancelled():
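The shield() hunk is the same create_future() substitution; for reference, a sketch of what shield() guarantees (cancelling the outer await leaves the inner task running):

    import asyncio

    async def slow():
        await asyncio.sleep(10)
        return 'done'

    async def caller():
        inner = asyncio.ensure_future(slow())
        try:
            # The timeout cancels only the shielding future;
            # ``inner`` itself is not cancelled by it.
            return await asyncio.wait_for(asyncio.shield(inner), 0.1)
        except asyncio.TimeoutError:
            inner.cancel()
            await asyncio.sleep(0)   # let the cancellation propagate
            return 'timed out; inner was cancelled explicitly'

    loop = asyncio.new_event_loop()
    print(loop.run_until_complete(caller()))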
diff --git a/lib-python/3/asyncio/test_utils.py b/lib-python/3/asyncio/test_utils.py
--- a/lib-python/3/asyncio/test_utils.py
+++ b/lib-python/3/asyncio/test_utils.py
@@ -446,9 +446,14 @@
     finally:
         logger.setLevel(old_level)
 
-def mock_nonblocking_socket():
+
+def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM,
+                            family=socket.AF_INET):
     """Create a mock of a non-blocking socket."""
-    sock = mock.Mock(socket.socket)
+    sock = mock.MagicMock(socket.socket)
+    sock.proto = proto
+    sock.type = type
+    sock.family = family
     sock.gettimeout.return_value = 0.0
     return sock
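The widened test helper above stamps proto/type/family onto the mock, so code under test that inspects those socket attributes sees consistent values. Standalone, the equivalent is just:

    import socket
    from unittest import mock

    sock = mock.MagicMock(socket.socket)   # spec'd on the real class
    sock.proto = socket.IPPROTO_TCP
    sock.type = socket.SOCK_STREAM
    sock.family = socket.AF_INET
    sock.gettimeout.return_value = 0.0     # looks non-blocking

    assert sock.gettimeout() == 0.0
    assert sock.family == socket.AF_INET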
 
diff --git a/lib-python/3/asyncio/unix_events.py b/lib-python/3/asyncio/unix_events.py
--- a/lib-python/3/asyncio/unix_events.py
+++ b/lib-python/3/asyncio/unix_events.py
@@ -177,7 +177,7 @@
                                    stdin, stdout, stderr, bufsize,
                                    extra=None, **kwargs):
         with events.get_child_watcher() as watcher:
-            waiter = futures.Future(loop=self)
+            waiter = self.create_future()
             transp = _UnixSubprocessTransport(self, protocol, args, shell,
                                               stdin, stdout, stderr, bufsize,
                                               waiter=waiter, extra=extra,
@@ -329,14 +329,17 @@
         elif self._closing:
             info.append('closing')
         info.append('fd=%s' % self._fileno)
-        if self._pipe is not None:
+        selector = getattr(self._loop, '_selector', None)
+        if self._pipe is not None and selector is not None:
             polling = selector_events._test_selector_event(
-                          self._loop._selector,
+                          selector,
                           self._fileno, selectors.EVENT_READ)
             if polling:
                 info.append('polling')
             else:
                 info.append('idle')
+        elif self._pipe is not None:
+            info.append('open')
         else:
             info.append('closed')
         return '<%s>' % ' '.join(info)
@@ -453,9 +456,10 @@
         elif self._closing:
             info.append('closing')
         info.append('fd=%s' % self._fileno)
-        if self._pipe is not None:
+        selector = getattr(self._loop, '_selector', None)
+        if self._pipe is not None and selector is not None:
             polling = selector_events._test_selector_event(
-                          self._loop._selector,
+                          selector,
                           self._fileno, selectors.EVENT_WRITE)
             if polling:
                 info.append('polling')
@@ -464,6 +468,8 @@
 
             bufsize = self.get_write_buffer_size()
             info.append('bufsize=%s' % bufsize)
+        elif self._pipe is not None:
+            info.append('open')
         else:
             info.append('closed')
         return '<%s>' % ' '.join(info)
@@ -575,7 +581,7 @@
 
     def _fatal_error(self, exc, message='Fatal error on pipe transport'):
         # should be called by exception handler only
-        if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
+        if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
             if self._loop.get_debug():
                 logger.debug("%r: %s", self, message, exc_info=True)
         else:
diff --git a/lib-python/3/asyncio/windows_events.py b/lib-python/3/asyncio/windows_events.py
--- a/lib-python/3/asyncio/windows_events.py
+++ b/lib-python/3/asyncio/windows_events.py
@@ -197,7 +197,7 @@
         #
         # If the IocpProactor already received the event, it's safe to call
         # _unregister() because we kept a reference to the Overlapped object
-        # which is used as an unique key.
+        # which is used as a unique key.
         self._proactor._unregister(self._ov)
         self._proactor = None
 
@@ -366,7 +366,7 @@
     def _make_subprocess_transport(self, protocol, args, shell,
                                    stdin, stdout, stderr, bufsize,
                                    extra=None, **kwargs):
-        waiter = futures.Future(loop=self)
+        waiter = self.create_future()
         transp = _WindowsSubprocessTransport(self, protocol, args, shell,
                                              stdin, stdout, stderr, bufsize,
                                              waiter=waiter, extra=extra,
@@ -417,7 +417,7 @@
         return tmp
 
     def _result(self, value):
-        fut = futures.Future(loop=self._loop)
+        fut = self._loop.create_future()
         fut.set_result(value)
         return fut
 
diff --git a/lib-python/3/base64.py b/lib-python/3/base64.py
--- a/lib-python/3/base64.py
+++ b/lib-python/3/base64.py
@@ -12,7 +12,7 @@
 
 
 __all__ = [
-    # Legacy interface exports traditional RFC 1521 Base64 encodings
+    # Legacy interface exports traditional RFC 2045 Base64 encodings
     'encode', 'decode', 'encodebytes', 'decodebytes',
     # Generalized interface for other encodings
     'b64encode', 'b64decode', 'b32encode', 'b32decode',
@@ -49,14 +49,11 @@
 # Base64 encoding/decoding uses binascii
 
 def b64encode(s, altchars=None):
-    """Encode a byte string using Base64.
+    """Encode the bytes-like object s using Base64 and return a bytes object.
 
-    s is the byte string to encode.  Optional altchars must be a byte
-    string of length 2 which specifies an alternative alphabet for the
-    '+' and '/' characters.  This allows an application to
-    e.g. generate url or filesystem safe Base64 strings.
-
-    The encoded byte string is returned.
+    Optional altchars should be a byte string of length 2 which specifies an
+    alternative alphabet for the '+' and '/' characters.  This allows an
+    application to e.g. generate url or filesystem safe Base64 strings.
     """
     # Strip off the trailing newline
     encoded = binascii.b2a_base64(s)[:-1]
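A short sketch of the altchars parameter documented above (input chosen so the default alphabet's '+' actually appears):

    import base64

    data = b'\xfb\xef\xbe'                          # encodes to b'++++'
    print(base64.b64encode(data))                   # b'++++'
    print(base64.b64encode(data, altchars=b'-_'))   # b'----'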
@@ -67,18 +64,19 @@
 
 
 def b64decode(s, altchars=None, validate=False):
-    """Decode a Base64 encoded byte string.
+    """Decode the Base64 encoded bytes-like object or ASCII string s.
 
-    s is the byte string to decode.  Optional altchars must be a
-    string of length 2 which specifies the alternative alphabet used
-    instead of the '+' and '/' characters.
+    Optional altchars must be a bytes-like object or ASCII string of length 2
+    which specifies the alternative alphabet used instead of the '+' and '/'
+    characters.
 
-    The decoded string is returned.  A binascii.Error is raised if s is
-    incorrectly padded.
+    The result is returned as a bytes object.  A binascii.Error is raised if
+    s is incorrectly padded.
 
-    If validate is False (the default), non-base64-alphabet characters are
-    discarded prior to the padding check.  If validate is True,
-    non-base64-alphabet characters in the input result in a binascii.Error.
+    If validate is False (the default), characters that are neither in the
+    normal base-64 alphabet nor the alternative alphabet are discarded prior
+    to the padding check.  If validate is True, these non-alphabet characters
+    in the input result in a binascii.Error.
     """
     s = _bytes_from_decode_data(s)
     if altchars is not None:
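And the validate flag described in the docstring above, sketched:

    import base64
    import binascii

    # By default, non-alphabet bytes (the newline here) are discarded:
    print(base64.b64decode(b'QUJ\nD'))              # b'ABC'
    # With validate=True they raise binascii.Error instead:
    try:
        base64.b64decode(b'QUJ\nD', validate=True)
    except binascii.Error as exc:
        print('rejected:', exc)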
@@ -91,19 +89,19 @@
 
 
 def standard_b64encode(s):
-    """Encode a byte string using the standard Base64 alphabet.
+    """Encode bytes-like object s using the standard Base64 alphabet.
 
-    s is the byte string to encode.  The encoded byte string is returned.
+    The result is returned as a bytes object.
     """
     return b64encode(s)
 
 def standard_b64decode(s):
-    """Decode a byte string encoded with the standard Base64 alphabet.
+    """Decode bytes encoded with the standard Base64 alphabet.
 
-    s is the byte string to decode.  The decoded byte string is
-    returned.  binascii.Error is raised if the input is incorrectly
-    padded or if there are non-alphabet characters present in the
-    input.
+    Argument s is a bytes-like object or ASCII string to decode.  The result
+    is returned as a bytes object.  A binascii.Error is raised if the input
+    is incorrectly padded.  Characters that are not in the standard alphabet
+    are discarded prior to the padding check.
     """
     return b64decode(s)
 
@@ -112,21 +110,22 @@
 _urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
 
 def urlsafe_b64encode(s):
-    """Encode a byte string using a url-safe Base64 alphabet.
+    """Encode bytes using the URL- and filesystem-safe Base64 alphabet.
 
-    s is the byte string to encode.  The encoded byte string is
-    returned.  The alphabet uses '-' instead of '+' and '_' instead of
+    Argument s is a bytes-like object to encode.  The result is returned as a
+    bytes object.  The alphabet uses '-' instead of '+' and '_' instead of
     '/'.
     """
     return b64encode(s).translate(_urlsafe_encode_translation)
 
 def urlsafe_b64decode(s):
-    """Decode a byte string encoded with the standard Base64 alphabet.
+    """Decode bytes using the URL- and filesystem-safe Base64 alphabet.
 
-    s is the byte string to decode.  The decoded byte string is
-    returned.  binascii.Error is raised if the input is incorrectly
-    padded or if there are non-alphabet characters present in the
-    input.
+    Argument s is a bytes-like object or ASCII string to decode.  The result
+    is returned as a bytes object.  A binascii.Error is raised if the input
+    is incorrectly padded.  Characters that are not in the URL-safe base-64
+    alphabet, and are not a plus '+' or slash '/', are discarded prior to the
+    padding check.
 
     The alphabet uses '-' instead of '+' and '_' instead of '/'.
     """
@@ -142,9 +141,7 @@
 _b32rev = None
 
 def b32encode(s):
-    """Encode a byte string using Base32.
-
-    s is the byte string to encode.  The encoded byte string is returned.
+    """Encode the bytes-like object s using Base32 and return a bytes object.
     """
     global _b32tab2
     # Delay the initialization of the table to not waste memory
@@ -182,11 +179,10 @@
     return bytes(encoded)
 
 def b32decode(s, casefold=False, map01=None):
-    """Decode a Base32 encoded byte string.
+    """Decode the Base32 encoded bytes-like object or ASCII string s.
 
-    s is the byte string to decode.  Optional casefold is a flag
-    specifying whether a lowercase alphabet is acceptable as input.
-    For security purposes, the default is False.
+    Optional casefold is a flag specifying whether a lowercase alphabet is
+    acceptable as input.  For security purposes, the default is False.
 
     RFC 3548 allows for optional mapping of the digit 0 (zero) to the
     letter O (oh), and for optional mapping of the digit 1 (one) to
@@ -196,7 +192,7 @@
     the letter O).  For security purposes the default is None, so that
     0 and 1 are not allowed in the input.
 
-    The decoded byte string is returned.  binascii.Error is raised if
+    The result is returned as a bytes object.  A binascii.Error is raised if
     the input is incorrectly padded or if there are non-alphabet
     characters present in the input.
     """
@@ -257,23 +253,20 @@
 # lowercase.  The RFC also recommends against accepting input case
 # insensitively.
 def b16encode(s):
-    """Encode a byte string using Base16.
-
-    s is the byte string to encode.  The encoded byte string is returned.
+    """Encode the bytes-like object s using Base16 and return a bytes object.
     """
     return binascii.hexlify(s).upper()
 
 
 def b16decode(s, casefold=False):
-    """Decode a Base16 encoded byte string.
+    """Decode the Base16 encoded bytes-like object or ASCII string s.
 
-    s is the byte string to decode.  Optional casefold is a flag
-    specifying whether a lowercase alphabet is acceptable as input.
-    For security purposes, the default is False.
+    Optional casefold is a flag specifying whether a lowercase alphabet is
+    acceptable as input.  For security purposes, the default is False.
 
-    The decoded byte string is returned.  binascii.Error is raised if
-    s were incorrectly padded or if there are non-alphabet characters
-    present in the string.
+    The result is returned as a bytes object.  A binascii.Error is raised if
+    s is incorrectly padded or if there are non-alphabet characters present
+    in the input.
     """
     s = _bytes_from_decode_data(s)
     if casefold:
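Base16 is uppercase hex; lowercase input is rejected unless casefold is set, as sketched here:

    import base64
    import binascii

    print(base64.b16encode(b'\xde\xad\xbe\xef'))         # b'DEADBEEF'
    print(base64.b16decode(b'DEADBEEF'))                 # b'\xde\xad\xbe\xef'
    try:
        base64.b16decode(b'deadbeef')                    # lowercase: error
    except binascii.Error as exc:
        print('rejected:', exc)
    print(base64.b16decode(b'deadbeef', casefold=True))  # b'\xde\xad\xbe\xef'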
@@ -316,19 +309,17 @@
     return b''.join(chunks)
 
 def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False):
-    """Encode a byte string using Ascii85.
-
-    b is the byte string to encode. The encoded byte string is returned.
+    """Encode bytes-like object b using Ascii85 and return a bytes object.
 
     foldspaces is an optional flag that uses the special short sequence 'y'
     instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This
     feature is not supported by the "standard" Adobe encoding.
 
-    wrapcol controls whether the output should have newline ('\\n') characters
+    wrapcol controls whether the output should have newline (b'\\n') characters
     added to it. If this is non-zero, each output line will be at most this
     many characters long.
 
-    pad controls whether the input string is padded to a multiple of 4 before
+    pad controls whether the input is padded to a multiple of 4 before
     encoding. Note that the btoa implementation always pads.
 
     adobe controls whether the encoded byte sequence is framed with <~ and ~>,
@@ -359,9 +350,7 @@
     return result
 
 def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'):
-    """Decode an Ascii85 encoded byte string.
-
-    s is the byte string to decode.
+    """Decode the Ascii85 encoded bytes-like object or ASCII string b.
 
     foldspaces is a flag that specifies whether the 'y' short sequence should be
     accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is
@@ -373,13 +362,20 @@
     ignorechars should be a byte string containing characters to ignore from the
     input. This should only contain whitespace characters, and by default
     contains all whitespace characters in ASCII.
+
+    The result is returned as a bytes object.
     """
     b = _bytes_from_decode_data(b)
     if adobe:
-        if not (b.startswith(_A85START) and b.endswith(_A85END)):
-            raise ValueError("Ascii85 encoded byte sequences must be bracketed "
-                             "by {!r} and {!r}".format(_A85START, _A85END))
-        b = b[2:-2] # Strip off start/end markers
+        if not b.endswith(_A85END):
+            raise ValueError(
+                "Ascii85 encoded byte sequences must end "
+                "with {!r}".format(_A85END)
+                )
+        if b.startswith(_A85START):
+            b = b[2:-2]  # Strip off start/end markers
+        else:
+            b = b[:-2]
     #
     # We have to go through this stepwise, so as to ignore spaces and handle
     # special short sequences
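The relaxed framing accepted by the hunk above, sketched: the trailing '~>' remains mandatory, but a missing leading '<~' is now tolerated:

    import base64

    framed = base64.a85encode(b'hello', adobe=True)     # b'<~BOu!rDZ~>'
    print(base64.a85decode(framed, adobe=True))         # b'hello'
    # Stripping the '<~' prefix no longer raises ValueError:
    print(base64.a85decode(framed[2:], adobe=True))     # b'hello'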
@@ -432,10 +428,10 @@
 _b85dec = None
 
 def b85encode(b, pad=False):
-    """Encode an ASCII-encoded byte array in base85 format.
+    """Encode bytes-like object b in base85 format and return a bytes object.
 
-    If pad is true, the input is padded with "\\0" so its length is a multiple of
-    4 characters before encoding.
+    If pad is true, the input is padded with b'\\0' so its length is a multiple of
+    4 bytes before encoding.
     """
     global _b85chars, _b85chars2
     # Delay the initialization of tables to not waste memory
@@ -446,7 +442,10 @@
     return _85encode(b, _b85chars, _b85chars2, pad)
 
 def b85decode(b):
-    """Decode base85-encoded byte array"""
+    """Decode the base85-encoded bytes-like object or ASCII string b
+
+    The result is returned as a bytes object.
+    """
     global _b85dec
     # Delay the initialization of tables to not waste memory
     # if the function is never called
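b85 shares the mechanics of Ascii85 but uses the alphabet common to git and Mercurial binary diffs; a round-trip sketch:

    import base64

    enc = base64.b85encode(b'hello')
    print(enc)                                # alphabet differs from a85
    assert base64.b85decode(enc) == b'hello'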
@@ -531,7 +530,7 @@
 
 
 def encodebytes(s):
-    """Encode a bytestring into a bytestring containing multiple lines
+    """Encode a bytestring into a bytes object containing multiple lines
     of base-64 data."""
     _input_type_check(s)
     pieces = []
@@ -549,7 +548,7 @@
 
 
 def decodebytes(s):
-    """Decode a bytestring of base-64 data into a bytestring."""
+    """Decode a bytestring of base-64 data into a bytes object."""
     _input_type_check(s)
     return binascii.a2b_base64(s)
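Unlike b64encode(), encodebytes() wraps its output at 76 characters per line (MIME style), and decodebytes() accepts such wrapped input; sketched:

    import base64

    blob = bytes(range(64))
    wrapped = base64.encodebytes(blob)
    print(wrapped)                             # two b'\n'-terminated lines
    assert base64.decodebytes(wrapped) == blob
    assert b'\n' not in base64.b64encode(blob)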

