[Jython-checkins] jython: Combine changesets from https://bitbucket.org/jimbaker/jython-socket-reboot
jim.baker
jython-checkins at python.org
Thu May 8 06:56:30 CEST 2014
http://hg.python.org/jython/rev/107fe4a4c96b
changeset: 7222:107fe4a4c96b
user: Jim Baker <jim.baker at rackspace.com>
date: Wed May 07 22:41:56 2014 -0600
summary:
Combine changesets from https://bitbucket.org/jimbaker/jython-socket-reboot
files:
CPythonLib.includes | 4 +
Lib/BaseHTTPServer.py | 614 ++
Lib/SocketServer.py | 1462 +++---
Lib/_socket.py | 2059 ++++++++++
Lib/_sslcerts.py | 239 +
Lib/asynchat.py | 295 -
Lib/asyncore.py | 705 ---
Lib/distutils/command/install.py | 93 +-
Lib/ftplib.py | 1047 -----
Lib/logging/config.py | 909 ----
Lib/netrc.py | 119 -
Lib/robotparser.py | 222 -
Lib/select.py | 274 +-
Lib/socket.py | 1993 +---------
Lib/ssl.py | 267 +-
Lib/telnetlib.py | 663 ---
Lib/test/test_httplib.py | 472 --
Lib/test/test_httpservers.py | 543 --
Lib/test/test_logging.py | 172 +-
Lib/test/test_mhlib.py | 57 +-
Lib/test/test_select.py | 67 +-
Lib/test/test_socket.py | 285 +-
Lib/test/test_socket_ssl.py | 80 -
Lib/test/test_socketserver.py | 3 +-
Lib/test/test_urllib2_localnet.py | 557 --
Lib/uu.py | 211 -
Lib/zlib.py | 84 +-
build.xml | 14 +
extlibs/bcpkix-jdk15on-150.jar | Bin
extlibs/bcprov-jdk15on-150.jar | Bin
extlibs/netty-buffer-4.0.18.Final.jar | Bin
extlibs/netty-codec-4.0.18.Final.jar | Bin
extlibs/netty-common-4.0.18.Final.jar | Bin
extlibs/netty-handler-4.0.18.Final.jar | Bin
extlibs/netty-transport-4.0.18.Final.jar | Bin
35 files changed, 4559 insertions(+), 8951 deletions(-)
diff --git a/CPythonLib.includes b/CPythonLib.includes
--- a/CPythonLib.includes
+++ b/CPythonLib.includes
@@ -27,6 +27,7 @@
ast.py
atexit.py
asynchat.py
+asyncore.py
BaseHTTPServer.py
base64.py
bdb.py
@@ -101,6 +102,7 @@
mimify.py
multifile.py
mutex.py
+netrc.py
nntplib.py
numbers.py
nturl2path.py
@@ -149,6 +151,7 @@
symbol.py
sysconfig.py
tabnanny.py
+telnetlib.py
this.py
textwrap.py
tempfile.py
@@ -164,6 +167,7 @@
UserDict.py
UserList.py
UserString.py
+uu.py
uuid.py
warnings.py
whichdb.py
diff --git a/Lib/BaseHTTPServer.py b/Lib/BaseHTTPServer.py
new file mode 100644
--- /dev/null
+++ b/Lib/BaseHTTPServer.py
@@ -0,0 +1,614 @@
+"""HTTP server base class.
+
+Note: the class in this module doesn't implement any HTTP request; see
+SimpleHTTPServer for simple implementations of GET, HEAD and POST
+(including CGI scripts). It does, however, optionally implement HTTP/1.1
+persistent connections, as of version 0.3.
+
+Contents:
+
+- BaseHTTPRequestHandler: HTTP request handler base class
+- test: test function
+
+XXX To do:
+
+- log requests even later (to capture byte count)
+- log user-agent header and other interesting goodies
+- send error log to separate file
+"""
+
+
+# See also:
+#
+# HTTP Working Group T. Berners-Lee
+# INTERNET-DRAFT R. T. Fielding
+# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
+# Expires September 8, 1995 March 8, 1995
+#
+# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
+#
+# and
+#
+# Network Working Group R. Fielding
+# Request for Comments: 2616 et al
+# Obsoletes: 2068 June 1999
+# Category: Standards Track
+#
+# URL: http://www.faqs.org/rfcs/rfc2616.html
+
+# Log files
+# ---------
+#
+# Here's a quote from the NCSA httpd docs about log file format.
+#
+# | The logfile format is as follows. Each line consists of:
+# |
+# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
+# |
+# | host: Either the DNS name or the IP number of the remote client
+# | rfc931: Any information returned by identd for this person,
+# | - otherwise.
+# | authuser: If user sent a userid for authentication, the user name,
+# | - otherwise.
+# | DD: Day
+# | Mon: Month (calendar name)
+# | YYYY: Year
+# | hh: hour (24-hour format, the machine's timezone)
+# | mm: minutes
+# | ss: seconds
+# | request: The first line of the HTTP request as sent by the client.
+# | ddd: the status code returned by the server, - if not available.
+# | bbbb: the total number of bytes sent,
+# | *not including the HTTP/1.0 header*, - if not available
+# |
+# | You can determine the name of the file accessed through request.
+#
+# (Actually, the latter is only true if you know the server configuration
+# at the time the request was made!)
+
+__version__ = "0.3"
+
+__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
+
+import sys
+import time
+import socket # For gethostbyaddr()
+from warnings import filterwarnings, catch_warnings
+with catch_warnings():
+ if sys.py3kwarning:
+ filterwarnings("ignore", ".*mimetools has been removed",
+ DeprecationWarning)
+ import mimetools
+import SocketServer
+
+# Default error message template
+DEFAULT_ERROR_MESSAGE = """\
+<head>
+<title>Error response</title>
+</head>
+<body>
+<h1>Error response</h1>
+<p>Error code %(code)d.
+<p>Message: %(message)s.
+<p>Error code explanation: %(code)s = %(explain)s.
+</body>
+"""
+
+DEFAULT_ERROR_CONTENT_TYPE = "text/html"
+
+def _quote_html(html):
+ return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+
+class HTTPServer(SocketServer.TCPServer):
+
+ allow_reuse_address = 1 # Seems to make sense in testing environment
+
+ def server_bind(self):
+ """Override server_bind to store the server name."""
+ SocketServer.TCPServer.server_bind(self)
+ try:
+ host, port = self.socket.getsockname()[:2]
+ self.server_name = socket.getfqdn(host)
+ self.server_port = port
+ except socket.error:
+ pass
+
+ def server_activate(self):
+ SocketServer.TCPServer.server_activate(self)
+ # Adding a second call to getsockname() because of this issue
+ # http://wiki.python.org/jython/NewSocketModule#Deferredsocketcreationonjython
+ host, port = self.socket.getsockname()[:2]
+ self.server_name = socket.getfqdn(host)
+ self.server_port = port
+
+
+class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
+
+ """HTTP request handler base class.
+
+ The following explanation of HTTP serves to guide you through the
+ code as well as to expose any misunderstandings I may have about
+ HTTP (so you don't need to read the code to figure out I'm wrong
+ :-).
+
+ HTTP (HyperText Transfer Protocol) is an extensible protocol on
+ top of a reliable stream transport (e.g. TCP/IP). The protocol
+ recognizes three parts to a request:
+
+ 1. One line identifying the request type and path
+ 2. An optional set of RFC-822-style headers
+ 3. An optional data part
+
+ The headers and data are separated by a blank line.
+
+ The first line of the request has the form
+
+ <command> <path> <version>
+
+ where <command> is a (case-sensitive) keyword such as GET or POST,
+ <path> is a string containing path information for the request,
+ and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
+ <path> is encoded using the URL encoding scheme (using %xx to signify
+ the ASCII character with hex code xx).
+
+ The specification specifies that lines are separated by CRLF but
+ for compatibility with the widest range of clients recommends
+ servers also handle LF. Similarly, whitespace in the request line
+ is treated sensibly (allowing multiple spaces between components
+ and allowing trailing whitespace).
+
+ Similarly, for output, lines ought to be separated by CRLF pairs
+ but most clients grok LF characters just fine.
+
+ If the first line of the request has the form
+
+ <command> <path>
+
+ (i.e. <version> is left out) then this is assumed to be an HTTP
+ 0.9 request; this form has no optional headers and data part and
+ the reply consists of just the data.
+
+ The reply form of the HTTP 1.x protocol again has three parts:
+
+ 1. One line giving the response code
+ 2. An optional set of RFC-822-style headers
+ 3. The data
+
+ Again, the headers and data are separated by a blank line.
+
+ The response code line has the form
+
+ <version> <responsecode> <responsestring>
+
+ where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
+ <responsecode> is a 3-digit response code indicating success or
+ failure of the request, and <responsestring> is an optional
+ human-readable string explaining what the response code means.
+
+ This server parses the request and the headers, and then calls a
+ function specific to the request type (<command>). Specifically,
+ a request SPAM will be handled by a method do_SPAM(). If no
+ such method exists the server sends an error response to the
+ client. If it exists, it is called with no arguments:
+
+ do_SPAM()
+
+ Note that the request name is case sensitive (i.e. SPAM and spam
+ are different requests).
+
+ The various request details are stored in instance variables:
+
+ - client_address is the client IP address in the form (host,
+ port);
+
+ - command, path and version are the broken-down request line;
+
+ - headers is an instance of mimetools.Message (or a derived
+ class) containing the header information;
+
+ - rfile is a file object open for reading positioned at the
+ start of the optional input data part;
+
+ - wfile is a file object open for writing.
+
+ IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
+
+ The first thing to be written must be the response line. Then
+ follow 0 or more header lines, then a blank line, and then the
+ actual data (if any). The meaning of the header lines depends on
+ the command executed by the server; in most cases, when data is
+ returned, there should be at least one header line of the form
+
+ Content-type: <type>/<subtype>
+
+ where <type> and <subtype> should be registered MIME types,
+ e.g. "text/html" or "text/plain".
+
+ """
+
+ # The Python system version, truncated to its first component.
+ sys_version = "Python/" + sys.version.split()[0]
+
+ # The server software version. You may want to override this.
+ # The format is multiple whitespace-separated strings,
+ # where each string is of the form name[/version].
+ server_version = "BaseHTTP/" + __version__
+
+ # The default request version. This only affects responses up until
+ # the point where the request line is parsed, so it mainly decides what
+ # the client gets back when sending a malformed request line.
+ # Most web servers default to HTTP 0.9, i.e. don't send a status line.
+ default_request_version = "HTTP/0.9"
+
+ def parse_request(self):
+ """Parse a request (internal).
+
+ The request should be stored in self.raw_requestline; the results
+ are in self.command, self.path, self.request_version and
+ self.headers.
+
+ Return True for success, False for failure; on failure, an
+ error is sent back.
+
+ """
+ self.command = None # set in case of error on the first line
+ self.request_version = version = self.default_request_version
+ self.close_connection = 1
+ requestline = self.raw_requestline
+ requestline = requestline.rstrip('\r\n')
+ self.requestline = requestline
+ words = requestline.split()
+ if len(words) == 3:
+ command, path, version = words
+ if version[:5] != 'HTTP/':
+ self.send_error(400, "Bad request version (%r)" % version)
+ return False
+ try:
+ base_version_number = version.split('/', 1)[1]
+ version_number = base_version_number.split(".")
+ # RFC 2145 section 3.1 says there can be only one "." and
+ # - major and minor numbers MUST be treated as
+ # separate integers;
+ # - HTTP/2.4 is a lower version than HTTP/2.13, which in
+ # turn is lower than HTTP/12.3;
+ # - Leading zeros MUST be ignored by recipients.
+ if len(version_number) != 2:
+ raise ValueError
+ version_number = int(version_number[0]), int(version_number[1])
+ except (ValueError, IndexError):
+ self.send_error(400, "Bad request version (%r)" % version)
+ return False
+ if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
+ self.close_connection = 0
+ if version_number >= (2, 0):
+ self.send_error(505,
+ "Invalid HTTP Version (%s)" % base_version_number)
+ return False
+ elif len(words) == 2:
+ command, path = words
+ self.close_connection = 1
+ if command != 'GET':
+ self.send_error(400,
+ "Bad HTTP/0.9 request type (%r)" % command)
+ return False
+ elif not words:
+ return False
+ else:
+ self.send_error(400, "Bad request syntax (%r)" % requestline)
+ return False
+ self.command, self.path, self.request_version = command, path, version
+
+ # Examine the headers and look for a Connection directive
+ self.headers = self.MessageClass(self.rfile, 0)
+
+ conntype = self.headers.get('Connection', "")
+ if conntype.lower() == 'close':
+ self.close_connection = 1
+ elif (conntype.lower() == 'keep-alive' and
+ self.protocol_version >= "HTTP/1.1"):
+ self.close_connection = 0
+ return True
+
+ def handle_one_request(self):
+ """Handle a single HTTP request.
+
+ You normally don't need to override this method; see the class
+ __doc__ string for information on how to handle specific HTTP
+ commands such as GET and POST.
+
+ """
+ try:
+ self.raw_requestline = self.rfile.readline(65537)
+ if len(self.raw_requestline) > 65536:
+ self.requestline = ''
+ self.request_version = ''
+ self.command = ''
+ self.send_error(414)
+ return
+ if not self.raw_requestline:
+ self.close_connection = 1
+ return
+ if not self.parse_request():
+ # An error code has been sent, just exit
+ return
+ mname = 'do_' + self.command
+ if not hasattr(self, mname):
+ self.send_error(501, "Unsupported method (%r)" % self.command)
+ return
+ method = getattr(self, mname)
+ method()
+ self.wfile.flush() #actually send the response if not already done.
+ except socket.timeout, e:
+ #a read or a write timed out. Discard this connection
+ self.log_error("Request timed out: %r", e)
+ self.close_connection = 1
+ return
+
+ def handle(self):
+ """Handle multiple requests if necessary."""
+ self.close_connection = 1
+
+ self.handle_one_request()
+ while not self.close_connection:
+ self.handle_one_request()
+
+ def send_error(self, code, message=None):
+ """Send and log an error reply.
+
+ Arguments are the error code, and a detailed message.
+ The detailed message defaults to the short entry matching the
+ response code.
+
+ This sends an error response (so it must be called before any
+ output has been generated), logs the error, and finally sends
+ a piece of HTML explaining the error to the user.
+
+ """
+
+ try:
+ short, long = self.responses[code]
+ except KeyError:
+ short, long = '???', '???'
+ if message is None:
+ message = short
+ explain = long
+ self.log_error("code %d, message %s", code, message)
+ # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
+ content = (self.error_message_format %
+ {'code': code, 'message': _quote_html(message), 'explain': explain})
+ self.send_response(code, message)
+ self.send_header("Content-Type", self.error_content_type)
+ self.send_header('Connection', 'close')
+ self.end_headers()
+ if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
+ self.wfile.write(content)
+
+ error_message_format = DEFAULT_ERROR_MESSAGE
+ error_content_type = DEFAULT_ERROR_CONTENT_TYPE
+
+ def send_response(self, code, message=None):
+ """Send the response header and log the response code.
+
+ Also send two standard headers with the server software
+ version and the current date.
+
+ """
+ self.log_request(code)
+ if message is None:
+ if code in self.responses:
+ message = self.responses[code][0]
+ else:
+ message = ''
+ if self.request_version != 'HTTP/0.9':
+ self.wfile.write("%s %d %s\r\n" %
+ (self.protocol_version, code, message))
+ # print (self.protocol_version, code, message)
+ self.send_header('Server', self.version_string())
+ self.send_header('Date', self.date_time_string())
+
+ def send_header(self, keyword, value):
+ """Send a MIME header."""
+ if self.request_version != 'HTTP/0.9':
+ self.wfile.write("%s: %s\r\n" % (keyword, value))
+
+ if keyword.lower() == 'connection':
+ if value.lower() == 'close':
+ self.close_connection = 1
+ elif value.lower() == 'keep-alive':
+ self.close_connection = 0
+
+ def end_headers(self):
+ """Send the blank line ending the MIME headers."""
+ if self.request_version != 'HTTP/0.9':
+ self.wfile.write("\r\n")
+
+ def log_request(self, code='-', size='-'):
+ """Log an accepted request.
+
+ This is called by send_response().
+
+ """
+
+ self.log_message('"%s" %s %s',
+ self.requestline, str(code), str(size))
+
+ def log_error(self, format, *args):
+ """Log an error.
+
+ This is called when a request cannot be fulfilled. By
+ default it passes the message on to log_message().
+
+ Arguments are the same as for log_message().
+
+ XXX This should go to the separate error log.
+
+ """
+
+ self.log_message(format, *args)
+
+ def log_message(self, format, *args):
+ """Log an arbitrary message.
+
+ This is used by all other logging functions. Override
+ it if you have specific logging wishes.
+
+ The first argument, FORMAT, is a format string for the
+ message to be logged. If the format string contains
+ any % escapes requiring parameters, they should be
+ specified as subsequent arguments (it's just like
+ printf!).
+
+ The client ip address and current date/time are prefixed to every
+ message.
+
+ """
+
+ sys.stderr.write("%s - - [%s] %s\n" %
+ (self.client_address[0],
+ self.log_date_time_string(),
+ format%args))
+
+ def version_string(self):
+ """Return the server software version string."""
+ return self.server_version + ' ' + self.sys_version
+
+ def date_time_string(self, timestamp=None):
+ """Return the current date and time formatted for a message header."""
+ if timestamp is None:
+ timestamp = time.time()
+ year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
+ s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
+ self.weekdayname[wd],
+ day, self.monthname[month], year,
+ hh, mm, ss)
+ return s
+
+ def log_date_time_string(self):
+ """Return the current time formatted for logging."""
+ now = time.time()
+ year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
+ s = "%02d/%3s/%04d %02d:%02d:%02d" % (
+ day, self.monthname[month], year, hh, mm, ss)
+ return s
+
+ weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+ monthname = [None,
+ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+ def address_string(self):
+ """Return the client address formatted for logging.
+
+ This version looks up the full hostname using gethostbyaddr(),
+ and tries to find a name that contains at least one dot.
+
+ """
+
+ host, port = self.client_address[:2]
+ return socket.getfqdn(host)
+
+ # Essentially static class variables
+
+ # The version of the HTTP protocol we support.
+ # Set this to HTTP/1.1 to enable automatic keepalive
+ protocol_version = "HTTP/1.0"
+
+ # The Message-like class used to parse headers
+ MessageClass = mimetools.Message
+
+ # Table mapping response codes to messages; entries have the
+ # form {code: (shortmessage, longmessage)}.
+ # See RFC 2616.
+ responses = {
+ 100: ('Continue', 'Request received, please continue'),
+ 101: ('Switching Protocols',
+ 'Switching to new protocol; obey Upgrade header'),
+
+ 200: ('OK', 'Request fulfilled, document follows'),
+ 201: ('Created', 'Document created, URL follows'),
+ 202: ('Accepted',
+ 'Request accepted, processing continues off-line'),
+ 203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
+ 204: ('No Content', 'Request fulfilled, nothing follows'),
+ 205: ('Reset Content', 'Clear input form for further input.'),
+ 206: ('Partial Content', 'Partial content follows.'),
+
+ 300: ('Multiple Choices',
+ 'Object has several resources -- see URI list'),
+ 301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
+ 302: ('Found', 'Object moved temporarily -- see URI list'),
+ 303: ('See Other', 'Object moved -- see Method and URL list'),
+ 304: ('Not Modified',
+ 'Document has not changed since given time'),
+ 305: ('Use Proxy',
+ 'You must use proxy specified in Location to access this '
+ 'resource.'),
+ 307: ('Temporary Redirect',
+ 'Object moved temporarily -- see URI list'),
+
+ 400: ('Bad Request',
+ 'Bad request syntax or unsupported method'),
+ 401: ('Unauthorized',
+ 'No permission -- see authorization schemes'),
+ 402: ('Payment Required',
+ 'No payment -- see charging schemes'),
+ 403: ('Forbidden',
+ 'Request forbidden -- authorization will not help'),
+ 404: ('Not Found', 'Nothing matches the given URI'),
+ 405: ('Method Not Allowed',
+ 'Specified method is invalid for this resource.'),
+ 406: ('Not Acceptable', 'URI not available in preferred format.'),
+ 407: ('Proxy Authentication Required', 'You must authenticate with '
+ 'this proxy before proceeding.'),
+ 408: ('Request Timeout', 'Request timed out; try again later.'),
+ 409: ('Conflict', 'Request conflict.'),
+ 410: ('Gone',
+ 'URI no longer exists and has been permanently removed.'),
+ 411: ('Length Required', 'Client must specify Content-Length.'),
+ 412: ('Precondition Failed', 'Precondition in headers is false.'),
+ 413: ('Request Entity Too Large', 'Entity is too large.'),
+ 414: ('Request-URI Too Long', 'URI is too long.'),
+ 415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
+ 416: ('Requested Range Not Satisfiable',
+ 'Cannot satisfy request range.'),
+ 417: ('Expectation Failed',
+ 'Expect condition could not be satisfied.'),
+
+ 500: ('Internal Server Error', 'Server got itself in trouble'),
+ 501: ('Not Implemented',
+ 'Server does not support this operation'),
+ 502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
+ 503: ('Service Unavailable',
+ 'The server cannot process the request due to a high load'),
+ 504: ('Gateway Timeout',
+ 'The gateway server did not receive a timely response'),
+ 505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
+ }
+
+
+def test(HandlerClass = BaseHTTPRequestHandler,
+ ServerClass = HTTPServer, protocol="HTTP/1.0"):
+ """Test the HTTP request handler class.
+
+ This runs an HTTP server on port 8000 (or the first command line
+ argument).
+
+ """
+
+ if sys.argv[1:]:
+ port = int(sys.argv[1])
+ else:
+ port = 8000
+ server_address = ('', port)
+
+ HandlerClass.protocol_version = protocol
+ httpd = ServerClass(server_address, HandlerClass)
+
+ sa = httpd.socket.getsockname()
+ print "Serving HTTP on", sa[0], "port", sa[1], "..."
+ httpd.serve_forever()
+
+
+if __name__ == '__main__':
+ test()
diff --git a/Lib/SocketServer.py b/Lib/SocketServer.py
--- a/Lib/SocketServer.py
+++ b/Lib/SocketServer.py
@@ -1,723 +1,739 @@
-"""Generic socket server classes.
-
-This module tries to capture the various aspects of defining a server:
-
-For socket-based servers:
-
-- address family:
- - AF_INET{,6}: IP (Internet Protocol) sockets (default)
- - AF_UNIX: Unix domain sockets
- - others, e.g. AF_DECNET are conceivable (see <socket.h>
-- socket type:
- - SOCK_STREAM (reliable stream, e.g. TCP)
- - SOCK_DGRAM (datagrams, e.g. UDP)
-
-For request-based servers (including socket-based):
-
-- client address verification before further looking at the request
- (This is actually a hook for any processing that needs to look
- at the request before anything else, e.g. logging)
-- how to handle multiple requests:
- - synchronous (one request is handled at a time)
- - forking (each request is handled by a new process)
- - threading (each request is handled by a new thread)
-
-The classes in this module favor the server type that is simplest to
-write: a synchronous TCP/IP server. This is bad class design, but
-save some typing. (There's also the issue that a deep class hierarchy
-slows down method lookups.)
-
-There are five classes in an inheritance diagram, four of which represent
-synchronous servers of four types:
-
- +------------+
- | BaseServer |
- +------------+
- |
- v
- +-----------+ +------------------+
- | TCPServer |------->| UnixStreamServer |
- +-----------+ +------------------+
- |
- v
- +-----------+ +--------------------+
- | UDPServer |------->| UnixDatagramServer |
- +-----------+ +--------------------+
-
-Note that UnixDatagramServer derives from UDPServer, not from
-UnixStreamServer -- the only difference between an IP and a Unix
-stream server is the address family, which is simply repeated in both
-unix server classes.
-
-Forking and threading versions of each type of server can be created
-using the ForkingMixIn and ThreadingMixIn mix-in classes. For
-instance, a threading UDP server class is created as follows:
-
- class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
-
-The Mix-in class must come first, since it overrides a method defined
-in UDPServer! Setting the various member variables also changes
-the behavior of the underlying server mechanism.
-
-To implement a service, you must derive a class from
-BaseRequestHandler and redefine its handle() method. You can then run
-various versions of the service by combining one of the server classes
-with your request handler class.
-
-The request handler class must be different for datagram or stream
-services. This can be hidden by using the request handler
-subclasses StreamRequestHandler or DatagramRequestHandler.
-
-Of course, you still have to use your head!
-
-For instance, it makes no sense to use a forking server if the service
-contains state in memory that can be modified by requests (since the
-modifications in the child process would never reach the initial state
-kept in the parent process and passed to each child). In this case,
-you can use a threading server, but you will probably have to use
-locks to avoid two requests that come in nearly simultaneous to apply
-conflicting changes to the server state.
-
-On the other hand, if you are building e.g. an HTTP server, where all
-data is stored externally (e.g. in the file system), a synchronous
-class will essentially render the service "deaf" while one request is
-being handled -- which may be for a very long time if a client is slow
-to reqd all the data it has requested. Here a threading or forking
-server is appropriate.
-
-In some cases, it may be appropriate to process part of a request
-synchronously, but to finish processing in a forked child depending on
-the request data. This can be implemented by using a synchronous
-server and doing an explicit fork in the request handler class
-handle() method.
-
-Another approach to handling multiple simultaneous requests in an
-environment that supports neither threads nor fork (or where these are
-too expensive or inappropriate for the service) is to maintain an
-explicit table of partially finished requests and to use select() to
-decide which request to work on next (or whether to handle a new
-incoming request). This is particularly important for stream services
-where each client can potentially be connected for a long time (if
-threads or subprocesses cannot be used).
-
-Future work:
-- Standard classes for Sun RPC (which uses either UDP or TCP)
-- Standard mix-in classes to implement various authentication
- and encryption schemes
-- Standard framework for select-based multiplexing
-
-XXX Open problems:
-- What to do with out-of-band data?
-
-BaseServer:
-- split generic "request" functionality out into BaseServer class.
- Copyright (C) 2000 Luke Kenneth Casson Leighton <lkcl at samba.org>
-
- example: read entries from a SQL database (requires overriding
- get_request() to return a table entry from the database).
- entry is processed by a RequestHandlerClass.
-
-"""
-
-# Author of the BaseServer patch: Luke Kenneth Casson Leighton
-
-# XXX Warning!
-# There is a test suite for this module, but it cannot be run by the
-# standard regression test.
-# To run it manually, run Lib/test/test_socketserver.py.
-
-__version__ = "0.4"
-
-
-import socket
-import select
-import sys
-import os
-try:
- import threading
-except ImportError:
- import dummy_threading as threading
-
-select_fn = select.select
-if sys.platform.startswith('java'):
- select_fn = select.cpython_compatible_select
-
-__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
- "ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
- "StreamRequestHandler","DatagramRequestHandler",
- "ThreadingMixIn", "ForkingMixIn"]
-if hasattr(socket, "AF_UNIX"):
- __all__.extend(["UnixStreamServer","UnixDatagramServer",
- "ThreadingUnixStreamServer",
- "ThreadingUnixDatagramServer"])
-
-class BaseServer:
-
- """Base class for server classes.
-
- Methods for the caller:
-
- - __init__(server_address, RequestHandlerClass)
- - serve_forever(poll_interval=0.5)
- - shutdown()
- - handle_request() # if you do not use serve_forever()
- - fileno() -> int # for select()
-
- Methods that may be overridden:
-
- - server_bind()
- - server_activate()
- - get_request() -> request, client_address
- - handle_timeout()
- - verify_request(request, client_address)
- - server_close()
- - process_request(request, client_address)
- - shutdown_request(request)
- - close_request(request)
- - handle_error()
-
- Methods for derived classes:
-
- - finish_request(request, client_address)
-
- Class variables that may be overridden by derived classes or
- instances:
-
- - timeout
- - address_family
- - socket_type
- - allow_reuse_address
-
- Instance variables:
-
- - RequestHandlerClass
- - socket
-
- """
-
- timeout = None
-
- def __init__(self, server_address, RequestHandlerClass):
- """Constructor. May be extended, do not override."""
- self.server_address = server_address
- self.RequestHandlerClass = RequestHandlerClass
- self.__is_shut_down = threading.Event()
- self.__shutdown_request = False
-
- def server_activate(self):
- """Called by constructor to activate the server.
-
- May be overridden.
-
- """
- pass
-
- def serve_forever(self, poll_interval=0.5):
- """Handle one request at a time until shutdown.
-
- Polls for shutdown every poll_interval seconds. Ignores
- self.timeout. If you need to do periodic tasks, do them in
- another thread.
- """
- self.__is_shut_down.clear()
- try:
- while not self.__shutdown_request:
- # XXX: Consider using another file descriptor or
- # connecting to the socket to wake this up instead of
- # polling. Polling reduces our responsiveness to a
- # shutdown request and wastes cpu at all other times.
- r, w, e = select_fn([self], [], [], poll_interval)
- if self in r:
- self._handle_request_noblock()
- finally:
- self.__shutdown_request = False
- self.__is_shut_down.set()
-
- def shutdown(self):
- """Stops the serve_forever loop.
-
- Blocks until the loop has finished. This must be called while
- serve_forever() is running in another thread, or it will
- deadlock.
- """
- self.__shutdown_request = True
- self.__is_shut_down.wait()
-
- # The distinction between handling, getting, processing and
- # finishing a request is fairly arbitrary. Remember:
- #
- # - handle_request() is the top-level call. It calls
- # select, get_request(), verify_request() and process_request()
- # - get_request() is different for stream or datagram sockets
- # - process_request() is the place that may fork a new process
- # or create a new thread to finish the request
- # - finish_request() instantiates the request handler class;
- # this constructor will handle the request all by itself
-
- def handle_request(self):
- """Handle one request, possibly blocking.
-
- Respects self.timeout.
- """
- # Support people who used socket.settimeout() to escape
- # handle_request before self.timeout was available.
- timeout = self.socket.gettimeout()
- if timeout is None:
- timeout = self.timeout
- elif self.timeout is not None:
- timeout = min(timeout, self.timeout)
- fd_sets = select_fn([self], [], [], timeout)
- if not fd_sets[0]:
- self.handle_timeout()
- return
- self._handle_request_noblock()
-
- def _handle_request_noblock(self):
- """Handle one request, without blocking.
-
- I assume that select_fn has returned that the socket is
- readable before this function was called, so there should be
- no risk of blocking in get_request().
- """
- try:
- request, client_address = self.get_request()
- except socket.error:
- return
- if self.verify_request(request, client_address):
- try:
- self.process_request(request, client_address)
- except:
- self.handle_error(request, client_address)
- self.shutdown_request(request)
-
- def handle_timeout(self):
- """Called if no new request arrives within self.timeout.
-
- Overridden by ForkingMixIn.
- """
- pass
-
- def verify_request(self, request, client_address):
- """Verify the request. May be overridden.
-
- Return True if we should proceed with this request.
-
- """
- return True
-
- def process_request(self, request, client_address):
- """Call finish_request.
-
- Overridden by ForkingMixIn and ThreadingMixIn.
-
- """
- self.finish_request(request, client_address)
- self.shutdown_request(request)
-
- def server_close(self):
- """Called to clean-up the server.
-
- May be overridden.
-
- """
- pass
-
- def finish_request(self, request, client_address):
- """Finish one request by instantiating RequestHandlerClass."""
- self.RequestHandlerClass(request, client_address, self)
-
- def shutdown_request(self, request):
- """Called to shutdown and close an individual request."""
- self.close_request(request)
-
- def close_request(self, request):
- """Called to clean up an individual request."""
- pass
-
- def handle_error(self, request, client_address):
- """Handle an error gracefully. May be overridden.
-
- The default is to print a traceback and continue.
-
- """
- print '-'*40
- print 'Exception happened during processing of request from',
- print client_address
- import traceback
- traceback.print_exc() # XXX But this goes to stderr!
- print '-'*40
-
-
-class TCPServer(BaseServer):
-
- """Base class for various socket-based server classes.
-
- Defaults to synchronous IP stream (i.e., TCP).
-
- Methods for the caller:
-
- - __init__(server_address, RequestHandlerClass, bind_and_activate=True)
- - serve_forever(poll_interval=0.5)
- - shutdown()
- - handle_request() # if you don't use serve_forever()
- - fileno() -> int # for select()
-
- Methods that may be overridden:
-
- - server_bind()
- - server_activate()
- - get_request() -> request, client_address
- - handle_timeout()
- - verify_request(request, client_address)
- - process_request(request, client_address)
- - shutdown_request(request)
- - close_request(request)
- - handle_error()
-
- Methods for derived classes:
-
- - finish_request(request, client_address)
-
- Class variables that may be overridden by derived classes or
- instances:
-
- - timeout
- - address_family
- - socket_type
- - request_queue_size (only for stream sockets)
- - allow_reuse_address
-
- Instance variables:
-
- - server_address
- - RequestHandlerClass
- - socket
-
- """
-
- address_family = socket.AF_INET
-
- socket_type = socket.SOCK_STREAM
-
- request_queue_size = 5
-
- allow_reuse_address = False
-
- def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
- """Constructor. May be extended, do not override."""
- BaseServer.__init__(self, server_address, RequestHandlerClass)
- self.socket = socket.socket(self.address_family,
- self.socket_type)
- if bind_and_activate:
- self.server_bind()
- self.server_activate()
-
- def server_bind(self):
- """Called by constructor to bind the socket.
-
- May be overridden.
-
- """
- if self.allow_reuse_address:
- self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- self.socket.bind(self.server_address)
- self.server_address = self.socket.getsockname()
-
- def server_activate(self):
- """Called by constructor to activate the server.
-
- May be overridden.
-
- """
- self.socket.listen(self.request_queue_size)
- # Adding a second call to getsockname() because of this issue
- # http://wiki.python.org/jython/NewSocketModule#Deferredsocketcreationonjython
- self.server_address = self.socket.getsockname()
-
- def server_close(self):
- """Called to clean-up the server.
-
- May be overridden.
-
- """
- self.socket.close()
-
- def fileno(self):
- """Return socket file number.
-
- Interface required by select().
-
- """
- return self.socket.fileno()
-
- def get_request(self):
- """Get the request and client address from the socket.
-
- May be overridden.
-
- """
- return self.socket.accept()
-
- def shutdown_request(self, request):
- """Called to shutdown and close an individual request."""
- try:
- #explicitly shutdown. socket.close() merely releases
- #the socket and waits for GC to perform the actual close.
- request.shutdown(socket.SHUT_WR)
- except socket.error:
- pass #some platforms may raise ENOTCONN here
- self.close_request(request)
-
- def close_request(self, request):
- """Called to clean up an individual request."""
- request.close()
-
-
-class UDPServer(TCPServer):
-
- """UDP server class."""
-
- allow_reuse_address = False
-
- socket_type = socket.SOCK_DGRAM
-
- max_packet_size = 8192
-
- def get_request(self):
- data, client_addr = self.socket.recvfrom(self.max_packet_size)
- return (data, self.socket), client_addr
-
- def server_activate(self):
- # No need to call listen() for UDP.
- pass
-
- def shutdown_request(self, request):
- # No need to shutdown anything.
- self.close_request(request)
-
- def close_request(self, request):
- # No need to close anything.
- pass
-
-class ForkingMixIn:
-
- """Mix-in class to handle each request in a new process."""
-
- timeout = 300
- active_children = None
- max_children = 40
-
- def collect_children(self):
- """Internal routine to wait for children that have exited."""
- if self.active_children is None: return
- while len(self.active_children) >= self.max_children:
- # XXX: This will wait for any child process, not just ones
- # spawned by this library. This could confuse other
- # libraries that expect to be able to wait for their own
- # children.
- try:
- pid, status = os.waitpid(0, 0)
- except os.error:
- pid = None
- if pid not in self.active_children: continue
- self.active_children.remove(pid)
-
- # XXX: This loop runs more system calls than it ought
- # to. There should be a way to put the active_children into a
- # process group and then use os.waitpid(-pgid) to wait for any
- # of that set, but I couldn't find a way to allocate pgids
- # that couldn't collide.
- for child in self.active_children:
- try:
- pid, status = os.waitpid(child, os.WNOHANG)
- except os.error:
- pid = None
- if not pid: continue
- try:
- self.active_children.remove(pid)
- except ValueError, e:
- raise ValueError('%s. x=%d and list=%r' % (e.message, pid,
- self.active_children))
-
- def handle_timeout(self):
- """Wait for zombies after self.timeout seconds of inactivity.
-
- May be extended, do not override.
- """
- self.collect_children()
-
- def process_request(self, request, client_address):
- """Fork a new subprocess to process the request."""
- self.collect_children()
- pid = os.fork()
- if pid:
- # Parent process
- if self.active_children is None:
- self.active_children = []
- self.active_children.append(pid)
- self.close_request(request) #close handle in parent process
- return
- else:
- # Child process.
- # This must never return, hence os._exit()!
- try:
- self.finish_request(request, client_address)
- self.shutdown_request(request)
- os._exit(0)
- except:
- try:
- self.handle_error(request, client_address)
- self.shutdown_request(request)
- finally:
- os._exit(1)
-
-
-class ThreadingMixIn:
- """Mix-in class to handle each request in a new thread."""
-
- # Decides how threads will act upon termination of the
- # main process
- daemon_threads = False
-
- def process_request_thread(self, request, client_address):
- """Same as in BaseServer but as a thread.
-
- In addition, exception handling is done here.
-
- """
- try:
- self.finish_request(request, client_address)
- self.shutdown_request(request)
- except:
- self.handle_error(request, client_address)
- self.shutdown_request(request)
-
- def process_request(self, request, client_address):
- """Start a new thread to process the request."""
- t = threading.Thread(target = self.process_request_thread,
- args = (request, client_address))
- if self.daemon_threads:
- t.setDaemon (1)
- t.start()
-
-
-class ForkingUDPServer(ForkingMixIn, UDPServer): pass
-class ForkingTCPServer(ForkingMixIn, TCPServer): pass
-
-class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
-class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
-
-if hasattr(socket, 'AF_UNIX'):
-
- class UnixStreamServer(TCPServer):
- address_family = socket.AF_UNIX
-
- class UnixDatagramServer(UDPServer):
- address_family = socket.AF_UNIX
-
- class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
-
- class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
-
-class BaseRequestHandler:
-
- """Base class for request handler classes.
-
- This class is instantiated for each request to be handled. The
- constructor sets the instance variables request, client_address
- and server, and then calls the handle() method. To implement a
- specific service, all you need to do is to derive a class which
- defines a handle() method.
-
- The handle() method can find the request as self.request, the
- client address as self.client_address, and the server (in case it
- needs access to per-server information) as self.server. Since a
- separate instance is created for each request, the handle() method
- can define arbitrary other instance variariables.
-
- """
-
- def __init__(self, request, client_address, server):
- self.request = request
- self.client_address = client_address
- self.server = server
- self.setup()
- try:
- self.handle()
- finally:
- self.finish()
-
- def setup(self):
- pass
-
- def handle(self):
- pass
-
- def finish(self):
- pass
-
-
-# The following two classes make it possible to use the same service
-# class for stream or datagram servers.
-# Each class sets up these instance variables:
-# - rfile: a file object from which receives the request is read
-# - wfile: a file object to which the reply is written
-# When the handle() method returns, wfile is flushed properly
-
-
-class StreamRequestHandler(BaseRequestHandler):
-
- """Define self.rfile and self.wfile for stream sockets."""
-
- # Default buffer sizes for rfile, wfile.
- # We default rfile to buffered because otherwise it could be
- # really slow for large data (a getc() call per byte); we make
- # wfile unbuffered because (a) often after a write() we want to
- # read and we need to flush the line; (b) big writes to unbuffered
- # files are typically optimized by stdio even when big reads
- # aren't.
- rbufsize = -1
- wbufsize = 0
-
- # A timeout to apply to the request socket, if not None.
- timeout = None
-
- # Disable nagle algorithm for this socket, if True.
- # Use only when wbufsize != 0, to avoid small packets.
- disable_nagle_algorithm = False
-
- def setup(self):
- self.connection = self.request
- if self.timeout is not None:
- self.connection.settimeout(self.timeout)
- if self.disable_nagle_algorithm:
- self.connection.setsockopt(socket.IPPROTO_TCP,
- socket.TCP_NODELAY, True)
- self.rfile = self.connection.makefile('rb', self.rbufsize)
- self.wfile = self.connection.makefile('wb', self.wbufsize)
-
- def finish(self):
- if not self.wfile.closed:
- self.wfile.flush()
- self.wfile.close()
- self.rfile.close()
-
-
-class DatagramRequestHandler(BaseRequestHandler):
-
- # XXX Regrettably, I cannot get this working on Linux;
- # s.recvfrom() doesn't return a meaningful client address.
-
- """Define self.rfile and self.wfile for datagram sockets."""
-
- def setup(self):
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
- self.packet, self.socket = self.request
- self.rfile = StringIO(self.packet)
- self.wfile = StringIO()
-
- def finish(self):
- self.socket.sendto(self.wfile.getvalue(), self.client_address)
+"""Generic socket server classes.
+
+This module tries to capture the various aspects of defining a server:
+
+For socket-based servers:
+
+- address family:
+ - AF_INET{,6}: IP (Internet Protocol) sockets (default)
+ - AF_UNIX: Unix domain sockets
+  - others, e.g. AF_DECNET are conceivable (see <socket.h>)
+- socket type:
+ - SOCK_STREAM (reliable stream, e.g. TCP)
+ - SOCK_DGRAM (datagrams, e.g. UDP)
+
+For request-based servers (including socket-based):
+
+- client address verification before further looking at the request
+ (This is actually a hook for any processing that needs to look
+ at the request before anything else, e.g. logging)
+- how to handle multiple requests:
+ - synchronous (one request is handled at a time)
+ - forking (each request is handled by a new process)
+ - threading (each request is handled by a new thread)
+
+The classes in this module favor the server type that is simplest to
+write: a synchronous TCP/IP server. This is bad class design, but
+saves some typing.  (There's also the issue that a deep class hierarchy
+slows down method lookups.)
+
+There are five classes in an inheritance diagram, four of which represent
+synchronous servers of four types:
+
+ +------------+
+ | BaseServer |
+ +------------+
+ |
+ v
+ +-----------+ +------------------+
+ | TCPServer |------->| UnixStreamServer |
+ +-----------+ +------------------+
+ |
+ v
+ +-----------+ +--------------------+
+ | UDPServer |------->| UnixDatagramServer |
+ +-----------+ +--------------------+
+
+Note that UnixDatagramServer derives from UDPServer, not from
+UnixStreamServer -- the only difference between an IP and a Unix
+stream server is the address family, which is simply repeated in both
+unix server classes.
+
+Forking and threading versions of each type of server can be created
+using the ForkingMixIn and ThreadingMixIn mix-in classes. For
+instance, a threading UDP server class is created as follows:
+
+ class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+
+The Mix-in class must come first, since it overrides a method defined
+in UDPServer! Setting the various member variables also changes
+the behavior of the underlying server mechanism.
+
+To implement a service, you must derive a class from
+BaseRequestHandler and redefine its handle() method. You can then run
+various versions of the service by combining one of the server classes
+with your request handler class.
+
+The request handler class must be different for datagram or stream
+services. This can be hidden by using the request handler
+subclasses StreamRequestHandler or DatagramRequestHandler.
+
+Of course, you still have to use your head!
+
+For instance, it makes no sense to use a forking server if the service
+contains state in memory that can be modified by requests (since the
+modifications in the child process would never reach the initial state
+kept in the parent process and passed to each child). In this case,
+you can use a threading server, but you will probably have to use
+locks to avoid two requests that come in nearly simultaneously applying
+conflicting changes to the server state.
+
+On the other hand, if you are building e.g. an HTTP server, where all
+data is stored externally (e.g. in the file system), a synchronous
+class will essentially render the service "deaf" while one request is
+being handled -- which may be for a very long time if a client is slow
+to read all the data it has requested. Here a threading or forking
+server is appropriate.
+
+In some cases, it may be appropriate to process part of a request
+synchronously, but to finish processing in a forked child depending on
+the request data. This can be implemented by using a synchronous
+server and doing an explicit fork in the request handler class
+handle() method.
+
+Another approach to handling multiple simultaneous requests in an
+environment that supports neither threads nor fork (or where these are
+too expensive or inappropriate for the service) is to maintain an
+explicit table of partially finished requests and to use select() to
+decide which request to work on next (or whether to handle a new
+incoming request). This is particularly important for stream services
+where each client can potentially be connected for a long time (if
+threads or subprocesses cannot be used).
+
+Future work:
+- Standard classes for Sun RPC (which uses either UDP or TCP)
+- Standard mix-in classes to implement various authentication
+ and encryption schemes
+- Standard framework for select-based multiplexing
+
+XXX Open problems:
+- What to do with out-of-band data?
+
+BaseServer:
+- split generic "request" functionality out into BaseServer class.
+ Copyright (C) 2000 Luke Kenneth Casson Leighton <lkcl at samba.org>
+
+ example: read entries from a SQL database (requires overriding
+ get_request() to return a table entry from the database).
+ entry is processed by a RequestHandlerClass.
+
+"""
+
+# Author of the BaseServer patch: Luke Kenneth Casson Leighton
+
+# XXX Warning!
+# There is a test suite for this module, but it cannot be run by the
+# standard regression test.
+# To run it manually, run Lib/test/test_socketserver.py.
+
+__version__ = "0.4"
+
+
+import socket
+import select
+import sys
+import os
+import errno
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
+ "ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
+ "StreamRequestHandler","DatagramRequestHandler",
+ "ThreadingMixIn", "ForkingMixIn"]
+if hasattr(socket, "AF_UNIX"):
+ __all__.extend(["UnixStreamServer","UnixDatagramServer",
+ "ThreadingUnixStreamServer",
+ "ThreadingUnixDatagramServer"])
+
+def _eintr_retry(func, *args):
+ """restart a system call interrupted by EINTR"""
+ while True:
+ try:
+ return func(*args)
+ except (OSError, select.error) as e:
+ if e.args[0] != errno.EINTR:
+ raise
+
+class BaseServer:
+
+ """Base class for server classes.
+
+ Methods for the caller:
+
+ - __init__(server_address, RequestHandlerClass)
+ - serve_forever(poll_interval=0.5)
+ - shutdown()
+ - handle_request() # if you do not use serve_forever()
+ - fileno() -> int # for select()
+
+ Methods that may be overridden:
+
+ - server_bind()
+ - server_activate()
+ - get_request() -> request, client_address
+ - handle_timeout()
+ - verify_request(request, client_address)
+ - server_close()
+ - process_request(request, client_address)
+ - shutdown_request(request)
+ - close_request(request)
+ - handle_error()
+
+ Methods for derived classes:
+
+ - finish_request(request, client_address)
+
+ Class variables that may be overridden by derived classes or
+ instances:
+
+ - timeout
+ - address_family
+ - socket_type
+ - allow_reuse_address
+
+ Instance variables:
+
+ - RequestHandlerClass
+ - socket
+
+ """
+
+ timeout = None
+
+ def __init__(self, server_address, RequestHandlerClass):
+ """Constructor. May be extended, do not override."""
+ self.server_address = server_address
+ self.RequestHandlerClass = RequestHandlerClass
+ self.__is_shut_down = threading.Event()
+ self.__shutdown_request = False
+
+ def server_activate(self):
+ """Called by constructor to activate the server.
+
+ May be overridden.
+
+ """
+ pass
+
+ def serve_forever(self, poll_interval=0.5):
+ """Handle one request at a time until shutdown.
+
+ Polls for shutdown every poll_interval seconds. Ignores
+ self.timeout. If you need to do periodic tasks, do them in
+ another thread.
+ """
+ self.__is_shut_down.clear()
+ try:
+ while not self.__shutdown_request:
+ # XXX: Consider using another file descriptor or
+ # connecting to the socket to wake this up instead of
+ # polling. Polling reduces our responsiveness to a
+ # shutdown request and wastes cpu at all other times.
+ r, w, e = _eintr_retry(select.select, [self], [], [],
+ poll_interval)
+ if self in r:
+ self._handle_request_noblock()
+ finally:
+ self.__shutdown_request = False
+ self.__is_shut_down.set()
+
+ def shutdown(self):
+ """Stops the serve_forever loop.
+
+ Blocks until the loop has finished. This must be called while
+ serve_forever() is running in another thread, or it will
+ deadlock.
+ """
+ self.__shutdown_request = True
+ self.__is_shut_down.wait()
+
+ # The distinction between handling, getting, processing and
+ # finishing a request is fairly arbitrary. Remember:
+ #
+ # - handle_request() is the top-level call. It calls
+ # select, get_request(), verify_request() and process_request()
+ # - get_request() is different for stream or datagram sockets
+ # - process_request() is the place that may fork a new process
+ # or create a new thread to finish the request
+ # - finish_request() instantiates the request handler class;
+ # this constructor will handle the request all by itself
+
+ def handle_request(self):
+ """Handle one request, possibly blocking.
+
+ Respects self.timeout.
+ """
+ # Support people who used socket.settimeout() to escape
+ # handle_request before self.timeout was available.
+ timeout = self.socket.gettimeout()
+ if timeout is None:
+ timeout = self.timeout
+ elif self.timeout is not None:
+ timeout = min(timeout, self.timeout)
+ fd_sets = _eintr_retry(select.select, [self], [], [], timeout)
+ if not fd_sets[0]:
+ self.handle_timeout()
+ return
+ self._handle_request_noblock()
+
+ def _handle_request_noblock(self):
+ """Handle one request, without blocking.
+
+ I assume that select.select has returned that the socket is
+ readable before this function was called, so there should be
+ no risk of blocking in get_request().
+ """
+ try:
+ request, client_address = self.get_request()
+ except socket.error:
+ return
+ if self.verify_request(request, client_address):
+ try:
+ self.process_request(request, client_address)
+ except:
+ self.handle_error(request, client_address)
+ self.shutdown_request(request)
+
+ def handle_timeout(self):
+ """Called if no new request arrives within self.timeout.
+
+ Overridden by ForkingMixIn.
+ """
+ pass
+
+ def verify_request(self, request, client_address):
+ """Verify the request. May be overridden.
+
+ Return True if we should proceed with this request.
+
+ """
+ return True
+
+ def process_request(self, request, client_address):
+ """Call finish_request.
+
+ Overridden by ForkingMixIn and ThreadingMixIn.
+
+ """
+ self.finish_request(request, client_address)
+ self.shutdown_request(request)
+
+ def server_close(self):
+ """Called to clean-up the server.
+
+ May be overridden.
+
+ """
+ pass
+
+ def finish_request(self, request, client_address):
+ """Finish one request by instantiating RequestHandlerClass."""
+ self.RequestHandlerClass(request, client_address, self)
+
+ def shutdown_request(self, request):
+ """Called to shutdown and close an individual request."""
+ self.close_request(request)
+
+ def close_request(self, request):
+ """Called to clean up an individual request."""
+ pass
+
+ def handle_error(self, request, client_address):
+ """Handle an error gracefully. May be overridden.
+
+ The default is to print a traceback and continue.
+
+ """
+ print '-'*40
+ print 'Exception happened during processing of request from',
+ print client_address
+ import traceback
+ traceback.print_exc() # XXX But this goes to stderr!
+ print '-'*40
+
+
+class TCPServer(BaseServer):
+
+ """Base class for various socket-based server classes.
+
+ Defaults to synchronous IP stream (i.e., TCP).
+
+ Methods for the caller:
+
+ - __init__(server_address, RequestHandlerClass, bind_and_activate=True)
+ - serve_forever(poll_interval=0.5)
+ - shutdown()
+ - handle_request() # if you don't use serve_forever()
+ - fileno() -> int # for select()
+
+ Methods that may be overridden:
+
+ - server_bind()
+ - server_activate()
+ - get_request() -> request, client_address
+ - handle_timeout()
+ - verify_request(request, client_address)
+ - process_request(request, client_address)
+ - shutdown_request(request)
+ - close_request(request)
+ - handle_error()
+
+ Methods for derived classes:
+
+ - finish_request(request, client_address)
+
+ Class variables that may be overridden by derived classes or
+ instances:
+
+ - timeout
+ - address_family
+ - socket_type
+ - request_queue_size (only for stream sockets)
+ - allow_reuse_address
+
+ Instance variables:
+
+ - server_address
+ - RequestHandlerClass
+ - socket
+
+ """
+
+ address_family = socket.AF_INET
+
+ socket_type = socket.SOCK_STREAM
+
+ request_queue_size = 5
+
+ allow_reuse_address = False
+
+ def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
+ """Constructor. May be extended, do not override."""
+ BaseServer.__init__(self, server_address, RequestHandlerClass)
+ self.socket = socket.socket(self.address_family,
+ self.socket_type)
+ if bind_and_activate:
+ self.server_bind()
+ self.server_activate()
+
+ def server_bind(self):
+ """Called by constructor to bind the socket.
+
+ May be overridden.
+
+ """
+ if self.allow_reuse_address:
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.socket.bind(self.server_address)
+ try:
+ self.server_address = self.socket.getsockname()
+ except socket.error:
+ # Jython may raise ENOTCONN here;
+ # we will pick up again in server_activate
+ pass
+
+ def server_activate(self):
+ """Called by constructor to activate the server.
+
+ May be overridden.
+
+ """
+ self.socket.listen(self.request_queue_size)
+ # Adding a second call to getsockname() because of this issue
+ # http://wiki.python.org/jython/NewSocketModule#Deferredsocketcreationonjython
+ self.server_address = self.socket.getsockname()
+
+ def server_close(self):
+ """Called to clean-up the server.
+
+ May be overridden.
+
+ """
+ self.socket.close()
+
+ def fileno(self):
+ """Return socket file number.
+
+ Interface required by select().
+
+ """
+ return self.socket.fileno()
+
+ def get_request(self):
+ """Get the request and client address from the socket.
+
+ May be overridden.
+
+ """
+ return self.socket.accept()
+
+ def shutdown_request(self, request):
+ """Called to shutdown and close an individual request."""
+ try:
+ #explicitly shutdown. socket.close() merely releases
+ #the socket and waits for GC to perform the actual close.
+ request.shutdown(socket.SHUT_WR)
+ except socket.error:
+ pass #some platforms may raise ENOTCONN here
+ self.close_request(request)
+
+ def close_request(self, request):
+ """Called to clean up an individual request."""
+ request.close()
+
+
+class UDPServer(TCPServer):
+
+ """UDP server class."""
+
+ allow_reuse_address = False
+
+ socket_type = socket.SOCK_DGRAM
+
+ max_packet_size = 8192
+
+ def get_request(self):
+ data, client_addr = self.socket.recvfrom(self.max_packet_size)
+ return (data, self.socket), client_addr
+
+ def server_activate(self):
+ # No need to call listen() for UDP.
+ pass
+
+ def shutdown_request(self, request):
+ # No need to shutdown anything.
+ self.close_request(request)
+
+ def close_request(self, request):
+ # No need to close anything.
+ pass
+
+class ForkingMixIn:
+
+ """Mix-in class to handle each request in a new process."""
+
+ timeout = 300
+ active_children = None
+ max_children = 40
+
+ def collect_children(self):
+ """Internal routine to wait for children that have exited."""
+ if self.active_children is None: return
+ while len(self.active_children) >= self.max_children:
+ # XXX: This will wait for any child process, not just ones
+ # spawned by this library. This could confuse other
+ # libraries that expect to be able to wait for their own
+ # children.
+ try:
+ pid, status = os.waitpid(0, 0)
+ except os.error:
+ pid = None
+ if pid not in self.active_children: continue
+ self.active_children.remove(pid)
+
+ # XXX: This loop runs more system calls than it ought
+ # to. There should be a way to put the active_children into a
+ # process group and then use os.waitpid(-pgid) to wait for any
+ # of that set, but I couldn't find a way to allocate pgids
+ # that couldn't collide.
+ for child in self.active_children:
+ try:
+ pid, status = os.waitpid(child, os.WNOHANG)
+ except os.error:
+ pid = None
+ if not pid: continue
+ try:
+ self.active_children.remove(pid)
+ except ValueError, e:
+ raise ValueError('%s. x=%d and list=%r' % (e.message, pid,
+ self.active_children))
+
+ def handle_timeout(self):
+ """Wait for zombies after self.timeout seconds of inactivity.
+
+ May be extended, do not override.
+ """
+ self.collect_children()
+
+ def process_request(self, request, client_address):
+ """Fork a new subprocess to process the request."""
+ self.collect_children()
+ pid = os.fork()
+ if pid:
+ # Parent process
+ if self.active_children is None:
+ self.active_children = []
+ self.active_children.append(pid)
+ self.close_request(request) #close handle in parent process
+ return
+ else:
+ # Child process.
+ # This must never return, hence os._exit()!
+ try:
+ self.finish_request(request, client_address)
+ self.shutdown_request(request)
+ os._exit(0)
+ except:
+ try:
+ self.handle_error(request, client_address)
+ self.shutdown_request(request)
+ finally:
+ os._exit(1)
+
+
+class ThreadingMixIn:
+ """Mix-in class to handle each request in a new thread."""
+
+ # Decides how threads will act upon termination of the
+ # main process
+ daemon_threads = False
+
+ def process_request_thread(self, request, client_address):
+ """Same as in BaseServer but as a thread.
+
+ In addition, exception handling is done here.
+
+ """
+ try:
+ self.finish_request(request, client_address)
+ self.shutdown_request(request)
+ except:
+ self.handle_error(request, client_address)
+ self.shutdown_request(request)
+
+ def process_request(self, request, client_address):
+ """Start a new thread to process the request."""
+ t = threading.Thread(target = self.process_request_thread,
+ args = (request, client_address))
+ t.daemon = self.daemon_threads
+ t.start()
+
+
+class ForkingUDPServer(ForkingMixIn, UDPServer): pass
+class ForkingTCPServer(ForkingMixIn, TCPServer): pass
+
+class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
+
+if hasattr(socket, 'AF_UNIX'):
+
+ class UnixStreamServer(TCPServer):
+ address_family = socket.AF_UNIX
+
+ class UnixDatagramServer(UDPServer):
+ address_family = socket.AF_UNIX
+
+ class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
+
+ class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
+
+class BaseRequestHandler:
+
+ """Base class for request handler classes.
+
+ This class is instantiated for each request to be handled. The
+ constructor sets the instance variables request, client_address
+ and server, and then calls the handle() method. To implement a
+ specific service, all you need to do is to derive a class which
+ defines a handle() method.
+
+ The handle() method can find the request as self.request, the
+ client address as self.client_address, and the server (in case it
+ needs access to per-server information) as self.server. Since a
+ separate instance is created for each request, the handle() method
+    can define arbitrary other instance variables.
+
+ """
+
+ def __init__(self, request, client_address, server):
+ self.request = request
+ self.client_address = client_address
+ self.server = server
+ self.setup()
+ try:
+ self.handle()
+ finally:
+ self.finish()
+
+ def setup(self):
+ pass
+
+ def handle(self):
+ pass
+
+ def finish(self):
+ pass
+
+
+# The following two classes make it possible to use the same service
+# class for stream or datagram servers.
+# Each class sets up these instance variables:
+# - rfile: a file object from which the request is read
+# - wfile: a file object to which the reply is written
+# When the handle() method returns, wfile is flushed properly
+
+
+class StreamRequestHandler(BaseRequestHandler):
+
+ """Define self.rfile and self.wfile for stream sockets."""
+
+ # Default buffer sizes for rfile, wfile.
+ # We default rfile to buffered because otherwise it could be
+ # really slow for large data (a getc() call per byte); we make
+ # wfile unbuffered because (a) often after a write() we want to
+ # read and we need to flush the line; (b) big writes to unbuffered
+ # files are typically optimized by stdio even when big reads
+ # aren't.
+ rbufsize = -1
+ wbufsize = 0
+
+ # A timeout to apply to the request socket, if not None.
+ timeout = None
+
+ # Disable nagle algorithm for this socket, if True.
+ # Use only when wbufsize != 0, to avoid small packets.
+ disable_nagle_algorithm = False
+
+ def setup(self):
+ self.connection = self.request
+ if self.timeout is not None:
+ self.connection.settimeout(self.timeout)
+ if self.disable_nagle_algorithm:
+ self.connection.setsockopt(socket.IPPROTO_TCP,
+ socket.TCP_NODELAY, True)
+ self.rfile = self.connection.makefile('rb', self.rbufsize)
+ self.wfile = self.connection.makefile('wb', self.wbufsize)
+
+ def finish(self):
+ if not self.wfile.closed:
+ try:
+ self.wfile.flush()
+ except socket.error:
+                # A final socket error may have occurred here, such as
+ # the local error ECONNABORTED.
+ pass
+ self.wfile.close()
+ self.rfile.close()
+
+
+class DatagramRequestHandler(BaseRequestHandler):
+
+ # XXX Regrettably, I cannot get this working on Linux;
+ # s.recvfrom() doesn't return a meaningful client address.
+
+ """Define self.rfile and self.wfile for datagram sockets."""
+
+ def setup(self):
+ try:
+ from cStringIO import StringIO
+ except ImportError:
+ from StringIO import StringIO
+ self.packet, self.socket = self.request
+ self.rfile = StringIO(self.packet)
+ self.wfile = StringIO()
+
+ def finish(self):
+ self.socket.sendto(self.wfile.getvalue(), self.client_address)
diff --git a/Lib/_socket.py b/Lib/_socket.py
new file mode 100644
--- /dev/null
+++ b/Lib/_socket.py
@@ -0,0 +1,2059 @@
+import array
+import encodings.idna
+import errno
+import jarray
+import logging
+import pprint
+import sys
+import time
+import _google_ipaddr_r234
+from collections import namedtuple, Sequence
+from contextlib import contextmanager
+from functools import partial, wraps
+from itertools import chain
+from numbers import Number
+from StringIO import StringIO
+from threading import Condition, Lock
+from types import MethodType, NoneType
+
+import java
+from java.io import IOException, InterruptedIOException
+from java.lang import Thread
+from java.net import InetAddress, InetSocketAddress
+from java.nio.channels import ClosedChannelException
+from java.util import NoSuchElementException
+from java.util.concurrent import (
+ ArrayBlockingQueue, CopyOnWriteArrayList, CountDownLatch, LinkedBlockingQueue,
+ RejectedExecutionException, ThreadFactory, TimeUnit)
+from java.util.concurrent.atomic import AtomicBoolean
+from javax.net.ssl import SSLPeerUnverifiedException
+
+try:
+ # jarjar-ed version
+ from org.python.netty.bootstrap import Bootstrap, ChannelFactory, ServerBootstrap
+ from org.python.netty.buffer import PooledByteBufAllocator, Unpooled
+ from org.python.netty.channel import ChannelInboundHandlerAdapter, ChannelInitializer, ChannelOption
+ from org.python.netty.channel.nio import NioEventLoopGroup
+ from org.python.netty.channel.socket import DatagramPacket
+ from org.python.netty.channel.socket.nio import NioDatagramChannel, NioSocketChannel, NioServerSocketChannel
+except ImportError:
+ # dev version from extlibs
+ from io.netty.bootstrap import Bootstrap, ChannelFactory, ServerBootstrap
+ from io.netty.buffer import PooledByteBufAllocator, Unpooled
+ from io.netty.channel import ChannelInboundHandlerAdapter, ChannelInitializer, ChannelOption
+ from io.netty.channel.nio import NioEventLoopGroup
+ from io.netty.channel.socket import DatagramPacket
+ from io.netty.channel.socket.nio import NioDatagramChannel, NioSocketChannel, NioServerSocketChannel
+
+
log = logging.getLogger("_socket")


def _debug():
    """Turn on verbose DEBUG logging for this module's socket tracing."""
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)-15s %(threadName)s %(levelname)s %(funcName)s %(message)s %(sock)s')
+
+
# Constants
###########

has_ipv6 = True  # IPV6 FTW!
_GLOBAL_DEFAULT_TIMEOUT = object()
_EPHEMERAL_ADDRESS = InetSocketAddress(0)
_BOUND_EPHEMERAL_ADDRESS = object()

# FIXME most constants should come from JNR if possible; they may be
# arbitrary for the implementation of socket/ssl/select purposes, but
# some misbehaved code may want to use the arbitrary numbers

SHUT_RD = 0
SHUT_WR = 1
SHUT_RDWR = 2

AF_UNSPEC = 0
AF_INET = 2
AF_INET6 = 23

AI_PASSIVE = 1
AI_CANONNAME = 2
AI_NUMERICHOST = 4
AI_V4MAPPED = 8
AI_ALL = 16
AI_ADDRCONFIG = 32
AI_NUMERICSERV = 1024

EAI_NONAME = -2
EAI_SERVICE = -8
EAI_ADDRFAMILY = -9

NI_NUMERICHOST = 1
NI_NUMERICSERV = 2
NI_NOFQDN = 4
NI_NAMEREQD = 8
NI_DGRAM = 16
NI_MAXSERV = 32
NI_IDN = 64
NI_IDN_ALLOW_UNASSIGNED = 128
NI_IDN_USE_STD3_ASCII_RULES = 256
NI_MAXHOST = 1025

# NOTE(review): SOCK_DGRAM/SOCK_STREAM values are swapped relative to
# most C platforms (where SOCK_STREAM == 1); internal use only is fine,
# but code comparing against hard-coded platform numbers would misbehave.
SOCK_DGRAM = 1
SOCK_STREAM = 2
SOCK_RAW = 3  # not supported
SOCK_RDM = 4  # not supported
SOCK_SEQPACKET = 5  # not supported

SOL_SOCKET = 0xFFFF

IPPROTO_AH = 51  # not supported
IPPROTO_DSTOPTS = 60  # not supported
IPPROTO_ESP = 50  # not supported
IPPROTO_FRAGMENT = 44  # not supported
IPPROTO_GGP = 3  # not supported
IPPROTO_HOPOPTS = 0  # not supported
IPPROTO_ICMP = 1  # not supported
IPPROTO_ICMPV6 = 58  # not supported
IPPROTO_IDP = 22  # not supported
IPPROTO_IGMP = 2  # not supported
IPPROTO_IP = 0
IPPROTO_IPV4 = 4  # not supported
IPPROTO_IPV6 = 41  # not supported
IPPROTO_MAX = 256  # not supported
IPPROTO_ND = 77  # not supported
IPPROTO_NONE = 59  # not supported
IPPROTO_PUP = 12  # not supported
IPPROTO_RAW = 255  # not supported
IPPROTO_ROUTING = 43  # not supported
IPPROTO_TCP = 6
IPPROTO_UDP = 17

# Socket option numbers are implementation-arbitrary bit flags here,
# not the platform's C values.
SO_ACCEPTCONN = 1
SO_BROADCAST = 2
SO_ERROR = 4
SO_KEEPALIVE = 8
SO_LINGER = 16
SO_OOBINLINE = 32
SO_RCVBUF = 64
SO_REUSEADDR = 128
SO_SNDBUF = 256
SO_TIMEOUT = 512
SO_TYPE = 1024

# Options with negative constants are not supported
# They are being added here so that code that refers to them
# will not break with an AttributeError

SO_DEBUG = -1
SO_DONTROUTE = -1
SO_EXCLUSIVEADDRUSE = -8
SO_RCVLOWAT = -16
SO_RCVTIMEO = -32
SO_REUSEPORT = -64
SO_SNDLOWAT = -128
SO_SNDTIMEO = -256
SO_USELOOPBACK = -512

TCP_NODELAY = 2048

INADDR_ANY = "0.0.0.0"
INADDR_BROADCAST = "255.255.255.255"

IN6ADDR_ANY_INIT = "::"

POLLIN = 1
POLLOUT = 2
POLLPRI = 4  # Ignored - not supportable on Java
POLLERR = 8
POLLHUP = 16
POLLNVAL = 32  # Polled when not open - no Netty channel


# Specific constants for socket-reboot:

# Keep the highest possible precision for converting from Python's use
# of floating point for durations to Java's use of both a long
# duration and a specific unit, in this case TimeUnit.NANOSECONDS
_TO_NANOSECONDS = 1000000000

# Sentinel queued on a socket's incoming queue when the peer closes.
_PEER_CLOSED = object()
+
+
+# Event loop management
+#######################
+
+# Use daemon threads for the event loop group. This is just fine
+# because these threads only handle ephemeral data, such as performing
+# SSL wrap/unwrap.
+
class DaemonThreadFactory(ThreadFactory):
    """ThreadFactory producing daemon threads so the pool never pins the JVM."""

    def newThread(self, runnable):
        t = Thread(runnable)
        t.daemon = True
        return t


# This number should be configurable by the user. 10 is the default
# number as of 4.0.17 of Netty. FIXME this default may be based on core count.

# Shared event loop group used by all client/datagram bootstraps in this module.
NIO_GROUP = NioEventLoopGroup(10, DaemonThreadFactory())
+
def _check_threadpool_for_pending_threads():
    """Return [(event_loop, pending_task_count), ...] for busy loops in NIO_GROUP.

    Diagnostic helper: iterates the group's child event loops and collects
    those that still have pending tasks queued.
    """
    pending_threads = []
    for t in NIO_GROUP:
        pending_count = t.pendingTasks()
        if pending_count > 0:
            pending_threads.append((t, pending_count))
    log.debug("Pending threads in Netty pool: %s", pprint.pformat(pending_threads), extra={"sock": "*"})
    return pending_threads
+
+
def _shutdown_threadpool():
    """Gracefully shut down the shared Netty event loop group.

    Registered below as an interpreter closer so the daemon pool is torn
    down on normal exit or SIGTERM.
    """
    log.debug("Shutting down thread pool...", extra={"sock": "*"})
    # FIXME this timeout probably should be configurable; for client
    # usage that have completed this probably only produces scary
    # messages at worst, but TBD; in particular this may because we
    # are seeing closes both in SSL and at the socket level

    NIO_GROUP.shutdownGracefully(0, 100, TimeUnit.MILLISECONDS)
    log.debug("Shut down thread pool", extra={"sock": "*"})

# Ensure deallocation of thread pool if PySystemState.cleanup is
# called; this includes in the event of sigterm

sys.registerCloser(_shutdown_threadpool)
+
+
+# Error management
+##################
+
# Exception hierarchy mirrors CPython's socket/ssl modules: everything
# roots at `error` so `except socket.error` catches the lot.
class error(IOError): pass
class herror(error): pass
class gaierror(error): pass
class timeout(error): pass
class SSLError(error): pass

# ssl-module error codes, mirroring CPython's ssl.SSL_ERROR_* values.
SSL_ERROR_SSL = 1
SSL_ERROR_WANT_READ = 2
SSL_ERROR_WANT_WRITE = 3
SSL_ERROR_WANT_X509_LOOKUP = 4
SSL_ERROR_SYSCALL = 5
SSL_ERROR_ZERO_RETURN = 6
SSL_ERROR_WANT_CONNECT = 7
SSL_ERROR_EOF = 8
SSL_ERROR_INVALID_ERROR_CODE = 9
+
+
+def _add_exception_attrs(exc):
+ exc.errno = exc[0]
+ exc.strerror = exc[1]
+ return exc
+
+
def _unmapped_exception(exc):
    """Wrap a Java exception that has no specific Python mapping."""
    wrapped = error(-1, 'Unmapped exception: %s' % exc)
    return _add_exception_attrs(wrapped)
+
+
def java_net_socketexception_handler(exc):
    """Map a java.net.SocketException to a Python error by message sniffing.

    Only the "wrong address family" message gets a specific errno; every
    other SocketException falls through to the generic unmapped wrapper.
    """
    if exc.message.startswith("Address family not supported by protocol family"):
        return _add_exception_attrs(
            error(errno.EAFNOSUPPORT,
                  'Address family not supported by protocol family: See http://wiki.python.org/jython/NewSocketModule#IPV6_address_support'))
    return _unmapped_exception(exc)
+
+
def would_block_error(exc=None):
    """Build the EWOULDBLOCK error raised for non-blocking operations.

    ``exc`` is accepted (and ignored) so callers can pass the originating
    Java exception the way the other handler factories do.
    """
    err = error(errno.EWOULDBLOCK,
                'The socket operation could not complete without blocking')
    return _add_exception_attrs(err)
+
+
# Translation table from Java exception classes to factories producing
# the equivalent Python socket exception; a None value means "known but
# unmapped" and falls through to the generic wrapper in _map_exception.
_exception_map = {

    # javaexception : callable that raises the python equivalent exception, or None to stub out as unmapped

    IOException : lambda x: error(errno.ECONNRESET, 'Software caused connection abort'),
    InterruptedIOException : lambda x: timeout(None, 'timed out'),

    java.net.BindException : lambda x: error(errno.EADDRINUSE, 'Address already in use'),
    java.net.ConnectException : lambda x: error(errno.ECONNREFUSED, 'Connection refused'),
    java.net.NoRouteToHostException : lambda x: error(errno.EHOSTUNREACH, 'No route to host'),
    java.net.PortUnreachableException : None,
    java.net.ProtocolException : None,
    java.net.SocketException : java_net_socketexception_handler,
    java.net.SocketTimeoutException : lambda x: timeout(None, 'timed out'),
    java.net.UnknownHostException : lambda x: gaierror(errno.EGETADDRINFOFAILED, 'getaddrinfo failed'),

    java.nio.channels.AlreadyConnectedException : lambda x: error(errno.EISCONN, 'Socket is already connected'),
    java.nio.channels.AsynchronousCloseException : None,
    java.nio.channels.CancelledKeyException : None,
    java.nio.channels.ClosedByInterruptException : None,
    java.nio.channels.ClosedChannelException : lambda x: error(errno.ECONNRESET, 'Socket closed'),
    java.nio.channels.ClosedSelectorException : None,
    java.nio.channels.ConnectionPendingException : None,
    java.nio.channels.IllegalBlockingModeException : None,
    java.nio.channels.IllegalSelectorException : None,
    java.nio.channels.NoConnectionPendingException : None,
    java.nio.channels.NonReadableChannelException : None,
    java.nio.channels.NonWritableChannelException : None,
    java.nio.channels.NotYetBoundException : None,
    java.nio.channels.NotYetConnectedException : None,
    java.nio.channels.UnresolvedAddressException : lambda x: gaierror(errno.EGETADDRINFOFAILED, 'getaddrinfo failed'),
    java.nio.channels.UnsupportedAddressTypeException : None,

    SSLPeerUnverifiedException: lambda x: SSLError(SSL_ERROR_SSL, "FIXME"),
}
+
+
def _map_exception(java_exception):
    """Translate a Java exception into the closest Python socket exception.

    Exact-class lookup only (no inheritance walk); unmapped classes get a
    generic error(-1, ...).  The Java original is kept on the result for
    debugging, and errno/strerror attributes are attached.
    """
    translate = _exception_map.get(java_exception.__class__)
    py_exception = (translate(java_exception) if translate
                    else error(-1, 'Unmapped exception: %s' % java_exception))
    py_exception.java_exception = java_exception
    return _add_exception_attrs(py_exception)
+
+
def raises_java_exception(method_or_function):
    """Maps java socket exceptions to the equivalent python exception.
    Also sets _last_error on socket objects so as to support SO_ERROR.
    """

    @wraps(method_or_function)
    def handle_exception(*args, **kwargs):
        # A bound socket method's first positional arg is the socket itself.
        is_socket = len(args) > 0 and isinstance(args[0], _realsocket)
        try:
            try:
                result = method_or_function(*args, **kwargs)
            except java.lang.Exception as jlx:
                raise _map_exception(jlx)
        except error as e:
            if is_socket:
                # Record the errno so getsockopt(SO_ERROR) can report it.
                args[0]._last_error = e[0]
            raise
        # BUG FIX: this reset used to live in an ``else:`` clause on the
        # try statement, but a try suite that exits via ``return`` skips
        # its ``else`` clause -- so a successful call never cleared the
        # error previously recorded for SO_ERROR.  Clear it explicitly.
        if is_socket:
            args[0]._last_error = 0
        return result
    return handle_exception
+
+
+# select support
+################
+
class _Select(object):

    """Implements select() by level-testing sockets under a condition variable.

    Registered sockets call ``notify`` on any state change, which wakes
    the selecting thread; the thread then re-tests actual readiness, so
    spurious wakeups and pre-ready sockets are both handled.
    """

    def __init__(self, rlist, wlist, xlist):
        self.cv = Condition()
        self.rlist = frozenset(rlist)
        self.wlist = frozenset(wlist)
        self.xlist = frozenset(xlist)

    def _normalize_sockets(self, socks):
        # Get underlying socket, via fileno lookup
        _socks = []
        for sock in socks:
            try:
                _sock = sock.fileno()
                _sock._register_selector  # double check our API requirements
                _socks.append(_sock)
            except AttributeError:
                raise error(errno.EBADF, "Bad file descriptor: %s" % (sock,))
        return _socks

    def notify(self, sock, **_):
        # Wake the selecting thread; readiness is re-tested over there.
        with self.cv:
            self.cv.notify()

    def __str__(self):
        return "_Select(r={},w={},x={})".format(list(self.rlist), list(self.wlist), list(self.xlist))

    @contextmanager
    def _register_sockets(self, socks):
        # Register for notifications for the duration of the select call.
        # NOTE(review): sockets are not unregistered if the body raises;
        # presumably acceptable for current callers -- confirm before reuse.
        socks = self._normalize_sockets(socks)
        for sock in socks:
            sock._register_selector(self)
        yield self
        for sock in socks:
            sock._unregister_selector(self)

    def __call__(self, timeout):
        started = time.time()
        with self.cv, self._register_sockets(chain(self.rlist, self.wlist, self.xlist)):
            while True:
                # Checking if sockets are ready (readable OR writable)
                # converts selection from detecting edges to detecting levels
                selected_rlist = set(sock for sock in self.rlist if sock.fileno()._readable())
                selected_wlist = set(sock for sock in self.wlist if sock.fileno()._writable())
                # FIXME add support for exceptions
                selected_xlist = []

                # As usual with condition variables, we need to ensure
                # there's not a spurious wakeup; this test also ensures
                # shortcircuiting if the socket was in fact ready for
                # reading/writing/exception before the select call
                if selected_rlist or selected_wlist:
                    return sorted(selected_rlist), sorted(selected_wlist), sorted(selected_xlist)
                elif timeout is not None and time.time() - started >= timeout:
                    return [], [], []
                self.cv.wait(timeout)
+
+
+# poll support
+##############
+
_PollNotification = namedtuple("_PollNotification", ["sock", "fd", "exception", "hangup"])


class poll(object):

    """select.poll work-alike driven by a socket notification queue.

    Registered sockets push a _PollNotification whenever something *may*
    have changed; poll() then level-tests each notifying socket against
    its registered event mask.
    """

    def __init__(self):
        self.queue = LinkedBlockingQueue()
        self.registered = dict()  # fd -> eventmask

    def notify(self, sock, exception=None, hangup=False):
        # Called by sockets (their selector hook) on any state change.
        notification = _PollNotification(
            sock=sock,
            fd=sock.fileno(),
            exception=exception,
            hangup=hangup)
        self.queue.put(notification)

    def register(self, fd, eventmask=POLLIN|POLLPRI|POLLOUT):
        self.registered[fd] = eventmask
        # NOTE in case fd != sock in a future release, modifiy accordingly
        sock = fd
        sock._register_selector(self)
        self.notify(sock)  # Ensure we get an initial notification

    def modify(self, fd, eventmask):
        if fd not in self.registered:
            raise error(errno.ENOENT, "No such file or directory")
        self.registered[fd] = eventmask

    def unregister(self, fd):
        del self.registered[fd]
        sock = fd
        sock._unregister_selector(self)

    def _event_test(self, notification):
        # Performs standard edge vs event polling, except that we get
        # edges around errors and hangup
        if notification is None:
            return None, 0
        mask = self.registered.get(notification.sock, 0)  # handle if concurrently removed, by simply ignoring
        event = 0
        if mask & POLLIN and notification.sock._readable():
            event |= POLLIN
        if mask & POLLOUT and notification.sock._writable():
            event |= POLLOUT
        if mask & POLLERR and notification.exception:
            event |= POLLERR
        if mask & POLLHUP and (notification.hangup or not notification.sock.channel):
            event |= POLLHUP
        if mask & POLLNVAL and not notification.sock.peer_closed:
            event |= POLLNVAL
        return notification.fd, event

    def poll(self, timeout=None):
        """Wait up to ``timeout`` ms; return a list of (fd, event) pairs.

        Follows select.poll semantics: None or a negative timeout blocks
        indefinitely, zero does a single non-blocking check.  (BUG FIX:
        the previous ``if not timeout`` test treated 0 like None and
        blocked forever on an empty queue.)
        """
        if timeout is None or timeout < 0:
            timeout = None  # block until a notification arrives
        else:
            timeout /= 1000.  # convert from milliseconds to seconds

        while True:
            expired = False
            if timeout is None:
                notification = self.queue.take()
            elif timeout > 0:
                started = time.time()
                timeout_in_ns = int(timeout * _TO_NANOSECONDS)
                notification = self.queue.poll(timeout_in_ns, TimeUnit.NANOSECONDS)
                # Need to reset the timeout, because this notification
                # may not be of interest when masked out
                timeout = timeout - (time.time() - started)
            else:
                # Zero or exhausted timeout: one final non-blocking look.
                notification = self.queue.poll()
                expired = True

            if notification is None:
                if expired:
                    return []
                continue

            # Pull as many outstanding notifications as possible out
            # of the queue
            notifications = [notification]
            self.queue.drainTo(notifications)
            log.debug("Got notification(s) %s", notifications, extra={"sock": "MODULE"})
            result = []
            socks = set()

            # But given how we notify, it's possible to see possible
            # multiple notifications. Just return one (fd, event) for a
            # given socket
            for notification in notifications:
                if notification.sock not in socks:
                    fd, event = self._event_test(notification)
                    if event:
                        result.append((fd, event))
                    socks.add(notification.sock)
            # Repump sockets to pick up a subsequent level change
            for sock in socks:
                self.notify(sock)
            if result or expired:
                return result
+
+
+# integration with Netty
+########################
+
class PythonInboundHandler(ChannelInboundHandlerAdapter):

    """Netty inbound handler bridging channel events to a _realsocket.

    Read payloads go onto the socket's ``incoming`` queue; every event
    also pings the socket's registered selectors so select/poll wake up.
    Events are re-fired so later handlers in the pipeline still see them.
    """

    def __init__(self, sock):
        self.sock = sock
        log.debug("Initializing inbound handler", extra={"sock": self.sock})

    def channelActive(self, ctx):
        log.debug("Channel is active", extra={"sock": self.sock})
        self.sock._notify_selectors()
        ctx.fireChannelActive()

    def channelRead(self, ctx, msg):
        log.debug("Channel read message %s", msg, extra={"sock": self.sock})
        msg.retain()  # bump ref count so it can be used in the blocking queue
        self.sock.incoming.put(msg)
        self.sock._notify_selectors()
        ctx.fireChannelRead(msg)

    def channelWritabilityChanged(self, ctx):
        log.debug("Channel ready for write", extra={"sock": self.sock})
        self.sock._notify_selectors()
        ctx.fireChannelWritabilityChanged()

    def exceptionCaught(self, ctx, cause):
        log.debug("Channel caught exception %s", cause, extra={"sock": self.sock})
        self.sock._notify_selectors(exception=cause)
        ctx.fireExceptionCaught(cause)
+
+
class ChildSocketHandler(ChannelInitializer):

    """Initializes each accepted child channel of a listening socket.

    Wraps the Netty channel in a ChildSocket, copies the parent's current
    option set onto it, queues it for accept(), then blocks this handler
    until the child is actually used (or its channel closes).
    """

    def __init__(self, parent_socket):
        self.parent_socket = parent_socket

    def initChannel(self, child_channel):
        child = ChildSocket()
        child.proto = IPPROTO_TCP
        child._init_client_mode(child_channel)

        # Get most current options from the parent. This enables any subsequent divergence.
        #
        # It's OK that this copy could occur without a mutex, given that such iteration
        # is guaranteed to be weakly consistent
        child.options = dict(((option, value) for option, value in self.parent_socket.options.iteritems()))
        if child.options:
            log.debug("Setting inherited options %s", child.options, extra={"sock": child})
            config = child_channel.config()
            for option, value in child.options.iteritems():
                config.setOption(option, value)

        log.debug("Notifing listeners of parent socket %s", self.parent_socket, extra={"sock": child})
        self.parent_socket.child_queue.put(child)
        self.parent_socket._notify_selectors()
        log.debug("Notified listeners of parent socket %s with queue %s",
                  self.parent_socket, self.parent_socket.child_queue, extra={"sock": child})

        # Must block until the child socket is actually "used". This is
        # because there may be some additional setup required, such as
        # wrapping the socket, before the child is ready to read.

        def unlatch_child(_):
            # FIXME when bound methods are supported for single method interfaces
            child._unlatch()

        # Ensure that this handler will not block if the channel is closed,
        # otherwise this handler will simply sit idly as a pending task in the Netty
        # thread pool
        child_channel.closeFuture().addListener(unlatch_child)

        child._wait_on_latch()
        log.debug("Socket initChannel completed waiting on latch", extra={"sock": child})
+
+
# FIXME raise exceptions for ops not permitted on client socket, server socket
# Role a _realsocket takes on after connect()/listen()/datagram use.
UNKNOWN_SOCKET, CLIENT_SOCKET, SERVER_SOCKET, DATAGRAM_SOCKET = range(4)


# These are the only socket protocols we currently support, so it's easy to map as follows:

# Maps (level, optname) pairs as used by set/getsockopt onto the
# corresponding Netty ChannelOption plus the Python type used to coerce
# the option value.
_socket_options = {
    IPPROTO_TCP: {
        (SOL_SOCKET, SO_KEEPALIVE): (ChannelOption.SO_KEEPALIVE, bool),
        (SOL_SOCKET, SO_LINGER): (ChannelOption.SO_LINGER, int),
        (SOL_SOCKET, SO_RCVBUF): (ChannelOption.SO_RCVBUF, int),
        (SOL_SOCKET, SO_REUSEADDR): (ChannelOption.SO_REUSEADDR, bool),
        (SOL_SOCKET, SO_SNDBUF): (ChannelOption.SO_SNDBUF, int),
        # FIXME SO_TIMEOUT needs to be handled by an IdleStateHandler -
        # ChannelOption.SO_TIMEOUT really only applies to OIO (old) socket channels,
        # we want to use NIO ones
        (SOL_SOCKET, SO_TIMEOUT): (ChannelOption.SO_TIMEOUT, int),
        (IPPROTO_TCP, TCP_NODELAY): (ChannelOption.TCP_NODELAY, bool),
    },
    IPPROTO_UDP: {
        (SOL_SOCKET, SO_BROADCAST): (ChannelOption.SO_BROADCAST, bool),
        (SOL_SOCKET, SO_LINGER): (ChannelOption.SO_LINGER, int),
        (SOL_SOCKET, SO_RCVBUF): (ChannelOption.SO_RCVBUF, int),
        (SOL_SOCKET, SO_REUSEADDR): (ChannelOption.SO_REUSEADDR, bool),
        (SOL_SOCKET, SO_SNDBUF): (ChannelOption.SO_SNDBUF, int),
        (SOL_SOCKET, SO_TIMEOUT): (ChannelOption.SO_TIMEOUT, int),
    }
}
+
+# actual socket support
+#######################
+
+class _realsocket(object):
+
    def __init__(self, family=None, type=None, proto=None):
        """Create an unconnected socket; role is fixed later by use.

        Stream sockets stay UNKNOWN_SOCKET until connect()/listen()
        decides client vs server; datagram sockets are usable at once.
        """
        # FIXME verify args are correct
        self.family = family
        self.type = type
        # Infer the protocol from the socket type when not supplied.
        if proto is None:
            if type == SOCK_STREAM:
                proto = IPPROTO_TCP
            elif type == SOCK_DGRAM:
                proto = IPPROTO_UDP
        self.proto = proto

        self._last_error = 0  # supports SO_ERROR
        self.connected = False
        self.timeout = _defaulttimeout
        self.channel = None
        self.bind_addr = _EPHEMERAL_ADDRESS
        self.bind_timestamp = None  # Handle Netty race condition on bound addresses
        self.selectors = CopyOnWriteArrayList()
        self.options = {}  # deferred options until bootstrap
        self.peer_closed = False

        # Reference count this underlying socket
        self.open_lock = Lock()
        self.open_count = 1

        if self.type == SOCK_DGRAM:
            self.socket_type = DATAGRAM_SOCKET
            self.incoming = LinkedBlockingQueue()  # list of read buffers
            self.incoming_head = None  # allows msg buffers to be broken up
            self.python_inbound_handler = None
            self._can_write = True
        else:
            self.socket_type = UNKNOWN_SOCKET
+
+ def __repr__(self):
+ return "<_realsocket at {:#x} type={} open_count={} channel={} timeout={}>".format(
+ id(self), self.socket_type, self.open_count, self.channel, self.timeout)
+
    def _unlatch(self):
        """Release any accept-time latch; see ChildSocket for the real one."""
        pass  # no-op once mutated from ChildSocket to normal _socketobject
+
    def _register_selector(self, selector):
        # Idempotent: CopyOnWriteArrayList.addIfAbsent skips duplicates.
        self.selectors.addIfAbsent(selector)
+
    def _unregister_selector(self, selector):
        # Returns whether the selector was actually registered.
        return self.selectors.remove(selector)
+
    def _notify_selectors(self, exception=None, hangup=False):
        # Fan out a (possible) state change to every select/poll object
        # currently watching this socket.
        for selector in self.selectors:
            selector.notify(self, exception=exception, hangup=hangup)
+
    @raises_java_exception
    def _handle_channel_future(self, future, reason):
        # All differences between nonblocking vs blocking with optional timeouts
        # is managed by this method.
        #
        # All sockets can be selected on, regardless of blocking/nonblocking state.
        #
        # Returns the future; blocking mode syncs on it, timeout mode
        # awaits it and re-raises its failure cause, nonblocking mode
        # returns it untouched.

        def workaround_jython_bug_for_bound_methods(_):
            # FIXME check for errors, notify the corresponding socket accordingly
            # FIXME wrapper function is needed because of http://bugs.jython.org/issue2115
            self._notify_selectors()

        future.addListener(workaround_jython_bug_for_bound_methods)

        if self.timeout is None:
            # Blocking socket: wait for (and propagate failure from) the op.
            log.debug("Syncing on future %s for %s", future, reason, extra={"sock": self})
            return future.sync()
        elif self.timeout:
            self._handle_timeout(future.await, reason)
            if not future.isSuccess():
                raise future.cause()
            return future
        else:
            # Nonblocking: caller polls/selects for completion.
            return future
+
+ def setblocking(self, flag):
+ if flag:
+ self.settimeout(None)
+ else:
+ self.settimeout(0.0)
+
    def settimeout(self, timeout):
        # Normalization (None/0/positive float) is done by _calctimeoutvalue.
        self.timeout = _calctimeoutvalue(timeout)
+
    def gettimeout(self):
        """Return the current timeout: None (blocking), 0 (nonblocking), or seconds."""
        return self.timeout
+
    def _handle_timeout(self, waiter, reason):
        """Invoke ``waiter(duration, unit)`` bounded by self.timeout.

        ``waiter`` is any Java timed-wait callable (queue.poll,
        future.await, ...); a falsy result is treated as expiry.
        """
        timeout_in_ns = int(self.timeout * _TO_NANOSECONDS)
        log.debug("Waiting for up to %.2fs for %s", self.timeout, reason, extra={"sock": self})
        started = time.time()
        result = waiter(timeout_in_ns, TimeUnit.NANOSECONDS)
        log.debug("Completed in %.2fs", time.time() - started, extra={"sock": self})
        if not result:
            # above predicate handles either the case the waiter is
            # returning a value or in the case ChannelFuture#await,
            # that the timeout expired, in which case False is
            # returned
            if self.timeout == 0:
                raise error(errno.ETIMEDOUT, "Connection timed out")
            else:
                raise timeout("timed out")
        return result
+
    def bind(self, address):
        """Record the local address; the actual Netty bind happens lazily."""
        # Netty 4 supports binding a socket to multiple addresses;
        # apparently this is the not the case for C API sockets
        self.bind_addr = _get_jsockaddr(address, self.family, self.type, self.proto, AI_PASSIVE)
        self._datagram_connect()  # as necessary
+
+ # CLIENT METHODS
+ # Calling connect/connect_ex means this is a client socket; these
+ # in turn use _connect, which uses Bootstrap, not ServerBootstrap
+
    def _init_client_mode(self, channel=None):
        """Switch this socket into client mode.

        With ``channel`` supplied (accepted child sockets) the channel is
        adopted immediately and marked connected; without it, state is
        prepared for a later _connect().
        """
        # this is client socket specific
        self.socket_type = CLIENT_SOCKET
        self.incoming = LinkedBlockingQueue()  # list of read buffers
        self.incoming_head = None  # allows msg buffers to be broken up
        self.python_inbound_handler = None
        self._can_write = True
        self.connect_handlers = []
        self.connected = False
        if channel:
            log.debug("Setting up channel %s", channel, extra={"sock": self})
            self.channel = channel
            self.python_inbound_handler = PythonInboundHandler(self)
            self.connect_handlers = [self.python_inbound_handler]
            self.connected = True
+
    def _connect(self, addr):
        """Bootstrap a Netty client channel and connect it to ``addr``.

        Honors a previously bind()-recorded local address; blocking and
        timeout behavior is delegated to _handle_channel_future.
        """
        log.debug("Begin connection to %s", addr, extra={"sock": self})
        addr = _get_jsockaddr(addr, self.family, self.type, self.proto, 0)
        self._init_client_mode()
        self.connected = True
        self.python_inbound_handler = PythonInboundHandler(self)
        bootstrap = Bootstrap().group(NIO_GROUP).channel(NioSocketChannel)
        for option, value in self.options.iteritems():
            bootstrap.option(option, value)

        # FIXME really this is just for SSL handling, so make more
        # specific than a list of connect_handlers
        if self.connect_handlers:
            for handler in self.connect_handlers:
                bootstrap.handler(handler)
        else:
            bootstrap.handler(self.python_inbound_handler)

        if self.bind_addr:
            log.debug("Connect %s to %s", self.bind_addr, addr, extra={"sock": self})
            bind_future = bootstrap.bind(self.bind_addr)
            self._handle_channel_future(bind_future, "local bind")
            self.channel = bind_future.channel()
            future = self.channel.connect(addr)
        else:
            log.debug("Connect to %s", addr, extra={"sock": self})
            future = bootstrap.connect(addr)
            self.channel = future.channel()

        self._handle_channel_future(future, "connect")
        self.bind_timestamp = time.time()
+
    def _post_connect(self):
        # Post-connect step is necessary to handle SSL setup,
        # otherwise the read adapter can race in seeing encrypted
        # messages from the peer
        if self.connect_handlers:
            self.channel.pipeline().addLast(self.python_inbound_handler)

        # Local callback (intentionally distinct from the peer_closed
        # attribute): marks EOF on the incoming queue and wakes selectors.
        def peer_closed(x):
            log.debug("Peer closed channel %s", x, extra={"sock": self})
            self.incoming.put(_PEER_CLOSED)
            self._notify_selectors(hangup=True)

        self.channel.closeFuture().addListener(peer_closed)
+
    def connect(self, addr):
        """Connect to ``addr``, dispatching on datagram vs stream role."""
        # Unwrapped sockets can immediately perform the post-connect step
        if self.socket_type == DATAGRAM_SOCKET:
            self._datagram_connect(addr)
            log.debug("Completed datagram connection to %s", addr, extra={"sock": self})
        else:
            self._connect(addr)
            self._post_connect()
            log.debug("Completed connection to %s", addr, extra={"sock": self})
+
    def connect_ex(self, addr):
        """connect() variant reporting progress as an errno value.

        NOTE(review): CPython's connect_ex returns 0 once a blocking
        connect succeeds; returning EISCONN here looks deliberate for
        this implementation, but confirm callers expect it.
        """
        self.connect(addr)
        if self.timeout is None:
            return errno.EISCONN
        else:
            return errno.EINPROGRESS
+
+
+ # SERVER METHODS
+ # Calling listen means this is a server socket
+
    def listen(self, backlog):
        """Turn this socket into a server: bootstrap, bind, start accepting.

        Accepted children are queued (up to ``backlog``) by
        ChildSocketHandler for retrieval via accept().
        """
        self.socket_type = SERVER_SOCKET

        b = ServerBootstrap()
        # Server sockets get their own event loop group (shut down in close()).
        self.group = NioEventLoopGroup(10, DaemonThreadFactory())
        b.group(self.group)
        b.channel(NioServerSocketChannel)
        b.option(ChannelOption.SO_BACKLOG, backlog)
        for option, value in self.options.iteritems():
            b.option(option, value)
        # Note that child options are set in the child handler so
        # that they can take into account any subsequent changes,
        # plus have shadow support

        self.child_queue = ArrayBlockingQueue(backlog)
        self.child_handler = ChildSocketHandler(self)
        b.childHandler(self.child_handler)

        future = b.bind(self.bind_addr)
        self._handle_channel_future(future, "listen")
        self.bind_timestamp = time.time()
        self.channel = future.channel()
        log.debug("Bound server socket to %s", self.bind_addr, extra={"sock": self})
+
    def accept(self):
        """Return (child_socket, peer_address), honoring the timeout mode."""
        if self.timeout is None:
            log.debug("Blocking indefinitely for child on queue %s", self.child_queue, extra={"sock": self})
            child = self.child_queue.take()
        elif self.timeout:
            log.debug("Timed wait for child on queue %s", self.child_queue, extra={"sock": self})
            child = self._handle_timeout(self.child_queue.poll, "accept")
        else:
            # Non-blocking: poll once and fail with EWOULDBLOCK if empty.
            log.debug("Polling for child on queue %s", self.child_queue, extra={"sock": self})
            child = self.child_queue.poll()
            if child is None:
                raise error(errno.EWOULDBLOCK, "Resource temporarily unavailable")
        peername = child.getpeername() if child else None
        log.debug("Got child %s connected to %s", child, peername, extra={"sock": self})
        return child, peername
+
+ # DATAGRAM METHODS
+
    def _datagram_connect(self, addr=None):
        """Lazily bootstrap/bind the datagram channel; optionally connect it.

        Safe to call repeatedly; the bootstrap happens only on the first
        call for a datagram socket.
        """
        # FIXME raise exception if not of the right family
        if addr is not None:
            addr = _get_jsockaddr(addr, self.family, self.type, self.proto, 0)

        if not self.connected and self.socket_type == DATAGRAM_SOCKET:
            log.debug("Binding datagram socket to %s", self.bind_addr, extra={"sock": self})
            self.connected = True
            self.python_inbound_handler = PythonInboundHandler(self)
            bootstrap = Bootstrap().group(NIO_GROUP).channel(NioDatagramChannel)
            bootstrap.handler(self.python_inbound_handler)
            for option, value in self.options.iteritems():
                bootstrap.option(option, value)

            future = bootstrap.register()
            self._handle_channel_future(future, "register")
            self.channel = future.channel()
            self._handle_channel_future(self.channel.bind(self.bind_addr), "bind")

        if addr is not None:
            # Handles the relatively rare case that this is a
            # CONNECTED datagram socket, which Netty does not
            # support in its bootstrap setup.
            log.debug("Connecting datagram socket to %s", addr, extra={"sock": self})
            future = self.channel.connect(addr)
            self._handle_channel_future(future, "connect")
+
    def sendto(self, string, arg1, arg2=None):
        """Send ``string`` as one datagram; returns len(string).

        NOTE(review): the ``flags`` value is parsed out of the overloaded
        arguments but never used -- confirm that is intentional.
        """
        # Unfortunate arg overloading
        if arg2 is not None:
            flags = arg1
            address = arg2
        else:
            flags = None
            address = arg1

        address = _get_jsockaddr(address, self.family, self.type, self.proto, 0)

        # Odd but working hybrid: .format fills the payload preview, the
        # leftover %s is filled lazily by logging with ``address``.
        log.debug("Sending datagram to %s <<<{!r:.20}>>>".format(string), address, extra={"sock": self})
        self._datagram_connect()
        packet = DatagramPacket(Unpooled.wrappedBuffer(string), address)
        future = self.channel.writeAndFlush(packet)
        self._handle_channel_future(future, "sendto")
        return len(string)
+
+
+ # FIXME implement these methods
+
    def recvfrom_into(self, buffer, nbytes=0, flags=0):
        """Not implemented yet; see the FIXME above."""
        raise NotImplementedError()
+
    def recv_into(self, buffer, nbytes=0, flags=0):
        """Not implemented yet; see the FIXME above."""
        raise NotImplementedError()
+
+ # GENERAL METHODS
+
    def close(self):
        """Decrement the refcount; tear down the channel at zero.

        Server sockets additionally stop their event loop group and
        close any accepted-but-unclaimed children.
        """
        with self.open_lock:
            self.open_count -= 1
            if self.open_count > 0:
                log.debug("Open count > 0, so not closing underlying socket", extra={"sock": self})
                return

            # Never bootstrapped: nothing to tear down.
            if self.channel is None:
                return

            try:
                self.channel.close().sync()
            except RejectedExecutionException:
                # Do not care about tasks that attempt to schedule after close
                pass
            if self.socket_type == SERVER_SOCKET:
                self.group.shutdownGracefully(0, 100, TimeUnit.MILLISECONDS)
                while True:
                    child = self.child_queue.poll()
                    if child is None:
                        break
                    log.debug("Closed child socket %s not yet accepted", child, extra={"sock": self})
                    child.close()

            log.debug("Closed socket", extra={"sock": self})
+
    def shutdown(self, how):
        """Approximate shutdown(2): detach the read handler and/or stop writes."""
        self._verify_channel()
        if how & SHUT_RD:
            try:
                self.channel.pipeline().remove(self.python_inbound_handler)
            except NoSuchElementException:
                pass  # already removed, can safely ignore (presumably)
        if how & SHUT_WR:
            self._can_write = False
+
    def _readable(self):
        """Level test used by select/poll: is a read guaranteed not to block?"""
        if self.socket_type == CLIENT_SOCKET or self.socket_type == DATAGRAM_SOCKET:
            # Either a partially consumed buffer or a queued message.
            return ((self.incoming_head is not None and self.incoming_head.readableBytes()) or
                    self.incoming.peek())
        elif self.socket_type == SERVER_SOCKET:
            # Readable means a child connection is waiting to be accepted.
            return bool(self.child_queue.peek())
        else:
            return False
+
    def _writable(self):
        """Level test used by select/poll: can a write proceed right now?"""
        return self.channel and self.channel.isActive() and self.channel.isWritable()

    # Public alias kept for callers that use the can_write name.
    can_write = _writable
+
    def _verify_channel(self):
        """Raise ENOTCONN if no Netty channel has been set up yet."""
        if self.channel is None:
            log.debug("Channel is not connected or setup", extra={"sock": self})
            raise error(errno.ENOTCONN, "Socket is not connected")
+
    @raises_java_exception
    def send(self, data, flags=0):
        """Write ``data`` to the channel; returns len(data).

        Datagram sockets wrap the payload in a DatagramPacket addressed
        to the connected peer; stream sockets write the raw buffer.
        """
        # FIXME this almost certainly needs to chunk things
        self._verify_channel()
        data = str(data)  # FIXME temporary fix if data is of type buffer
        log.debug("Sending data <<<{!r:.20}>>>".format(data), extra={"sock": self})

        if self.socket_type == DATAGRAM_SOCKET:
            packet = DatagramPacket(Unpooled.wrappedBuffer(data), self.channel.remoteAddress())
            future = self.channel.writeAndFlush(packet)
            self._handle_channel_future(future, "send")
            return len(data)

        if not self._can_write:
            # Writes disabled by shutdown(SHUT_WR).
            raise error(errno.ENOTCONN, 'Socket not connected')
        future = self.channel.writeAndFlush(Unpooled.wrappedBuffer(data))
        self._handle_channel_future(future, "send")
        # FIXME are we sure we are going to be able to send this much data, especially async?
        return len(data)

    sendall = send  # FIXME see note above!
+
def _get_incoming_msg(self, reason):
    # Pull the next buffer from the incoming queue according to the
    # socket's blocking mode:
    #   timeout is None -> blocking take()
    #   timeout > 0     -> poll via _handle_timeout
    #   timeout == 0    -> nonblocking poll(); raises EAGAIN when empty
    # Returns None when the peer already closed and nothing is queued.
    if self.incoming_head is None:
        if self.timeout is None:
            if self.peer_closed:
                return None
            self.incoming_head = self.incoming.take()
        elif self.timeout:
            if self.peer_closed:
                return None
            self.incoming_head = self._handle_timeout(self.incoming.poll, reason)
        else:
            self.incoming_head = self.incoming.poll()  # Could be None
    if self.incoming_head is None:
        # FIXME FIXME C socket semantics return a '' after the first EAGAIN (not certain if this gets reset or not)
        log.debug("No data yet for socket", extra={"sock": self})
        raise error(errno.EAGAIN, "Resource temporarily unavailable")

    # Only return _PEER_CLOSED once
    msg = self.incoming_head
    if msg is _PEER_CLOSED:
        self.incoming_head = None
        self.peer_closed = True
    return msg
+
@raises_java_exception
def _get_message(self, bufsize, reason):
    # Fetch the next queued message and copy up to bufsize bytes from
    # it. Returns (data, sender): data is None when no data is
    # available (peer closed in blocking mode) and '' at EOF.
    self._datagram_connect()
    self._verify_channel()
    msg = self._get_incoming_msg(reason)

    if self.socket_type == DATAGRAM_SOCKET:
        if msg is None:
            return None, None
        elif msg is _PEER_CLOSED:
            return "", None
    else:
        if msg is None:
            return None, self.channel.remoteAddress()
        elif msg is _PEER_CLOSED:
            return "", self.channel.remoteAddress()

    if self.socket_type == DATAGRAM_SOCKET:
        # UDP messages are DatagramPackets carrying payload + sender.
        content = msg.content()
        sender = msg.sender()
    else:
        content = msg
        sender = self.channel.remoteAddress()
    msg_length = content.readableBytes()
    buf = jarray.zeros(min(msg_length, bufsize), "b")
    content.readBytes(buf)
    if content.readableBytes() == 0:
        # Fully consumed: release the buffer and pop the head; otherwise
        # the remainder stays queued for the next read.
        msg.release()  # return msg ByteBuf back to Netty's pool
        self.incoming_head = None
    return buf.tostring(), sender
+
def recv(self, bufsize, flags=0):
    """Receive up to *bufsize* bytes; '' signals the peer has closed."""
    self._verify_channel()
    log.debug("Waiting on recv", extra={"sock": self})
    # Concurrent reads on one socket must be synchronized by the caller;
    # recv itself does not lock. This mirrors the policy of
    # SocketChannel, which underlies Netty's support for such channels.
    payload, _ = self._get_message(bufsize, "recv")
    log.debug("Received <<<{!r:.20}>>>".format(payload), extra={"sock": self})
    return payload
+
def recvfrom(self, bufsize, flags=0):
    """Receive up to *bufsize* bytes; return (data, (host, port))."""
    self._verify_channel()
    data, sender = self._get_message(bufsize, "recvfrom")
    remote_addr = sender.getHostString(), sender.getPort()
    # Fixed format spec: ".20" truncates the repr to 20 chars, matching
    # send()/recv(); the previous ":20" padded to a minimum width of 20
    # instead of truncating.
    log.debug("Received from sender %s <<<{!r:.20}>>>".format(data), remote_addr, extra={"sock": self})
    return data, remote_addr
+
def fileno(self):
    # Jython has no OS-level file descriptor; the socket object itself
    # serves as the "fileno" so select() can key on it directly.
    return self
+
def setsockopt(self, level, optname, value):
    """Set a socket option.

    The cast value is shadowed in self.options (so getsockopt can
    answer even before a channel exists) and applied to the live
    channel config when one is attached. Raises error(ENOPROTOOPT)
    for options unknown to this protocol.
    """
    try:
        option, cast = _socket_options[self.proto][(level, optname)]
    except KeyError:
        raise error(errno.ENOPROTOOPT, "Protocol not available")

    # FIXME for NIO sockets, SO_TIMEOUT doesn't work - should use
    # IdleStateHandler instead
    cast_value = cast(value)
    self.options[option] = cast_value
    log.debug("Setting option %s to %s", optname, value, extra={"sock": self})
    if self.channel:
        # Reuse the already-cast value instead of casting a second time,
        # so the shadow dict and the channel config cannot diverge.
        self.channel.config().setOption(option, cast_value)
+
def getsockopt(self, level, optname, buflen=None):
    # Pseudo options for interrogating the status of this socket
    if level == SOL_SOCKET:
        if optname == SO_ACCEPTCONN:
            if self.socket_type == SERVER_SOCKET:
                return 1
            elif self.type == SOCK_STREAM:
                return 0
            else:
                raise error(errno.ENOPROTOOPT, "Protocol not available")
        if optname == SO_TYPE:
            return self.type
        if optname == SO_ERROR:
            # Reading SO_ERROR clears the pending error, matching BSD
            # socket semantics.
            last_error = self._last_error
            self._last_error = 0
            return last_error

    # Normal options are answered from the local shadow dict (populated
    # by setsockopt), defaulting to 0 when never set; the live channel
    # config is not consulted.
    try:
        option, _ = _socket_options[self.proto][(level, optname)]
    except KeyError:
        raise error(errno.ENOPROTOOPT, "Protocol not available")
    log.debug("Shadow option settings %s", self.options, extra={"sock": self})
    return self.options.get(option, 0)
+
def getsockname(self):
    # Return (host, port) this socket is bound to. Without a channel,
    # only an explicitly bound address can be reported.
    if self.channel is None:
        if self.bind_addr == _EPHEMERAL_ADDRESS:
            raise error(errno.ENOTCONN, "Socket is not connected")
        else:
            return self.bind_addr.getHostString(), self.bind_addr.getPort()
    # Netty 4 currently races between bind to ephemeral port and the availability
    # of the local address for the channel. Workaround for now is to poll.
    while True:
        local_addr = self.channel.localAddress()
        if local_addr:
            break
        if time.time() - self.bind_timestamp > 1:
            # Presumably after a second something is completely wrong,
            # so punt
            raise error(errno.ENOTCONN, "Socket is not connected")
        log.debug("Poll for local address", extra={"sock": self})
        time.sleep(0.1)  # completely arbitrary
    return local_addr.getHostString(), local_addr.getPort()
+
def getpeername(self):
    """Return (host, port) of the connected peer or raise ENOTCONN."""
    self._verify_channel()
    peer = self.channel.remoteAddress()
    if peer is None:
        raise error(errno.ENOTCONN, "Socket is not connected")
    return peer.getHostString(), peer.getPort()
+
+
# Method names exposed on _socketobject by delegation to the wrapped
# real socket (wired up below via meth/partial/MethodType).
_socketmethods = (
    'bind', 'connect', 'connect_ex', 'fileno', 'listen',
    'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
    'sendall', 'setblocking',
    'settimeout', 'gettimeout', 'shutdown')


# All the method names that must be delegated to either the real socket
# object or the _closedsocket object.
# For socket-reboot, this also means anything used by _Select

_delegate_methods = (
    "recv", "recvfrom", "recv_into", "recvfrom_into",
    "send", "sendto", "fileno")
+
class _closedsocket(object):
    """Stand-in installed by _socketobject.close(): every socket
    operation fails with EBADF, while close() stays harmlessly
    repeatable."""

    def close(self):
        # Closing an already-closed socket is a no-op.
        pass

    def _dummy(*args):
        raise error(errno.EBADF, 'Bad file descriptor')

    # All _delegate_methods must also resolve here (see
    # _socketobject.close, which rebinds them to _dummy).
    fileno = _dummy
    send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy

    __getattr__ = _dummy
+
+
# Wrapper around platform socket objects. This implements
# a platform-independent dup() functionality. The
# implementation currently relies on reference counting
# to close the underlying socket object.
class _socketobject(object):

    __doc__ = _realsocket.__doc__


    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
        if _sock is None:
            _sock = _realsocket(family, type, proto)
        self._sock = _sock
        # Bind the hot-path methods straight onto this instance so they
        # bypass delegation (and so close() can later swap them for
        # _closedsocket._dummy).
        for method in _delegate_methods:
            setattr(self, method, getattr(_sock, method))

    def close(self, _closedsocket=_closedsocket,
              _delegate_methods=_delegate_methods, setattr=setattr):
        # This function should not reference any globals. See issue #808164.
        self._sock.close()
        self._sock = _closedsocket()
        dummy = self._sock._dummy
        for method in _delegate_methods:
            setattr(self, method, dummy)
    close.__doc__ = _realsocket.close.__doc__

    def fileno(self):
        # Jython: the wrapped real socket acts as the "descriptor".
        return self._sock

    def accept(self):
        sock, addr = self._sock.accept()
        return _socketobject(_sock=sock), addr
    accept.__doc__ = _realsocket.accept.__doc__

    def dup(self):
        """dup() -> socket object

        Return a new socket object connected to the same system resource."""

        if isinstance(self._sock, _closedsocket):
            return _socketobject(_sock=_closedsocket())
        # Bump the share count so the underlying socket only really
        # closes when the last wrapper does.
        with self._sock.open_lock:
            self._sock.open_count += 1
            return _socketobject(_sock=self._sock)

    def makefile(self, mode='r', bufsize=-1):
        """makefile([mode[, bufsize]]) -> file object

        Return a regular file object corresponding to the socket.  The mode
        and bufsize arguments are as for the built-in open() function."""

        if isinstance(self._sock, _closedsocket):
            return _fileobject(_closedsocket(), mode, bufsize, close=True)
        # Like dup(): the file object shares (and refcounts) the socket.
        with self._sock.open_lock:
            self._sock.open_count += 1
            return _fileobject(self._sock, mode, bufsize, close=True)

    family = property(lambda self: self._sock.family, doc="the socket family")
    type = property(lambda self: self._sock.type, doc="the socket type")
    proto = property(lambda self: self._sock.proto, doc="the socket protocol")
+
+
# Delegation shim: look up *name* on the wrapped real socket and call it.
def meth(name,self,*args):
    return getattr(self._sock,name)(*args)


# Wire each name in _socketmethods onto _socketobject as an unbound
# method that delegates to the underlying real/closed socket, copying
# the real socket's docstring.
for _m in _socketmethods:
    p = partial(meth,_m)
    p.__name__ = _m
    p.__doc__ = getattr(_realsocket,_m).__doc__
    m = MethodType(p,None,_socketobject)
    setattr(_socketobject,_m,m)


# NOTE(review): the "socket" half of this alias is shadowed by the
# socket() function defined later in this module; only SocketType
# keeps pointing at the class.
socket = SocketType = _socketobject
+
+
class ChildSocket(_realsocket):
    """Server-side socket produced by accept() whose post-connect setup
    runs lazily: the first data-path operation triggers _post_connect()
    exactly once, gated by an AtomicBoolean and released via a latch.
    """

    def __init__(self):
        super(ChildSocket, self).__init__()
        self.active = AtomicBoolean()
        self.active_latch = CountDownLatch(1)

    def _ensure_post_connect(self):
        # getAndSet guarantees _post_connect runs at most once even
        # under concurrent first use.
        do_post_connect = not self.active.getAndSet(True)
        if do_post_connect:
            self._post_connect()
            self.active_latch.countDown()

    def _wait_on_latch(self):
        # Block until _ensure_post_connect has completed on some thread.
        log.debug("Waiting for activity", extra={"sock": self})
        self.active_latch.await()
        log.debug("Latch released, can now proceed", extra={"sock": self})

    # FIXME raise exception for accept, listen, bind, connect, connect_ex

    # All ops that allow us to characterize the mode of operation of
    # this socket as being either Start TLS or SSL when
    # connected. These should be ops that send/receive/change
    # connection, not metadata.

    def send(self, data):
        self._ensure_post_connect()
        return super(ChildSocket, self).send(data)

    sendall = send

    def recv(self, bufsize, flags=0):
        self._ensure_post_connect()
        return super(ChildSocket, self).recv(bufsize, flags)

    def recvfrom(self, bufsize, flags=0):
        self._ensure_post_connect()
        return super(ChildSocket, self).recvfrom(bufsize, flags)

    def setblocking(self, mode):
        self._ensure_post_connect()
        return super(ChildSocket, self).setblocking(mode)

    # FIXME FIXME FIXME other ops.

    def close(self):
        self._ensure_post_connect()
        super(ChildSocket, self).close()

    def shutdown(self, how):
        self._ensure_post_connect()
        super(ChildSocket, self).shutdown(how)

    def __del__(self):
        # Required in the case this child socket never becomes active.
        # This cleanup will ensure that the pending thread for the
        # handler is released when a GC happens, not necessarily
        # before shutdown of course. Naturally no extra work will be
        # done in setting up the channel.
        # NOTE(review): close() below re-runs _ensure_post_connect and
        # counts the latch down a second time; a second countDown on a
        # zeroed latch is a no-op, but confirm the _post_connect on GC
        # is intended.
        self.active_latch.countDown()
        self.close()
+
+
+# EXPORTED constructors
+
def socket(family=None, type=None, proto=None):
    # Exported constructor; shadows the `socket = SocketType` alias
    # defined above.
    # NOTE(review): the None defaults are passed straight through,
    # overriding _socketobject's own AF_INET/SOCK_STREAM/0 defaults --
    # presumably _realsocket handles None itself; confirm.
    return _socketobject(family, type, proto)
+
+
def select(rlist, wlist, xlist, timeout=None):
    """select() built on _Select, with CPython-style argument checks."""
    for seq in (rlist, wlist, xlist):
        if not isinstance(seq, Sequence):
            raise TypeError("arguments 1-3 must be sequences")
    if timeout is not None:
        if not isinstance(timeout, Number):
            raise TypeError("timeout must be a float or None")
        if timeout < 0:
            raise error(errno.EINVAL, "Invalid argument")
    return _Select(rlist, wlist, xlist)(timeout)
+
+
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.

    *address* is a 2-tuple ``(host, port)``. Pass *timeout* to set a
    timeout on the new socket before connecting; otherwise the module
    default from getdefaulttimeout() applies. If *source_address* is
    given it must be a (host, port) pair to bind before connecting; a
    host of '' or port 0 lets the OS choose.
    """
    host, port = address
    last_err = None
    # Try each resolved address in order until one connects.
    for af, socktype, proto, canonname, sa in getaddrinfo(host, port, 0, SOCK_STREAM):
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock
        except error as _:
            last_err = _
            if sock is not None:
                sock.close()
    if last_err is not None:
        raise last_err
    raise error("getaddrinfo returns an empty list")
+
+
+# MISCELLANEOUS module level functions
+
+_defaulttimeout = None
+
+def _calctimeoutvalue(value):
+ if value is None:
+ return None
+ try:
+ floatvalue = float(value)
+ except:
+ raise TypeError('Socket timeout value must be a number or None')
+ if floatvalue < 0.0:
+ raise ValueError("Socket timeout value cannot be negative")
+ return floatvalue
+
def getdefaulttimeout():
    # Return the module-wide default timeout (None == blocking).
    return _defaulttimeout
+
def setdefaulttimeout(timeout):
    # Validate/normalize via _calctimeoutvalue, then install globally.
    global _defaulttimeout
    _defaulttimeout = _calctimeoutvalue(timeout)
+
+
+# Define data structures to support IPV4 and IPV6.
+# FIXME are these ip address classes required by CPython API? Must they be old-style classes?
+
+class _ip_address_t: pass
+
+class _ipv4_address_t(_ip_address_t):
+
+ def __init__(self, sockaddr, port, jaddress):
+ self.sockaddr = sockaddr
+ self.port = port
+ self.jaddress = jaddress
+
+ def __getitem__(self, index):
+ if 0 == index:
+ return self.sockaddr
+ elif 1 == index:
+ return self.port
+ else:
+ raise IndexError()
+
+ def __len__(self):
+ return 2
+
+ def __str__(self):
+ return "('%s', %d)" % (self.sockaddr, self.port)
+
+ __repr__ = __str__
+
+class _ipv6_address_t(_ip_address_t):
+
+ def __init__(self, sockaddr, port, jaddress):
+ self.sockaddr = sockaddr
+ self.port = port
+ self.jaddress = jaddress
+
+ def __getitem__(self, index):
+ if 0 == index:
+ return self.sockaddr
+ elif 1 == index:
+ return self.port
+ elif 2 == index:
+ return 0
+ elif 3 == index:
+ return self.jaddress.scopeId
+ else:
+ raise IndexError()
+
+ def __len__(self):
+ return 4
+
+ def __str__(self):
+ return "('%s', %d, 0, %d)" % (self.sockaddr, self.port, self.jaddress.scopeId)
+
+ __repr__ = __str__
+
+
+
def _get_jsockaddr(address_object, family, sock_type, proto, flags):
    """Resolve *address_object* to a java.net.InetSocketAddress, logging
    the translation."""
    resolved = _get_jsockaddr2(address_object, family, sock_type, proto, flags)
    log.debug("Address %s for %s", resolved, address_object, extra={"sock": "*"})
    return resolved
+
def _get_jsockaddr2(address_object, family, sock_type, proto, flags):
    # Translate a user-supplied address (tuple or getaddrinfo result)
    # into a java.net.InetSocketAddress.
    # Is this an object that was returned from getaddrinfo? If so, it already contains an InetAddress
    if isinstance(address_object, _ip_address_t):
        return java.net.InetSocketAddress(address_object.jaddress, address_object[1])
    # The user passed an address tuple, not an object returned from getaddrinfo
    # So we must call getaddrinfo, after some translations and checking
    if address_object is None:
        address_object = ("", 0)
    error_message = "Address must be a 2-tuple (ipv4: (host, port)) or a 4-tuple (ipv6: (host, port, flow, scope))"
    if not isinstance(address_object, tuple) or \
            ((family == AF_INET and len(address_object) != 2) or (family == AF_INET6 and len(address_object) not in [2,4] )) or \
            not isinstance(address_object[0], (basestring, NoneType)) or \
            not isinstance(address_object[1], (int, long)):
        raise TypeError(error_message)
    if len(address_object) == 4 and not isinstance(address_object[3], (int, long)):
        raise TypeError(error_message)
    hostname = address_object[0]
    if hostname is not None:
        hostname = hostname.strip()
    port = address_object[1]
    if family == AF_INET and sock_type == SOCK_DGRAM and hostname == "<broadcast>":
        hostname = INADDR_BROADCAST
    if hostname in ["", None]:
        # Empty host: wildcard address for passive (bind) use,
        # localhost otherwise.
        if flags & AI_PASSIVE:
            hostname = {AF_INET: INADDR_ANY, AF_INET6: IN6ADDR_ANY_INIT}[family]
        else:
            hostname = "localhost"
    if isinstance(hostname, unicode):
        # IDNA-encode non-ASCII hostnames.
        hostname = encodings.idna.ToASCII(hostname)
    addresses = getaddrinfo(hostname, port, family, sock_type, proto, flags)
    if len(addresses) == 0:
        raise gaierror(errno.EGETADDRINFOFAILED, 'getaddrinfo failed')
    # Use the first resolved address.
    return java.net.InetSocketAddress(addresses[0][4].jaddress, port)
+
+
def _is_ip_address(addr, version=None):
    """True if *addr* parses as an IP address (optionally of *version*)."""
    try:
        _google_ipaddr_r234.IPAddress(addr, version)
    except ValueError:
        return False
    return True
+
+
def is_ipv4_address(addr):
    """True if *addr* is a valid IPv4 literal."""
    return _is_ip_address(addr, 4)


def is_ipv6_address(addr):
    """True if *addr* is a valid IPv6 literal."""
    return _is_ip_address(addr, 6)


def is_ip_address(addr):
    """True if *addr* is a valid IPv4 or IPv6 literal."""
    return _is_ip_address(addr)
+
+
# Workaround for this (predominantly windows) issue
# http://wiki.python.org/jython/NewSocketModule#IPV6_address_support

# When True, getaddrinfo() forces AF_INET so only IPv4 results are
# returned.
_ipv4_addresses_only = False

def _use_ipv4_addresses_only(value):
    # Module-level switch; see the URL above for motivation.
    global _ipv4_addresses_only
    _ipv4_addresses_only = value
+
+
def _getaddrinfo_get_host(host, family, flags):
    # Validate and normalize getaddrinfo()'s host argument; returns an
    # ASCII (IDNA-encoded) host string or None.
    if not isinstance(host, basestring) and host is not None:
        raise TypeError("getaddrinfo() argument 1 must be string or None")
    if flags & AI_NUMERICHOST:
        # Caller promised a numeric address: reject anything that does
        # not parse as an IP literal of the requested family.
        if not is_ip_address(host):
            raise gaierror(EAI_NONAME, "Name or service not known")
        if family == AF_INET and not is_ipv4_address(host):
            raise gaierror(EAI_ADDRFAMILY, "Address family for hostname not supported")
        if family == AF_INET6 and not is_ipv6_address(host):
            raise gaierror(EAI_ADDRFAMILY, "Address family for hostname not supported")
    if isinstance(host, unicode):
        # IDNA-encode non-ASCII hostnames.
        host = encodings.idna.ToASCII(host)
    return host
+
+
def _getaddrinfo_get_port(port, flags):
    """Normalize getaddrinfo()'s *port* argument to an int in [0, 65536).

    Accepts an int/long, a numeric string, a service name (looked up
    with getservbyname unless AI_NUMERICSERV is set), or None (-> 0).
    """
    if port is None:
        return 0
    if isinstance(port, basestring):
        try:
            int_port = int(port)
        except ValueError:
            if flags & AI_NUMERICSERV:
                raise gaierror(EAI_NONAME, "Name or service not known")
            # Not numeric: fall back to a service-name lookup.
            try:
                int_port = getservbyname(port)
            except error:
                raise gaierror(EAI_SERVICE, "Servname not supported for ai_socktype")
        return int_port % 65536
    if not isinstance(port, (int, long)):
        raise error("Int or String expected")
    return int(port) % 65536
+
+
@raises_java_exception
def getaddrinfo(host, port, family=AF_UNSPEC, socktype=0, proto=0, flags=0):
    """Resolve host/port to a list of
    (family, socktype, proto, canonname, sockaddr) tuples, mirroring
    CPython's socket.getaddrinfo (sockaddr is an _ipv4/_ipv6_address_t).
    """
    # (Decorator restored: the mailing-list text mangled the leading
    # "@" of "@raises_java_exception" into " at ".)
    if _ipv4_addresses_only:
        family = AF_INET
    if not family in [AF_INET, AF_INET6, AF_UNSPEC]:
        raise gaierror(errno.EIO, 'ai_family not supported')
    host = _getaddrinfo_get_host(host, family, flags)
    port = _getaddrinfo_get_port(port, flags)
    if socktype not in [0, SOCK_DGRAM, SOCK_STREAM]:
        raise error(errno.ESOCKTNOSUPPORT, "Socket type is not supported")
    # Filter resolved java addresses by the requested family.
    filter_fns = []
    filter_fns.append({
        AF_INET: lambda x: isinstance(x, java.net.Inet4Address),
        AF_INET6: lambda x: isinstance(x, java.net.Inet6Address),
        AF_UNSPEC: lambda x: isinstance(x, java.net.InetAddress),
    }[family])
    if host in [None, ""]:
        # Empty host: wildcard for passive (bind) use, localhost otherwise.
        if flags & AI_PASSIVE:
            hosts = {AF_INET: [INADDR_ANY], AF_INET6: [IN6ADDR_ANY_INIT], AF_UNSPEC: [INADDR_ANY, IN6ADDR_ANY_INIT]}[family]
        else:
            hosts = ["localhost"]
    else:
        hosts = [host]
    results = []
    for h in hosts:
        for a in java.net.InetAddress.getAllByName(h):
            if len([f for f in filter_fns if f(a)]):
                family = {java.net.Inet4Address: AF_INET, java.net.Inet6Address: AF_INET6}[a.getClass()]
                if flags & AI_CANONNAME:
                    canonname = str(a.getCanonicalHostName())
                else:
                    canonname = ""
                sockaddr = str(a.getHostAddress())
                # TODO: Include flowinfo and scopeid in a 4-tuple for IPv6 addresses
                sock_tuple = {AF_INET : _ipv4_address_t, AF_INET6 : _ipv6_address_t}[family](sockaddr, port, a)
                if socktype == 0:
                    socktypes = [SOCK_DGRAM, SOCK_STREAM]
                else:
                    socktypes = [socktype]
                for result_socktype in socktypes:
                    result_proto = {SOCK_DGRAM: IPPROTO_UDP, SOCK_STREAM: IPPROTO_TCP}[result_socktype]
                    if proto in [0, result_proto]:
                        # The returned socket will only support the result_proto
                        # If this does not match the requested proto, don't return it
                        results.append((family, result_socktype, result_proto, canonname, sock_tuple))
    return results
+
+
+
def htons(x):
    """Host-to-network short; identity on the JVM (no raw byte order)."""
    return x

def htonl(x):
    """Host-to-network long; identity on the JVM."""
    return x

def ntohs(x):
    """Network-to-host short; identity on the JVM."""
    return x

def ntohl(x):
    """Network-to-host long; identity on the JVM."""
    return x
+
@raises_java_exception
def inet_pton(family, ip_string):
    """Convert an IP literal to its packed binary form (a byte string).

    Raises error for malformed literals or unsupported families.
    """
    # (Decorator restored: the mailing-list text mangled the leading
    # "@" of "@raises_java_exception" into " at ".)
    if family == AF_INET:
        if not is_ipv4_address(ip_string):
            raise error("illegal IP address string passed to inet_pton")
    elif family == AF_INET6:
        if not is_ipv6_address(ip_string):
            raise error("illegal IP address string passed to inet_pton")
    else:
        raise error(errno.EAFNOSUPPORT, "Address family not supported by protocol")
    ia = java.net.InetAddress.getByName(ip_string)
    bytes = []
    for byte in ia.getAddress():
        # Java bytes are signed; map them into 0..255.
        if byte < 0:
            bytes.append(byte+256)
        else:
            bytes.append(byte)
    return "".join([chr(byte) for byte in bytes])
+
@raises_java_exception
def inet_ntop(family, packed_ip):
    """Convert a packed binary IP (4 or 16 bytes) to its string literal.

    Raises ValueError for wrong lengths or unknown families.
    """
    # (Decorator restored: the mailing-list text mangled the leading
    # "@" of "@raises_java_exception" into " at ".)
    jByteArray = array.array("b", packed_ip)
    if family == AF_INET:
        if len(jByteArray) != 4:
            raise ValueError("invalid length of packed IP address string")
    elif family == AF_INET6:
        if len(jByteArray) != 16:
            raise ValueError("invalid length of packed IP address string")
    else:
        raise ValueError("unknown address family %s" % family)
    ia = java.net.InetAddress.getByAddress(jByteArray)
    return ia.getHostAddress()
+
def inet_aton(ip_string):
    """Pack a dotted-quad IPv4 string into 4 bytes (inet_pton shortcut)."""
    return inet_pton(AF_INET, ip_string)


def inet_ntoa(packed_ip):
    """Unpack 4 bytes into a dotted-quad IPv4 string (inet_ntop shortcut)."""
    return inet_ntop(AF_INET, packed_ip)
+
+
+
+# Various toplevel functions for the socket module
+##################################################
+
def _gethostbyaddr(name):
    # This is as close as I can get; at least the types are correct...
    # Resolve name's primary address, then enumerate every address bound
    # to it, collecting parallel (names, addrs) string lists.
    addresses = InetAddress.getAllByName(gethostbyname(name))
    names = []
    addrs = []
    for addr in addresses:
        names.append(str(addr.getHostName()))
        addrs.append(str(addr.getHostAddress()))
    return names, addrs
+
@raises_java_exception
def getfqdn(name=None):
    """Return a fully qualified domain name for name.

    If name is omitted or empty it is interpreted as the local host. To
    find the fully qualified name, the hostname returned by
    gethostbyaddr() is checked, then aliases for the host, if available.
    The first name which includes a period is selected. In case no fully
    qualified domain name is available, the hostname is returned.
    """
    # (Decorator restored -- the mailing-list text mangled the leading
    # "@" into " at " -- and the docstring's truncated final sentence
    # repaired.)
    if not name:
        name = gethostname()
    names, addrs = _gethostbyaddr(name)
    for a in names:
        if a.find(".") >= 0:
            return a
    return name
+
@raises_java_exception
def gethostname():
    """Return the local host's name via java.net.InetAddress."""
    # (Decorator restored: "@" was mangled to " at " by the mailing list.)
    return str(InetAddress.getLocalHost().getHostName())


@raises_java_exception
def gethostbyname(name):
    """Resolve *name* to an IP address string via java.net.InetAddress."""
    # (Decorator restored: "@" was mangled to " at " by the mailing list.)
    return str(InetAddress.getByName(name).getHostAddress())
+
#
# Skeleton implementation of gethostbyname_ex
# Needed because urllib2 refers to it
#

@raises_java_exception
def gethostbyname_ex(name):
    """Skeleton gethostbyname_ex: (hostname, [], primary address).

    NOTE(review): CPython returns a *list* of addresses as the third
    element; this skeleton returns a bare string -- confirm callers
    (urllib2) tolerate that before changing it.
    """
    # (Decorator restored: "@" was mangled to " at " by the mailing list.)
    return name, [], gethostbyname(name)


@raises_java_exception
def gethostbyaddr(name):
    """Return (primary_name, name_list, address_list) for *name*."""
    # (Decorator restored: "@" was mangled to " at " by the mailing list.)
    names, addrs = _gethostbyaddr(name)
    return names[0], names, addrs
+
+
# Service/protocol database lookups are backed by jnr-netdb when it is
# available on the classpath; otherwise they degrade to returning None.
try:
    from jnr.netdb import Service, Protocol

    def getservbyname(service_name, protocol_name=None):
        # e.g. ("http", "tcp") -> 80; raises error when unknown.
        service = Service.getServiceByName(service_name, protocol_name)
        if service is None:
            raise error('service/proto not found')
        return service.getPort()

    def getservbyport(port, protocol_name=None):
        # e.g. (80, "tcp") -> "http"; raises error when unknown.
        service = Service.getServiceByPort(port, protocol_name)
        if service is None:
            raise error('port/proto not found')
        return service.getName()

    def getprotobyname(protocol_name=None):
        proto = Protocol.getProtocolByName(protocol_name)
        if proto is None:
            raise error('protocol not found')
        return proto.getProto()

except ImportError:
    # NOTE(review): unlike the jnr-backed versions above, these
    # fallbacks return None instead of raising -- callers must cope
    # with both behaviors.
    def getservbyname(service_name, protocol_name=None):
        return None

    def getservbyport(port, protocol_name=None):
        return None

    def getprotobyname(protocol_name=None):
        return None
+
+
def _getnameinfo_get_host(address, flags):
    # Reverse-resolve an address string according to getnameinfo flags.
    if not isinstance(address, basestring):
        raise TypeError("getnameinfo() address 1 must be string, not None")
    if isinstance(address, unicode):
        address = encodings.idna.ToASCII(address)
    jia = InetAddress.getByName(address)
    result = jia.getCanonicalHostName()
    if flags & NI_NAMEREQD:
        # Caller requires a real name; a bare IP literal is a failure.
        if is_ip_address(result):
            raise gaierror(EAI_NONAME, "Name or service not known")
    elif flags & NI_NUMERICHOST:
        result = jia.getHostAddress()
    # Ignoring NI_NOFQDN for now
    if flags & NI_IDN:
        result = encodings.idna.ToASCII(result)
    return result
+
def _getnameinfo_get_port(port, flags):
    """Map a numeric port to its service name, unless NI_NUMERICSERV
    requests the number itself; NI_DGRAM restricts the lookup to udp."""
    if not isinstance(port, (int, long)):
        raise TypeError("getnameinfo() port number must be an integer")
    if flags & NI_NUMERICSERV:
        return port
    protocol = "udp" if flags & NI_DGRAM else None
    return getservbyport(port, protocol)
+
@raises_java_exception
def getnameinfo(sock_addr, flags):
    """Translate a (host, port[, ...]) tuple into (hostname, service)."""
    # (Decorator restored: "@" was mangled to " at " by the mailing list.)
    if not isinstance(sock_addr, tuple) or len(sock_addr) < 2:
        raise TypeError("getnameinfo() argument 1 must be a tuple")
    host = _getnameinfo_get_host(sock_addr[0], flags)
    port = _getnameinfo_get_port(sock_addr[1], flags)
    return (host, port)
+
+
+
class _fileobject(object):
    """Faux file object attached to a socket object."""
    # Buffered read/write adapter over a socket, as produced by
    # _socketobject.makefile(). Read buffering uses a StringIO; write
    # buffering uses a list of strings joined on flush.

    default_bufsize = 8192
    name = "<socket>"

    __slots__ = ["mode", "bufsize", "softspace",
                 # "closed" is a property, see below
                 "_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf", "_wbuf_len",
                 "_close"]

    def __init__(self, sock, mode='rb', bufsize=-1, close=False):
        self._sock = sock
        self.mode = mode # Not actually used in this version
        if bufsize < 0:
            bufsize = self.default_bufsize
        self.bufsize = bufsize
        self.softspace = False
        # _rbufsize is the suggested recv buffer size. It is *strictly*
        # obeyed within readline() for recv calls. If it is larger than
        # default_bufsize it will be used for recv calls within read().
        if bufsize == 0:
            self._rbufsize = 1
        elif bufsize == 1:
            self._rbufsize = self.default_bufsize
        else:
            self._rbufsize = bufsize
        self._wbufsize = bufsize
        # We use StringIO for the read buffer to avoid holding a list
        # of variously sized string objects which have been known to
        # fragment the heap due to how they are malloc()ed and often
        # realloc()ed down much smaller than their original allocation.
        self._rbuf = StringIO()
        self._wbuf = [] # A list of strings
        self._wbuf_len = 0
        self._close = close

    def _getclosed(self):
        return self._sock is None
    closed = property(_getclosed, doc="True if the file is closed")

    def close(self):
        # Flush pending writes; close the socket only when this file
        # object owns it (close=True from makefile's refcount bump).
        try:
            if self._sock:
                self.flush()
        finally:
            if self._close:
                self._sock.close()
            self._sock = None

    def __del__(self):
        try:
            self.close()
        except:
            # close() may fail if __init__ didn't complete
            pass

    def flush(self):
        if self._wbuf:
            data = "".join(self._wbuf)
            self._wbuf = []
            self._wbuf_len = 0
            buffer_size = max(self._rbufsize, self.default_bufsize)
            data_size = len(data)
            write_offset = 0
            # FIXME apparently this doesn't yet work on jython,
            # despite our work on memoryview/buffer support
            view = data # memoryview(data)
            try:
                while write_offset < data_size:
                    chunk = view[write_offset:write_offset+buffer_size]
                    self._sock.sendall(chunk)
                    write_offset += buffer_size
            finally:
                # On a partial flush, re-buffer the unsent tail so no
                # data is silently dropped.
                if write_offset < data_size:
                    remainder = data[write_offset:]
                    del view, data # explicit free
                    self._wbuf.append(remainder)
                    self._wbuf_len = len(remainder)

    def fileno(self):
        return self._sock.fileno()

    def write(self, data):
        data = str(data) # XXX Should really reject non-string non-buffers
        if not data:
            return
        self._wbuf.append(data)
        self._wbuf_len += len(data)
        # Flush on: unbuffered mode, line-buffered mode with a newline,
        # or buffer overflow.
        if (self._wbufsize == 0 or
            (self._wbufsize == 1 and '\n' in data) or
            (self._wbufsize > 1 and self._wbuf_len >= self._wbufsize)):
            self.flush()

    def writelines(self, list):
        # XXX We could do better here for very long lists
        # XXX Should really reject non-string non-buffers
        lines = filter(None, map(str, list))
        self._wbuf_len += sum(map(len, lines))
        self._wbuf.extend(lines)
        if (self._wbufsize <= 1 or
            self._wbuf_len >= self._wbufsize):
            self.flush()

    def read(self, size=-1):
        # Use max, disallow tiny reads in a loop as they are very inefficient.
        # We never leave read() with any leftover data from a new recv() call
        # in our internal buffer.
        rbufsize = max(self._rbufsize, self.default_bufsize)
        # Our use of StringIO rather than lists of string objects returned by
        # recv() minimizes memory usage and fragmentation that occurs when
        # rbufsize is large compared to the typical return value of recv().
        buf = self._rbuf
        buf.seek(0, 2) # seek end
        if size < 0:
            # Read until EOF
            self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(rbufsize)
                except error, e:
                    if e.args[0] == errno.EINTR:
                        continue
                    raise
                if not data:
                    break
                buf.write(data)
            return buf.getvalue()
        else:
            # Read until size bytes or EOF seen, whichever comes first
            buf_len = buf.tell()
            if buf_len >= size:
                # Already have size bytes in our buffer? Extract and return.
                buf.seek(0)
                rv = buf.read(size)
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return rv

            self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
            while True:
                left = size - buf_len
                # recv() will malloc the amount of memory given as its
                # parameter even though it often returns much less data
                # than that. The returned data string is short lived
                # as we copy it into a StringIO and free it. This avoids
                # fragmentation issues on many platforms.
                try:
                    data = self._sock.recv(left)
                except error, e:
                    if e.args[0] == errno.EINTR:
                        continue
                    raise
                if not data:
                    break
                n = len(data)
                if n == size and not buf_len:
                    # Shortcut. Avoid buffer data copies when:
                    # - We have no data in our buffer.
                    # AND
                    # - Our call to recv returned exactly the
                    #   number of bytes we were asked to read.
                    return data
                if n == left:
                    buf.write(data)
                    del data # explicit free
                    break
                assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                buf.write(data)
                buf_len += n
                del data # explicit free
                #assert buf_len == buf.tell()
            return buf.getvalue()

    def readline(self, size=-1):
        buf = self._rbuf
        buf.seek(0, 2) # seek end
        if buf.tell() > 0:
            # check if we already have it in our buffer
            buf.seek(0)
            bline = buf.readline(size)
            if bline.endswith('\n') or len(bline) == size:
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return bline
            del bline
        if size < 0:
            # Read until \n or EOF, whichever comes first
            if self._rbufsize <= 1:
                # Speed up unbuffered case
                buf.seek(0)
                buffers = [buf.read()]
                self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
                data = None
                recv = self._sock.recv
                while True:
                    try:
                        while data != "\n":
                            data = recv(1)
                            if not data:
                                break
                            buffers.append(data)
                    except error, e:
                        # The try..except to catch EINTR was moved outside the
                        # recv loop to avoid the per byte overhead.
                        if e.args[0] == errno.EINTR:
                            continue
                        raise
                    break
                return "".join(buffers)

            buf.seek(0, 2) # seek end
            self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except error, e:
                    if e.args[0] == errno.EINTR:
                        continue
                    raise
                if not data:
                    break
                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    # Keep the post-newline excess for the next read.
                    buf.write(data[:nl])
                    self._rbuf.write(data[nl:])
                    del data
                    break
                buf.write(data)
            return buf.getvalue()
        else:
            # Read until size bytes or \n or EOF seen, whichever comes first
            buf.seek(0, 2) # seek end
            buf_len = buf.tell()
            if buf_len >= size:
                buf.seek(0)
                rv = buf.read(size)
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return rv
            self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except error, e:
                    if e.args[0] == errno.EINTR:
                        continue
                    raise
                if not data:
                    break
                left = size - buf_len
                # did we just receive a newline?
                nl = data.find('\n', 0, left)
                if nl >= 0:
                    nl += 1
                    # save the excess data to _rbuf
                    self._rbuf.write(data[nl:])
                    if buf_len:
                        buf.write(data[:nl])
                        break
                    else:
                        # Shortcut. Avoid data copy through buf when returning
                        # a substring of our first recv().
                        return data[:nl]
                n = len(data)
                if n == size and not buf_len:
                    # Shortcut. Avoid data copy through buf when
                    # returning exactly all of our first recv().
                    return data
                if n >= left:
                    buf.write(data[:left])
                    self._rbuf.write(data[left:])
                    break
                buf.write(data)
                buf_len += n
                #assert buf_len == buf.tell()
            return buf.getvalue()

    def readlines(self, sizehint=0):
        total = 0
        list = []
        while True:
            line = self.readline()
            if not line:
                break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list

    # Iterator protocols

    def __iter__(self):
        return self

    def next(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line
diff --git a/Lib/_sslcerts.py b/Lib/_sslcerts.py
new file mode 100644
--- /dev/null
+++ b/Lib/_sslcerts.py
@@ -0,0 +1,239 @@
+import logging
+import sys
+import uuid
+from array import array
+from contextlib import closing
+from StringIO import StringIO
+
+from java.io import BufferedInputStream, BufferedReader, FileReader, InputStreamReader, ByteArrayInputStream
+from java.security import KeyStore, Security
+from java.security.cert import CertificateException, CertificateFactory
+from javax.net.ssl import (
+ X509KeyManager, X509TrustManager, KeyManagerFactory, SSLContext, TrustManager, TrustManagerFactory)
+
+try:
+ # jarjar-ed version
+ from org.python.bouncycastle.asn1.pkcs import PrivateKeyInfo
+ from org.python.bouncycastle.cert import X509CertificateHolder
+ from org.python.bouncycastle.cert.jcajce import JcaX509CertificateConverter
+ from org.python.bouncycastle.jce.provider import BouncyCastleProvider
+ from org.python.bouncycastle.openssl import PEMKeyPair, PEMParser
+ from org.python.bouncycastle.openssl.jcajce import JcaPEMKeyConverter
+except ImportError:
+ # dev version from extlibs
+ from org.bouncycastle.asn1.pkcs import PrivateKeyInfo
+ from org.bouncycastle.cert import X509CertificateHolder
+ from org.bouncycastle.cert.jcajce import JcaX509CertificateConverter
+ from org.bouncycastle.jce.provider import BouncyCastleProvider
+ from org.bouncycastle.openssl import PEMKeyPair, PEMParser
+ from org.bouncycastle.openssl.jcajce import JcaPEMKeyConverter
+
+log = logging.getLogger("ssl")
+
+
+# FIXME what happens if reloaded?
+Security.addProvider(BouncyCastleProvider())
+
+
+# build the necessary certificate with a CertificateFactory; this can take the pem format:
+# http://docs.oracle.com/javase/7/docs/api/java/security/cert/CertificateFactory.html#generateCertificate(java.io.InputStream)
+
+# not certain if we can include a private key in the pem file; see
+# http://stackoverflow.com/questions/7216969/getting-rsa-private-key-from-pem-base64-encoded-private-key-file
+
+
+# helpful advice for being able to manage ca_certs outside of Java's keystore
+# specifically the example ReloadableX509TrustManager
+# http://jcalcote.wordpress.com/2010/06/22/managing-a-dynamic-java-trust-store/
+
+# in the case of http://docs.python.org/2/library/ssl.html#ssl.CERT_REQUIRED
+
+# http://docs.python.org/2/library/ssl.html#ssl.CERT_NONE
+# https://github.com/rackerlabs/romper/blob/master/romper/trust.py#L15
+#
+# it looks like CERT_OPTIONAL simply validates certificates if
+# provided, probably something in checkServerTrusted - maybe a None
+# arg? need to verify as usual with a real system... :)
+
+# http://alesaudate.wordpress.com/2010/08/09/how-to-dynamically-select-a-certificate-alias-when-invoking-web-services/
+# is somewhat relevant for managing the keyfile, certfile
+
+
+def _get_ca_certs_trust_manager(ca_certs):
+ trust_store = KeyStore.getInstance(KeyStore.getDefaultType())
+ trust_store.load(None, None)
+ num_certs_installed = 0
+ with open(ca_certs) as f:
+ cf = CertificateFactory.getInstance("X.509")
+ for cert in cf.generateCertificates(BufferedInputStream(f)):
+ trust_store.setCertificateEntry(str(uuid.uuid4()), cert)
+ num_certs_installed += 1
+ tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm())
+ tmf.init(trust_store)
+ log.debug("Installed %s certificates", num_certs_installed, extra={"sock": "*"})
+ return tmf
+
+
+def _stringio_as_reader(s):
+ return BufferedReader(InputStreamReader(ByteArrayInputStream(bytearray(s.getvalue()))))
+
+
+def _extract_readers(cert_file):
+ private_key = StringIO()
+ certs = StringIO()
+ output = certs
+ with open(cert_file) as f:
+ for line in f:
+ if line.startswith("-----BEGIN PRIVATE KEY-----"):
+ output = private_key
+ output.write(line)
+ if line.startswith("-----END PRIVATE KEY-----"):
+ output = certs
+ return _stringio_as_reader(private_key), _stringio_as_reader(certs)
+
+
+def _get_openssl_key_manager(cert_file, key_file=None):
+ paths = [key_file] if key_file else []
+ paths.append(cert_file)
+
+ # Go from Bouncy Castle API to Java's; a bit heavyweight for the Python dev ;)
+ key_converter = JcaPEMKeyConverter().setProvider("BC")
+ cert_converter = JcaX509CertificateConverter().setProvider("BC")
+
+ private_key = None
+ certs = []
+ for path in paths:
+ for br in _extract_readers(path):
+ while True:
+ obj = PEMParser(br).readObject()
+ if obj is None:
+ break
+ if isinstance(obj, PEMKeyPair):
+ private_key = key_converter.getKeyPair(obj).getPrivate()
+ elif isinstance(obj, PrivateKeyInfo):
+ private_key = key_converter.getPrivateKey(obj)
+ elif isinstance(obj, X509CertificateHolder):
+ certs.append(cert_converter.getCertificate(obj))
+
+ assert private_key, "No private key loaded"
+ key_store = KeyStore.getInstance(KeyStore.getDefaultType())
+ key_store.load(None, None)
+ key_store.setKeyEntry(str(uuid.uuid4()), private_key, [], certs)
+ kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm())
+ kmf.init(key_store, [])
+ return kmf
+
+
+def _get_ssl_context(keyfile, certfile, ca_certs):
+ if certfile is None and ca_certs is None:
+ log.debug("Using default SSL context", extra={"sock": "*"})
+ return SSLContext.getDefault()
+ else:
+ log.debug("Setting up a specific SSL context for keyfile=%s, certfile=%s, ca_certs=%s",
+ keyfile, certfile, ca_certs, extra={"sock": "*"})
+ if ca_certs:
+ # should support composite usage below
+ trust_managers = _get_ca_certs_trust_manager(ca_certs).getTrustManagers()
+ else:
+ trust_managers = None
+ if certfile:
+ key_managers = _get_openssl_key_manager(certfile, keyfile).getKeyManagers()
+ else:
+ key_managers = None
+
+ # FIXME FIXME for performance, cache this lookup in the future
+ # to avoid re-reading files on every lookup
+ context = SSLContext.getInstance("SSL")
+ context.init(key_managers, trust_managers, None)
+ return context
+
+
+# CompositeX509KeyManager and CompositeX509TrustManager allow for mixing together Java built-in managers
+# with new managers to support Python ssl.
+#
+# See http://tersesystems.com/2014/01/13/fixing-the-most-dangerous-code-in-the-world/
+# for a good description of this composite approach.
+#
+# Ported to Python from http://codyaray.com/2013/04/java-ssl-with-multiple-keystores
+# which was inspired by http://stackoverflow.com/questions/1793979/registering-multiple-keystores-in-jvm
+
+class CompositeX509KeyManager(X509KeyManager):
+
+ def __init__(self, key_managers):
+ self.key_managers = key_managers
+
+ def chooseClientAlias(self, key_type, issuers, socket):
+ for key_manager in self.key_managers:
+ alias = key_manager.chooseClientAlias(key_type, issuers, socket)
+ if alias:
+ return alias;
+ return None
+
+ def chooseServerAlias(self, key_type, issuers, socket):
+ for key_manager in self.key_managers:
+ alias = key_manager.chooseServerAlias(key_type, issuers, socket)
+ if alias:
+ return alias;
+ return None
+
+ def getPrivateKey(self, alias):
+ for key_manager in self.key_managers:
+            private_key = key_manager.getPrivateKey(alias)
+ if private_key:
+ return private_key
+ return None
+
+ def getCertificateChain(self, alias):
+ for key_manager in self.key_managers:
+ chain = key_manager.getCertificateChain(alias)
+ if chain:
+ return chain
+ return None
+
+ def getClientAliases(self, key_type, issuers):
+ aliases = []
+ for key_manager in self.key_managers:
+ aliases.extend(key_manager.getClientAliases(key_type, issuers))
+ if not aliases:
+ return None
+ else:
+ return aliases
+
+ def getServerAliases(self, key_type, issuers):
+ aliases = []
+ for key_manager in self.key_managers:
+ aliases.extend(key_manager.getServerAliases(key_type, issuers))
+ if not aliases:
+ return None
+ else:
+ return aliases
+
+
+class CompositeX509TrustManager(X509TrustManager):
+
+ def __init__(self, trust_managers):
+ self.trust_managers = trust_managers
+
+ def checkClientTrusted(self, chain, auth_type):
+ for trust_manager in self.trust_managers:
+ try:
+                trust_manager.checkClientTrusted(chain, auth_type);
+ return
+ except CertificateException:
+ pass
+ raise CertificateException("None of the TrustManagers trust this certificate chain")
+
+ def checkServerTrusted(self, chain, auth_type):
+ for trust_manager in self.trust_managers:
+ try:
+                trust_manager.checkServerTrusted(chain, auth_type);
+ return
+ except CertificateException:
+ pass
+ raise CertificateException("None of the TrustManagers trust this certificate chain")
+
+ def getAcceptedIssuers(self):
+ certs = []
+ for trust_manager in self.trust_managers:
+            certs.extend(trust_manager.getAcceptedIssuers())
+ return certs
diff --git a/Lib/asynchat.py b/Lib/asynchat.py
deleted file mode 100644
--- a/Lib/asynchat.py
+++ /dev/null
@@ -1,295 +0,0 @@
-# -*- Mode: Python; tab-width: 4 -*-
-# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
-# Author: Sam Rushing <rushing at nightmare.com>
-
-# ======================================================================
-# Copyright 1996 by Sam Rushing
-#
-# All Rights Reserved
-#
-# Permission to use, copy, modify, and distribute this software and
-# its documentation for any purpose and without fee is hereby
-# granted, provided that the above copyright notice appear in all
-# copies and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of Sam
-# Rushing not be used in advertising or publicity pertaining to
-# distribution of the software without specific, written prior
-# permission.
-#
-# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
-# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
-# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-# ======================================================================
-
-r"""A class supporting chat-style (command/response) protocols.
-
-This class adds support for 'chat' style protocols - where one side
-sends a 'command', and the other sends a response (examples would be
-the common internet protocols - smtp, nntp, ftp, etc..).
-
-The handle_read() method looks at the input stream for the current
-'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
-for multi-line output), calling self.found_terminator() on its
-receipt.
-
-for example:
-Say you build an async nntp client using this class. At the start
-of the connection, you'll have self.terminator set to '\r\n', in
-order to process the single-line greeting. Just before issuing a
-'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
-command will be accumulated (using your own 'collect_incoming_data'
-method) up to the terminator, and then control will be returned to
-you - by calling your self.found_terminator() method.
-"""
-
-import socket
-import asyncore
-from collections import deque
-
-class async_chat (asyncore.dispatcher):
- """This is an abstract class. You must derive from this class, and add
- the two methods collect_incoming_data() and found_terminator()"""
-
- # these are overridable defaults
-
- ac_in_buffer_size = 4096
- ac_out_buffer_size = 4096
-
- def __init__ (self, conn=None):
- self.ac_in_buffer = ''
- self.ac_out_buffer = ''
- self.producer_fifo = fifo()
- asyncore.dispatcher.__init__ (self, conn)
-
- def collect_incoming_data(self, data):
- raise NotImplementedError, "must be implemented in subclass"
-
- def found_terminator(self):
- raise NotImplementedError, "must be implemented in subclass"
-
- def set_terminator (self, term):
- "Set the input delimiter. Can be a fixed string of any length, an integer, or None"
- self.terminator = term
-
- def get_terminator (self):
- return self.terminator
-
- # grab some more data from the socket,
- # throw it to the collector method,
- # check for the terminator,
- # if found, transition to the next state.
-
- def handle_read (self):
-
- try:
- data = self.recv (self.ac_in_buffer_size)
- except socket.error, why:
- self.handle_error()
- return
-
- self.ac_in_buffer = self.ac_in_buffer + data
-
- # Continue to search for self.terminator in self.ac_in_buffer,
- # while calling self.collect_incoming_data. The while loop
- # is necessary because we might read several data+terminator
- # combos with a single recv(1024).
-
- while self.ac_in_buffer:
- lb = len(self.ac_in_buffer)
- terminator = self.get_terminator()
- if not terminator:
- # no terminator, collect it all
- self.collect_incoming_data (self.ac_in_buffer)
- self.ac_in_buffer = ''
- elif isinstance(terminator, int) or isinstance(terminator, long):
- # numeric terminator
- n = terminator
- if lb < n:
- self.collect_incoming_data (self.ac_in_buffer)
- self.ac_in_buffer = ''
- self.terminator = self.terminator - lb
- else:
- self.collect_incoming_data (self.ac_in_buffer[:n])
- self.ac_in_buffer = self.ac_in_buffer[n:]
- self.terminator = 0
- self.found_terminator()
- else:
- # 3 cases:
- # 1) end of buffer matches terminator exactly:
- # collect data, transition
- # 2) end of buffer matches some prefix:
- # collect data to the prefix
- # 3) end of buffer does not match any prefix:
- # collect data
- terminator_len = len(terminator)
- index = self.ac_in_buffer.find(terminator)
- if index != -1:
- # we found the terminator
- if index > 0:
- # don't bother reporting the empty string (source of subtle bugs)
- self.collect_incoming_data (self.ac_in_buffer[:index])
- self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
- # This does the Right Thing if the terminator is changed here.
- self.found_terminator()
- else:
- # check for a prefix of the terminator
- index = find_prefix_at_end (self.ac_in_buffer, terminator)
- if index:
- if index != lb:
- # we found a prefix, collect up to the prefix
- self.collect_incoming_data (self.ac_in_buffer[:-index])
- self.ac_in_buffer = self.ac_in_buffer[-index:]
- break
- else:
- # no prefix, collect it all
- self.collect_incoming_data (self.ac_in_buffer)
- self.ac_in_buffer = ''
-
- def handle_write (self):
- self.initiate_send ()
-
- def handle_close (self):
- self.close()
-
- def push (self, data):
- self.producer_fifo.push (simple_producer (data))
- self.initiate_send()
-
- def push_with_producer (self, producer):
- self.producer_fifo.push (producer)
- self.initiate_send()
-
- def readable (self):
- "predicate for inclusion in the readable for select()"
- return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
-
- def writable (self):
- "predicate for inclusion in the writable for select()"
- # return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
- # this is about twice as fast, though not as clear.
- return not (
- (self.ac_out_buffer == '') and
- self.producer_fifo.is_empty() and
- self.connected
- )
-
- def close_when_done (self):
- "automatically close this channel once the outgoing queue is empty"
- self.producer_fifo.push (None)
-
- # refill the outgoing buffer by calling the more() method
- # of the first producer in the queue
- def refill_buffer (self):
- while 1:
- if len(self.producer_fifo):
- p = self.producer_fifo.first()
- # a 'None' in the producer fifo is a sentinel,
- # telling us to close the channel.
- if p is None:
- if not self.ac_out_buffer:
- self.producer_fifo.pop()
- self.close()
- return
- elif isinstance(p, str):
- self.producer_fifo.pop()
- self.ac_out_buffer = self.ac_out_buffer + p
- return
- data = p.more()
- if data:
- self.ac_out_buffer = self.ac_out_buffer + data
- return
- else:
- self.producer_fifo.pop()
- else:
- return
-
- def initiate_send (self):
- obs = self.ac_out_buffer_size
- # try to refill the buffer
- if (len (self.ac_out_buffer) < obs):
- self.refill_buffer()
-
- if self.ac_out_buffer and self.connected:
- # try to send the buffer
- try:
- num_sent = self.send (self.ac_out_buffer[:obs])
- if num_sent:
- self.ac_out_buffer = self.ac_out_buffer[num_sent:]
-
- except socket.error, why:
- self.handle_error()
- return
-
- def discard_buffers (self):
- # Emergencies only!
- self.ac_in_buffer = ''
- self.ac_out_buffer = ''
- while self.producer_fifo:
- self.producer_fifo.pop()
-
-
-class simple_producer:
-
- def __init__ (self, data, buffer_size=512):
- self.data = data
- self.buffer_size = buffer_size
-
- def more (self):
- if len (self.data) > self.buffer_size:
- result = self.data[:self.buffer_size]
- self.data = self.data[self.buffer_size:]
- return result
- else:
- result = self.data
- self.data = ''
- return result
-
-class fifo:
- def __init__ (self, list=None):
- if not list:
- self.list = deque()
- else:
- self.list = deque(list)
-
- def __len__ (self):
- return len(self.list)
-
- def is_empty (self):
- return not self.list
-
- def first (self):
- return self.list[0]
-
- def push (self, data):
- self.list.append(data)
-
- def pop (self):
- if self.list:
- return (1, self.list.popleft())
- else:
- return (0, None)
-
-# Given 'haystack', see if any prefix of 'needle' is at its end. This
-# assumes an exact match has already been checked. Return the number of
-# characters matched.
-# for example:
-# f_p_a_e ("qwerty\r", "\r\n") => 1
-# f_p_a_e ("qwertydkjf", "\r\n") => 0
-# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
-
-# this could maybe be made faster with a computed regex?
-# [answer: no; circa Python-2.0, Jan 2001]
-# new python: 28961/s
-# old python: 18307/s
-# re: 12820/s
-# regex: 14035/s
-
-def find_prefix_at_end (haystack, needle):
- l = len(needle) - 1
- while l and not haystack.endswith(needle[:l]):
- l -= 1
- return l
diff --git a/Lib/asyncore.py b/Lib/asyncore.py
deleted file mode 100644
--- a/Lib/asyncore.py
+++ /dev/null
@@ -1,705 +0,0 @@
-# -*- Mode: Python -*-
-# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
-# Author: Sam Rushing <rushing at nightmare.com>
-
-# ======================================================================
-# Copyright 1996 by Sam Rushing
-#
-# All Rights Reserved
-#
-# Permission to use, copy, modify, and distribute this software and
-# its documentation for any purpose and without fee is hereby
-# granted, provided that the above copyright notice appear in all
-# copies and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of Sam
-# Rushing not be used in advertising or publicity pertaining to
-# distribution of the software without specific, written prior
-# permission.
-#
-# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
-# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
-# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-# ======================================================================
-
-"""Basic infrastructure for asynchronous socket service clients and servers.
-
-There are only two ways to have a program on a single processor do "more
-than one thing at a time". Multi-threaded programming is the simplest and
-most popular way to do it, but there is another very different technique,
-that lets you have nearly all the advantages of multi-threading, without
-actually using multiple threads. it's really only practical if your program
-is largely I/O bound. If your program is CPU bound, then pre-emptive
-scheduled threads are probably what you really need. Network servers are
-rarely CPU-bound, however.
-
-If your operating system supports the select() system call in its I/O
-library (and nearly all do), then you can use it to juggle multiple
-communication channels at once; doing other work while your I/O is taking
-place in the "background." Although this strategy can seem strange and
-complex, especially at first, it is in many ways easier to understand and
-control than multi-threaded programming. The module documented here solves
-many of the difficult problems for you, making the task of building
-sophisticated high-performance network servers and clients a snap.
-"""
-
-import select
-import socket
-import sys
-import time
-import warnings
-
-import os
-from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
- ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
- errorcode
-
-_DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
- EBADF))
-
-try:
- socket_map
-except NameError:
- socket_map = {}
-
-def _strerror(err):
- try:
- return os.strerror(err)
- except (ValueError, OverflowError, NameError):
- if err in errorcode:
- return errorcode[err]
- return "Unknown error %s" %err
-
-class ExitNow(Exception):
- pass
-
-_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)
-
-def read(obj):
- try:
- obj.handle_read_event()
- except _reraised_exceptions:
- raise
- except:
- obj.handle_error()
-
-def write(obj):
- try:
- obj.handle_write_event()
- except _reraised_exceptions:
- raise
- except:
- obj.handle_error()
-
-def _exception(obj):
- try:
- obj.handle_expt_event()
- except _reraised_exceptions:
- raise
- except:
- obj.handle_error()
-
-def readwrite(obj, flags):
- try:
- if flags & select.POLLIN:
- obj.handle_read_event()
- if flags & select.POLLOUT:
- obj.handle_write_event()
- if flags & select.POLLPRI:
- obj.handle_expt_event()
- if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
- obj.handle_close()
- except socket.error, e:
- if e.args[0] not in _DISCONNECTED:
- obj.handle_error()
- else:
- obj.handle_close()
- except _reraised_exceptions:
- raise
- except:
- obj.handle_error()
-
-def poll(timeout=0.0, map=None):
- if map is None:
- map = socket_map
- if map:
- r = []; w = []; e = []
- for fd, obj in map.items():
- is_r = obj.readable()
- is_w = obj.writable()
- if is_r:
- r.append(fd)
- # accepting sockets should not be writable
- if is_w and not obj.accepting:
- w.append(fd)
- if is_r or is_w:
- e.append(fd)
- if [] == r == w == e:
- time.sleep(timeout)
- return
-
- try:
- r, w, e = select.select(r, w, e, timeout)
- except select.error, err:
- if err.args[0] != EINTR:
- raise
- else:
- return
-
- for fd in r:
- obj = map.get(fd)
- if obj is None:
- continue
- read(obj)
-
- for fd in w:
- obj = map.get(fd)
- if obj is None:
- continue
- write(obj)
-
- for fd in e:
- obj = map.get(fd)
- if obj is None:
- continue
- _exception(obj)
-
-def poll2(timeout=0.0, map=None):
- # Use the poll() support added to the select module in Python 2.0
- if map is None:
- map = socket_map
- if timeout is not None:
- # timeout is in milliseconds
- timeout = int(timeout*1000)
- pollster = select.poll()
- if map:
- for fd, obj in map.items():
- flags = 0
- if obj.readable():
- flags |= select.POLLIN | select.POLLPRI
- # accepting sockets should not be writable
- if obj.writable() and not obj.accepting:
- flags |= select.POLLOUT
- if flags:
- # Only check for exceptions if object was either readable
- # or writable.
- flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
- pollster.register(fd, flags)
- try:
- r = pollster.poll(timeout)
- except select.error, err:
- if err.args[0] != EINTR:
- raise
- r = []
- for fd, flags in r:
- obj = map.get(fd)
- if obj is None:
- continue
- readwrite(obj, flags)
-
-poll3 = poll2 # Alias for backward compatibility
-
-def jython_poll_fun(timeout=0.0, map=None):
- # On jython, select.poll() is the mechanism to use,
- # select.select is implemented on top of it.
- # Also, we have to use a cache of such objects, because of problems with frequent
- # creation and destruction of such objects on windows
- # "select() crashes with IOException": http://bugs.jython.org/issue1291
- # So this function is basically the same function as poll2 above, except
- # with the select.poll() functionality wrapped in a try..finally clause.
- if map is None:
- map = socket_map
- if timeout is not None:
- # timeout is in milliseconds
- timeout = int(timeout*1000)
- if map:
- try:
- pollster = select._poll_object_cache.get_poll_object()
- for fd, obj in map.items():
- flags = 0
- if obj.readable():
- flags |= select.POLLIN | select.POLLPRI
- # accepting sockets should not be writable
- if obj.writable() and not obj.accepting:
- flags |= select.POLLOUT
- if flags:
- # Only check for exceptions if object was either readable
- # or writable.
- flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
- pollster.register(obj, flags)
- try:
- r = pollster.poll(timeout)
- except select.error, err:
- if err.args[0] != EINTR:
- raise
- r = []
- for obj, flags in r:
- # obj = map.get(fd)
- if obj is None:
- continue
- readwrite(obj, flags)
- finally:
- select._poll_object_cache.release_poll_object(pollster)
-
-def loop(timeout=30.0, use_poll=False, map=None, count=None):
- if map is None:
- map = socket_map
-
- if use_poll and hasattr(select, 'poll'):
- poll_fun = poll2
- else:
- poll_fun = poll
- if sys.platform.startswith('java'):
- poll_fun = jython_poll_fun
-
- if count is None:
- while map:
- poll_fun(timeout, map)
-
- else:
- while map and count > 0:
- poll_fun(timeout, map)
- count = count - 1
-
-class dispatcher:
-
- debug = False
- connected = False
- accepting = False
- connecting = False
- closing = False
- addr = None
- ignore_log_types = frozenset(['warning'])
-
- def __init__(self, sock=None, map=None):
- if map is None:
- self._map = socket_map
- else:
- self._map = map
-
- self._fileno = None
-
- if sock:
- # Set to nonblocking just to make sure for cases where we
- # get a socket from a blocking source.
- sock.setblocking(0)
- self.set_socket(sock, map)
- self.connected = True
- # The constructor no longer requires that the socket
- # passed be connected.
- try:
- self.addr = sock.getpeername()
- except socket.error, err:
- if err.args[0] in (ENOTCONN, EINVAL):
- # To handle the case where we got an unconnected
- # socket.
- self.connected = False
- else:
- # The socket is broken in some unknown way, alert
- # the user and remove it from the map (to prevent
- # polling of broken sockets).
- self.del_channel(map)
- raise
- else:
- self.socket = None
-
- def __repr__(self):
- status = [self.__class__.__module__+"."+self.__class__.__name__]
- if self.accepting and self.addr:
- status.append('listening')
- elif self.connected:
- status.append('connected')
- if self.addr is not None:
- try:
- status.append('%s:%d' % self.addr)
- except TypeError:
- status.append(repr(self.addr))
- return '<%s at %#x>' % (' '.join(status), id(self))
-
- __str__ = __repr__
-
- def add_channel(self, map=None):
- #self.log_info('adding channel %s' % self)
- if map is None:
- map = self._map
- map[self._fileno] = self
-
- def del_channel(self, map=None):
- fd = self._fileno
- if map is None:
- map = self._map
- if fd in map:
- #self.log_info('closing channel %d:%s' % (fd, self))
- del map[fd]
- self._fileno = None
-
- def create_socket(self, family, type):
- self.family_and_type = family, type
- sock = socket.socket(family, type)
- sock.setblocking(0)
- self.set_socket(sock)
-
- def set_socket(self, sock, map=None):
- self.socket = sock
-## self.__dict__['socket'] = sock
- # On jython, the socket object itself is what is watchable.
- # http://mail.python.org/pipermail/python-dev/2007-May/073443.html
- self._fileno = sock
- self.add_channel(map)
-
- def set_reuse_addr(self):
- # try to re-use a server port if possible
- try:
- self.socket.setsockopt(
- socket.SOL_SOCKET, socket.SO_REUSEADDR,
- self.socket.getsockopt(socket.SOL_SOCKET,
- socket.SO_REUSEADDR) | 1
- )
- except socket.error:
- pass
-
- # ==================================================
- # predicates for select()
- # these are used as filters for the lists of sockets
- # to pass to select().
- # ==================================================
-
- def readable(self):
- return True
-
- def writable(self):
- return True
-
- # ==================================================
- # socket object methods.
- # ==================================================
-
- def listen(self, num):
- self.accepting = True
- if os.name == 'nt' and num > 5:
- num = 5
- return self.socket.listen(num)
-
- def bind(self, addr):
- self.addr = addr
- return self.socket.bind(addr)
-
- def connect(self, address):
- self.connected = False
- self.connecting = True
- err = self.socket.connect_ex(address)
- if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
- or err == EINVAL and os.name in ('nt', 'ce'):
- self.addr = address
- return
- if err in (0, EISCONN):
- self.addr = address
- self.handle_connect_event()
- else:
- raise socket.error(err, errorcode[err])
-
- def accept(self):
- # XXX can return either an address pair or None
- try:
- conn, addr = self.socket.accept()
- except TypeError:
- return None
- except socket.error as why:
- if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
- return None
- else:
- raise
- else:
- return conn, addr
-
- def send(self, data):
- try:
- result = self.socket.send(data)
- return result
- except socket.error, why:
- if why.args[0] == EWOULDBLOCK:
- return 0
- elif why.args[0] in _DISCONNECTED:
- self.handle_close()
- return 0
- else:
- raise
-
- def recv(self, buffer_size):
- try:
- data = self.socket.recv(buffer_size)
- if not data:
- # a closed connection is indicated by signaling
- # a read condition, and having recv() return 0.
- self.handle_close()
- return ''
- else:
- return data
- except socket.error, why:
- # winsock sometimes throws ENOTCONN
- if why.args[0] in _DISCONNECTED:
- self.handle_close()
- return ''
- else:
- raise
-
- def close(self):
- self.connected = False
- self.accepting = False
- self.connecting = False
- self.del_channel()
- try:
- self.socket.close()
- except socket.error, why:
- if why.args[0] not in (ENOTCONN, EBADF):
- raise
-
- # cheap inheritance, used to pass all other attribute
- # references to the underlying socket object.
- def __getattr__(self, attr):
- try:
- retattr = getattr(self.socket, attr)
- except AttributeError:
- raise AttributeError("%s instance has no attribute '%s'"
- %(self.__class__.__name__, attr))
- else:
- msg = "%(me)s.%(attr)s is deprecated. Use %(me)s.socket.%(attr)s " \
- "instead." % {'me': self.__class__.__name__, 'attr':attr}
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
- return retattr
-
- # log and log_info may be overridden to provide more sophisticated
- # logging and warning methods. In general, log is for 'hit' logging
- # and 'log_info' is for informational, warning and error logging.
-
- def log(self, message):
- sys.stderr.write('log: %s\n' % str(message))
-
- def log_info(self, message, type='info'):
- if type not in self.ignore_log_types:
- print '%s: %s' % (type, message)
-
- def handle_read_event(self):
- if self.accepting:
- # accepting sockets are never connected, they "spawn" new
- # sockets that are connected
- self.handle_accept()
- elif not self.connected:
- if self.connecting:
- self.handle_connect_event()
- self.handle_read()
- else:
- self.handle_read()
-
- def handle_connect_event(self):
- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- if err != 0:
- raise socket.error(err, _strerror(err))
- self.handle_connect()
- self.connected = True
- self.connecting = False
-
- def handle_write_event(self):
- if self.accepting:
- # Accepting sockets shouldn't get a write event.
- # We will pretend it didn't happen.
- return
-
- if not self.connected:
- if self.connecting:
- self.handle_connect_event()
- self.handle_write()
-
- def handle_expt_event(self):
- # handle_expt_event() is called if there might be an error on the
- # socket, or if there is OOB data
- # check for the error condition first
- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- if err != 0:
- # we can get here when select.select() says that there is an
- # exceptional condition on the socket
- # since there is an error, we'll go ahead and close the socket
- # like we would in a subclassed handle_read() that received no
- # data
- self.handle_close()
- else:
- self.handle_expt()
-
- def handle_error(self):
- nil, t, v, tbinfo = compact_traceback()
-
- # sometimes a user repr method will crash.
- try:
- self_repr = repr(self)
- except:
- self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
-
- self.log_info(
- 'uncaptured python exception, closing channel %s (%s:%s %s)' % (
- self_repr,
- t,
- v,
- tbinfo
- ),
- 'error'
- )
- self.handle_close()
-
- def handle_expt(self):
- self.log_info('unhandled incoming priority event', 'warning')
-
- def handle_read(self):
- self.log_info('unhandled read event', 'warning')
-
- def handle_write(self):
- self.log_info('unhandled write event', 'warning')
-
- def handle_connect(self):
- self.log_info('unhandled connect event', 'warning')
-
- def handle_accept(self):
- self.log_info('unhandled accept event', 'warning')
-
- def handle_close(self):
- self.log_info('unhandled close event', 'warning')
- self.close()
-
-# ---------------------------------------------------------------------------
-# adds simple buffered output capability, useful for simple clients.
-# [for more sophisticated usage use asynchat.async_chat]
-# ---------------------------------------------------------------------------
-
-class dispatcher_with_send(dispatcher):
-
- def __init__(self, sock=None, map=None):
- dispatcher.__init__(self, sock, map)
- self.out_buffer = ''
-
- def initiate_send(self):
- num_sent = 0
- num_sent = dispatcher.send(self, self.out_buffer[:512])
- self.out_buffer = self.out_buffer[num_sent:]
-
- def handle_write(self):
- self.initiate_send()
-
- def writable(self):
- return (not self.connected) or len(self.out_buffer)
-
- def send(self, data):
- if self.debug:
- self.log_info('sending %s' % repr(data))
- self.out_buffer = self.out_buffer + data
- self.initiate_send()
-
-# ---------------------------------------------------------------------------
-# used for debugging.
-# ---------------------------------------------------------------------------
-
-def compact_traceback():
- t, v, tb = sys.exc_info()
- tbinfo = []
- if not tb: # Must have a traceback
- raise AssertionError("traceback does not exist")
- while tb:
- tbinfo.append((
- tb.tb_frame.f_code.co_filename,
- tb.tb_frame.f_code.co_name,
- str(tb.tb_lineno)
- ))
- tb = tb.tb_next
-
- # just to be safe
- del tb
-
- file, function, line = tbinfo[-1]
- info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
- return (file, function, line), t, v, info
-
-def close_all(map=None, ignore_all=False):
- if map is None:
- map = socket_map
- for x in map.values():
- try:
- x.close()
- except OSError, x:
- if x.args[0] == EBADF:
- pass
- elif not ignore_all:
- raise
- except _reraised_exceptions:
- raise
- except:
- if not ignore_all:
- raise
- map.clear()
-
-# Asynchronous File I/O:
-#
-# After a little research (reading man pages on various unixen, and
-# digging through the linux kernel), I've determined that select()
-# isn't meant for doing asynchronous file i/o.
-# Heartening, though - reading linux/mm/filemap.c shows that linux
-# supports asynchronous read-ahead. So _MOST_ of the time, the data
-# will be sitting in memory for us already when we go to read it.
-#
-# What other OS's (besides NT) support async file i/o? [VMS?]
-#
-# Regardless, this is useful for pipes, and stdin/stdout...
-
-if os.name == 'posix':
- import fcntl
-
- class file_wrapper:
- # Here we override just enough to make a file
- # look like a socket for the purposes of asyncore.
- # The passed fd is automatically os.dup()'d
-
- def __init__(self, fd):
- self.fd = os.dup(fd)
-
- def recv(self, *args):
- return os.read(self.fd, *args)
-
- def send(self, *args):
- return os.write(self.fd, *args)
-
- def getsockopt(self, level, optname, buflen=None):
- if (level == socket.SOL_SOCKET and
- optname == socket.SO_ERROR and
- not buflen):
- return 0
- raise NotImplementedError("Only asyncore specific behaviour "
- "implemented.")
-
- read = recv
- write = send
-
- def close(self):
- os.close(self.fd)
-
- def fileno(self):
- return self.fd
-
- class file_dispatcher(dispatcher):
-
- def __init__(self, fd, map=None):
- dispatcher.__init__(self, None, map)
- self.connected = True
- try:
- fd = fd.fileno()
- except AttributeError:
- pass
- self.set_file(fd)
- # set it to non-blocking mode
- flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
- flags = flags | os.O_NONBLOCK
- fcntl.fcntl(fd, fcntl.F_SETFL, flags)
-
- def set_file(self, fd):
- self.socket = file_wrapper(fd)
- self._fileno = self.socket.fileno()
- self.add_channel()
diff --git a/Lib/distutils/command/install.py b/Lib/distutils/command/install.py
--- a/Lib/distutils/command/install.py
+++ b/Lib/distutils/command/install.py
@@ -6,7 +6,7 @@
# This module should be kept compatible with Python 2.1.
-__revision__ = "$Id: install.py 43363 2006-03-27 21:55:21Z phillip.eby $"
+__revision__ = "$Id$"
import sys, os, string
from types import *
@@ -16,8 +16,11 @@
from distutils.errors import DistutilsPlatformError
from distutils.file_util import write_file
from distutils.util import convert_path, subst_vars, change_root
+from distutils.util import get_platform
from distutils.errors import DistutilsOptionError
-from glob import glob
+from site import USER_BASE
+from site import USER_SITE
+
if sys.version < "2.2":
WINDOWS_SCHEME = {
@@ -51,13 +54,20 @@
'scripts': '$base/bin',
'data' : '$base',
},
+ 'unix_user': {
+ 'purelib': '$usersite',
+ 'platlib': '$usersite',
+ 'headers': '$userbase/include/python$py_version_short/$dist_name',
+ 'scripts': '$userbase/bin',
+ 'data' : '$userbase',
+ },
'nt': WINDOWS_SCHEME,
- 'mac': {
- 'purelib': '$base/Lib/site-packages',
- 'platlib': '$base/Lib/site-packages',
- 'headers': '$base/Include/$dist_name',
- 'scripts': '$base/Scripts',
- 'data' : '$base',
+ 'nt_user': {
+ 'purelib': '$usersite',
+ 'platlib': '$usersite',
+ 'headers': '$userbase/Python$py_version_nodot/Include/$dist_name',
+ 'scripts': '$userbase/Scripts',
+ 'data' : '$userbase',
},
'os2': {
'purelib': '$base/Lib/site-packages',
@@ -66,6 +76,13 @@
'scripts': '$base/Scripts',
'data' : '$base',
},
+ 'os2_home': {
+ 'purelib': '$usersite',
+ 'platlib': '$usersite',
+ 'headers': '$userbase/include/python$py_version_short/$dist_name',
+ 'scripts': '$userbase/bin',
+ 'data' : '$userbase',
+ },
'java': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
@@ -93,6 +110,8 @@
"(Unix only) prefix for platform-specific files"),
('home=', None,
"(Unix only) home directory to install under"),
+ ('user', None,
+ "install in user site-package '%s'" % USER_SITE),
# Or, just set the base director(y|ies)
('install-base=', None,
@@ -144,7 +163,7 @@
"filename in which to record list of installed files"),
]
- boolean_options = ['compile', 'force', 'skip-build']
+ boolean_options = ['compile', 'force', 'skip-build', 'user']
negative_opt = {'no-compile' : 'compile'}
@@ -155,6 +174,7 @@
self.prefix = None
self.exec_prefix = None
self.home = None
+ self.user = 0
# These select only the installation base; it's up to the user to
# specify the installation scheme (currently, that means supplying
@@ -173,6 +193,8 @@
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
+ self.install_userbase = USER_BASE
+ self.install_usersite = USER_SITE
self.compile = None
self.optimize = None
@@ -248,6 +270,11 @@
raise DistutilsOptionError, \
"must supply either home or prefix/exec-prefix -- not both"
+ if self.user and (self.prefix or self.exec_prefix or self.home or
+ self.install_base or self.install_platbase):
+ raise DistutilsOptionError("can't combine user with with prefix/"
+ "exec_prefix/home or install_(plat)base")
+
# Next, stuff that's wrong (or dubious) only on certain platforms.
if os.name != "posix":
if self.exec_prefix:
@@ -283,10 +310,13 @@
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
+ 'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
+ 'userbase': self.install_userbase,
+ 'usersite': self.install_usersite,
}
self.expand_basedirs()
@@ -308,6 +338,10 @@
self.dump_dirs("post-expand_dirs()")
+ # Create directories in the home dir:
+ if self.user:
+ self.create_home_path()
+
# Pick the actual directory to install all modules to: either
# install_purelib or install_platlib, depending on whether this
# module distribution is pure or not. Of course, if the user
@@ -322,7 +356,8 @@
# Convert directories from Unix /-separated syntax to the local
# convention.
self.convert_paths('lib', 'purelib', 'platlib',
- 'scripts', 'data', 'headers')
+ 'scripts', 'data', 'headers',
+ 'userbase', 'usersite')
# Well, we're not actually fully completely finalized yet: we still
# have to deal with 'extra_path', which is the hack for allowing
@@ -359,7 +394,7 @@
opt_name = opt[0]
if opt_name[-1] == "=":
opt_name = opt_name[0:-1]
- if self.negative_opt.has_key(opt_name):
+ if opt_name in self.negative_opt:
opt_name = string.translate(self.negative_opt[opt_name],
longopt_xlate)
val = not getattr(self, opt_name)
@@ -383,7 +418,13 @@
"installation scheme is incomplete")
return
- if self.home is not None:
+ if self.user:
+ if self.install_userbase is None:
+ raise DistutilsPlatformError(
+ "User base directory is not specified")
+ self.install_base = self.install_platbase = self.install_userbase
+ self.select_scheme("unix_user")
+ elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
@@ -408,7 +449,13 @@
def finalize_other (self): # Windows and Mac OS for now
- if self.home is not None:
+ if self.user:
+ if self.install_userbase is None:
+ raise DistutilsPlatformError(
+ "User base directory is not specified")
+ self.install_base = self.install_platbase = self.install_userbase
+ self.select_scheme(os.name + "_user")
+ elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
@@ -438,7 +485,7 @@
for attr in attrs:
val = getattr(self, attr)
if val is not None:
- if os.name == 'posix':
+ if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
@@ -503,6 +550,16 @@
attr = "install_" + name
setattr(self, attr, change_root(self.root, getattr(self, attr)))
+ def create_home_path(self):
+ """Create directories under ~
+ """
+ if not self.user:
+ return
+ home = convert_path(os.path.expanduser("~"))
+ for name, path in self.config_vars.iteritems():
+ if path.startswith(home) and not os.path.isdir(path):
+ self.debug_print("os.makedirs('%s', 0700)" % path)
+ os.makedirs(path, 0700)
# -- Command execution methods -------------------------------------
@@ -511,6 +568,14 @@
# Obviously have to build before we can install
if not self.skip_build:
self.run_command('build')
+ # If we built for any other platform, we can't install.
+ build_plat = self.distribution.get_command_obj('build').plat_name
+ # check warn_dir - it is a clue that the 'install' is happening
+ # internally, and not to sys.path, so we don't check the platform
+ # matches what we are running.
+ if self.warn_dir and build_plat != get_platform():
+ raise DistutilsPlatformError("Can't install when "
+ "cross-compiling")
# Run all sub-commands (at least those that need to be run)
for cmd_name in self.get_sub_commands():
diff --git a/Lib/ftplib.py b/Lib/ftplib.py
deleted file mode 100644
--- a/Lib/ftplib.py
+++ /dev/null
@@ -1,1047 +0,0 @@
-"""An FTP client class and some helper functions.
-
-Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
-
-Example:
-
->>> from ftplib import FTP
->>> ftp = FTP('ftp.python.org') # connect to host, default port
->>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
-'230 Guest login ok, access restrictions apply.'
->>> ftp.retrlines('LIST') # list directory contents
-total 9
-drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
-drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
-drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
-drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
-d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
-drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
-drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
-drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
--rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
-'226 Transfer complete.'
->>> ftp.quit()
-'221 Goodbye.'
->>>
-
-A nice test that reveals some of the network dialogue would be:
-python ftplib.py -d localhost -l -p -l
-"""
-
-#
-# Changes and improvements suggested by Steve Majewski.
-# Modified by Jack to work on the mac.
-# Modified by Siebren to support docstrings and PASV.
-# Modified by Phil Schwartz to add storbinary and storlines callbacks.
-# Modified by Giampaolo Rodola' to add TLS support.
-#
-
-import os
-import sys
-
-# Import SOCKS module if it exists, else standard socket module socket
-try:
- import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket
- from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn
-except ImportError:
- import socket
-from socket import _GLOBAL_DEFAULT_TIMEOUT
-
-__all__ = ["FTP","Netrc"]
-
-# Magic number from <socket.h>
-MSG_OOB = 0x1 # Process data out of band
-
-
-# The standard FTP server control port
-FTP_PORT = 21
-
-
-# Exception raised when an error or invalid response is received
-class Error(Exception): pass
-class error_reply(Error): pass # unexpected [123]xx reply
-class error_temp(Error): pass # 4xx errors
-class error_perm(Error): pass # 5xx errors
-class error_proto(Error): pass # response does not begin with [1-5]
-
-
-# All exceptions (hopefully) that may be raised here and that aren't
-# (always) programming errors on our side
-all_errors = (Error, IOError, EOFError)
-
-
-# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
-CRLF = '\r\n'
-
-# The class itself
-class FTP:
-
- '''An FTP client class.
-
- To create a connection, call the class using these arguments:
- host, user, passwd, acct, timeout
-
- The first four arguments are all strings, and have default value ''.
- timeout must be numeric and defaults to None if not passed,
- meaning that no timeout will be set on any ftp socket(s)
- If a timeout is passed, then this is now the default timeout for all ftp
- socket operations for this instance.
-
- Then use self.connect() with optional host and port argument.
-
- To download a file, use ftp.retrlines('RETR ' + filename),
- or ftp.retrbinary() with slightly different arguments.
- To upload a file, use ftp.storlines() or ftp.storbinary(),
- which have an open file as argument (see their definitions
- below for details).
- The download/upload functions first issue appropriate TYPE
- and PORT or PASV commands.
-'''
-
- debugging = 0
- host = ''
- port = FTP_PORT
- sock = None
- file = None
- welcome = None
- passiveserver = 1
-
- # Initialization method (called by class instantiation).
- # Initialize host to localhost, port to standard ftp port
- # Optional arguments are host (for connect()),
- # and user, passwd, acct (for login())
- def __init__(self, host='', user='', passwd='', acct='',
- timeout=_GLOBAL_DEFAULT_TIMEOUT):
- self.timeout = timeout
- if host:
- self.connect(host)
- if user:
- self.login(user, passwd, acct)
-
- def connect(self, host='', port=0, timeout=-999):
- '''Connect to host. Arguments are:
- - host: hostname to connect to (string, default previous host)
- - port: port to connect to (integer, default previous port)
- '''
- if host != '':
- self.host = host
- if port > 0:
- self.port = port
- if timeout != -999:
- self.timeout = timeout
- self.sock = socket.create_connection((self.host, self.port), self.timeout)
- self.af = self.sock.family
- self.file = self.sock.makefile('rb')
- self.welcome = self.getresp()
- return self.welcome
-
- def getwelcome(self):
- '''Get the welcome message from the server.
- (this is read and squirreled away by connect())'''
- if self.debugging:
- print '*welcome*', self.sanitize(self.welcome)
- return self.welcome
-
- def set_debuglevel(self, level):
- '''Set the debugging level.
- The required argument level means:
- 0: no debugging output (default)
- 1: print commands and responses but not body text etc.
- 2: also print raw lines read and sent before stripping CR/LF'''
- self.debugging = level
- debug = set_debuglevel
-
- def set_pasv(self, val):
- '''Use passive or active mode for data transfers.
- With a false argument, use the normal PORT mode,
- With a true argument, use the PASV command.'''
- self.passiveserver = val
-
- # Internal: "sanitize" a string for printing
- def sanitize(self, s):
- if s[:5] == 'pass ' or s[:5] == 'PASS ':
- i = len(s)
- while i > 5 and s[i-1] in '\r\n':
- i = i-1
- s = s[:5] + '*'*(i-5) + s[i:]
- return repr(s)
-
- # Internal: send one line to the server, appending CRLF
- def putline(self, line):
- line = line + CRLF
- if self.debugging > 1: print '*put*', self.sanitize(line)
- self.sock.sendall(line)
-
- # Internal: send one command to the server (through putline())
- def putcmd(self, line):
- if self.debugging: print '*cmd*', self.sanitize(line)
- self.putline(line)
-
- # Internal: return one line from the server, stripping CRLF.
- # Raise EOFError if the connection is closed
- def getline(self):
- line = self.file.readline()
- if self.debugging > 1:
- print '*get*', self.sanitize(line)
- if not line: raise EOFError
- if line[-2:] == CRLF: line = line[:-2]
- elif line[-1:] in CRLF: line = line[:-1]
- return line
-
- # Internal: get a response from the server, which may possibly
- # consist of multiple lines. Return a single string with no
- # trailing CRLF. If the response consists of multiple lines,
- # these are separated by '\n' characters in the string
- def getmultiline(self):
- line = self.getline()
- if line[3:4] == '-':
- code = line[:3]
- while 1:
- nextline = self.getline()
- line = line + ('\n' + nextline)
- if nextline[:3] == code and \
- nextline[3:4] != '-':
- break
- return line
-
- # Internal: get a response from the server.
- # Raise various errors if the response indicates an error
- def getresp(self):
- resp = self.getmultiline()
- if self.debugging: print '*resp*', self.sanitize(resp)
- self.lastresp = resp[:3]
- c = resp[:1]
- if c in ('1', '2', '3'):
- return resp
- if c == '4':
- raise error_temp, resp
- if c == '5':
- raise error_perm, resp
- raise error_proto, resp
-
- def voidresp(self):
- """Expect a response beginning with '2'."""
- resp = self.getresp()
- if resp[:1] != '2':
- raise error_reply, resp
- return resp
-
- def abort(self):
- '''Abort a file transfer. Uses out-of-band data.
- This does not follow the procedure from the RFC to send Telnet
- IP and Synch; that doesn't seem to work with the servers I've
- tried. Instead, just send the ABOR command as OOB data.'''
- line = 'ABOR' + CRLF
- if self.debugging > 1: print '*put urgent*', self.sanitize(line)
- self.sock.sendall(line, MSG_OOB)
- resp = self.getmultiline()
- if resp[:3] not in ('426', '225', '226'):
- raise error_proto, resp
-
- def sendcmd(self, cmd):
- '''Send a command and return the response.'''
- self.putcmd(cmd)
- return self.getresp()
-
- def voidcmd(self, cmd):
- """Send a command and expect a response beginning with '2'."""
- self.putcmd(cmd)
- return self.voidresp()
-
- def sendport(self, host, port):
- '''Send a PORT command with the current host and the given
- port number.
- '''
- hbytes = host.split('.')
- pbytes = [repr(port//256), repr(port%256)]
- bytes = hbytes + pbytes
- cmd = 'PORT ' + ','.join(bytes)
- return self.voidcmd(cmd)
-
- def sendeprt(self, host, port):
- '''Send a EPRT command with the current host and the given port number.'''
- af = 0
- if self.af == socket.AF_INET:
- af = 1
- if self.af == socket.AF_INET6:
- af = 2
- if af == 0:
- raise error_proto, 'unsupported address family'
- fields = ['', repr(af), host, repr(port), '']
- cmd = 'EPRT ' + '|'.join(fields)
- return self.voidcmd(cmd)
-
- def makeport(self):
- '''Create a new socket and send a PORT command for it.'''
- err = None
- sock = None
- for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
- af, socktype, proto, canonname, sa = res
- try:
- sock = socket.socket(af, socktype, proto)
- sock.bind(sa)
- except socket.error, err:
- if sock:
- sock.close()
- sock = None
- continue
- break
- if sock is None:
- if err is not None:
- raise err
- else:
- raise socket.error("getaddrinfo returns an empty list")
- sock.listen(1)
- port = sock.getsockname()[1] # Get proper port
- host = self.sock.getsockname()[0] # Get proper host
- if self.af == socket.AF_INET:
- resp = self.sendport(host, port)
- else:
- resp = self.sendeprt(host, port)
- if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
- sock.settimeout(self.timeout)
- return sock
-
- def makepasv(self):
- if self.af == socket.AF_INET:
- host, port = parse227(self.sendcmd('PASV'))
- else:
- host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
- return host, port
-
- def ntransfercmd(self, cmd, rest=None):
- """Initiate a transfer over the data connection.
-
- If the transfer is active, send a port command and the
- transfer command, and accept the connection. If the server is
- passive, send a pasv command, connect to it, and start the
- transfer command. Either way, return the socket for the
- connection and the expected size of the transfer. The
- expected size may be None if it could not be determined.
-
- Optional `rest' argument can be a string that is sent as the
- argument to a REST command. This is essentially a server
- marker used to tell the server to skip over any data up to the
- given marker.
- """
- size = None
- if self.passiveserver:
- host, port = self.makepasv()
- conn = socket.create_connection((host, port), self.timeout)
- try:
- if rest is not None:
- self.sendcmd("REST %s" % rest)
- resp = self.sendcmd(cmd)
- # Some servers apparently send a 200 reply to
- # a LIST or STOR command, before the 150 reply
- # (and way before the 226 reply). This seems to
- # be in violation of the protocol (which only allows
- # 1xx or error messages for LIST), so we just discard
- # this response.
- if resp[0] == '2':
- resp = self.getresp()
- if resp[0] != '1':
- raise error_reply, resp
- except:
- conn.close()
- raise
- else:
- sock = self.makeport()
- try:
- if rest is not None:
- self.sendcmd("REST %s" % rest)
- resp = self.sendcmd(cmd)
- # See above.
- if resp[0] == '2':
- resp = self.getresp()
- if resp[0] != '1':
- raise error_reply, resp
- conn, sockaddr = sock.accept()
- if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
- conn.settimeout(self.timeout)
- finally:
- sock.close()
- if resp[:3] == '150':
- # this is conditional in case we received a 125
- size = parse150(resp)
- return conn, size
-
- def transfercmd(self, cmd, rest=None):
- """Like ntransfercmd() but returns only the socket."""
- return self.ntransfercmd(cmd, rest)[0]
-
- def login(self, user = '', passwd = '', acct = ''):
- '''Login, default anonymous.'''
- if not user: user = 'anonymous'
- if not passwd: passwd = ''
- if not acct: acct = ''
- if user == 'anonymous' and passwd in ('', '-'):
- # If there is no anonymous ftp password specified
- # then we'll just use anonymous@
- # We don't send any other thing because:
- # - We want to remain anonymous
- # - We want to stop SPAM
- # - We don't want to let ftp sites to discriminate by the user,
- # host or country.
- passwd = passwd + 'anonymous@'
- resp = self.sendcmd('USER ' + user)
- if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd)
- if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct)
- if resp[0] != '2':
- raise error_reply, resp
- return resp
-
- def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
- """Retrieve data in binary mode. A new port is created for you.
-
- Args:
- cmd: A RETR command.
- callback: A single parameter callable to be called on each
- block of data read.
- blocksize: The maximum number of bytes to read from the
- socket at one time. [default: 8192]
- rest: Passed to transfercmd(). [default: None]
-
- Returns:
- The response code.
- """
- self.voidcmd('TYPE I')
- conn = self.transfercmd(cmd, rest)
- while 1:
- data = conn.recv(blocksize)
- if not data:
- break
- callback(data)
- conn.close()
- return self.voidresp()
-
- def retrlines(self, cmd, callback = None):
- """Retrieve data in line mode. A new port is created for you.
-
- Args:
- cmd: A RETR, LIST, NLST, or MLSD command.
- callback: An optional single parameter callable that is called
- for each line with the trailing CRLF stripped.
- [default: print_line()]
-
- Returns:
- The response code.
- """
- if callback is None: callback = print_line
- resp = self.sendcmd('TYPE A')
- conn = self.transfercmd(cmd)
- fp = conn.makefile('rb')
- while 1:
- line = fp.readline()
- if self.debugging > 2: print '*retr*', repr(line)
- if not line:
- break
- if line[-2:] == CRLF:
- line = line[:-2]
- elif line[-1:] == '\n':
- line = line[:-1]
- callback(line)
- fp.close()
- conn.close()
- return self.voidresp()
-
- def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
- """Store a file in binary mode. A new port is created for you.
-
- Args:
- cmd: A STOR command.
- fp: A file-like object with a read(num_bytes) method.
- blocksize: The maximum data size to read from fp and send over
- the connection at once. [default: 8192]
- callback: An optional single parameter callable that is called on
- on each block of data after it is sent. [default: None]
- rest: Passed to transfercmd(). [default: None]
-
- Returns:
- The response code.
- """
- self.voidcmd('TYPE I')
- conn = self.transfercmd(cmd, rest)
- while 1:
- buf = fp.read(blocksize)
- if not buf: break
- conn.sendall(buf)
- if callback: callback(buf)
- conn.close()
- return self.voidresp()
-
- def storlines(self, cmd, fp, callback=None):
- """Store a file in line mode. A new port is created for you.
-
- Args:
- cmd: A STOR command.
- fp: A file-like object with a readline() method.
- callback: An optional single parameter callable that is called on
- on each line after it is sent. [default: None]
-
- Returns:
- The response code.
- """
- self.voidcmd('TYPE A')
- conn = self.transfercmd(cmd)
- while 1:
- buf = fp.readline()
- if not buf: break
- if buf[-2:] != CRLF:
- if buf[-1] in CRLF: buf = buf[:-1]
- buf = buf + CRLF
- conn.sendall(buf)
- if callback: callback(buf)
- conn.close()
- return self.voidresp()
-
- def acct(self, password):
- '''Send new account name.'''
- cmd = 'ACCT ' + password
- return self.voidcmd(cmd)
-
- def nlst(self, *args):
- '''Return a list of files in a given directory (default the current).'''
- cmd = 'NLST'
- for arg in args:
- cmd = cmd + (' ' + arg)
- files = []
- self.retrlines(cmd, files.append)
- return files
-
- def dir(self, *args):
- '''List a directory in long form.
- By default list current directory to stdout.
- Optional last argument is callback function; all
- non-empty arguments before it are concatenated to the
- LIST command. (This *should* only be used for a pathname.)'''
- cmd = 'LIST'
- func = None
- if args[-1:] and type(args[-1]) != type(''):
- args, func = args[:-1], args[-1]
- for arg in args:
- if arg:
- cmd = cmd + (' ' + arg)
- self.retrlines(cmd, func)
-
- def rename(self, fromname, toname):
- '''Rename a file.'''
- resp = self.sendcmd('RNFR ' + fromname)
- if resp[0] != '3':
- raise error_reply, resp
- return self.voidcmd('RNTO ' + toname)
-
- def delete(self, filename):
- '''Delete a file.'''
- resp = self.sendcmd('DELE ' + filename)
- if resp[:3] in ('250', '200'):
- return resp
- else:
- raise error_reply, resp
-
- def cwd(self, dirname):
- '''Change to a directory.'''
- if dirname == '..':
- try:
- return self.voidcmd('CDUP')
- except error_perm, msg:
- if msg.args[0][:3] != '500':
- raise
- elif dirname == '':
- dirname = '.' # does nothing, but could return error
- cmd = 'CWD ' + dirname
- return self.voidcmd(cmd)
-
- def size(self, filename):
- '''Retrieve the size of a file.'''
- # The SIZE command is defined in RFC-3659
- resp = self.sendcmd('SIZE ' + filename)
- if resp[:3] == '213':
- s = resp[3:].strip()
- try:
- return int(s)
- except (OverflowError, ValueError):
- return long(s)
-
- def mkd(self, dirname):
- '''Make a directory, return its full pathname.'''
- resp = self.sendcmd('MKD ' + dirname)
- return parse257(resp)
-
- def rmd(self, dirname):
- '''Remove a directory.'''
- return self.voidcmd('RMD ' + dirname)
-
- def pwd(self):
- '''Return current working directory.'''
- resp = self.sendcmd('PWD')
- return parse257(resp)
-
- def quit(self):
- '''Quit, and close the connection.'''
- resp = self.voidcmd('QUIT')
- self.close()
- return resp
-
- def close(self):
- '''Close the connection without assuming anything about it.'''
- if self.file is not None:
- self.file.close()
- if self.sock is not None:
- self.sock.close()
- self.file = self.sock = None
-
-try:
- import ssl
- ssl.PROTOCOL_TLSv1
-except (ImportError, AttributeError):
- pass
-else:
- class FTP_TLS(FTP):
- '''A FTP subclass which adds TLS support to FTP as described
- in RFC-4217.
-
- Connect as usual to port 21 implicitly securing the FTP control
- connection before authenticating.
-
- Securing the data connection requires user to explicitly ask
- for it by calling prot_p() method.
-
- Usage example:
- >>> from ftplib import FTP_TLS
- >>> ftps = FTP_TLS('ftp.python.org')
- >>> ftps.login() # login anonymously previously securing control channel
- '230 Guest login ok, access restrictions apply.'
- >>> ftps.prot_p() # switch to secure data connection
- '200 Protection level set to P'
- >>> ftps.retrlines('LIST') # list directory content securely
- total 9
- drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
- drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
- drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
- drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
- d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
- drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
- drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
- drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
- -rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
- '226 Transfer complete.'
- >>> ftps.quit()
- '221 Goodbye.'
- >>>
- '''
- ssl_version = ssl.PROTOCOL_TLSv1
-
- def __init__(self, host='', user='', passwd='', acct='', keyfile=None,
- certfile=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
- self.keyfile = keyfile
- self.certfile = certfile
- self._prot_p = False
- FTP.__init__(self, host, user, passwd, acct, timeout)
-
- def login(self, user='', passwd='', acct='', secure=True):
- if secure and not isinstance(self.sock, ssl.SSLSocket):
- self.auth()
- return FTP.login(self, user, passwd, acct)
-
- def auth(self):
- '''Set up secure control connection by using TLS/SSL.'''
- if isinstance(self.sock, ssl.SSLSocket):
- raise ValueError("Already using TLS")
- if self.ssl_version == ssl.PROTOCOL_TLSv1:
- resp = self.voidcmd('AUTH TLS')
- else:
- resp = self.voidcmd('AUTH SSL')
- self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile,
- ssl_version=self.ssl_version)
- self.file = self.sock.makefile(mode='rb')
- return resp
-
- def prot_p(self):
- '''Set up secure data connection.'''
- # PROT defines whether or not the data channel is to be protected.
- # Though RFC-2228 defines four possible protection levels,
- # RFC-4217 only recommends two, Clear and Private.
- # Clear (PROT C) means that no security is to be used on the
- # data-channel, Private (PROT P) means that the data-channel
- # should be protected by TLS.
- # PBSZ command MUST still be issued, but must have a parameter of
- # '0' to indicate that no buffering is taking place and the data
- # connection should not be encapsulated.
- self.voidcmd('PBSZ 0')
- resp = self.voidcmd('PROT P')
- self._prot_p = True
- return resp
-
- def prot_c(self):
- '''Set up clear text data connection.'''
- resp = self.voidcmd('PROT C')
- self._prot_p = False
- return resp
-
- # --- Overridden FTP methods
-
- def ntransfercmd(self, cmd, rest=None):
- conn, size = FTP.ntransfercmd(self, cmd, rest)
- if self._prot_p:
- conn = ssl.wrap_socket(conn, self.keyfile, self.certfile,
- ssl_version=self.ssl_version)
- return conn, size
-
- def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
- self.voidcmd('TYPE I')
- conn = self.transfercmd(cmd, rest)
- try:
- while 1:
- data = conn.recv(blocksize)
- if not data:
- break
- callback(data)
- # shutdown ssl layer
- if isinstance(conn, ssl.SSLSocket):
- conn.unwrap()
- finally:
- conn.close()
- return self.voidresp()
-
- def retrlines(self, cmd, callback = None):
- if callback is None: callback = print_line
- resp = self.sendcmd('TYPE A')
- conn = self.transfercmd(cmd)
- fp = conn.makefile('rb')
- try:
- while 1:
- line = fp.readline()
- if self.debugging > 2: print '*retr*', repr(line)
- if not line:
- break
- if line[-2:] == CRLF:
- line = line[:-2]
- elif line[-1:] == '\n':
- line = line[:-1]
- callback(line)
- # shutdown ssl layer
- if isinstance(conn, ssl.SSLSocket):
- conn.unwrap()
- finally:
- fp.close()
- conn.close()
- return self.voidresp()
-
- def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
- self.voidcmd('TYPE I')
- conn = self.transfercmd(cmd, rest)
- try:
- while 1:
- buf = fp.read(blocksize)
- if not buf: break
- conn.sendall(buf)
- if callback: callback(buf)
- # shutdown ssl layer
- if isinstance(conn, ssl.SSLSocket):
- conn.unwrap()
- finally:
- conn.close()
- return self.voidresp()
-
- def storlines(self, cmd, fp, callback=None):
- self.voidcmd('TYPE A')
- conn = self.transfercmd(cmd)
- try:
- while 1:
- buf = fp.readline()
- if not buf: break
- if buf[-2:] != CRLF:
- if buf[-1] in CRLF: buf = buf[:-1]
- buf = buf + CRLF
- conn.sendall(buf)
- if callback: callback(buf)
- # shutdown ssl layer
- if isinstance(conn, ssl.SSLSocket):
- conn.unwrap()
- finally:
- conn.close()
- return self.voidresp()
-
- __all__.append('FTP_TLS')
- all_errors = (Error, IOError, EOFError, ssl.SSLError)
-
-
-_150_re = None
-
-def parse150(resp):
- '''Parse the '150' response for a RETR request.
- Returns the expected transfer size or None; size is not guaranteed to
- be present in the 150 message.
- '''
- if resp[:3] != '150':
- raise error_reply, resp
- global _150_re
- if _150_re is None:
- import re
- _150_re = re.compile("150 .* \((\d+) bytes\)", re.IGNORECASE)
- m = _150_re.match(resp)
- if not m:
- return None
- s = m.group(1)
- try:
- return int(s)
- except (OverflowError, ValueError):
- return long(s)
-
-
-_227_re = None
-
-def parse227(resp):
- '''Parse the '227' response for a PASV request.
- Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
- Return ('host.addr.as.numbers', port#) tuple.'''
-
- if resp[:3] != '227':
- raise error_reply, resp
- global _227_re
- if _227_re is None:
- import re
- _227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)')
- m = _227_re.search(resp)
- if not m:
- raise error_proto, resp
- numbers = m.groups()
- host = '.'.join(numbers[:4])
- port = (int(numbers[4]) << 8) + int(numbers[5])
- return host, port
-
-
-def parse229(resp, peer):
- '''Parse the '229' response for a EPSV request.
- Raises error_proto if it does not contain '(|||port|)'
- Return ('host.addr.as.numbers', port#) tuple.'''
-
- if resp[:3] != '229':
- raise error_reply, resp
- left = resp.find('(')
- if left < 0: raise error_proto, resp
- right = resp.find(')', left + 1)
- if right < 0:
- raise error_proto, resp # should contain '(|||port|)'
- if resp[left + 1] != resp[right - 1]:
- raise error_proto, resp
- parts = resp[left + 1:right].split(resp[left+1])
- if len(parts) != 5:
- raise error_proto, resp
- host = peer[0]
- port = int(parts[3])
- return host, port
-
-
-def parse257(resp):
- '''Parse the '257' response for a MKD or PWD request.
- This is a response to a MKD or PWD request: a directory name.
- Returns the directoryname in the 257 reply.'''
-
- if resp[:3] != '257':
- raise error_reply, resp
- if resp[3:5] != ' "':
- return '' # Not compliant to RFC 959, but UNIX ftpd does this
- dirname = ''
- i = 5
- n = len(resp)
- while i < n:
- c = resp[i]
- i = i+1
- if c == '"':
- if i >= n or resp[i] != '"':
- break
- i = i+1
- dirname = dirname + c
- return dirname
-
-
-def print_line(line):
- '''Default retrlines callback to print a line.'''
- print line
-
-
-def ftpcp(source, sourcename, target, targetname = '', type = 'I'):
- '''Copy file from one FTP-instance to another.'''
- if not targetname: targetname = sourcename
- type = 'TYPE ' + type
- source.voidcmd(type)
- target.voidcmd(type)
- sourcehost, sourceport = parse227(source.sendcmd('PASV'))
- target.sendport(sourcehost, sourceport)
- # RFC 959: the user must "listen" [...] BEFORE sending the
- # transfer request.
- # So: STOR before RETR, because here the target is a "user".
- treply = target.sendcmd('STOR ' + targetname)
- if treply[:3] not in ('125', '150'): raise error_proto # RFC 959
- sreply = source.sendcmd('RETR ' + sourcename)
- if sreply[:3] not in ('125', '150'): raise error_proto # RFC 959
- source.voidresp()
- target.voidresp()
-
-
-class Netrc:
- """Class to parse & provide access to 'netrc' format files.
-
- See the netrc(4) man page for information on the file format.
-
- WARNING: This class is obsolete -- use module netrc instead.
-
- """
- __defuser = None
- __defpasswd = None
- __defacct = None
-
- def __init__(self, filename=None):
- if filename is None:
- if "HOME" in os.environ:
- filename = os.path.join(os.environ["HOME"],
- ".netrc")
- else:
- raise IOError, \
- "specify file to load or set $HOME"
- self.__hosts = {}
- self.__macros = {}
- fp = open(filename, "r")
- in_macro = 0
- while 1:
- line = fp.readline()
- if not line: break
- if in_macro and line.strip():
- macro_lines.append(line)
- continue
- elif in_macro:
- self.__macros[macro_name] = tuple(macro_lines)
- in_macro = 0
- words = line.split()
- host = user = passwd = acct = None
- default = 0
- i = 0
- while i < len(words):
- w1 = words[i]
- if i+1 < len(words):
- w2 = words[i + 1]
- else:
- w2 = None
- if w1 == 'default':
- default = 1
- elif w1 == 'machine' and w2:
- host = w2.lower()
- i = i + 1
- elif w1 == 'login' and w2:
- user = w2
- i = i + 1
- elif w1 == 'password' and w2:
- passwd = w2
- i = i + 1
- elif w1 == 'account' and w2:
- acct = w2
- i = i + 1
- elif w1 == 'macdef' and w2:
- macro_name = w2
- macro_lines = []
- in_macro = 1
- break
- i = i + 1
- if default:
- self.__defuser = user or self.__defuser
- self.__defpasswd = passwd or self.__defpasswd
- self.__defacct = acct or self.__defacct
- if host:
- if host in self.__hosts:
- ouser, opasswd, oacct = \
- self.__hosts[host]
- user = user or ouser
- passwd = passwd or opasswd
- acct = acct or oacct
- self.__hosts[host] = user, passwd, acct
- fp.close()
-
- def get_hosts(self):
- """Return a list of hosts mentioned in the .netrc file."""
- return self.__hosts.keys()
-
- def get_account(self, host):
- """Returns login information for the named host.
-
- The return value is a triple containing userid,
- password, and the accounting field.
-
- """
- host = host.lower()
- user = passwd = acct = None
- if host in self.__hosts:
- user, passwd, acct = self.__hosts[host]
- user = user or self.__defuser
- passwd = passwd or self.__defpasswd
- acct = acct or self.__defacct
- return user, passwd, acct
-
- def get_macros(self):
- """Return a list of all defined macro names."""
- return self.__macros.keys()
-
- def get_macro(self, macro):
- """Return a sequence of lines which define a named macro."""
- return self.__macros[macro]
-
-
-
-def test():
- '''Test program.
- Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...
-
- -d dir
- -l list
- -p password
- '''
-
- if len(sys.argv) < 2:
- print test.__doc__
- sys.exit(0)
-
- debugging = 0
- rcfile = None
- while sys.argv[1] == '-d':
- debugging = debugging+1
- del sys.argv[1]
- if sys.argv[1][:2] == '-r':
- # get name of alternate ~/.netrc file:
- rcfile = sys.argv[1][2:]
- del sys.argv[1]
- host = sys.argv[1]
- ftp = FTP(host)
- ftp.set_debuglevel(debugging)
- userid = passwd = acct = ''
- try:
- netrc = Netrc(rcfile)
- except IOError:
- if rcfile is not None:
- sys.stderr.write("Could not open account file"
- " -- using anonymous login.")
- else:
- try:
- userid, passwd, acct = netrc.get_account(host)
- except KeyError:
- # no account for host
- sys.stderr.write(
- "No account -- using anonymous login.")
- ftp.login(userid, passwd, acct)
- for file in sys.argv[2:]:
- if file[:2] == '-l':
- ftp.dir(file[2:])
- elif file[:2] == '-d':
- cmd = 'CWD'
- if file[2:]: cmd = cmd + ' ' + file[2:]
- resp = ftp.sendcmd(cmd)
- elif file == '-p':
- ftp.set_pasv(not ftp.passiveserver)
- else:
- ftp.retrbinary('RETR ' + file, \
- sys.stdout.write, 1024)
- ftp.quit()
-
-
-if __name__ == '__main__':
- test()
diff --git a/Lib/logging/config.py b/Lib/logging/config.py
deleted file mode 100644
--- a/Lib/logging/config.py
+++ /dev/null
@@ -1,909 +0,0 @@
-# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
-#
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose and without fee is hereby granted,
-# provided that the above copyright notice appear in all copies and that
-# both that copyright notice and this permission notice appear in
-# supporting documentation, and that the name of Vinay Sajip
-# not be used in advertising or publicity pertaining to distribution
-# of the software without specific, written prior permission.
-# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
-# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
-# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
-# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-Configuration functions for the logging package for Python. The core package
-is based on PEP 282 and comments thereto in comp.lang.python, and influenced
-by Apache's log4j system.
-
-Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
-
-To use, simply 'import logging' and log away!
-"""
-
-import sys, logging, logging.handlers, socket, struct, os, traceback, re
-import types, cStringIO
-
-try:
- import thread
- import threading
-except ImportError:
- thread = None
-
-from SocketServer import ThreadingTCPServer, StreamRequestHandler
-
-
-DEFAULT_LOGGING_CONFIG_PORT = 9030
-
-if sys.platform == "win32":
- RESET_ERROR = 10054 #WSAECONNRESET
-else:
- RESET_ERROR = 104 #ECONNRESET
-
-#
-# The following code implements a socket listener for on-the-fly
-# reconfiguration of logging.
-#
-# _listener holds the server object doing the listening
-_listener = None
-
-def fileConfig(fname, defaults=None, disable_existing_loggers=True):
- """
- Read the logging configuration from a ConfigParser-format file.
-
- This can be called several times from an application, allowing an end user
- the ability to select from various pre-canned configurations (if the
- developer provides a mechanism to present the choices and load the chosen
- configuration).
- """
- import ConfigParser
-
- cp = ConfigParser.ConfigParser(defaults)
- if hasattr(fname, 'readline'):
- cp.readfp(fname)
- else:
- cp.read(fname)
-
- formatters = _create_formatters(cp)
-
- # critical section
- logging._acquireLock()
- try:
- logging._handlers.clear()
- del logging._handlerList[:]
- # Handlers add themselves to logging._handlers
- handlers = _install_handlers(cp, formatters)
- _install_loggers(cp, handlers, disable_existing_loggers)
- finally:
- logging._releaseLock()
-
-
-def _resolve(name):
- """Resolve a dotted name to a global object."""
- name = name.split('.')
- used = name.pop(0)
- found = __import__(used)
- for n in name:
- used = used + '.' + n
- try:
- found = getattr(found, n)
- except AttributeError:
- __import__(used)
- found = getattr(found, n)
- return found
-
-def _strip_spaces(alist):
- return map(lambda x: x.strip(), alist)
-
-def _encoded(s):
- return s if isinstance(s, str) else s.encode('utf-8')
-
-def _create_formatters(cp):
- """Create and return formatters"""
- flist = cp.get("formatters", "keys")
- if not len(flist):
- return {}
- flist = flist.split(",")
- flist = _strip_spaces(flist)
- formatters = {}
- for form in flist:
- sectname = "formatter_%s" % form
- opts = cp.options(sectname)
- if "format" in opts:
- fs = cp.get(sectname, "format", 1)
- else:
- fs = None
- if "datefmt" in opts:
- dfs = cp.get(sectname, "datefmt", 1)
- else:
- dfs = None
- c = logging.Formatter
- if "class" in opts:
- class_name = cp.get(sectname, "class")
- if class_name:
- c = _resolve(class_name)
- f = c(fs, dfs)
- formatters[form] = f
- return formatters
-
-
-def _install_handlers(cp, formatters):
- """Install and return handlers"""
- hlist = cp.get("handlers", "keys")
- if not len(hlist):
- return {}
- hlist = hlist.split(",")
- hlist = _strip_spaces(hlist)
- handlers = {}
- fixups = [] #for inter-handler references
- for hand in hlist:
- sectname = "handler_%s" % hand
- klass = cp.get(sectname, "class")
- opts = cp.options(sectname)
- if "formatter" in opts:
- fmt = cp.get(sectname, "formatter")
- else:
- fmt = ""
- try:
- klass = eval(klass, vars(logging))
- except (AttributeError, NameError):
- klass = _resolve(klass)
- args = cp.get(sectname, "args")
- args = eval(args, vars(logging))
- h = klass(*args)
- if "level" in opts:
- level = cp.get(sectname, "level")
- h.setLevel(logging._levelNames[level])
- if len(fmt):
- h.setFormatter(formatters[fmt])
- if issubclass(klass, logging.handlers.MemoryHandler):
- if "target" in opts:
- target = cp.get(sectname,"target")
- else:
- target = ""
- if len(target): #the target handler may not be loaded yet, so keep for later...
- fixups.append((h, target))
- handlers[hand] = h
- #now all handlers are loaded, fixup inter-handler references...
- for h, t in fixups:
- h.setTarget(handlers[t])
- return handlers
-
-
-def _install_loggers(cp, handlers, disable_existing_loggers):
- """Create and install loggers"""
-
- # configure the root first
- llist = cp.get("loggers", "keys")
- llist = llist.split(",")
- llist = list(map(lambda x: x.strip(), llist))
- llist.remove("root")
- sectname = "logger_root"
- root = logging.root
- log = root
- opts = cp.options(sectname)
- if "level" in opts:
- level = cp.get(sectname, "level")
- log.setLevel(logging._levelNames[level])
- for h in root.handlers[:]:
- root.removeHandler(h)
- hlist = cp.get(sectname, "handlers")
- if len(hlist):
- hlist = hlist.split(",")
- hlist = _strip_spaces(hlist)
- for hand in hlist:
- log.addHandler(handlers[hand])
-
- #and now the others...
- #we don't want to lose the existing loggers,
- #since other threads may have pointers to them.
- #existing is set to contain all existing loggers,
- #and as we go through the new configuration we
- #remove any which are configured. At the end,
- #what's left in existing is the set of loggers
- #which were in the previous configuration but
- #which are not in the new configuration.
- existing = list(root.manager.loggerDict.keys())
- #The list needs to be sorted so that we can
- #avoid disabling child loggers of explicitly
- #named loggers. With a sorted list it is easier
- #to find the child loggers.
- existing.sort()
- #We'll keep the list of existing loggers
- #which are children of named loggers here...
- child_loggers = []
- #now set up the new ones...
- for log in llist:
- sectname = "logger_%s" % log
- qn = cp.get(sectname, "qualname")
- opts = cp.options(sectname)
- if "propagate" in opts:
- propagate = cp.getint(sectname, "propagate")
- else:
- propagate = 1
- logger = logging.getLogger(qn)
- if qn in existing:
- i = existing.index(qn) + 1 # start with the entry after qn
- prefixed = qn + "."
- pflen = len(prefixed)
- num_existing = len(existing)
- while i < num_existing:
- if existing[i][:pflen] == prefixed:
- child_loggers.append(existing[i])
- i += 1
- existing.remove(qn)
- if "level" in opts:
- level = cp.get(sectname, "level")
- logger.setLevel(logging._levelNames[level])
- for h in logger.handlers[:]:
- logger.removeHandler(h)
- logger.propagate = propagate
- logger.disabled = 0
- hlist = cp.get(sectname, "handlers")
- if len(hlist):
- hlist = hlist.split(",")
- hlist = _strip_spaces(hlist)
- for hand in hlist:
- logger.addHandler(handlers[hand])
-
- #Disable any old loggers. There's no point deleting
- #them as other threads may continue to hold references
- #and by disabling them, you stop them doing any logging.
- #However, don't disable children of named loggers, as that's
- #probably not what was intended by the user.
- for log in existing:
- logger = root.manager.loggerDict[log]
- if log in child_loggers:
- logger.level = logging.NOTSET
- logger.handlers = []
- logger.propagate = 1
- elif disable_existing_loggers:
- logger.disabled = 1
-
-
-
-IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
-
-
-def valid_ident(s):
- m = IDENTIFIER.match(s)
- if not m:
- raise ValueError('Not a valid Python identifier: %r' % s)
- return True
-
-
-# The ConvertingXXX classes are wrappers around standard Python containers,
-# and they serve to convert any suitable values in the container. The
-# conversion converts base dicts, lists and tuples to their wrapped
-# equivalents, whereas strings which match a conversion format are converted
-# appropriately.
-#
-# Each wrapper should have a configurator attribute holding the actual
-# configurator to use for conversion.
-
-class ConvertingDict(dict):
- """A converting dictionary wrapper."""
-
- def __getitem__(self, key):
- value = dict.__getitem__(self, key)
- result = self.configurator.convert(value)
- #If the converted value is different, save for next time
- if value is not result:
- self[key] = result
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
-
- def get(self, key, default=None):
- value = dict.get(self, key, default)
- result = self.configurator.convert(value)
- #If the converted value is different, save for next time
- if value is not result:
- self[key] = result
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
-
- def pop(self, key, default=None):
- value = dict.pop(self, key, default)
- result = self.configurator.convert(value)
- if value is not result:
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
-
-class ConvertingList(list):
- """A converting list wrapper."""
- def __getitem__(self, key):
- value = list.__getitem__(self, key)
- result = self.configurator.convert(value)
- #If the converted value is different, save for next time
- if value is not result:
- self[key] = result
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
-
- def pop(self, idx=-1):
- value = list.pop(self, idx)
- result = self.configurator.convert(value)
- if value is not result:
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- return result
-
-class ConvertingTuple(tuple):
- """A converting tuple wrapper."""
- def __getitem__(self, key):
- value = tuple.__getitem__(self, key)
- result = self.configurator.convert(value)
- if value is not result:
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
-
-class BaseConfigurator(object):
- """
- The configurator base class which defines some useful defaults.
- """
-
- CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
-
- WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
- DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
- INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
- DIGIT_PATTERN = re.compile(r'^\d+$')
-
- value_converters = {
- 'ext' : 'ext_convert',
- 'cfg' : 'cfg_convert',
- }
-
- # We might want to use a different one, e.g. importlib
- importer = __import__
-
- def __init__(self, config):
- self.config = ConvertingDict(config)
- self.config.configurator = self
-
- def resolve(self, s):
- """
- Resolve strings to objects using standard import and attribute
- syntax.
- """
- name = s.split('.')
- used = name.pop(0)
- try:
- found = self.importer(used)
- for frag in name:
- used += '.' + frag
- try:
- found = getattr(found, frag)
- except AttributeError:
- self.importer(used)
- found = getattr(found, frag)
- return found
- except ImportError:
- e, tb = sys.exc_info()[1:]
- v = ValueError('Cannot resolve %r: %s' % (s, e))
- v.__cause__, v.__traceback__ = e, tb
- raise v
-
- def ext_convert(self, value):
- """Default converter for the ext:// protocol."""
- return self.resolve(value)
-
- def cfg_convert(self, value):
- """Default converter for the cfg:// protocol."""
- rest = value
- m = self.WORD_PATTERN.match(rest)
- if m is None:
- raise ValueError("Unable to convert %r" % value)
- else:
- rest = rest[m.end():]
- d = self.config[m.groups()[0]]
- #print d, rest
- while rest:
- m = self.DOT_PATTERN.match(rest)
- if m:
- d = d[m.groups()[0]]
- else:
- m = self.INDEX_PATTERN.match(rest)
- if m:
- idx = m.groups()[0]
- if not self.DIGIT_PATTERN.match(idx):
- d = d[idx]
- else:
- try:
- n = int(idx) # try as number first (most likely)
- d = d[n]
- except TypeError:
- d = d[idx]
- if m:
- rest = rest[m.end():]
- else:
- raise ValueError('Unable to convert '
- '%r at %r' % (value, rest))
- #rest should be empty
- return d
-
- def convert(self, value):
- """
- Convert values to an appropriate type. dicts, lists and tuples are
- replaced by their converting alternatives. Strings are checked to
- see if they have a conversion format and are converted if they do.
- """
- if not isinstance(value, ConvertingDict) and isinstance(value, dict):
- value = ConvertingDict(value)
- value.configurator = self
- elif not isinstance(value, ConvertingList) and isinstance(value, list):
- value = ConvertingList(value)
- value.configurator = self
- elif not isinstance(value, ConvertingTuple) and\
- isinstance(value, tuple):
- value = ConvertingTuple(value)
- value.configurator = self
- elif isinstance(value, basestring): # str for py3k
- m = self.CONVERT_PATTERN.match(value)
- if m:
- d = m.groupdict()
- prefix = d['prefix']
- converter = self.value_converters.get(prefix, None)
- if converter:
- suffix = d['suffix']
- converter = getattr(self, converter)
- value = converter(suffix)
- return value
-
- def configure_custom(self, config):
- """Configure an object with a user-supplied factory."""
- c = config.pop('()')
- if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
- c = self.resolve(c)
- props = config.pop('.', None)
- # Check for valid identifiers
- kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
- result = c(**kwargs)
- if props:
- for name, value in props.items():
- setattr(result, name, value)
- return result
-
- def as_tuple(self, value):
- """Utility function which converts lists to tuples."""
- if isinstance(value, list):
- value = tuple(value)
- return value
-
-class DictConfigurator(BaseConfigurator):
- """
- Configure logging using a dictionary-like object to describe the
- configuration.
- """
-
- def configure(self):
- """Do the configuration."""
-
- config = self.config
- if 'version' not in config:
- raise ValueError("dictionary doesn't specify a version")
- if config['version'] != 1:
- raise ValueError("Unsupported version: %s" % config['version'])
- incremental = config.pop('incremental', False)
- EMPTY_DICT = {}
- logging._acquireLock()
- try:
- if incremental:
- handlers = config.get('handlers', EMPTY_DICT)
- for name in handlers:
- if name not in logging._handlers:
- raise ValueError('No handler found with '
- 'name %r' % name)
- else:
- try:
- handler = logging._handlers[name]
- handler_config = handlers[name]
- level = handler_config.get('level', None)
- if level:
- handler.setLevel(logging._checkLevel(level))
- except StandardError, e:
- raise ValueError('Unable to configure handler '
- '%r: %s' % (name, e))
- loggers = config.get('loggers', EMPTY_DICT)
- for name in loggers:
- try:
- self.configure_logger(name, loggers[name], True)
- except StandardError, e:
- raise ValueError('Unable to configure logger '
- '%r: %s' % (name, e))
- root = config.get('root', None)
- if root:
- try:
- self.configure_root(root, True)
- except StandardError, e:
- raise ValueError('Unable to configure root '
- 'logger: %s' % e)
- else:
- disable_existing = config.pop('disable_existing_loggers', True)
-
- logging._handlers.clear()
- del logging._handlerList[:]
-
- # Do formatters first - they don't refer to anything else
- formatters = config.get('formatters', EMPTY_DICT)
- for name in formatters:
- try:
- formatters[name] = self.configure_formatter(
- formatters[name])
- except StandardError, e:
- raise ValueError('Unable to configure '
- 'formatter %r: %s' % (name, e))
- # Next, do filters - they don't refer to anything else, either
- filters = config.get('filters', EMPTY_DICT)
- for name in filters:
- try:
- filters[name] = self.configure_filter(filters[name])
- except StandardError, e:
- raise ValueError('Unable to configure '
- 'filter %r: %s' % (name, e))
-
- # Next, do handlers - they refer to formatters and filters
- # As handlers can refer to other handlers, sort the keys
- # to allow a deterministic order of configuration
- handlers = config.get('handlers', EMPTY_DICT)
- for name in sorted(handlers):
- try:
- handler = self.configure_handler(handlers[name])
- handler.name = name
- handlers[name] = handler
- except StandardError, e:
- raise ValueError('Unable to configure handler '
- '%r: %s' % (name, e))
- # Next, do loggers - they refer to handlers and filters
-
- #we don't want to lose the existing loggers,
- #since other threads may have pointers to them.
- #existing is set to contain all existing loggers,
- #and as we go through the new configuration we
- #remove any which are configured. At the end,
- #what's left in existing is the set of loggers
- #which were in the previous configuration but
- #which are not in the new configuration.
- root = logging.root
- existing = root.manager.loggerDict.keys()
- #The list needs to be sorted so that we can
- #avoid disabling child loggers of explicitly
- #named loggers. With a sorted list it is easier
- #to find the child loggers.
- existing.sort()
- #We'll keep the list of existing loggers
- #which are children of named loggers here...
- child_loggers = []
- #now set up the new ones...
- loggers = config.get('loggers', EMPTY_DICT)
- for name in loggers:
- name = _encoded(name)
- if name in existing:
- i = existing.index(name)
- prefixed = name + "."
- pflen = len(prefixed)
- num_existing = len(existing)
- i = i + 1 # look at the entry after name
- while (i < num_existing) and\
- (existing[i][:pflen] == prefixed):
- child_loggers.append(existing[i])
- i = i + 1
- existing.remove(name)
- try:
- self.configure_logger(name, loggers[name])
- except StandardError, e:
- raise ValueError('Unable to configure logger '
- '%r: %s' % (name, e))
-
- #Disable any old loggers. There's no point deleting
- #them as other threads may continue to hold references
- #and by disabling them, you stop them doing any logging.
- #However, don't disable children of named loggers, as that's
- #probably not what was intended by the user.
- for log in existing:
- logger = root.manager.loggerDict[log]
- if log in child_loggers:
- logger.level = logging.NOTSET
- logger.handlers = []
- logger.propagate = True
- elif disable_existing:
- logger.disabled = True
-
- # And finally, do the root logger
- root = config.get('root', None)
- if root:
- try:
- self.configure_root(root)
- except StandardError, e:
- raise ValueError('Unable to configure root '
- 'logger: %s' % e)
- finally:
- logging._releaseLock()
-
- def configure_formatter(self, config):
- """Configure a formatter from a dictionary."""
- if '()' in config:
- factory = config['()'] # for use in exception handler
- try:
- result = self.configure_custom(config)
- except TypeError, te:
- if "'format'" not in str(te):
- raise
- #Name of parameter changed from fmt to format.
- #Retry with old name.
- #This is so that code can be used with older Python versions
- #(e.g. by Django)
- config['fmt'] = config.pop('format')
- config['()'] = factory
- result = self.configure_custom(config)
- else:
- fmt = config.get('format', None)
- dfmt = config.get('datefmt', None)
- result = logging.Formatter(fmt, dfmt)
- return result
-
- def configure_filter(self, config):
- """Configure a filter from a dictionary."""
- if '()' in config:
- result = self.configure_custom(config)
- else:
- name = config.get('name', '')
- result = logging.Filter(name)
- return result
-
- def add_filters(self, filterer, filters):
- """Add filters to a filterer from a list of names."""
- for f in filters:
- try:
- filterer.addFilter(self.config['filters'][f])
- except StandardError, e:
- raise ValueError('Unable to add filter %r: %s' % (f, e))
-
- def configure_handler(self, config):
- """Configure a handler from a dictionary."""
- formatter = config.pop('formatter', None)
- if formatter:
- try:
- formatter = self.config['formatters'][formatter]
- except StandardError, e:
- raise ValueError('Unable to set formatter '
- '%r: %s' % (formatter, e))
- level = config.pop('level', None)
- filters = config.pop('filters', None)
- if '()' in config:
- c = config.pop('()')
- if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
- c = self.resolve(c)
- factory = c
- else:
- klass = self.resolve(config.pop('class'))
- #Special case for handler which refers to another handler
- if issubclass(klass, logging.handlers.MemoryHandler) and\
- 'target' in config:
- try:
- config['target'] = self.config['handlers'][config['target']]
- except StandardError, e:
- raise ValueError('Unable to set target handler '
- '%r: %s' % (config['target'], e))
- elif issubclass(klass, logging.handlers.SMTPHandler) and\
- 'mailhost' in config:
- config['mailhost'] = self.as_tuple(config['mailhost'])
- elif issubclass(klass, logging.handlers.SysLogHandler) and\
- 'address' in config:
- config['address'] = self.as_tuple(config['address'])
- factory = klass
- kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
- try:
- result = factory(**kwargs)
- except TypeError, te:
- if "'stream'" not in str(te):
- raise
- #The argument name changed from strm to stream
- #Retry with old name.
- #This is so that code can be used with older Python versions
- #(e.g. by Django)
- kwargs['strm'] = kwargs.pop('stream')
- result = factory(**kwargs)
- if formatter:
- result.setFormatter(formatter)
- if level is not None:
- result.setLevel(logging._checkLevel(level))
- if filters:
- self.add_filters(result, filters)
- return result
-
- def add_handlers(self, logger, handlers):
- """Add handlers to a logger from a list of names."""
- for h in handlers:
- try:
- logger.addHandler(self.config['handlers'][h])
- except StandardError, e:
- raise ValueError('Unable to add handler %r: %s' % (h, e))
-
- def common_logger_config(self, logger, config, incremental=False):
- """
- Perform configuration which is common to root and non-root loggers.
- """
- level = config.get('level', None)
- if level is not None:
- logger.setLevel(logging._checkLevel(level))
- if not incremental:
- #Remove any existing handlers
- for h in logger.handlers[:]:
- logger.removeHandler(h)
- handlers = config.get('handlers', None)
- if handlers:
- self.add_handlers(logger, handlers)
- filters = config.get('filters', None)
- if filters:
- self.add_filters(logger, filters)
-
- def configure_logger(self, name, config, incremental=False):
- """Configure a non-root logger from a dictionary."""
- logger = logging.getLogger(name)
- self.common_logger_config(logger, config, incremental)
- propagate = config.get('propagate', None)
- if propagate is not None:
- logger.propagate = propagate
-
- def configure_root(self, config, incremental=False):
- """Configure a root logger from a dictionary."""
- root = logging.getLogger()
- self.common_logger_config(root, config, incremental)
-
-dictConfigClass = DictConfigurator
-
-def dictConfig(config):
- """Configure logging using a dictionary."""
- dictConfigClass(config).configure()
-
-
-def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
- """
- Start up a socket server on the specified port, and listen for new
- configurations.
-
- These will be sent as a file suitable for processing by fileConfig().
- Returns a Thread object on which you can call start() to start the server,
- and which you can join() when appropriate. To stop the server, call
- stopListening().
- """
- if not thread:
- raise NotImplementedError("listen() needs threading to work")
-
- class ConfigStreamHandler(StreamRequestHandler):
- """
- Handler for a logging configuration request.
-
- It expects a completely new logging configuration and uses fileConfig
- to install it.
- """
- def handle(self):
- """
- Handle a request.
-
- Each request is expected to be a 4-byte length, packed using
- struct.pack(">L", n), followed by the config file.
- Uses fileConfig() to do the grunt work.
- """
- import tempfile
- try:
- conn = self.connection
- chunk = conn.recv(4)
- if len(chunk) == 4:
- slen = struct.unpack(">L", chunk)[0]
- chunk = self.connection.recv(slen)
- while len(chunk) < slen:
- chunk = chunk + conn.recv(slen - len(chunk))
- try:
- import json
- d =json.loads(chunk)
- assert isinstance(d, dict)
- dictConfig(d)
- except:
- #Apply new configuration.
-
- file = cStringIO.StringIO(chunk)
- try:
- fileConfig(file)
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- traceback.print_exc()
- if self.server.ready:
- self.server.ready.set()
- except socket.error, e:
- if not isinstance(e.args, tuple):
- raise
- else:
- errcode = e.args[0]
- if errcode != RESET_ERROR:
- raise
-
- class ConfigSocketReceiver(ThreadingTCPServer):
- """
- A simple TCP socket-based logging config receiver.
- """
-
- allow_reuse_address = 1
-
- def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
- handler=None, ready=None):
- ThreadingTCPServer.__init__(self, (host, port), handler)
- logging._acquireLock()
- self.abort = 0
- logging._releaseLock()
- self.timeout = 1
- self.ready = ready
-
- def serve_until_stopped(self):
- if sys.platform.startswith('java'):
- from select import cpython_compatible_select as select
- else:
- from select import select
- abort = 0
- while not abort:
- rd, wr, ex = select([self.socket.fileno()],
- [], [],
- self.timeout)
- if rd:
- self.handle_request()
- logging._acquireLock()
- abort = self.abort
- logging._releaseLock()
- self.socket.close()
-
- class Server(threading.Thread):
-
- def __init__(self, rcvr, hdlr, port):
- super(Server, self).__init__()
- self.rcvr = rcvr
- self.hdlr = hdlr
- self.port = port
- self.ready = threading.Event()
-
- def run(self):
- server = self.rcvr(port=self.port, handler=self.hdlr,
- ready=self.ready)
- if self.port == 0:
- self.port = server.server_address[1]
- self.ready.set()
- global _listener
- logging._acquireLock()
- _listener = server
- logging._releaseLock()
- server.serve_until_stopped()
-
- return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
-
-def stopListening():
- """
- Stop the listening server which was created with a call to listen().
- """
- global _listener
- logging._acquireLock()
- try:
- if _listener:
- _listener.abort = 1
- _listener = None
- finally:
- logging._releaseLock()
diff --git a/Lib/netrc.py b/Lib/netrc.py
deleted file mode 100644
--- a/Lib/netrc.py
+++ /dev/null
@@ -1,119 +0,0 @@
-"""An object-oriented interface to .netrc files."""
-
-# Module and documentation by Eric S. Raymond, 21 Dec 1998
-
-from __future__ import with_statement
-import os, shlex
-
-__all__ = ["netrc", "NetrcParseError"]
-
-
-class NetrcParseError(Exception):
- """Exception raised on syntax errors in the .netrc file."""
- def __init__(self, msg, filename=None, lineno=None):
- self.filename = filename
- self.lineno = lineno
- self.msg = msg
- Exception.__init__(self, msg)
-
- def __str__(self):
- return "%s (%s, line %s)" % (self.msg, self.filename, self.lineno)
-
-
-class netrc:
- def __init__(self, file=None):
- if file is None:
- try:
- file = os.path.join(os.environ['HOME'], ".netrc")
- except KeyError:
- raise IOError("Could not find .netrc: $HOME is not set")
- self.hosts = {}
- self.macros = {}
- with open(file) as fp:
- self._parse(file, fp)
-
- def _parse(self, file, fp):
- lexer = shlex.shlex(fp)
- lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
- lexer.commenters = lexer.commenters.replace('#', '')
- while 1:
- # Look for a machine, default, or macdef top-level keyword
- toplevel = tt = lexer.get_token()
- if not tt:
- break
- elif tt[0] == '#':
- fp.readline();
- continue;
- elif tt == 'machine':
- entryname = lexer.get_token()
- elif tt == 'default':
- entryname = 'default'
- elif tt == 'macdef': # Just skip to end of macdefs
- entryname = lexer.get_token()
- self.macros[entryname] = []
- lexer.whitespace = ' \t'
- while 1:
- line = lexer.instream.readline()
- if not line or line == '\012':
- lexer.whitespace = ' \t\r\n'
- break
- self.macros[entryname].append(line)
- continue
- else:
- raise NetrcParseError(
- "bad toplevel token %r" % tt, file, lexer.lineno)
-
- # We're looking at start of an entry for a named machine or default.
- login = ''
- account = password = None
- self.hosts[entryname] = {}
- while 1:
- tt = lexer.get_token()
- if (tt=='' or tt == 'machine' or
- tt == 'default' or tt =='macdef'):
- if password:
- self.hosts[entryname] = (login, account, password)
- lexer.push_token(tt)
- break
- else:
- raise NetrcParseError(
- "malformed %s entry %s terminated by %s"
- % (toplevel, entryname, repr(tt)),
- file, lexer.lineno)
- elif tt == 'login' or tt == 'user':
- login = lexer.get_token()
- elif tt == 'account':
- account = lexer.get_token()
- elif tt == 'password':
- password = lexer.get_token()
- else:
- raise NetrcParseError("bad follower token %r" % tt,
- file, lexer.lineno)
-
- def authenticators(self, host):
- """Return a (user, account, password) tuple for given host."""
- if host in self.hosts:
- return self.hosts[host]
- elif 'default' in self.hosts:
- return self.hosts['default']
- else:
- return None
-
- def __repr__(self):
- """Dump the class data in the format of a .netrc file."""
- rep = ""
- for host in self.hosts.keys():
- attrs = self.hosts[host]
- rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
- if attrs[1]:
- rep = rep + "account " + repr(attrs[1])
- rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
- for macro in self.macros.keys():
- rep = rep + "macdef " + macro + "\n"
- for line in self.macros[macro]:
- rep = rep + line
- rep = rep + "\n"
- return rep
-
-if __name__ == '__main__':
- print netrc()
diff --git a/Lib/robotparser.py b/Lib/robotparser.py
deleted file mode 100644
--- a/Lib/robotparser.py
+++ /dev/null
@@ -1,222 +0,0 @@
-""" robotparser.py
-
- Copyright (C) 2000 Bastian Kleineidam
-
- You can choose between two licenses when using this package:
- 1) GNU GPLv2
- 2) PSF license for Python 2.2
-
- The robots.txt Exclusion Protocol is implemented as specified in
- http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html
-"""
-import urlparse
-import urllib
-
-__all__ = ["RobotFileParser"]
-
-
-class RobotFileParser:
- """ This class provides a set of methods to read, parse and answer
- questions about a single robots.txt file.
-
- """
-
- def __init__(self, url=''):
- self.entries = []
- self.default_entry = None
- self.disallow_all = False
- self.allow_all = False
- self.set_url(url)
- self.last_checked = 0
-
- def mtime(self):
- """Returns the time the robots.txt file was last fetched.
-
- This is useful for long-running web spiders that need to
- check for new robots.txt files periodically.
-
- """
- return self.last_checked
-
- def modified(self):
- """Sets the time the robots.txt file was last fetched to the
- current time.
-
- """
- import time
- self.last_checked = time.time()
-
- def set_url(self, url):
- """Sets the URL referring to a robots.txt file."""
- self.url = url
- self.host, self.path = urlparse.urlparse(url)[1:3]
-
- def read(self):
- """Reads the robots.txt URL and feeds it to the parser."""
- opener = URLopener()
- f = opener.open(self.url)
- lines = [line.strip() for line in f]
- f.close()
- self.errcode = opener.errcode
- if self.errcode in (401, 403):
- self.disallow_all = True
- elif self.errcode >= 400:
- self.allow_all = True
- elif self.errcode == 200 and lines:
- self.parse(lines)
-
- def _add_entry(self, entry):
- if "*" in entry.useragents:
- # the default entry is considered last
- if self.default_entry is None:
- # the first default entry wins
- self.default_entry = entry
- else:
- self.entries.append(entry)
-
- def parse(self, lines):
- """parse the input lines from a robots.txt file.
- We allow that a user-agent: line is not preceded by
- one or more blank lines."""
- # states:
- # 0: start state
- # 1: saw user-agent line
- # 2: saw an allow or disallow line
- state = 0
- linenumber = 0
- entry = Entry()
-
- for line in lines:
- linenumber += 1
- if not line:
- if state == 1:
- entry = Entry()
- state = 0
- elif state == 2:
- self._add_entry(entry)
- entry = Entry()
- state = 0
- # remove optional comment and strip line
- i = line.find('#')
- if i >= 0:
- line = line[:i]
- line = line.strip()
- if not line:
- continue
- line = line.split(':', 1)
- if len(line) == 2:
- line[0] = line[0].strip().lower()
- line[1] = urllib.unquote(line[1].strip())
- if line[0] == "user-agent":
- if state == 2:
- self._add_entry(entry)
- entry = Entry()
- entry.useragents.append(line[1])
- state = 1
- elif line[0] == "disallow":
- if state != 0:
- entry.rulelines.append(RuleLine(line[1], False))
- state = 2
- elif line[0] == "allow":
- if state != 0:
- entry.rulelines.append(RuleLine(line[1], True))
- state = 2
- if state == 2:
- self._add_entry(entry)
-
-
- def can_fetch(self, useragent, url):
- """using the parsed robots.txt decide if useragent can fetch url"""
- if self.disallow_all:
- return False
- if self.allow_all:
- return True
- # search for given user agent matches
- # the first match counts
- parsed_url = urlparse.urlparse(urllib.unquote(url))
- url = urlparse.urlunparse(('', '', parsed_url.path,
- parsed_url.params, parsed_url.query, parsed_url.fragment))
- url = urllib.quote(url)
- if not url:
- url = "/"
- for entry in self.entries:
- if entry.applies_to(useragent):
- return entry.allowance(url)
- # try the default entry last
- if self.default_entry:
- return self.default_entry.allowance(url)
- # agent not found ==> access granted
- return True
-
-
- def __str__(self):
- return ''.join([str(entry) + "\n" for entry in self.entries])
-
-
-class RuleLine:
- """A rule line is a single "Allow:" (allowance==True) or "Disallow:"
- (allowance==False) followed by a path."""
- def __init__(self, path, allowance):
- if path == '' and not allowance:
- # an empty value means allow all
- allowance = True
- self.path = urllib.quote(path)
- self.allowance = allowance
-
- def applies_to(self, filename):
- return self.path == "*" or filename.startswith(self.path)
-
- def __str__(self):
- return (self.allowance and "Allow" or "Disallow") + ": " + self.path
-
-
-class Entry:
- """An entry has one or more user-agents and zero or more rulelines"""
- def __init__(self):
- self.useragents = []
- self.rulelines = []
-
- def __str__(self):
- ret = []
- for agent in self.useragents:
- ret.extend(["User-agent: ", agent, "\n"])
- for line in self.rulelines:
- ret.extend([str(line), "\n"])
- return ''.join(ret)
-
- def applies_to(self, useragent):
- """check if this entry applies to the specified agent"""
- # split the name token and make it lower case
- useragent = useragent.split("/")[0].lower()
- for agent in self.useragents:
- if agent == '*':
- # we have the catch-all agent
- return True
- agent = agent.lower()
- if agent in useragent:
- return True
- return False
-
- def allowance(self, filename):
- """Preconditions:
- - our agent applies to this entry
- - filename is URL decoded"""
- for line in self.rulelines:
- if line.applies_to(filename):
- return line.allowance
- return True
-
-class URLopener(urllib.FancyURLopener):
- def __init__(self, *args):
- urllib.FancyURLopener.__init__(self, *args)
- self.errcode = 200
-
- def prompt_user_passwd(self, host, realm):
- ## If robots.txt file is accessible only with a password,
- ## we act as if the file wasn't there.
- return None, None
-
- def http_error_default(self, url, fp, errcode, errmsg, headers):
- self.errcode = errcode
- return urllib.FancyURLopener.http_error_default(self, url, fp, errcode,
- errmsg, headers)
diff --git a/Lib/select.py b/Lib/select.py
--- a/Lib/select.py
+++ b/Lib/select.py
@@ -1,264 +1,12 @@
-"""
-This is an select module for use on JVMs >= 1.5.
-It is documented, along with known issues and workarounds, on the jython wiki.
-http://wiki.python.org/jython/SelectModule
-"""
+# dispatches to _socket for actual implementation
-import java.nio.channels.SelectableChannel
-import java.nio.channels.SelectionKey
-import java.nio.channels.Selector
-from java.nio.channels.SelectionKey import OP_ACCEPT, OP_CONNECT, OP_WRITE, OP_READ
-
-import errno
-import os
-import Queue
-import socket
-
-class error(Exception): pass
-
-ALL = None
-
-_exception_map = {
-
-# (<javaexception>, <circumstance>) : lambda: <code that raises the python equivalent>
-
-(java.nio.channels.ClosedChannelException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
-(java.nio.channels.CancelledKeyException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
-(java.nio.channels.IllegalBlockingModeException, ALL) : error(errno.ESOCKISBLOCKING, 'socket must be in non-blocking mode'),
-}
-
-def _map_exception(exc, circumstance=ALL):
- try:
- mapped_exception = _exception_map[(exc.__class__, circumstance)]
- mapped_exception.java_exception = exc
- return mapped_exception
- except KeyError:
- return error(-1, 'Unmapped java exception: <%s:%s>' % (exc.toString(), circumstance))
-
-POLLIN = 1
-POLLOUT = 2
-
-# The following event types are completely ignored on jython
-# Java does not support them, AFAICT
-# They are declared only to support code compatibility with cpython
-
-POLLPRI = 4
-POLLERR = 8
-POLLHUP = 16
-POLLNVAL = 32
-
-def _getselectable(selectable_object):
- try:
- channel = selectable_object.getchannel()
- except:
- try:
- channel = selectable_object.fileno().getChannel()
- except:
- raise TypeError("Object '%s' is not watchable" % selectable_object,
- errno.ENOTSOCK)
-
- if channel and not isinstance(channel, java.nio.channels.SelectableChannel):
- raise TypeError("Object '%s' is not watchable" % selectable_object,
- errno.ENOTSOCK)
- return channel
-
-class poll:
-
- def __init__(self):
- self.selector = java.nio.channels.Selector.open()
- self.chanmap = {}
- self.unconnected_sockets = []
-
- def _register_channel(self, socket_object, channel, mask):
- jmask = 0
- if mask & POLLIN:
- # Note that OP_READ is NOT a valid event on server socket channels.
- if channel.validOps() & OP_ACCEPT:
- jmask = OP_ACCEPT
- else:
- jmask = OP_READ
- if mask & POLLOUT:
- if channel.validOps() & OP_WRITE:
- jmask |= OP_WRITE
- if channel.validOps() & OP_CONNECT:
- jmask |= OP_CONNECT
- selectionkey = channel.register(self.selector, jmask)
- self.chanmap[channel] = (socket_object, selectionkey)
-
- def _check_unconnected_sockets(self):
- temp_list = []
- for socket_object, mask in self.unconnected_sockets:
- channel = _getselectable(socket_object)
- if channel is not None:
- self._register_channel(socket_object, channel, mask)
- else:
- temp_list.append( (socket_object, mask) )
- self.unconnected_sockets = temp_list
-
- def register(self, socket_object, mask = POLLIN|POLLOUT|POLLPRI):
- try:
- channel = _getselectable(socket_object)
- if channel is None:
- # The socket is not yet connected, and thus has no channel
- # Add it to a pending list, and return
- self.unconnected_sockets.append( (socket_object, mask) )
- return
- self._register_channel(socket_object, channel, mask)
- except java.lang.Exception, jlx:
- raise _map_exception(jlx)
-
- def unregister(self, socket_object):
- try:
- channel = _getselectable(socket_object)
- self.chanmap[channel][1].cancel()
- del self.chanmap[channel]
- except java.lang.Exception, jlx:
- raise _map_exception(jlx)
-
- def _dopoll(self, timeout):
- if timeout is None or timeout < 0:
- self.selector.select()
- else:
- try:
- timeout = int(timeout)
- if not timeout:
- self.selector.selectNow()
- else:
- # No multiplication required: both cpython and java use millisecond timeouts
- self.selector.select(timeout)
- except ValueError, vx:
- raise error("poll timeout must be a number of milliseconds or None", errno.EINVAL)
- # The returned selectedKeys cannot be used from multiple threads!
- return self.selector.selectedKeys()
-
- def poll(self, timeout=None):
- try:
- self._check_unconnected_sockets()
- selectedkeys = self._dopoll(timeout)
- results = []
- for k in selectedkeys.iterator():
- jmask = k.readyOps()
- pymask = 0
- if jmask & OP_READ: pymask |= POLLIN
- if jmask & OP_WRITE: pymask |= POLLOUT
- if jmask & OP_ACCEPT: pymask |= POLLIN
- if jmask & OP_CONNECT: pymask |= POLLOUT
- # Now return the original userobject, and the return event mask
- results.append( (self.chanmap[k.channel()][0], pymask) )
- return results
- except java.lang.Exception, jlx:
- raise _map_exception(jlx)
-
- def _deregister_all(self):
- try:
- for k in self.selector.keys():
- k.cancel()
- # Keys are not actually removed from the selector until the next select operation.
- self.selector.selectNow()
- except java.lang.Exception, jlx:
- raise _map_exception(jlx)
-
- def close(self):
- try:
- self._deregister_all()
- self.selector.close()
- except java.lang.Exception, jlx:
- raise _map_exception(jlx)
-
-def _calcselecttimeoutvalue(value):
- if value is None:
- return None
- try:
- floatvalue = float(value)
- except Exception, x:
- raise TypeError("Select timeout value must be a number or None")
- if value < 0:
- raise error("Select timeout value cannot be negative", errno.EINVAL)
- if floatvalue < 0.000001:
- return 0
- return int(floatvalue * 1000) # Convert to milliseconds
-
-# This cache for poll objects is required because of a bug in java on MS Windows
-# http://bugs.jython.org/issue1291
-
-class poll_object_cache:
-
- def __init__(self):
- self.is_windows = os._name == 'nt'
- if self.is_windows:
- self.poll_object_queue = Queue.Queue()
- import atexit
- atexit.register(self.finalize)
-
- def get_poll_object(self):
- if not self.is_windows:
- return poll()
- try:
- return self.poll_object_queue.get(False)
- except Queue.Empty:
- return poll()
-
- def release_poll_object(self, pobj):
- if self.is_windows:
- pobj._deregister_all()
- self.poll_object_queue.put(pobj)
- else:
- pobj.close()
-
- def finalize(self):
- if self.is_windows:
- while True:
- try:
- p = self.poll_object_queue.get(False)
- p.close()
- except Queue.Empty:
- return
-
-_poll_object_cache = poll_object_cache()
-
-def native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
- timeout = _calcselecttimeoutvalue(timeout)
- # First create a poll object to do the actual watching.
- pobj = _poll_object_cache.get_poll_object()
- try:
- registered_for_read = {}
- # Check the read list
- for fd in read_fd_list:
- pobj.register(fd, POLLIN)
- registered_for_read[fd] = 1
- # And now the write list
- for fd in write_fd_list:
- if fd in registered_for_read:
- # registering a second time overwrites the first
- pobj.register(fd, POLLIN|POLLOUT)
- else:
- pobj.register(fd, POLLOUT)
- results = pobj.poll(timeout)
- # Now start preparing the results
- read_ready_list, write_ready_list, oob_ready_list = [], [], []
- for fd, mask in results:
- if mask & POLLIN:
- read_ready_list.append(fd)
- if mask & POLLOUT:
- write_ready_list.append(fd)
- return read_ready_list, write_ready_list, oob_ready_list
- finally:
- _poll_object_cache.release_poll_object(pobj)
-
-select = native_select
-
-def cpython_compatible_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
- # First turn all sockets to non-blocking
- # keeping track of which ones have changed
- modified_channels = []
- try:
- for socket_list in [read_fd_list, write_fd_list, outofband_fd_list]:
- for s in socket_list:
- channel = _getselectable(s)
- if channel.isBlocking():
- modified_channels.append(channel)
- channel.configureBlocking(0)
- return native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout)
- finally:
- for channel in modified_channels:
- channel.configureBlocking(1)
+from _socket import (
+ POLLIN,
+ POLLOUT,
+ POLLPRI,
+ POLLERR,
+ POLLHUP,
+ POLLNVAL,
+ error,
+ #poll,
+ select)
diff --git a/Lib/socket.py b/Lib/socket.py
--- a/Lib/socket.py
+++ b/Lib/socket.py
@@ -1,1889 +1,142 @@
-"""
-This is an updated socket module for use on JVMs >= 1.5; it is derived from the old jython socket module.
-It is documented, along with known issues and workarounds, on the jython wiki.
-http://wiki.python.org/jython/NewSocketModule
-"""
+# dispatches to _socket for actual implementation
-_defaulttimeout = None
+from _socket import (
+ socket, error, herror, gaierror, timeout, has_ipv6,
-import errno
-import jarray
-import string
-import struct
-import sys
-import threading
-import time
-import types
+ create_connection,
-# Java.io classes
-import java.io.BufferedInputStream
-import java.io.BufferedOutputStream
-# Java.io exceptions
-import java.io.InterruptedIOException
-import java.io.IOException
+ getdefaulttimeout,
+ setdefaulttimeout,
+
+ getfqdn,
+ gethostbyaddr,
+ gethostbyname,
+ gethostbyname_ex,
+ gethostname,
+ getprotobyname,
+ getservbyname,
+ getservbyport,
-# Java.lang classes
-import java.lang.String
-# Java.lang exceptions
-import java.lang.Exception
+ AF_UNSPEC,
+ AF_INET,
+ AF_INET6,
-# Java.net classes
-import java.net.DatagramPacket
-import java.net.InetAddress
-import java.net.InetSocketAddress
-import java.net.Socket
-# Java.net exceptions
-import java.net.BindException
-import java.net.ConnectException
-import java.net.NoRouteToHostException
-import java.net.PortUnreachableException
-import java.net.ProtocolException
-import java.net.SocketException
-import java.net.SocketTimeoutException
-import java.net.UnknownHostException
+ AI_PASSIVE,
+ AI_CANONNAME,
+ AI_NUMERICHOST,
+ AI_V4MAPPED,
+ AI_ALL,
+ AI_ADDRCONFIG,
+ AI_NUMERICSERV,
-# Java.nio classes
-import java.nio.ByteBuffer
-import java.nio.channels.DatagramChannel
-import java.nio.channels.ServerSocketChannel
-import java.nio.channels.SocketChannel
-# Java.nio exceptions
-import java.nio.channels.AlreadyConnectedException
-import java.nio.channels.AsynchronousCloseException
-import java.nio.channels.CancelledKeyException
-import java.nio.channels.ClosedByInterruptException
-import java.nio.channels.ClosedChannelException
-import java.nio.channels.ClosedSelectorException
-import java.nio.channels.ConnectionPendingException
-import java.nio.channels.IllegalBlockingModeException
-import java.nio.channels.IllegalSelectorException
-import java.nio.channels.NoConnectionPendingException
-import java.nio.channels.NonReadableChannelException
-import java.nio.channels.NonWritableChannelException
-import java.nio.channels.NotYetBoundException
-import java.nio.channels.NotYetConnectedException
-import java.nio.channels.UnresolvedAddressException
-import java.nio.channels.UnsupportedAddressTypeException
+ EAI_NONAME,
+ EAI_SERVICE,
+ EAI_ADDRFAMILY,
+
+ NI_NUMERICHOST,
+ NI_NUMERICSERV,
+ NI_NOFQDN,
+ NI_NAMEREQD,
+ NI_DGRAM,
+ NI_MAXSERV,
+ NI_IDN,
+ NI_IDN_ALLOW_UNASSIGNED,
+ NI_IDN_USE_STD3_ASCII_RULES,
+ NI_MAXHOST,
-# Javax.net.ssl classes
-import javax.net.ssl.SSLSocketFactory
-# Javax.net.ssl exceptions
-javax.net.ssl.SSLException
-javax.net.ssl.SSLHandshakeException
-javax.net.ssl.SSLKeyException
-javax.net.ssl.SSLPeerUnverifiedException
-javax.net.ssl.SSLProtocolException
+ SHUT_RD,
+ SHUT_WR,
+ SHUT_RDWR,
-import org.python.core.io.DatagramSocketIO
-import org.python.core.io.ServerSocketIO
-import org.python.core.io.SocketIO
-from org.python.core.Py import newString as asPyString
+ SOCK_DGRAM,
+ SOCK_STREAM,
+ SOCK_RAW,
+ SOCK_RDM,
+ SOCK_SEQPACKET,
+
+ SOL_SOCKET,
+ # not supported, but here for apparent completeness
+ IPPROTO_AH,
+ IPPROTO_DSTOPTS,
+ IPPROTO_ESP,
+ IPPROTO_FRAGMENT,
+ IPPROTO_GGP,
+ IPPROTO_HOPOPTS,
+ IPPROTO_ICMP,
+ IPPROTO_ICMPV6,
+ IPPROTO_IDP,
+ IPPROTO_IGMP,
+ IPPROTO_IP, # supported
+ # not supported
+ IPPROTO_IPV4,
+ IPPROTO_IPV6,
+ IPPROTO_MAX,
+ IPPROTO_ND,
+ IPPROTO_NONE,
+ IPPROTO_PUP,
+ IPPROTO_RAW,
+ IPPROTO_ROUTING,
+ IPPROTO_TCP, # supported
+ IPPROTO_UDP, # supported
-class error(IOError): pass
-class herror(error): pass
-class gaierror(error): pass
-class timeout(error): pass
-class sslerror(error): pass
+ # supported
+ SO_BROADCAST,
+ SO_KEEPALIVE,
+ SO_LINGER,
+ SO_RCVBUF,
+ SO_REUSEADDR,
+ SO_SNDBUF,
+ SO_TIMEOUT,
+ TCP_NODELAY,
-def _add_exception_attrs(exc):
- setattr(exc, 'errno', exc[0])
- setattr(exc, 'strerror', exc[1])
- return exc
+ # pseudo options
+ SO_ACCEPTCONN,
+ SO_ERROR,
+ SO_TYPE,
-def _unmapped_exception(exc):
- return _add_exception_attrs(error(-1, 'Unmapped exception: %s' % exc))
+ # unsupported, will return errno.ENOPROTOOPT if actually used
+ SO_OOBINLINE,
+ SO_DEBUG,
+ SO_DONTROUTE,
+ SO_EXCLUSIVEADDRUSE,
+ SO_RCVLOWAT,
+ SO_RCVTIMEO,
+ SO_REUSEPORT,
+ SO_SNDLOWAT,
+ SO_SNDTIMEO,
+ SO_USELOOPBACK,
+
+ INADDR_ANY,
+ INADDR_BROADCAST,
+ IN6ADDR_ANY_INIT,
-def java_net_socketexception_handler(exc):
- if exc.message.startswith("Address family not supported by protocol family"):
- return _add_exception_attrs(error(errno.EAFNOSUPPORT,
- 'Address family not supported by protocol family: See http://wiki.python.org/jython/NewSocketModule#IPV6_address_support'))
- return _unmapped_exception(exc)
+ _GLOBAL_DEFAULT_TIMEOUT,
-def would_block_error(exc=None):
- return _add_exception_attrs(error(errno.EWOULDBLOCK, 'The socket operation could not complete without blocking'))
+ is_ipv4_address, is_ipv6_address, is_ip_address,
+ getaddrinfo,
+ getnameinfo,
+ htons,
+ htonl,
+ ntohs,
+ ntohl,
+ inet_aton,
+ inet_ntoa,
+ inet_pton,
+ inet_ntop,
-ALL = None
+ _fileobject,
+ _get_jsockaddr
+)
-_ssl_message = ": Differences between the SSL socket behaviour of cpython vs. jython are explained on the wiki: http://wiki.python.org/jython/NewSocketModule#SSL_Support"
-_exception_map = {
+def supports(feature):
+ # FIXME this seems to be Jython internals specific, and for
+ # testing only; consider removing since it really no longer
+ # matters
+
+ if feature == "idna":
+ return True
+ raise KeyError("Unknown feature", feature)
-# (<javaexception>, <circumstance>) : callable that raises the python equivalent exception, or None to stub out as unmapped
-(java.io.IOException, ALL) : lambda x: error(errno.ECONNRESET, 'Software caused connection abort'),
-(java.io.InterruptedIOException, ALL) : lambda x: timeout(None, 'timed out'),
-(java.net.BindException, ALL) : lambda x: error(errno.EADDRINUSE, 'Address already in use'),
-(java.net.ConnectException, ALL) : lambda x: error(errno.ECONNREFUSED, 'Connection refused'),
-(java.net.NoRouteToHostException, ALL) : lambda x: error(errno.EHOSTUNREACH, 'No route to host'),
-(java.net.PortUnreachableException, ALL) : None,
-(java.net.ProtocolException, ALL) : None,
-(java.net.SocketException, ALL) : java_net_socketexception_handler,
-(java.net.SocketTimeoutException, ALL) : lambda x: timeout(None, 'timed out'),
-(java.net.UnknownHostException, ALL) : lambda x: gaierror(errno.EGETADDRINFOFAILED, 'getaddrinfo failed'),
-
-(java.nio.channels.AlreadyConnectedException, ALL) : lambda x: error(errno.EISCONN, 'Socket is already connected'),
-(java.nio.channels.AsynchronousCloseException, ALL) : None,
-(java.nio.channels.CancelledKeyException, ALL) : None,
-(java.nio.channels.ClosedByInterruptException, ALL) : None,
-(java.nio.channels.ClosedChannelException, ALL) : lambda x: error(errno.EPIPE, 'Socket closed'),
-(java.nio.channels.ClosedSelectorException, ALL) : None,
-(java.nio.channels.ConnectionPendingException, ALL) : None,
-(java.nio.channels.IllegalBlockingModeException, ALL) : None,
-(java.nio.channels.IllegalSelectorException, ALL) : None,
-(java.nio.channels.NoConnectionPendingException, ALL) : None,
-(java.nio.channels.NonReadableChannelException, ALL) : None,
-(java.nio.channels.NonWritableChannelException, ALL) : None,
-(java.nio.channels.NotYetBoundException, ALL) : None,
-(java.nio.channels.NotYetConnectedException, ALL) : None,
-(java.nio.channels.UnresolvedAddressException, ALL) : lambda x: gaierror(errno.EGETADDRINFOFAILED, 'getaddrinfo failed'),
-(java.nio.channels.UnsupportedAddressTypeException, ALL) : None,
-
-# These error codes are currently wrong: getting them correct is going to require
-# some investigation. Cpython 2.6 introduced extensive SSL support.
-
-(javax.net.ssl.SSLException, ALL) : lambda x: sslerror(-1, 'SSL exception'+_ssl_message),
-(javax.net.ssl.SSLHandshakeException, ALL) : lambda x: sslerror(-1, 'SSL handshake exception'+_ssl_message),
-(javax.net.ssl.SSLKeyException, ALL) : lambda x: sslerror(-1, 'SSL key exception'+_ssl_message),
-(javax.net.ssl.SSLPeerUnverifiedException, ALL) : lambda x: sslerror(-1, 'SSL peer unverified exception'+_ssl_message),
-(javax.net.ssl.SSLProtocolException, ALL) : lambda x: sslerror(-1, 'SSL protocol exception'+_ssl_message),
-
-}
-
-def _map_exception(java_exception, circumstance=ALL):
- mapped_exception = _exception_map.get((java_exception.__class__, circumstance))
- if mapped_exception:
- py_exception = mapped_exception(java_exception)
- else:
- py_exception = error(-1, 'Unmapped exception: %s' % java_exception)
- setattr(py_exception, 'java_exception', java_exception)
- return _add_exception_attrs(py_exception)
-
-from functools import wraps
-
-# Used to map java exceptions to the equivalent python exception
-# And to set the _last_error attribute on socket objects, to support SO_ERROR
-def raises_java_exception(method_or_function):
- @wraps(method_or_function)
- def handle_exception(*args, **kwargs):
- is_socket = (len(args) > 0 and isinstance(args[0], _nonblocking_api_mixin))
- try:
- try:
- return method_or_function(*args, **kwargs)
- except java.lang.Exception, jlx:
- raise _map_exception(jlx)
- except error, e:
- if is_socket:
- setattr(args[0], '_last_error', e[0])
- raise
- else:
- if is_socket:
- setattr(args[0], '_last_error', 0)
- return handle_exception
-
-_feature_support_map = {
- 'ipv6': True,
- 'idna': False,
- 'tipc': False,
-}
-
-def supports(feature, *args):
- if len(args) == 1:
- _feature_support_map[feature] = args[0]
- return _feature_support_map.get(feature, False)
-
-MODE_BLOCKING = 'block'
-MODE_NONBLOCKING = 'nonblock'
-MODE_TIMEOUT = 'timeout'
-
-_permitted_modes = (MODE_BLOCKING, MODE_NONBLOCKING, MODE_TIMEOUT)
-
-SHUT_RD = 0
-SHUT_WR = 1
-SHUT_RDWR = 2
-
-AF_UNSPEC = 0
-AF_INET = 2
-AF_INET6 = 23
-
-AI_PASSIVE = 1
-AI_CANONNAME = 2
-AI_NUMERICHOST = 4
-AI_V4MAPPED = 8
-AI_ALL = 16
-AI_ADDRCONFIG = 32
-AI_NUMERICSERV = 1024
-
-EAI_NONAME = -2
-EAI_SERVICE = -8
-EAI_ADDRFAMILY = -9
-
-NI_NUMERICHOST = 1
-NI_NUMERICSERV = 2
-NI_NOFQDN = 4
-NI_NAMEREQD = 8
-NI_DGRAM = 16
-NI_MAXSERV = 32
-NI_IDN = 64
-NI_IDN_ALLOW_UNASSIGNED = 128
-NI_IDN_USE_STD3_ASCII_RULES = 256
-NI_MAXHOST = 1025
-
-# For some reason, probably historical, SOCK_DGRAM and SOCK_STREAM are opposite values of what they are on cpython.
-# I.E. The following is the way they are on cpython
-# SOCK_STREAM = 1
-# SOCK_DGRAM = 2
-# At some point, we should probably switch them around, which *should* not affect anybody
-
-SOCK_DGRAM = 1
-SOCK_STREAM = 2
-SOCK_RAW = 3 # not supported
-SOCK_RDM = 4 # not supported
-SOCK_SEQPACKET = 5 # not supported
-
-SOL_SOCKET = 0xFFFF
-
-IPPROTO_AH = 51 # not supported
-IPPROTO_DSTOPTS = 60 # not supported
-IPPROTO_ESP = 50 # not supported
-IPPROTO_FRAGMENT = 44 # not supported
-IPPROTO_GGP = 3 # not supported
-IPPROTO_HOPOPTS = 0 # not supported
-IPPROTO_ICMP = 1 # not supported
-IPPROTO_ICMPV6 = 58 # not supported
-IPPROTO_IDP = 22 # not supported
-IPPROTO_IGMP = 2 # not supported
-IPPROTO_IP = 0
-IPPROTO_IPV4 = 4 # not supported
-IPPROTO_IPV6 = 41 # not supported
-IPPROTO_MAX = 256 # not supported
-IPPROTO_ND = 77 # not supported
-IPPROTO_NONE = 59 # not supported
-IPPROTO_PUP = 12 # not supported
-IPPROTO_RAW = 255 # not supported
-IPPROTO_ROUTING = 43 # not supported
-IPPROTO_TCP = 6
-IPPROTO_UDP = 17
-
-SO_ACCEPTCONN = 1
-SO_BROADCAST = 2
-SO_ERROR = 4
-SO_KEEPALIVE = 8
-SO_LINGER = 16
-SO_OOBINLINE = 32
-SO_RCVBUF = 64
-SO_REUSEADDR = 128
-SO_SNDBUF = 256
-SO_TIMEOUT = 512
-SO_TYPE = 1024
-
-TCP_NODELAY = 2048
-
-INADDR_ANY = "0.0.0.0"
-INADDR_BROADCAST = "255.255.255.255"
-
-IN6ADDR_ANY_INIT = "::"
-
-# Options with negative constants are not supported
-# They are being added here so that code that refers to them
-# will not break with an AttributeError
-
-SO_DEBUG = -1
-SO_DONTROUTE = -1
-SO_EXCLUSIVEADDRUSE = -8
-SO_RCVLOWAT = -16
-SO_RCVTIMEO = -32
-SO_REUSEPORT = -64
-SO_SNDLOWAT = -128
-SO_SNDTIMEO = -256
-SO_USELOOPBACK = -512
-
-__all__ = [
- # Families
- 'AF_UNSPEC', 'AF_INET', 'AF_INET6',
- # getaddrinfo and getnameinfo flags
- 'AI_PASSIVE', 'AI_CANONNAME', 'AI_NUMERICHOST', 'AI_V4MAPPED',
- 'AI_ALL', 'AI_ADDRCONFIG', 'AI_NUMERICSERV', 'EAI_NONAME',
- 'EAI_SERVICE', 'EAI_ADDRFAMILY',
- 'NI_NUMERICHOST', 'NI_NUMERICSERV', 'NI_NOFQDN', 'NI_NAMEREQD',
- 'NI_DGRAM', 'NI_MAXSERV', 'NI_IDN', 'NI_IDN_ALLOW_UNASSIGNED',
- 'NI_IDN_USE_STD3_ASCII_RULES', 'NI_MAXHOST',
- # socket types
- 'SOCK_DGRAM', 'SOCK_STREAM', 'SOCK_RAW', 'SOCK_RDM', 'SOCK_SEQPACKET',
- # levels
- 'SOL_SOCKET',
- # protocols
- 'IPPROTO_AH', 'IPPROTO_DSTOPTS', 'IPPROTO_ESP', 'IPPROTO_FRAGMENT',
- 'IPPROTO_GGP', 'IPPROTO_HOPOPTS', 'IPPROTO_ICMP', 'IPPROTO_ICMPV6',
- 'IPPROTO_IDP', 'IPPROTO_IGMP', 'IPPROTO_IP', 'IPPROTO_IPV4',
- 'IPPROTO_IPV6', 'IPPROTO_MAX', 'IPPROTO_ND', 'IPPROTO_NONE',
- 'IPPROTO_PUP', 'IPPROTO_RAW', 'IPPROTO_ROUTING', 'IPPROTO_TCP',
- 'IPPROTO_UDP',
- # Special hostnames
- 'INADDR_ANY', 'INADDR_BROADCAST', 'IN6ADDR_ANY_INIT',
- # support socket options
- 'SO_BROADCAST', 'SO_KEEPALIVE', 'SO_LINGER', 'SO_OOBINLINE',
- 'SO_RCVBUF', 'SO_REUSEADDR', 'SO_SNDBUF', 'SO_TIMEOUT', 'TCP_NODELAY',
- # unsupported socket options
- 'SO_ACCEPTCONN', 'SO_DEBUG', 'SO_DONTROUTE', 'SO_ERROR',
- 'SO_EXCLUSIVEADDRUSE', 'SO_RCVLOWAT', 'SO_RCVTIMEO', 'SO_REUSEPORT',
- 'SO_SNDLOWAT', 'SO_SNDTIMEO', 'SO_TYPE', 'SO_USELOOPBACK',
- # functions
- 'getfqdn', 'gethostname', 'gethostbyname', 'gethostbyaddr',
- 'getservbyname', 'getservbyport', 'getprotobyname', 'getaddrinfo',
- 'getnameinfo', 'getdefaulttimeout', 'setdefaulttimeout', 'htons',
- 'htonl', 'ntohs', 'ntohl', 'inet_pton', 'inet_ntop', 'inet_aton',
- 'inet_ntoa', 'create_connection', 'socket', 'ssl',
- # exceptions
- 'error', 'herror', 'gaierror', 'timeout', 'sslerror',
- # classes
- 'SocketType',
- # Misc flags
- 'has_ipv6', 'SHUT_RD', 'SHUT_WR', 'SHUT_RDWR',
-]
-
-def _constant_to_name(const_value, expected_name_starts):
- sock_module = sys.modules['socket']
- try:
- for name in dir(sock_module):
- if getattr(sock_module, name) is const_value:
- for name_start in expected_name_starts:
- if name.startswith(name_start):
- return name
- return "Unknown"
- finally:
- sock_module = None
-
-import _google_ipaddr_r234
-
-def _is_ip_address(addr, version=None):
- try:
- _google_ipaddr_r234.IPAddress(addr, version)
- return True
- except ValueError:
- return False
-
-def is_ipv4_address(addr):
- return _is_ip_address(addr, 4)
-
-def is_ipv6_address(addr):
- return _is_ip_address(addr, 6)
-
-def is_ip_address(addr):
- return _is_ip_address(addr)
-
-class _nio_impl:
-
- timeout = None
- mode = MODE_BLOCKING
-
- def config(self, mode, timeout):
- self.mode = mode
- if self.mode == MODE_BLOCKING:
- self.jchannel.configureBlocking(1)
- if self.mode == MODE_NONBLOCKING:
- self.jchannel.configureBlocking(0)
- if self.mode == MODE_TIMEOUT:
- self.jchannel.configureBlocking(1)
- self._timeout_millis = int(timeout*1000)
- self.jsocket.setSoTimeout(self._timeout_millis)
-
- def getsockopt(self, level, option):
- if (level, option) in self.options:
- result = getattr(self.jsocket, "get%s" % self.options[ (level, option) ])()
- if option == SO_LINGER:
- if result == -1:
- enabled, linger_time = 0, 0
- else:
- enabled, linger_time = 1, result
- return struct.pack('ii', enabled, linger_time)
- return result
- else:
- raise error(errno.ENOPROTOOPT, "Socket option '%s' (level '%s') not supported on socket(%s)" % \
- (_constant_to_name(option, ['SO_', 'TCP_']), _constant_to_name(level, ['SOL_', 'IPPROTO_']), str(self.jsocket)))
-
- def setsockopt(self, level, option, value):
- if (level, option) in self.options:
- if option == SO_LINGER:
- values = struct.unpack('ii', value)
- self.jsocket.setSoLinger(*values)
- else:
- getattr(self.jsocket, "set%s" % self.options[ (level, option) ])(value)
- else:
- raise error(errno.ENOPROTOOPT, "Socket option '%s' (level '%s') not supported on socket(%s)" % \
- (_constant_to_name(option, ['SO_', 'TCP_']), _constant_to_name(level, ['SOL_', 'IPPROTO_']), str(self.jsocket)))
-
- def close(self):
- self.jsocket.close()
-
- def getchannel(self):
- return self.jchannel
-
- def fileno(self):
- return self.socketio
-
-class _client_socket_impl(_nio_impl):
-
- options = {
- (SOL_SOCKET, SO_KEEPALIVE): 'KeepAlive',
- (SOL_SOCKET, SO_LINGER): 'SoLinger',
- (SOL_SOCKET, SO_OOBINLINE): 'OOBInline',
- (SOL_SOCKET, SO_RCVBUF): 'ReceiveBufferSize',
- (SOL_SOCKET, SO_REUSEADDR): 'ReuseAddress',
- (SOL_SOCKET, SO_SNDBUF): 'SendBufferSize',
- (SOL_SOCKET, SO_TIMEOUT): 'SoTimeout',
- (IPPROTO_TCP, TCP_NODELAY): 'TcpNoDelay',
- }
-
- def __init__(self, socket=None, pending_options=None):
- if socket:
- self.jchannel = socket.getChannel()
- else:
- self.jchannel = java.nio.channels.SocketChannel.open()
- self.jsocket = self.jchannel.socket()
- self.socketio = org.python.core.io.SocketIO(self.jchannel, 'rw')
- if pending_options:
- for level, optname in pending_options.keys():
- self.setsockopt(level, optname, pending_options[ (level, optname) ])
-
- def bind(self, jsockaddr, reuse_addr):
- self.jsocket.setReuseAddress(reuse_addr)
- self.jsocket.bind(jsockaddr)
-
- def connect(self, jsockaddr):
- if self.mode == MODE_TIMEOUT:
- self.jsocket.connect (jsockaddr, self._timeout_millis)
- else:
- self.jchannel.connect(jsockaddr)
-
- def finish_connect(self):
- return self.jchannel.finishConnect()
-
- def _do_read_net(self, buf):
- # Need two separate implementations because the java.nio APIs do not support timeouts
- return self.jsocket.getInputStream().read(buf)
-
- def _do_read_nio(self, buf):
- bytebuf = java.nio.ByteBuffer.wrap(buf)
- count = self.jchannel.read(bytebuf)
- return count
-
- def _do_write_net(self, buf):
- self.jsocket.getOutputStream().write(buf)
- return len(buf)
-
- def _do_write_nio(self, buf):
- bytebuf = java.nio.ByteBuffer.wrap(buf)
- count = self.jchannel.write(bytebuf)
- return count
-
- def read(self, buf):
- if self.mode == MODE_TIMEOUT:
- return self._do_read_net(buf)
- else:
- return self._do_read_nio(buf)
-
- def write(self, buf):
- if self.mode == MODE_TIMEOUT:
- return self._do_write_net(buf)
- else:
- return self._do_write_nio(buf)
-
- def shutdown(self, how):
- if how in (SHUT_RD, SHUT_RDWR):
- self.jsocket.shutdownInput()
- if how in (SHUT_WR, SHUT_RDWR):
- self.jsocket.shutdownOutput()
-
- def getsockname(self):
- return (self.jsocket.getLocalAddress().getHostAddress(), self.jsocket.getLocalPort())
-
- def getpeername(self):
- return (self.jsocket.getInetAddress().getHostAddress(), self.jsocket.getPort() )
-
-class _server_socket_impl(_nio_impl):
-
- options = {
- (SOL_SOCKET, SO_RCVBUF): 'ReceiveBufferSize',
- (SOL_SOCKET, SO_REUSEADDR): 'ReuseAddress',
- (SOL_SOCKET, SO_TIMEOUT): 'SoTimeout',
- }
-
- def __init__(self, jsockaddr, backlog, reuse_addr):
- self.pending_client_options = {}
- self.jchannel = java.nio.channels.ServerSocketChannel.open()
- self.jsocket = self.jchannel.socket()
- self.jsocket.setReuseAddress(reuse_addr)
- self.jsocket.bind(jsockaddr, backlog)
- self.socketio = org.python.core.io.ServerSocketIO(self.jchannel, 'rw')
-
- def accept(self):
- if self.mode in (MODE_BLOCKING, MODE_NONBLOCKING):
- new_cli_chan = self.jchannel.accept()
- if new_cli_chan is not None:
- return _client_socket_impl(new_cli_chan.socket(), self.pending_client_options)
- else:
- return None
- else:
- # In timeout mode now
- new_cli_sock = self.jsocket.accept()
- return _client_socket_impl(new_cli_sock, self.pending_client_options)
-
- def shutdown(self, how):
- # This is no-op on java, for server sockets.
- # What the user wants to achieve is achieved by calling close() on
- # java/jython. But we can't call that here because that would then
- # later cause the user explicit close() call to fail
- pass
-
- def getsockopt(self, level, option):
- if self.options.has_key( (level, option) ):
- return _nio_impl.getsockopt(self, level, option)
- elif _client_socket_impl.options.has_key( (level, option) ):
- return self.pending_client_options.get( (level, option), None)
- else:
- raise error(errno.ENOPROTOOPT, "Socket option '%s' (level '%s') not supported on socket(%s)" % \
- (_constant_to_name(option, ['SO_', 'TCP_']), _constant_to_name(level, ['SOL_', 'IPPROTO_']), str(self.jsocket)))
-
- def setsockopt(self, level, option, value):
- if self.options.has_key( (level, option) ):
- _nio_impl.setsockopt(self, level, option, value)
- elif _client_socket_impl.options.has_key( (level, option) ):
- self.pending_client_options[ (level, option) ] = value
- else:
- raise error(errno.ENOPROTOOPT, "Socket option '%s' (level '%s') not supported on socket(%s)" % \
- (_constant_to_name(option, ['SO_', 'TCP_']), _constant_to_name(level, ['SOL_', 'IPPROTO_']), str(self.jsocket)))
-
- def getsockname(self):
- return (self.jsocket.getInetAddress().getHostAddress(), self.jsocket.getLocalPort())
-
- def getpeername(self):
- # Not a meaningful operation for server sockets.
- raise error(errno.ENOTCONN, "Socket is not connected")
-
-class _datagram_socket_impl(_nio_impl):
-
- options = {
- (SOL_SOCKET, SO_BROADCAST): 'Broadcast',
- (SOL_SOCKET, SO_RCVBUF): 'ReceiveBufferSize',
- (SOL_SOCKET, SO_REUSEADDR): 'ReuseAddress',
- (SOL_SOCKET, SO_SNDBUF): 'SendBufferSize',
- (SOL_SOCKET, SO_TIMEOUT): 'SoTimeout',
- }
-
- def __init__(self, jsockaddr=None, reuse_addr=0):
- self.jchannel = java.nio.channels.DatagramChannel.open()
- self.jsocket = self.jchannel.socket()
- if jsockaddr is not None:
- self.jsocket.setReuseAddress(reuse_addr)
- self.jsocket.bind(jsockaddr)
- self.socketio = org.python.core.io.DatagramSocketIO(self.jchannel, 'rw')
-
- def connect(self, jsockaddr):
- self.jchannel.connect(jsockaddr)
-
- def disconnect(self):
- """
- Disconnect the datagram socket.
- cpython appears not to have this operation
- """
- self.jchannel.disconnect()
-
- def shutdown(self, how):
- # This is no-op on java, for datagram sockets.
- # What the user wants to achieve is achieved by calling close() on
- # java/jython. But we can't call that here because that would then
- # later cause the user explicit close() call to fail
- pass
-
- def _do_send_net(self, byte_array, socket_address, flags):
- # Need two separate implementations because the java.nio APIs do not support timeouts
- num_bytes = len(byte_array)
- if self.jsocket.isConnected() and socket_address is None:
- packet = java.net.DatagramPacket(byte_array, num_bytes)
- else:
- packet = java.net.DatagramPacket(byte_array, num_bytes, socket_address)
- self.jsocket.send(packet)
- return num_bytes
-
- def _do_send_nio(self, byte_array, socket_address, flags):
- byte_buf = java.nio.ByteBuffer.wrap(byte_array)
- if self.jchannel.isConnected() and socket_address is None:
- bytes_sent = self.jchannel.write(byte_buf)
- else:
- bytes_sent = self.jchannel.send(byte_buf, socket_address)
- return bytes_sent
-
- def sendto(self, byte_array, jsockaddr, flags):
- if self.mode == MODE_TIMEOUT:
- return self._do_send_net(byte_array, jsockaddr, flags)
- else:
- return self._do_send_nio(byte_array, jsockaddr, flags)
-
- def send(self, byte_array, flags):
- if self.mode == MODE_TIMEOUT:
- return self._do_send_net(byte_array, None, flags)
- else:
- return self._do_send_nio(byte_array, None, flags)
-
- def _do_receive_net(self, return_source_address, num_bytes, flags):
- byte_array = jarray.zeros(num_bytes, 'b')
- packet = java.net.DatagramPacket(byte_array, num_bytes)
- self.jsocket.receive(packet)
- bytes_rcvd = packet.getLength()
- if bytes_rcvd < num_bytes:
- byte_array = byte_array[:bytes_rcvd]
- return_data = byte_array.tostring()
- if return_source_address:
- host = None
- if packet.getAddress():
- host = packet.getAddress().getHostAddress()
- port = packet.getPort()
- return return_data, (host, port)
- else:
- return return_data
-
- def _do_receive_nio(self, return_source_address, num_bytes, flags):
- byte_array = jarray.zeros(num_bytes, 'b')
- byte_buf = java.nio.ByteBuffer.wrap(byte_array)
- source_address = self.jchannel.receive(byte_buf)
- if source_address is None and not self.jchannel.isBlocking():
- raise would_block_error()
- byte_buf.flip() ; bytes_read = byte_buf.remaining()
- if bytes_read < num_bytes:
- byte_array = byte_array[:bytes_read]
- return_data = byte_array.tostring()
- if return_source_address:
- return return_data, (source_address.getAddress().getHostAddress(), source_address.getPort())
- else:
- return return_data
-
- def recvfrom(self, num_bytes, flags):
- if self.mode == MODE_TIMEOUT:
- return self._do_receive_net(1, num_bytes, flags)
- else:
- return self._do_receive_nio(1, num_bytes, flags)
-
- def recv(self, num_bytes, flags):
- if self.mode == MODE_TIMEOUT:
- return self._do_receive_net(0, num_bytes, flags)
- else:
- return self._do_receive_nio(0, num_bytes, flags)
-
- def getsockname(self):
- return (self.jsocket.getLocalAddress().getHostAddress(), self.jsocket.getLocalPort())
-
- def getpeername(self):
- peer_address = self.jsocket.getInetAddress()
- if peer_address is None:
- raise error(errno.ENOTCONN, "Socket is not connected")
- return (peer_address.getHostAddress(), self.jsocket.getPort() )
-
-has_ipv6 = True # IPV6 FTW!
-
-# Name and address functions
-
-def _gethostbyaddr(name):
- # This is as close as I can get; at least the types are correct...
- addresses = java.net.InetAddress.getAllByName(gethostbyname(name))
- names = []
- addrs = []
- for addr in addresses:
- names.append(asPyString(addr.getHostName()))
- addrs.append(asPyString(addr.getHostAddress()))
- return (names, addrs)
-
- at raises_java_exception
-def getfqdn(name=None):
- """
- Return a fully qualified domain name for name. If name is omitted or empty
- it is interpreted as the local host. To find the fully qualified name,
- the hostname returned by gethostbyaddr() is checked, then aliases for the
- host, if available. The first name which includes a period is selected.
- In case no fully qualified domain name is available, the hostname is retur
- New in version 2.0.
- """
- if not name:
- name = gethostname()
- names, addrs = _gethostbyaddr(name)
- for a in names:
- if a.find(".") >= 0:
- return a
- return name
-
- at raises_java_exception
-def gethostname():
- return asPyString(java.net.InetAddress.getLocalHost().getHostName())
-
- at raises_java_exception
-def gethostbyname(name):
- return asPyString(java.net.InetAddress.getByName(name).getHostAddress())
-
-#
-# Skeleton implementation of gethostbyname_ex
-# Needed because urllib2 refers to it
-#
-
- at raises_java_exception
-def gethostbyname_ex(name):
- return (name, [], gethostbyname(name))
-
- at raises_java_exception
-def gethostbyaddr(name):
- names, addrs = _gethostbyaddr(name)
- return (names[0], names, addrs)
-
-def getservbyname(service_name, protocol_name=None):
- try:
- from jnr.netdb import Service
- except ImportError:
- return None
- service = Service.getServiceByName(service_name, protocol_name)
- if service is None:
- raise error('service/proto not found')
- return service.getPort()
-
-def getservbyport(port, protocol_name=None):
- try:
- from jnr.netdb import Service
- except ImportError:
- return None
- service = Service.getServiceByPort(port, protocol_name)
- if service is None:
- raise error('port/proto not found')
- return service.getName()
-
-def getprotobyname(protocol_name=None):
- try:
- from jnr.netdb import Protocol
- except ImportError:
- return None
- proto = Protocol.getProtocolByName(protocol_name)
- if proto is None:
- raise error('protocol not found')
- return proto.getProto()
-
-def _realsocket(family = AF_INET, sock_type = SOCK_STREAM, protocol=0):
- assert family in (AF_INET, AF_INET6), "Only AF_INET and AF_INET6 sockets are currently supported on jython"
- assert sock_type in (SOCK_DGRAM, SOCK_STREAM), "Only SOCK_STREAM and SOCK_DGRAM sockets are currently supported on jython"
- if sock_type == SOCK_STREAM:
- if protocol != 0:
- assert protocol == IPPROTO_TCP, "Only IPPROTO_TCP supported on SOCK_STREAM sockets"
- else:
- protocol = IPPROTO_TCP
- result = _tcpsocket()
- else:
- if protocol != 0:
- assert protocol == IPPROTO_UDP, "Only IPPROTO_UDP supported on SOCK_DGRAM sockets"
- else:
- protocol = IPPROTO_UDP
- result = _udpsocket()
- setattr(result, "family", family)
- setattr(result, "type", sock_type)
- setattr(result, "proto", protocol)
- return result
-
-#
-# Attempt to provide IDNA (RFC 3490) support.
-#
-# Try java.net.IDN, built into java 6
-#
-
-idna_libraries = [
- ('java.net.IDN', 'toASCII', 'toUnicode',
- 'ALLOW_UNASSIGNED', 'USE_STD3_ASCII_RULES',
- java.lang.IllegalArgumentException)
-]
-
-for idna_lib, efn, dfn, au, usar, exc in idna_libraries:
- try:
- m = __import__(idna_lib, globals(), locals(), [efn, dfn, au, usar])
- encode_fn = getattr(m, efn)
- def _encode_idna(name):
- try:
- return encode_fn(name)
- except exc:
- raise UnicodeEncodeError(name)
- decode_fn = getattr(m, dfn)
- def _decode_idna(name, flags=0):
- try:
- jflags = 0
- if flags & NI_IDN_ALLOW_UNASSIGNED:
- jflags |= au
- if flags & NI_IDN_USE_STD3_ASCII_RULES:
- jflags |= usar
- return decode_fn(name, jflags)
- except Exception, x:
- raise UnicodeDecodeError(name)
- supports('idna', True)
- break
- except (AttributeError, ImportError), e:
- pass
-else:
- _encode_idna = lambda x: x.encode("ascii")
- _decode_idna = lambda x, y=0: x.decode("ascii")
-
-#
-# Define data structures to support IPV4 and IPV6.
-#
-
-class _ip_address_t: pass
-
-class _ipv4_address_t(_ip_address_t):
-
- def __init__(self, sockaddr, port, jaddress):
- self.sockaddr = sockaddr
- self.port = port
- self.jaddress = jaddress
-
- def __getitem__(self, index):
- if 0 == index:
- return self.sockaddr
- elif 1 == index:
- return self.port
- else:
- raise IndexError()
-
- def __len__(self):
- return 2
-
- def __str__(self):
- return "('%s', %d)" % (self.sockaddr, self.port)
-
- __repr__ = __str__
-
-class _ipv6_address_t(_ip_address_t):
-
- def __init__(self, sockaddr, port, jaddress):
- self.sockaddr = sockaddr
- self.port = port
- self.jaddress = jaddress
-
- def __getitem__(self, index):
- if 0 == index:
- return self.sockaddr
- elif 1 == index:
- return self.port
- elif 2 == index:
- return 0
- elif 3 == index:
- return self.jaddress.scopeId
- else:
- raise IndexError()
-
- def __len__(self):
- return 4
-
- def __str__(self):
- return "('%s', %d, 0, %d)" % (self.sockaddr, self.port, self.jaddress.scopeId)
-
- __repr__ = __str__
-
-def _get_jsockaddr(address_object, family, sock_type, proto, flags):
- # Is this an object that was returned from getaddrinfo? If so, it already contains an InetAddress
- if isinstance(address_object, _ip_address_t):
- return java.net.InetSocketAddress(address_object.jaddress, address_object[1])
- # The user passed an address tuple, not an object returned from getaddrinfo
- # So we must call getaddrinfo, after some translations and checking
- if address_object is None:
- address_object = ("", 0)
- error_message = "Address must be a 2-tuple (ipv4: (host, port)) or a 4-tuple (ipv6: (host, port, flow, scope))"
- if not isinstance(address_object, tuple) or \
- ((family == AF_INET and len(address_object) != 2) or (family == AF_INET6 and len(address_object) not in [2,4] )) or \
- not isinstance(address_object[0], (basestring, types.NoneType)) or \
- not isinstance(address_object[1], (int, long)):
- raise TypeError(error_message)
- if len(address_object) == 4 and not isinstance(address_object[3], (int, long)):
- raise TypeError(error_message)
- hostname = address_object[0]
- if hostname is not None:
- hostname = hostname.strip()
- port = address_object[1]
- if family == AF_INET and sock_type == SOCK_DGRAM and hostname == "<broadcast>":
- hostname = INADDR_BROADCAST
- if hostname in ["", None]:
- if flags & AI_PASSIVE:
- hostname = {AF_INET: INADDR_ANY, AF_INET6: IN6ADDR_ANY_INIT}[family]
- else:
- hostname = "localhost"
- if isinstance(hostname, unicode):
- hostname = _encode_idna(hostname)
- addresses = getaddrinfo(hostname, port, family, sock_type, proto, flags)
- if len(addresses) == 0:
- raise gaierror(errno.EGETADDRINFOFAILED, 'getaddrinfo failed')
- return java.net.InetSocketAddress(addresses[0][4].jaddress, port)
-
-# Workaround for this (predominantly windows) issue
-# http://wiki.python.org/jython/NewSocketModule#IPV6_address_support
-
-_ipv4_addresses_only = False
-
-def _use_ipv4_addresses_only(value):
- global _ipv4_addresses_only
- _ipv4_addresses_only = value
-
-def _getaddrinfo_get_host(host, family, flags):
- if not isinstance(host, basestring) and host is not None:
- raise TypeError("getaddrinfo() argument 1 must be string or None")
- if flags & AI_NUMERICHOST:
- if not is_ip_address(host):
- raise gaierror(EAI_NONAME, "Name or service not known")
- if family == AF_INET and not is_ipv4_address(host):
- raise gaierror(EAI_ADDRFAMILY, "Address family for hostname not supported")
- if family == AF_INET6 and not is_ipv6_address(host):
- raise gaierror(EAI_ADDRFAMILY, "Address family for hostname not supported")
- if isinstance(host, unicode):
- host = _encode_idna(host)
- return host
-
-def _getaddrinfo_get_port(port, flags):
- if isinstance(port, basestring):
- try:
- int_port = int(port)
- except ValueError:
- if flags & AI_NUMERICSERV:
- raise gaierror(EAI_NONAME, "Name or service not known")
- # Lookup the service by name
- try:
- int_port = getservbyname(port)
- except error:
- raise gaierror(EAI_SERVICE, "Servname not supported for ai_socktype")
- elif port is None:
- int_port = 0
- elif not isinstance(port, (int, long)):
- raise error("Int or String expected")
- else:
- int_port = int(port)
- return int_port % 65536
-
- at raises_java_exception
-def getaddrinfo(host, port, family=AF_UNSPEC, socktype=0, proto=0, flags=0):
- if _ipv4_addresses_only:
- family = AF_INET
- if not family in [AF_INET, AF_INET6, AF_UNSPEC]:
- raise gaierror(errno.EIO, 'ai_family not supported')
- host = _getaddrinfo_get_host(host, family, flags)
- port = _getaddrinfo_get_port(port, flags)
- if socktype not in [0, SOCK_DGRAM, SOCK_STREAM]:
- raise error(errno.ESOCKTNOSUPPORT, "Socket type %s is not supported" % _constant_to_name(socktype, ['SOCK_']))
- filter_fns = []
- filter_fns.append({
- AF_INET: lambda x: isinstance(x, java.net.Inet4Address),
- AF_INET6: lambda x: isinstance(x, java.net.Inet6Address),
- AF_UNSPEC: lambda x: isinstance(x, java.net.InetAddress),
- }[family])
- if host in [None, ""]:
- if flags & AI_PASSIVE:
- hosts = {AF_INET: [INADDR_ANY], AF_INET6: [IN6ADDR_ANY_INIT], AF_UNSPEC: [INADDR_ANY, IN6ADDR_ANY_INIT]}[family]
- else:
- hosts = ["localhost"]
- else:
- hosts = [host]
- results = []
- for h in hosts:
- for a in java.net.InetAddress.getAllByName(h):
- if len([f for f in filter_fns if f(a)]):
- family = {java.net.Inet4Address: AF_INET, java.net.Inet6Address: AF_INET6}[a.getClass()]
- if flags & AI_CANONNAME:
- canonname = asPyString(a.getCanonicalHostName())
- else:
- canonname = ""
- sockaddr = asPyString(a.getHostAddress())
- # TODO: Include flowinfo and scopeid in a 4-tuple for IPv6 addresses
- sock_tuple = {AF_INET : _ipv4_address_t, AF_INET6 : _ipv6_address_t}[family](sockaddr, port, a)
- if socktype == 0:
- socktypes = [SOCK_DGRAM, SOCK_STREAM]
- else:
- socktypes = [socktype]
- for result_socktype in socktypes:
- result_proto = {SOCK_DGRAM: IPPROTO_UDP, SOCK_STREAM: IPPROTO_TCP}[result_socktype]
- if proto in [0, result_proto]:
- # The returned socket will only support the result_proto
- # If this does not match the requested proto, don't return it
- results.append((family, result_socktype, result_proto, canonname, sock_tuple))
- return results
-
-def _getnameinfo_get_host(address, flags):
- if not isinstance(address, basestring):
- raise TypeError("getnameinfo() address 1 must be string, not None")
- if isinstance(address, unicode):
- address = _encode_idna(address)
- jia = java.net.InetAddress.getByName(address)
- result = jia.getCanonicalHostName()
- if flags & NI_NAMEREQD:
- if is_ip_address(result):
- raise gaierror(EAI_NONAME, "Name or service not known")
- elif flags & NI_NUMERICHOST:
- result = jia.getHostAddress()
- # Ignoring NI_NOFQDN for now
- if flags & NI_IDN:
- result = _decode_idna(result, flags)
- return result
-
-def _getnameinfo_get_port(port, flags):
- if not isinstance(port, (int, long)):
- raise TypeError("getnameinfo() port number must be an integer")
- if flags & NI_NUMERICSERV:
- return port
- proto = None
- if flags & NI_DGRAM:
- proto = "udp"
- return getservbyport(port, proto)
-
- at raises_java_exception
-def getnameinfo(sock_addr, flags):
- if not isinstance(sock_addr, tuple) or len(sock_addr) < 2:
- raise TypeError("getnameinfo() argument 1 must be a tuple")
- host = _getnameinfo_get_host(sock_addr[0], flags)
- port = _getnameinfo_get_port(sock_addr[1], flags)
- return (host, port)
-
-def getdefaulttimeout():
- return _defaulttimeout
-
-def _calctimeoutvalue(value):
- if value is None:
- return None
- try:
- floatvalue = float(value)
- except:
- raise TypeError('Socket timeout value must be a number or None')
- if floatvalue < 0.0:
- raise ValueError("Socket timeout value cannot be negative")
- if floatvalue < 0.000001:
- return 0.0
- return floatvalue
-
-def setdefaulttimeout(timeout):
- global _defaulttimeout
- try:
- _defaulttimeout = _calctimeoutvalue(timeout)
- finally:
- _nonblocking_api_mixin.timeout = _defaulttimeout
-
-def htons(x): return x
-def htonl(x): return x
-def ntohs(x): return x
-def ntohl(x): return x
-
- at raises_java_exception
-def inet_pton(family, ip_string):
- if family == AF_INET:
- if not is_ipv4_address(ip_string):
- raise error("illegal IP address string passed to inet_pton")
- elif family == AF_INET6:
- if not is_ipv6_address(ip_string):
- raise error("illegal IP address string passed to inet_pton")
- else:
- raise error(errno.EAFNOSUPPORT, "Address family not supported by protocol")
- ia = java.net.InetAddress.getByName(ip_string)
- bytes = []
- for byte in ia.getAddress():
- if byte < 0:
- bytes.append(byte+256)
- else:
- bytes.append(byte)
- return "".join([chr(byte) for byte in bytes])
-
- at raises_java_exception
-def inet_ntop(family, packed_ip):
- jByteArray = jarray.array(packed_ip, 'b')
- if family == AF_INET:
- if len(jByteArray) != 4:
- raise ValueError("invalid length of packed IP address string")
- elif family == AF_INET6:
- if len(jByteArray) != 16:
- raise ValueError("invalid length of packed IP address string")
- else:
- raise ValueError("unknown address family %s" % family)
- ia = java.net.InetAddress.getByAddress(jByteArray)
- return ia.getHostAddress()
-
-def inet_aton(ip_string):
- return inet_pton(AF_INET, ip_string)
-
-def inet_ntoa(packed_ip):
- return inet_ntop(AF_INET, packed_ip)
-
-class _nonblocking_api_mixin:
-
- mode = MODE_BLOCKING
- reference_count = 0
- close_lock = threading.Lock()
-
- def __init__(self):
- self.timeout = _defaulttimeout
- if self.timeout is not None:
- self.mode = MODE_TIMEOUT
- self.pending_options = {
- (SOL_SOCKET, SO_REUSEADDR): 0,
- }
-
- def gettimeout(self):
- return self.timeout
-
- def settimeout(self, timeout):
- self.timeout = _calctimeoutvalue(timeout)
- if self.timeout is None:
- self.mode = MODE_BLOCKING
- elif self.timeout < 0.000001:
- self.mode = MODE_NONBLOCKING
- else:
- self.mode = MODE_TIMEOUT
- self._config()
-
- def setblocking(self, flag):
- if flag:
- self.mode = MODE_BLOCKING
- self.timeout = None
- else:
- self.mode = MODE_NONBLOCKING
- self.timeout = 0.0
- self._config()
-
- def getblocking(self):
- return self.mode == MODE_BLOCKING
-
- @raises_java_exception
- def setsockopt(self, level, optname, value):
- if self.sock_impl:
- self.sock_impl.setsockopt(level, optname, value)
- else:
- self.pending_options[ (level, optname) ] = value
-
- @raises_java_exception
- def getsockopt(self, level, optname):
- # Handle "pseudo" options first
- if level == SOL_SOCKET and optname == SO_TYPE:
- return getattr(self, "type")
- if level == SOL_SOCKET and optname == SO_ERROR:
- return_value = self._last_error
- self._last_error = 0
- return return_value
- # Now handle "real" options
- if self.sock_impl:
- return self.sock_impl.getsockopt(level, optname)
- else:
- return self.pending_options.get( (level, optname), None)
-
- @raises_java_exception
- def shutdown(self, how):
- assert how in (SHUT_RD, SHUT_WR, SHUT_RDWR)
- if not self.sock_impl:
- raise error(errno.ENOTCONN, "Transport endpoint is not connected")
- self.sock_impl.shutdown(how)
-
- @raises_java_exception
- def close(self):
- if self.sock_impl:
- self.sock_impl.close()
-
- @raises_java_exception
- def getsockname(self):
- if self.sock_impl is None:
- # If the user has already bound an address, return that
- if self.local_addr:
- return self.local_addr
- # The user has not bound, connected or listened
- # This is what cpython raises in this scenario
- raise error(errno.EINVAL, "Invalid argument")
- return self.sock_impl.getsockname()
-
- @raises_java_exception
- def getpeername(self):
- if self.sock_impl is None:
- raise error(errno.ENOTCONN, "Socket is not connected")
- return self.sock_impl.getpeername()
-
- def _config(self):
- assert self.mode in _permitted_modes
- if self.sock_impl:
- self.sock_impl.config(self.mode, self.timeout)
- for level, optname in self.pending_options.keys():
- if optname != SO_REUSEADDR:
- self.sock_impl.setsockopt(level, optname, self.pending_options[ (level, optname) ])
-
- def getchannel(self):
- if not self.sock_impl:
- return None
- return self.sock_impl.getchannel()
-
- def fileno(self):
- if not self.sock_impl:
- return None
- return self.sock_impl.fileno()
-
- def _get_jsocket(self):
- return self.sock_impl.jsocket
-
-class _tcpsocket(_nonblocking_api_mixin):
-
- sock_impl = None
- istream = None
- ostream = None
- local_addr = None
- server = 0
- _last_error = 0
-
- def __init__(self):
- _nonblocking_api_mixin.__init__(self)
-
- def getsockopt(self, level, optname):
- if level == SOL_SOCKET and optname == SO_ACCEPTCONN:
- return self.server
- return _nonblocking_api_mixin.getsockopt(self, level, optname)
-
- @raises_java_exception
- def bind(self, addr):
- assert not self.sock_impl
- assert not self.local_addr
- # Do the address format check
- _get_jsockaddr(addr, self.family, self.type, self.proto, AI_PASSIVE)
- self.local_addr = addr
-
- @raises_java_exception
- def listen(self, backlog):
- "This signifies a server socket"
- assert not self.sock_impl
- self.server = 1
- self.sock_impl = _server_socket_impl(_get_jsockaddr(self.local_addr, self.family, self.type, self.proto, AI_PASSIVE),
- backlog, self.pending_options[ (SOL_SOCKET, SO_REUSEADDR) ])
- self._config()
-
- @raises_java_exception
- def accept(self):
- "This signifies a server socket"
- if not self.sock_impl:
- self.listen()
- assert self.server
- new_sock = self.sock_impl.accept()
- if not new_sock:
- raise would_block_error()
- cliconn = _tcpsocket()
- cliconn.pending_options[ (SOL_SOCKET, SO_REUSEADDR) ] = new_sock.jsocket.getReuseAddress()
- cliconn.sock_impl = new_sock
- cliconn._setup()
- return cliconn, new_sock.getpeername()
-
- def _do_connect(self, addr):
- assert not self.sock_impl
- self.sock_impl = _client_socket_impl()
- if self.local_addr: # Has the socket been bound to a local address?
- self.sock_impl.bind(_get_jsockaddr(self.local_addr, self.family, self.type, self.proto, 0),
- self.pending_options[ (SOL_SOCKET, SO_REUSEADDR) ])
- self._config() # Configure timeouts, etc, now that the socket exists
- self.sock_impl.connect(_get_jsockaddr(addr, self.family, self.type, self.proto, 0))
-
- @raises_java_exception
- def connect(self, addr):
- "This signifies a client socket"
- self._do_connect(addr)
- self._setup()
-
- @raises_java_exception
- def connect_ex(self, addr):
- "This signifies a client socket"
- if not self.sock_impl:
- self._do_connect(addr)
- if self.sock_impl.finish_connect():
- self._setup()
- if self.mode == MODE_NONBLOCKING:
- return errno.EISCONN
- return 0
- return errno.EINPROGRESS
-
- def _setup(self):
- if self.mode != MODE_NONBLOCKING:
- self.istream = self.sock_impl.jsocket.getInputStream()
- self.ostream = self.sock_impl.jsocket.getOutputStream()
-
- @raises_java_exception
- def recv(self, n):
- if not self.sock_impl: raise error(errno.ENOTCONN, 'Socket is not connected')
- if self.sock_impl.jchannel.isConnectionPending():
- self.sock_impl.jchannel.finishConnect()
- data = jarray.zeros(n, 'b')
- m = self.sock_impl.read(data)
- if m == -1:#indicates EOF has been reached, so we just return the empty string
- return ""
- elif m <= 0:
- if self.mode == MODE_NONBLOCKING:
- raise would_block_error()
- return ""
- if m < n:
- data = data[:m]
- return data.tostring()
-
- @raises_java_exception
- def recvfrom(self, n):
- return self.recv(n), self.getpeername()
-
- @raises_java_exception
- def send(self, s):
- if not self.sock_impl: raise error(errno.ENOTCONN, 'Socket is not connected')
- if self.sock_impl.jchannel.isConnectionPending():
- self.sock_impl.jchannel.finishConnect()
- numwritten = self.sock_impl.write(s)
- if numwritten == 0 and self.mode == MODE_NONBLOCKING:
- raise would_block_error()
- return numwritten
-
- sendall = send
-
- @raises_java_exception
- def close(self):
- if self.istream:
- self.istream.close()
- if self.ostream:
- self.ostream.close()
- if self.sock_impl:
- self.sock_impl.close()
-
-
-class _udpsocket(_nonblocking_api_mixin):
-
- sock_impl = None
- connected = False
- local_addr = None
- _last_error = 0
-
- def __init__(self):
- _nonblocking_api_mixin.__init__(self)
-
- @raises_java_exception
- def bind(self, addr):
- assert not self.sock_impl
- assert not self.local_addr
- # Do the address format check
- _get_jsockaddr(addr, self.family, self.type, self.proto, AI_PASSIVE)
- self.local_addr = addr
- self.sock_impl = _datagram_socket_impl(_get_jsockaddr(self.local_addr, self.family, self.type, self.proto, AI_PASSIVE),
- self.pending_options[ (SOL_SOCKET, SO_REUSEADDR) ])
- self._config()
-
- def _do_connect(self, addr):
- assert not self.connected, "Datagram Socket is already connected"
- if not self.sock_impl:
- self.sock_impl = _datagram_socket_impl()
- self._config()
- self.sock_impl.connect(_get_jsockaddr(addr, self.family, self.type, self.proto, 0))
- self.connected = True
-
- @raises_java_exception
- def connect(self, addr):
- self._do_connect(addr)
-
- @raises_java_exception
- def connect_ex(self, addr):
- if not self.sock_impl:
- self._do_connect(addr)
- return 0
-
- @raises_java_exception
- def sendto(self, data, p1, p2=None):
- if not p2:
- flags, addr = 0, p1
- else:
- flags, addr = 0, p2
- if not self.sock_impl:
- self.sock_impl = _datagram_socket_impl()
- self._config()
- byte_array = java.lang.String(data).getBytes('iso-8859-1')
- result = self.sock_impl.sendto(byte_array, _get_jsockaddr(addr, self.family, self.type, self.proto, 0), flags)
- return result
-
- @raises_java_exception
- def send(self, data, flags=None):
- if not self.connected: raise error(errno.ENOTCONN, "Socket is not connected")
- byte_array = java.lang.String(data).getBytes('iso-8859-1')
- return self.sock_impl.send(byte_array, flags)
-
- @raises_java_exception
- def recvfrom(self, num_bytes, flags=None):
- """
- There is some disagreement as to what the behaviour should be if
- a recvfrom operation is requested on an unbound socket.
- See the following links for more information
- http://bugs.jython.org/issue1005
- http://bugs.sun.com/view_bug.do?bug_id=6621689
- """
- # This is the old 2.1 behaviour
- #assert self.sock_impl
- # This is amak's preferred interpretation
- #raise error(errno.ENOTCONN, "Recvfrom on unbound udp socket meaningless operation")
- # And this is the option for cpython compatibility
- if not self.sock_impl:
- self.sock_impl = _datagram_socket_impl()
- self._config()
- return self.sock_impl.recvfrom(num_bytes, flags)
-
- @raises_java_exception
- def recv(self, num_bytes, flags=None):
- if not self.sock_impl:
- raise error(errno.ENOTCONN, "Socket is not connected")
- return self.sock_impl.recv(num_bytes, flags)
-
- def __del__(self):
- self.close()
-
-_socketmethods = (
- 'bind', 'connect', 'connect_ex', 'fileno', 'listen',
- 'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
- 'sendall', 'setblocking',
- 'settimeout', 'gettimeout', 'shutdown', 'getchannel')
-
-# All the method names that must be delegated to either the real socket
-# object or the _closedsocket object.
-_delegate_methods = ("recv", "recvfrom", "recv_into", "recvfrom_into",
- "send", "sendto")
-
-class _closedsocket(object):
- __slots__ = []
- def _dummy(*args):
- raise error(errno.EBADF, 'Bad file descriptor')
- # All _delegate_methods must also be initialized here.
- send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
- __getattr__ = _dummy
-
-_active_sockets = set()
-
-def _closeActiveSockets():
- for socket in _active_sockets.copy():
- try:
- socket.close()
- except error:
- msg = 'Problem closing socket: %s: %r' % (socket, sys.exc_info())
- print >> sys.stderr, msg
-
-class _socketobject(object):
-
- __doc__ = _realsocket.__doc__
-
- __slots__ = ["_sock", "__weakref__"] + list(_delegate_methods)
-
- def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
- if _sock is None:
- _sock = _realsocket(family, type, proto)
- _sock.reference_count += 1
- elif isinstance(_sock, _nonblocking_api_mixin):
- _sock.reference_count += 1
- self._sock = _sock
- for method in _delegate_methods:
- meth = getattr(_sock, method, None)
- if meth:
- setattr(self, method, meth)
- _active_sockets.add(self)
-
- def close(self):
- try:
- _active_sockets.remove(self)
- except KeyError:
- pass
- _sock = self._sock
- if isinstance(_sock, _nonblocking_api_mixin):
- _sock.close_lock.acquire()
- try:
- _sock.reference_count -=1
- if not _sock.reference_count:
- _sock.close()
- self._sock = _closedsocket()
- dummy = self._sock._dummy
- for method in _delegate_methods:
- setattr(self, method, dummy)
- self.send = self.recv = self.sendto = self.recvfrom = \
- self._sock._dummy
- finally:
- _sock.close_lock.release()
- #close.__doc__ = _realsocket.close.__doc__
-
- def accept(self):
- sock, addr = self._sock.accept()
- return _socketobject(_sock=sock), addr
- #accept.__doc__ = _realsocket.accept.__doc__
-
- def dup(self):
- """dup() -> socket object
-
- Return a new socket object connected to the same system resource."""
- _sock = self._sock
- if not isinstance(_sock, _nonblocking_api_mixin):
- return _socketobject(_sock=_sock)
-
- _sock.close_lock.acquire()
- try:
- duped = _socketobject(_sock=_sock)
- finally:
- _sock.close_lock.release()
- return duped
-
- def makefile(self, mode='r', bufsize=-1):
- """makefile([mode[, bufsize]]) -> file object
-
- Return a regular file object corresponding to the socket. The mode
- and bufsize arguments are as for the built-in open() function."""
- _sock = self._sock
- if not isinstance(_sock, _nonblocking_api_mixin):
- return _fileobject(_sock, mode, bufsize)
-
- _sock.close_lock.acquire()
- try:
- fileobject = _fileobject(_sock, mode, bufsize)
- finally:
- _sock.close_lock.release()
- return fileobject
-
- family = property(lambda self: self._sock.family, doc="the socket family")
- type = property(lambda self: self._sock.type, doc="the socket type")
- proto = property(lambda self: self._sock.proto, doc="the socket protocol")
-
- _s = ("def %s(self, *args): return self._sock.%s(*args)\n\n"
- #"%s.__doc__ = _realsocket.%s.__doc__\n")
- )
- for _m in _socketmethods:
- #exec _s % (_m, _m, _m, _m)
- exec _s % (_m, _m)
- del _m, _s
-
-socket = SocketType = _socketobject
-
-class _fileobject(object):
- """Faux file object attached to a socket object."""
-
- default_bufsize = 8192
- name = "<socket>"
-
- __slots__ = ["mode", "bufsize", "softspace",
- # "closed" is a property, see below
- "_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf",
- "_close"]
-
- def __init__(self, sock, mode='rb', bufsize=-1, close=False):
- self._sock = sock
- if isinstance(sock, _nonblocking_api_mixin):
- sock.reference_count += 1
- self.mode = mode # Not actually used in this version
- if bufsize < 0:
- bufsize = self.default_bufsize
- self.bufsize = bufsize
- self.softspace = False
- if bufsize == 0:
- self._rbufsize = 1
- elif bufsize == 1:
- self._rbufsize = self.default_bufsize
- else:
- self._rbufsize = bufsize
- self._wbufsize = bufsize
- self._rbuf = "" # A string
- self._wbuf = [] # A list of strings
- self._close = close
-
- def _getclosed(self):
- return self._sock is None
- closed = property(_getclosed, doc="True if the file is closed")
-
- def close(self):
- try:
- if self._sock:
- self.flush()
- finally:
- if self._sock:
- if isinstance(self._sock, _nonblocking_api_mixin):
- self._sock.reference_count -= 1
- if not self._sock.reference_count or self._close:
- self._sock.close()
- elif self._close:
- self._sock.close()
- self._sock = None
-
- def __del__(self):
- try:
- self.close()
- except:
- # close() may fail if __init__ didn't complete
- pass
-
- def flush(self):
- if self._wbuf:
- buffer = "".join(self._wbuf)
- self._wbuf = []
- self._sock.sendall(buffer)
-
- def fileno(self):
- return self._sock.fileno()
-
- def write(self, data):
- data = str(data) # XXX Should really reject non-string non-buffers
- if not data:
- return
- self._wbuf.append(data)
- if (self._wbufsize == 0 or
- self._wbufsize == 1 and '\n' in data or
- self._get_wbuf_len() >= self._wbufsize):
- self.flush()
-
- def writelines(self, list):
- # XXX We could do better here for very long lists
- # XXX Should really reject non-string non-buffers
- self._wbuf.extend(filter(None, map(str, list)))
- if (self._wbufsize <= 1 or
- self._get_wbuf_len() >= self._wbufsize):
- self.flush()
-
- def _get_wbuf_len(self):
- buf_len = 0
- for x in self._wbuf:
- buf_len += len(x)
- return buf_len
-
- def read(self, size=-1):
- data = self._rbuf
- if size < 0:
- # Read until EOF
- buffers = []
- if data:
- buffers.append(data)
- self._rbuf = ""
- if self._rbufsize <= 1:
- recv_size = self.default_bufsize
- else:
- recv_size = self._rbufsize
- while True:
- data = self._sock.recv(recv_size)
- if not data:
- break
- buffers.append(data)
- return "".join(buffers)
- else:
- # Read until size bytes or EOF seen, whichever comes first
- buf_len = len(data)
- if buf_len >= size:
- self._rbuf = data[size:]
- return data[:size]
- buffers = []
- if data:
- buffers.append(data)
- self._rbuf = ""
- while True:
- left = size - buf_len
- recv_size = max(self._rbufsize, left)
- data = self._sock.recv(recv_size)
- if not data:
- break
- buffers.append(data)
- n = len(data)
- if n >= left:
- self._rbuf = data[left:]
- buffers[-1] = data[:left]
- break
- buf_len += n
- return "".join(buffers)
-
- def readline(self, size=-1):
- data = self._rbuf
- if size < 0:
- # Read until \n or EOF, whichever comes first
- if self._rbufsize <= 1:
- # Speed up unbuffered case
- assert data == ""
- buffers = []
- recv = self._sock.recv
- while data != "\n":
- data = recv(1)
- if not data:
- break
- buffers.append(data)
- return "".join(buffers)
- nl = data.find('\n')
- if nl >= 0:
- nl += 1
- self._rbuf = data[nl:]
- return data[:nl]
- buffers = []
- if data:
- buffers.append(data)
- self._rbuf = ""
- while True:
- data = self._sock.recv(self._rbufsize)
- if not data:
- break
- buffers.append(data)
- nl = data.find('\n')
- if nl >= 0:
- nl += 1
- self._rbuf = data[nl:]
- buffers[-1] = data[:nl]
- break
- return "".join(buffers)
- else:
- # Read until size bytes or \n or EOF seen, whichever comes first
- nl = data.find('\n', 0, size)
- if nl >= 0:
- nl += 1
- self._rbuf = data[nl:]
- return data[:nl]
- buf_len = len(data)
- if buf_len >= size:
- self._rbuf = data[size:]
- return data[:size]
- buffers = []
- if data:
- buffers.append(data)
- self._rbuf = ""
- while True:
- data = self._sock.recv(self._rbufsize)
- if not data:
- break
- buffers.append(data)
- left = size - buf_len
- nl = data.find('\n', 0, left)
- if nl >= 0:
- nl += 1
- self._rbuf = data[nl:]
- buffers[-1] = data[:nl]
- break
- n = len(data)
- if n >= left:
- self._rbuf = data[left:]
- buffers[-1] = data[:left]
- break
- buf_len += n
- return "".join(buffers)
-
- def readlines(self, sizehint=0):
- total = 0
- list = []
- while True:
- line = self.readline()
- if not line:
- break
- list.append(line)
- total += len(line)
- if sizehint and total >= sizehint:
- break
- return list
-
- # Iterator protocols
-
- def __iter__(self):
- return self
-
- def next(self):
- line = self.readline()
- if not line:
- raise StopIteration
- return line
-
-_GLOBAL_DEFAULT_TIMEOUT = object()
-
-def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
- source_address=None):
- """Connect to *address* and return the socket object.
-
- Convenience function. Connect to *address* (a 2-tuple ``(host,
- port)``) and return the socket object. Passing the optional
- *timeout* parameter will set the timeout on the socket instance
- before attempting to connect. If no *timeout* is supplied, the
- global default timeout setting returned by :func:`getdefaulttimeout`
- is used. If *source_address* is set it must be a tuple of (host, port)
- for the socket to bind as a source address before making the connection.
- An host of '' or port 0 tells the OS to use the default.
- """
-
- host, port = address
- err = None
- for res in getaddrinfo(host, port, 0, SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- sock = None
- try:
- sock = socket(af, socktype, proto)
- if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
- sock.settimeout(timeout)
- if source_address:
- sock.bind(source_address)
- sock.connect(sa)
- return sock
-
- except error as _:
- err = _
- if sock is not None:
- sock.close()
-
- if err is not None:
- raise err
- else:
- raise error("getaddrinfo returns an empty list")
-
-# Define the SSL support
-
-class ssl:
-
- @raises_java_exception
- def __init__(self, jython_socket_wrapper, keyfile=None, certfile=None):
- self.jython_socket_wrapper = jython_socket_wrapper
- jython_socket = self.jython_socket_wrapper._sock
- self.java_ssl_socket = self._make_ssl_socket(jython_socket)
- self._in_buf = java.io.BufferedInputStream(self.java_ssl_socket.getInputStream())
- self._out_buf = java.io.BufferedOutputStream(self.java_ssl_socket.getOutputStream())
-
- def _make_ssl_socket(self, jython_socket, auto_close=0):
- java_net_socket = jython_socket._get_jsocket()
- assert isinstance(java_net_socket, java.net.Socket)
- host = java_net_socket.getInetAddress().getHostAddress()
- port = java_net_socket.getPort()
- factory = javax.net.ssl.SSLSocketFactory.getDefault();
- java_ssl_socket = factory.createSocket(java_net_socket, host, port, auto_close)
- java_ssl_socket.setEnabledCipherSuites(java_ssl_socket.getSupportedCipherSuites())
- java_ssl_socket.startHandshake()
- return java_ssl_socket
-
- @raises_java_exception
- def read(self, n=4096):
- data = jarray.zeros(n, 'b')
- m = self._in_buf.read(data, 0, n)
- if m <= 0:
- return ""
- if m < n:
- data = data[:m]
- return data.tostring()
-
- recv = read
-
- @raises_java_exception
- def write(self, s):
- self._out_buf.write(s)
- self._out_buf.flush()
- return len(s)
-
- send = sendall = write
-
- def makefile(self, mode='r', bufsize=-1):
- return _fileobject(self, mode, bufsize)
-
- def _get_server_cert(self):
- return self.java_ssl_socket.getSession().getPeerCertificates()[0]
-
- @raises_java_exception
- def server(self):
- cert = self._get_server_cert()
- return cert.getSubjectDN().toString()
-
- @raises_java_exception
- def issuer(self):
- cert = self._get_server_cert()
- return cert.getIssuerDN().toString()
-
- def close(self):
- self.jython_socket_wrapper.close()
-
-def test():
- s = socket(AF_INET, SOCK_STREAM)
- s.connect(("", 80))
- s.send("GET / HTTP/1.0\r\n\r\n")
- while 1:
- data = s.recv(2000)
- print data
- if not data:
- break
-
-if __name__ == '__main__':
- test()
diff --git a/Lib/ssl.py b/Lib/ssl.py
--- a/Lib/ssl.py
+++ b/Lib/ssl.py
@@ -1,10 +1,257 @@
-"""
-This module provides very limited support for the SSL module on jython.
-
-See the jython wiki for more information.
-http://wiki.python.org/jython/SSLModule
-"""
-
-import socket
-
-wrap_socket = socket.ssl
+import logging
+
+try:
+ # jarjar-ed version
+ from org.python.netty.channel import ChannelInitializer
+ from org.python.netty.handler.ssl import SslHandler
+except ImportError:
+ # dev version from extlibs
+ from io.netty.channel import ChannelInitializer
+ from io.netty.handler.ssl import SslHandler
+
+from _socket import (
+ SSLError, raises_java_exception,
+ SSL_ERROR_SSL,
+ SSL_ERROR_WANT_READ,
+ SSL_ERROR_WANT_WRITE,
+ SSL_ERROR_WANT_X509_LOOKUP,
+ SSL_ERROR_SYSCALL,
+ SSL_ERROR_ZERO_RETURN,
+ SSL_ERROR_WANT_CONNECT,
+ SSL_ERROR_EOF,
+ SSL_ERROR_INVALID_ERROR_CODE)
+from _sslcerts import _get_ssl_context
+
+from java.text import SimpleDateFormat
+from java.util import Locale, TimeZone
+from javax.naming.ldap import LdapName
+from javax.security.auth.x500 import X500Principal
+
+
+log = logging.getLogger("socket")
+
+
+CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED = range(3)
+
+# FIXME need to map to java names as well; there's also the possibility of a difference between
+# SSLv2 (Java) and PROTOCOL_SSLv23 (Python), but reading the docs suggests not
+# http://docs.oracle.com/javase/7/docs/technotes/guides/security/StandardNames.html#SSLContext
+
+# Currently ignored, since we just use the default in Java. FIXME
+PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1 = range(4)
+_PROTOCOL_NAMES = {PROTOCOL_SSLv2: 'SSLv2', PROTOCOL_SSLv3: 'SSLv3', PROTOCOL_SSLv23: 'SSLv23', PROTOCOL_TLSv1: 'TLSv1'}
+
+_rfc2822_date_format = SimpleDateFormat("MMM dd HH:mm:ss yyyy z", Locale.US)
+_rfc2822_date_format.setTimeZone(TimeZone.getTimeZone("GMT"))
+
+_ldap_rdn_display_names = {
+ # list from RFC 2253
+ "CN": "commonName",
+ "L": "localityName",
+ "ST": "stateOrProvinceName",
+ "O": "organizationName",
+ "OU": "organizationalUnitName",
+ "C": "countryName",
+ "STREET": "streetAddress",
+ "DC": "domainComponent",
+ "UID": "userid"
+}
+
+_cert_name_types = [
+ # FIXME only entry 2 - DNS - has been confirmed w/ cpython;
+ # everything else is coming from this doc:
+ # http://docs.oracle.com/javase/7/docs/api/java/security/cert/X509Certificate.html#getSubjectAlternativeNames()
+ "other",
+ "rfc822",
+ "DNS",
+ "x400Address",
+ "directory",
+ "ediParty",
+ "uniformResourceIdentifier",
+ "ipAddress",
+ "registeredID"]
+
+
+class SSLInitializer(ChannelInitializer):
+
+ def __init__(self, ssl_handler):
+ self.ssl_handler = ssl_handler
+
+ def initChannel(self, ch):
+ pipeline = ch.pipeline()
+ pipeline.addLast("ssl", self.ssl_handler)
+
+
+class SSLSocket(object):
+
+ def __init__(self, sock,
+ keyfile, certfile, ca_certs,
+ do_handshake_on_connect, server_side):
+ self.sock = sock
+ self._sock = sock._sock # the real underlying socket
+ self.context = _get_ssl_context(keyfile, certfile, ca_certs)
+ self.engine = self.context.createSSLEngine()
+ self.engine.setUseClientMode(not server_side)
+ self.ssl_handler = SslHandler(self.engine)
+ self.already_handshaked = False
+ self.do_handshake_on_connect = do_handshake_on_connect
+
+ if self.do_handshake_on_connect and hasattr(self._sock, "connected") and self._sock.connected:
+ self.already_handshaked = True
+ log.debug("Adding SSL handler to pipeline after connection", extra={"sock": self._sock})
+ self._sock.channel.pipeline().addFirst("ssl", self.ssl_handler)
+ self._sock._post_connect()
+ self._sock._notify_selectors()
+ self._sock._unlatch()
+
+ def handshake_step(result):
+ log.debug("SSL handshaking %s", result, extra={"sock": self._sock})
+ if not hasattr(self._sock, "activity_latch"): # need a better discriminant
+ self._sock._post_connect()
+ self._sock._notify_selectors()
+
+ self.ssl_handler.handshakeFuture().addListener(handshake_step)
+ if self.do_handshake_on_connect and self.already_handshaked:
+ self.ssl_handler.handshakeFuture().sync()
+ log.debug("SSL handshaking completed", extra={"sock": self._sock})
+
+ def connect(self, addr):
+ log.debug("Connect SSL with handshaking %s", self.do_handshake_on_connect, extra={"sock": self._sock})
+ self._sock._connect(addr)
+ if self.do_handshake_on_connect:
+ self.already_handshaked = True
+ if self._sock.connected:
+ log.debug("Already connected, adding SSL handler to pipeline...", extra={"sock": self._sock})
+ self._sock.channel.pipeline().addFirst("ssl", self.ssl_handler)
+ else:
+ log.debug("Not connected, adding SSL initializer...", extra={"sock": self._sock})
+ self._sock.connect_handlers.append(SSLInitializer(self.ssl_handler))
+
+ # Various pass through methods to the wrapper socket
+
+ def send(self, data):
+ return self.sock.send(data)
+
+ def sendall(self, data):
+ return self.sock.sendall(data)
+
+ def recv(self, bufsize, flags=0):
+ return self.sock.recv(bufsize, flags)
+
+ def close(self):
+ self.sock.close()
+
+ def setblocking(self, mode):
+ self.sock.setblocking(mode)
+
+ def settimeout(self, timeout):
+ self.sock.settimeout(timeout)
+
+ def gettimeout(self):
+ return self.sock.gettimeout()
+
+ def makefile(self, mode='r', bufsize=-1):
+ return self.sock.makefile(mode, bufsize)
+
+ def shutdown(self, how):
+ self.sock.shutdown(how)
+
+ # Need to work with the real underlying socket as well
+
+ def _readable(self):
+ return self._sock._readable()
+
+ def _writable(self):
+ return self._sock._writable()
+
+ def _register_selector(self, selector):
+ self._sock._register_selector(selector)
+
+ def _unregister_selector(self, selector):
+ return self._sock._unregister_selector(selector)
+
+ def _notify_selectors(self):
+ self._sock._notify_selectors()
+
+ def do_handshake(self):
+ if not self.already_handshaked:
+ log.debug("Not handshaked, so adding SSL handler", extra={"sock": self._sock})
+ self.already_handshaked = True
+ self._sock.channel.pipeline().addFirst("ssl", self.ssl_handler)
+
+ def getpeername(self):
+ return self.sock.getpeername()
+
+ def fileno(self):
+ return self
+
+ @raises_java_exception
+ def getpeercert(self, binary_form=False):
+ cert = self.engine.getSession().getPeerCertificates()[0]
+ if binary_form:
+ return cert.getEncoded()
+ dn = cert.getSubjectX500Principal().getName()
+ ldapDN = LdapName(dn)
+        # FIXME a tuple of single-element tuples is assumed here; is it possible this is
+        # not actually the case, e.g. because of multi-valued attributes?
+ rdns = tuple((((_ldap_rdn_display_names.get(rdn.type), rdn.value),) for rdn in ldapDN.getRdns()))
+ # FIXME is it str? or utf8? or some other encoding? maybe a bug in cpython?
+ alt_names = tuple(((_cert_name_types[type], str(name)) for (type, name) in cert.getSubjectAlternativeNames()))
+ pycert = {
+ "notAfter": _rfc2822_date_format.format(cert.getNotAfter()),
+ "subject": rdns,
+ "subjectAltName": alt_names,
+ }
+ return pycert
+
+ @raises_java_exception
+ def issuer(self):
+ return self.getpeercert().getIssuerDN().toString()
+
+ def cipher(self):
+ session = self._sslsocket.session
+ suite = str(session.cipherSuite)
+ if "256" in suite: # FIXME!!! this test usually works, but there must be a better approach
+ strength = 256
+ elif "128" in suite:
+ strength = 128
+ else:
+ strength = None
+ return suite, str(session.protocol), strength
+
+
+
+# instantiates a SSLEngine, with the following things to keep in mind:
+
+# FIXME not yet supported
+# suppress_ragged_eofs - presumably this is an exception we can detect in Netty, the underlying SSLEngine certainly does
+# ssl_version - use SSLEngine.setEnabledProtocols(java.lang.String[])
+# ciphers - SSLEngine.setEnabledCipherSuites(String[] suites)
+
+def wrap_socket(sock, keyfile=None, certfile=None, server_side=False, cert_reqs=CERT_NONE,
+ ssl_version=None, ca_certs=None, do_handshake_on_connect=True,
+ suppress_ragged_eofs=True, ciphers=None):
+ return SSLSocket(
+ sock,
+ keyfile=keyfile, certfile=certfile, ca_certs=ca_certs,
+ server_side=server_side,
+ do_handshake_on_connect=do_handshake_on_connect)
+
+
+def unwrap_socket(sock):
+ # FIXME removing SSL handler from pipeline should suffice, but low pri for now
+ raise NotImplemented()
+
+
+# Underlying Java does a good job of managing entropy, so these are just no-ops
+
+def RAND_status():
+ return True
+
+def RAND_egd(path):
+ pass
+
+def RAND_add(bytes, entropy):
+ pass
+
+
diff --git a/Lib/telnetlib.py b/Lib/telnetlib.py
deleted file mode 100644
--- a/Lib/telnetlib.py
+++ /dev/null
@@ -1,664 +0,0 @@
-r"""TELNET client class.
-
-Based on RFC 854: TELNET Protocol Specification, by J. Postel and
-J. Reynolds
-
-Example:
-
->>> from telnetlib import Telnet
->>> tn = Telnet('www.python.org', 79) # connect to finger port
->>> tn.write('guido\r\n')
->>> print tn.read_all()
-Login Name TTY Idle When Where
-guido Guido van Rossum pts/2 <Dec 2 11:10> snag.cnri.reston..
-
->>>
-
-Note that read_all() won't read until eof -- it just reads some data
--- but it guarantees to read at least one byte unless EOF is hit.
-
-It is possible to pass a Telnet object to select.select() in order to
-wait until more data is available. Note that in this case,
-read_eager() may return '' even if there was data on the socket,
-because the protocol negotiation may have eaten the data. This is why
-EOFError is needed in some cases to distinguish between "no data" and
-"connection closed" (since the socket also appears ready for reading
-when it is closed).
-
-To do:
-- option negotiation
-- timeout should be intrinsic to the connection object instead of an
- option on one of the read calls only
-
-"""
-
-
-# Imported modules
-import sys
-import socket
-import select
-import os
-if os.name == 'java':
- from select import cpython_compatible_select as select
-else:
- from select import select
-del os
-
-
-__all__ = ["Telnet"]
-
-# Tunable parameters
-DEBUGLEVEL = 0
-
-# Telnet protocol defaults
-TELNET_PORT = 23
-
-# Telnet protocol characters (don't change)
-IAC = chr(255) # "Interpret As Command"
-DONT = chr(254)
-DO = chr(253)
-WONT = chr(252)
-WILL = chr(251)
-theNULL = chr(0)
-
-SE = chr(240) # Subnegotiation End
-NOP = chr(241) # No Operation
-DM = chr(242) # Data Mark
-BRK = chr(243) # Break
-IP = chr(244) # Interrupt process
-AO = chr(245) # Abort output
-AYT = chr(246) # Are You There
-EC = chr(247) # Erase Character
-EL = chr(248) # Erase Line
-GA = chr(249) # Go Ahead
-SB = chr(250) # Subnegotiation Begin
-
-
-# Telnet protocol options code (don't change)
-# These ones all come from arpa/telnet.h
-BINARY = chr(0) # 8-bit data path
-ECHO = chr(1) # echo
-RCP = chr(2) # prepare to reconnect
-SGA = chr(3) # suppress go ahead
-NAMS = chr(4) # approximate message size
-STATUS = chr(5) # give status
-TM = chr(6) # timing mark
-RCTE = chr(7) # remote controlled transmission and echo
-NAOL = chr(8) # negotiate about output line width
-NAOP = chr(9) # negotiate about output page size
-NAOCRD = chr(10) # negotiate about CR disposition
-NAOHTS = chr(11) # negotiate about horizontal tabstops
-NAOHTD = chr(12) # negotiate about horizontal tab disposition
-NAOFFD = chr(13) # negotiate about formfeed disposition
-NAOVTS = chr(14) # negotiate about vertical tab stops
-NAOVTD = chr(15) # negotiate about vertical tab disposition
-NAOLFD = chr(16) # negotiate about output LF disposition
-XASCII = chr(17) # extended ascii character set
-LOGOUT = chr(18) # force logout
-BM = chr(19) # byte macro
-DET = chr(20) # data entry terminal
-SUPDUP = chr(21) # supdup protocol
-SUPDUPOUTPUT = chr(22) # supdup output
-SNDLOC = chr(23) # send location
-TTYPE = chr(24) # terminal type
-EOR = chr(25) # end or record
-TUID = chr(26) # TACACS user identification
-OUTMRK = chr(27) # output marking
-TTYLOC = chr(28) # terminal location number
-VT3270REGIME = chr(29) # 3270 regime
-X3PAD = chr(30) # X.3 PAD
-NAWS = chr(31) # window size
-TSPEED = chr(32) # terminal speed
-LFLOW = chr(33) # remote flow control
-LINEMODE = chr(34) # Linemode option
-XDISPLOC = chr(35) # X Display Location
-OLD_ENVIRON = chr(36) # Old - Environment variables
-AUTHENTICATION = chr(37) # Authenticate
-ENCRYPT = chr(38) # Encryption option
-NEW_ENVIRON = chr(39) # New - Environment variables
-# the following ones come from
-# http://www.iana.org/assignments/telnet-options
-# Unfortunately, that document does not assign identifiers
-# to all of them, so we are making them up
-TN3270E = chr(40) # TN3270E
-XAUTH = chr(41) # XAUTH
-CHARSET = chr(42) # CHARSET
-RSP = chr(43) # Telnet Remote Serial Port
-COM_PORT_OPTION = chr(44) # Com Port Control Option
-SUPPRESS_LOCAL_ECHO = chr(45) # Telnet Suppress Local Echo
-TLS = chr(46) # Telnet Start TLS
-KERMIT = chr(47) # KERMIT
-SEND_URL = chr(48) # SEND-URL
-FORWARD_X = chr(49) # FORWARD_X
-PRAGMA_LOGON = chr(138) # TELOPT PRAGMA LOGON
-SSPI_LOGON = chr(139) # TELOPT SSPI LOGON
-PRAGMA_HEARTBEAT = chr(140) # TELOPT PRAGMA HEARTBEAT
-EXOPL = chr(255) # Extended-Options-List
-NOOPT = chr(0)
-
-class Telnet:
-
- """Telnet interface class.
-
- An instance of this class represents a connection to a telnet
- server. The instance is initially not connected; the open()
- method must be used to establish a connection. Alternatively, the
- host name and optional port number can be passed to the
- constructor, too.
-
- Don't try to reopen an already connected instance.
-
- This class has many read_*() methods. Note that some of them
- raise EOFError when the end of the connection is read, because
- they can return an empty string for other reasons. See the
- individual doc strings.
-
- read_until(expected, [timeout])
- Read until the expected string has been seen, or a timeout is
- hit (default is no timeout); may block.
-
- read_all()
- Read all data until EOF; may block.
-
- read_some()
- Read at least one byte or EOF; may block.
-
- read_very_eager()
- Read all data available already queued or on the socket,
- without blocking.
-
- read_eager()
- Read either data already queued or some data available on the
- socket, without blocking.
-
- read_lazy()
- Read all data in the raw queue (processing it first), without
- doing any socket I/O.
-
- read_very_lazy()
- Reads all data in the cooked queue, without doing any socket
- I/O.
-
- read_sb_data()
- Reads available data between SB ... SE sequence. Don't block.
-
- set_option_negotiation_callback(callback)
- Each time a telnet option is read on the input flow, this callback
- (if set) is called with the following parameters :
- callback(telnet socket, command, option)
- option will be chr(0) when there is no option.
- No other action is done afterwards by telnetlib.
-
- """
-
- def __init__(self, host=None, port=0,
- timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
- """Constructor.
-
- When called without arguments, create an unconnected instance.
- With a hostname argument, it connects the instance; port number
- and timeout are optional.
- """
- self.debuglevel = DEBUGLEVEL
- self.host = host
- self.port = port
- self.timeout = timeout
- self.sock = None
- self.rawq = ''
- self.irawq = 0
- self.cookedq = ''
- self.eof = 0
- self.iacseq = '' # Buffer for IAC sequence.
- self.sb = 0 # flag for SB and SE sequence.
- self.sbdataq = ''
- self.option_callback = None
- if host is not None:
- self.open(host, port, timeout)
-
- def open(self, host, port=0, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
- """Connect to a host.
-
- The optional second argument is the port number, which
- defaults to the standard telnet port (23).
-
- Don't try to reopen an already connected instance.
- """
- self.eof = 0
- if not port:
- port = TELNET_PORT
- self.host = host
- self.port = port
- self.timeout = timeout
- self.sock = socket.create_connection((host, port), timeout)
-
- def __del__(self):
- """Destructor -- close the connection."""
- self.close()
-
- def msg(self, msg, *args):
- """Print a debug message, when the debug level is > 0.
-
- If extra arguments are present, they are substituted in the
- message using the standard string formatting operator.
-
- """
- if self.debuglevel > 0:
- print 'Telnet(%s,%s):' % (self.host, self.port),
- if args:
- print msg % args
- else:
- print msg
-
- def set_debuglevel(self, debuglevel):
- """Set the debug level.
-
- The higher it is, the more debug output you get (on sys.stdout).
-
- """
- self.debuglevel = debuglevel
-
- def close(self):
- """Close the connection."""
- if self.sock:
- self.sock.close()
- self.sock = 0
- self.eof = 1
- self.iacseq = ''
- self.sb = 0
-
- def get_socket(self):
- """Return the socket object used internally."""
- return self.sock
-
- def fileno(self):
- """Return the fileno() of the socket object used internally."""
- return self.sock.fileno()
-
- def write(self, buffer):
- """Write a string to the socket, doubling any IAC characters.
-
- Can block if the connection is blocked. May raise
- socket.error if the connection is closed.
-
- """
- if IAC in buffer:
- buffer = buffer.replace(IAC, IAC+IAC)
- self.msg("send %r", buffer)
- self.sock.sendall(buffer)
-
- def read_until(self, match, timeout=None):
- """Read until a given string is encountered or until timeout.
-
- When no match is found, return whatever is available instead,
- possibly the empty string. Raise EOFError if the connection
- is closed and no cooked data is available.
-
- """
- n = len(match)
- self.process_rawq()
- i = self.cookedq.find(match)
- if i >= 0:
- i = i+n
- buf = self.cookedq[:i]
- self.cookedq = self.cookedq[i:]
- return buf
- s_reply = ([self], [], [])
- s_args = s_reply
- if timeout is not None:
- s_args = s_args + (timeout,)
- from time import time
- time_start = time()
- while not self.eof and select(*s_args) == s_reply:
- i = max(0, len(self.cookedq)-n)
- self.fill_rawq()
- self.process_rawq()
- i = self.cookedq.find(match, i)
- if i >= 0:
- i = i+n
- buf = self.cookedq[:i]
- self.cookedq = self.cookedq[i:]
- return buf
- if timeout is not None:
- elapsed = time() - time_start
- if elapsed >= timeout:
- break
- s_args = s_reply + (timeout-elapsed,)
- return self.read_very_lazy()
-
- def read_all(self):
- """Read all data until EOF; block until connection closed."""
- self.process_rawq()
- while not self.eof:
- self.fill_rawq()
- self.process_rawq()
- buf = self.cookedq
- self.cookedq = ''
- return buf
-
- def read_some(self):
- """Read at least one byte of cooked data unless EOF is hit.
-
- Return '' if EOF is hit. Block if no data is immediately
- available.
-
- """
- self.process_rawq()
- while not self.cookedq and not self.eof:
- self.fill_rawq()
- self.process_rawq()
- buf = self.cookedq
- self.cookedq = ''
- return buf
-
- def read_very_eager(self):
- """Read everything that's possible without blocking in I/O (eager).
-
- Raise EOFError if connection closed and no cooked data
- available. Return '' if no cooked data available otherwise.
- Don't block unless in the midst of an IAC sequence.
-
- """
- self.process_rawq()
- while not self.eof and self.sock_avail():
- self.fill_rawq()
- self.process_rawq()
- return self.read_very_lazy()
-
- def read_eager(self):
- """Read readily available data.
-
- Raise EOFError if connection closed and no cooked data
- available. Return '' if no cooked data available otherwise.
- Don't block unless in the midst of an IAC sequence.
-
- """
- self.process_rawq()
- while not self.cookedq and not self.eof and self.sock_avail():
- self.fill_rawq()
- self.process_rawq()
- return self.read_very_lazy()
-
- def read_lazy(self):
- """Process and return data that's already in the queues (lazy).
-
- Raise EOFError if connection closed and no data available.
- Return '' if no cooked data available otherwise. Don't block
- unless in the midst of an IAC sequence.
-
- """
- self.process_rawq()
- return self.read_very_lazy()
-
- def read_very_lazy(self):
- """Return any data available in the cooked queue (very lazy).
-
- Raise EOFError if connection closed and no data available.
- Return '' if no cooked data available otherwise. Don't block.
-
- """
- buf = self.cookedq
- self.cookedq = ''
- if not buf and self.eof and not self.rawq:
- raise EOFError, 'telnet connection closed'
- return buf
-
- def read_sb_data(self):
- """Return any data available in the SB ... SE queue.
-
- Return '' if no SB ... SE available. Should only be called
- after seeing a SB or SE command. When a new SB command is
- found, old unread SB data will be discarded. Don't block.
-
- """
- buf = self.sbdataq
- self.sbdataq = ''
- return buf
-
- def set_option_negotiation_callback(self, callback):
- """Provide a callback function called after each receipt of a telnet option."""
- self.option_callback = callback
-
- def process_rawq(self):
- """Transfer from raw queue to cooked queue.
-
- Set self.eof when connection is closed. Don't block unless in
- the midst of an IAC sequence.
-
- """
- buf = ['', '']
- try:
- while self.rawq:
- c = self.rawq_getchar()
- if not self.iacseq:
- if c == theNULL:
- continue
- if c == "\021":
- continue
- if c != IAC:
- buf[self.sb] = buf[self.sb] + c
- continue
- else:
- self.iacseq += c
- elif len(self.iacseq) == 1:
- # 'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
- if c in (DO, DONT, WILL, WONT):
- self.iacseq += c
- continue
-
- self.iacseq = ''
- if c == IAC:
- buf[self.sb] = buf[self.sb] + c
- else:
- if c == SB: # SB ... SE start.
- self.sb = 1
- self.sbdataq = ''
- elif c == SE:
- self.sb = 0
- self.sbdataq = self.sbdataq + buf[1]
- buf[1] = ''
- if self.option_callback:
- # Callback is supposed to look into
- # the sbdataq
- self.option_callback(self.sock, c, NOOPT)
- else:
- # We can't offer automatic processing of
- # suboptions. Alas, we should not get any
- # unless we did a WILL/DO before.
- self.msg('IAC %d not recognized' % ord(c))
- elif len(self.iacseq) == 2:
- cmd = self.iacseq[1]
- self.iacseq = ''
- opt = c
- if cmd in (DO, DONT):
- self.msg('IAC %s %d',
- cmd == DO and 'DO' or 'DONT', ord(opt))
- if self.option_callback:
- self.option_callback(self.sock, cmd, opt)
- else:
- self.sock.sendall(IAC + WONT + opt)
- elif cmd in (WILL, WONT):
- self.msg('IAC %s %d',
- cmd == WILL and 'WILL' or 'WONT', ord(opt))
- if self.option_callback:
- self.option_callback(self.sock, cmd, opt)
- else:
- self.sock.sendall(IAC + DONT + opt)
- except EOFError: # raised by self.rawq_getchar()
- self.iacseq = '' # Reset on EOF
- self.sb = 0
- pass
- self.cookedq = self.cookedq + buf[0]
- self.sbdataq = self.sbdataq + buf[1]
-
- def rawq_getchar(self):
- """Get next char from raw queue.
-
- Block if no data is immediately available. Raise EOFError
- when connection is closed.
-
- """
- if not self.rawq:
- self.fill_rawq()
- if self.eof:
- raise EOFError
- c = self.rawq[self.irawq]
- self.irawq = self.irawq + 1
- if self.irawq >= len(self.rawq):
- self.rawq = ''
- self.irawq = 0
- return c
-
- def fill_rawq(self):
- """Fill raw queue from exactly one recv() system call.
-
- Block if no data is immediately available. Set self.eof when
- connection is closed.
-
- """
- if self.irawq >= len(self.rawq):
- self.rawq = ''
- self.irawq = 0
- # The buffer size should be fairly small so as to avoid quadratic
- # behavior in process_rawq() above
- buf = self.sock.recv(50)
- self.msg("recv %r", buf)
- self.eof = (not buf)
- self.rawq = self.rawq + buf
-
- def sock_avail(self):
- """Test whether data is available on the socket."""
- return select([self], [], [], 0) == ([self], [], [])
-
- def interact(self):
- """Interaction function, emulates a very dumb telnet client."""
- if sys.platform == "win32":
- self.mt_interact()
- return
- while 1:
- rfd, wfd, xfd = select([self, sys.stdin], [], [])
- if self in rfd:
- try:
- text = self.read_eager()
- except EOFError:
- print '*** Connection closed by remote host ***'
- break
- if text:
- sys.stdout.write(text)
- sys.stdout.flush()
- if sys.stdin in rfd:
- line = sys.stdin.readline()
- if not line:
- break
- self.write(line)
-
- def mt_interact(self):
- """Multithreaded version of interact()."""
- import thread
- thread.start_new_thread(self.listener, ())
- while 1:
- line = sys.stdin.readline()
- if not line:
- break
- self.write(line)
-
- def listener(self):
- """Helper for mt_interact() -- this executes in the other thread."""
- while 1:
- try:
- data = self.read_eager()
- except EOFError:
- print '*** Connection closed by remote host ***'
- return
- if data:
- sys.stdout.write(data)
- else:
- sys.stdout.flush()
-
- def expect(self, list, timeout=None):
- """Read until one from a list of a regular expressions matches.
-
- The first argument is a list of regular expressions, either
- compiled (re.RegexObject instances) or uncompiled (strings).
- The optional second argument is a timeout, in seconds; default
- is no timeout.
-
- Return a tuple of three items: the index in the list of the
- first regular expression that matches; the match object
- returned; and the text read up till and including the match.
-
- If EOF is read and no text was read, raise EOFError.
- Otherwise, when nothing matches, return (-1, None, text) where
- text is the text received so far (may be the empty string if a
- timeout happened).
-
- If a regular expression ends with a greedy match (e.g. '.*')
- or if more than one expression can match the same input, the
- results are undeterministic, and may depend on the I/O timing.
-
- """
- re = None
- list = list[:]
- indices = range(len(list))
- for i in indices:
- if not hasattr(list[i], "search"):
- if not re: import re
- list[i] = re.compile(list[i])
- if timeout is not None:
- from time import time
- time_start = time()
- while 1:
- self.process_rawq()
- for i in indices:
- m = list[i].search(self.cookedq)
- if m:
- e = m.end()
- text = self.cookedq[:e]
- self.cookedq = self.cookedq[e:]
- return (i, m, text)
- if self.eof:
- break
- if timeout is not None:
- elapsed = time() - time_start
- if elapsed >= timeout:
- break
- s_args = ([self.fileno()], [], [], timeout-elapsed)
- r, w, x = select(*s_args)
- if not r:
- break
- self.fill_rawq()
- text = self.read_very_lazy()
- if not text and self.eof:
- raise EOFError
- return (-1, None, text)
-
-
-def test():
- """Test program for telnetlib.
-
- Usage: python telnetlib.py [-d] ... [host [port]]
-
- Default host is localhost; default port is 23.
-
- """
- debuglevel = 0
- while sys.argv[1:] and sys.argv[1] == '-d':
- debuglevel = debuglevel+1
- del sys.argv[1]
- host = 'localhost'
- if sys.argv[1:]:
- host = sys.argv[1]
- port = 0
- if sys.argv[2:]:
- portstr = sys.argv[2]
- try:
- port = int(portstr)
- except ValueError:
- port = socket.getservbyname(portstr, 'tcp')
- tn = Telnet()
- tn.set_debuglevel(debuglevel)
- tn.open(host, port, timeout=0.5)
- tn.interact()
- tn.close()
-
-if __name__ == '__main__':
- test()
diff --git a/Lib/test/test_httplib.py b/Lib/test/test_httplib.py
deleted file mode 100644
--- a/Lib/test/test_httplib.py
+++ /dev/null
@@ -1,472 +0,0 @@
-import httplib
-import array
-import httplib
-import StringIO
-import socket
-import errno
-
-import unittest
-TestCase = unittest.TestCase
-
-from test import test_support
-
-HOST = test_support.HOST
-
-class FakeSocket:
- def __init__(self, text, fileclass=StringIO.StringIO):
- self.text = text
- self.fileclass = fileclass
- self.data = ''
-
- def sendall(self, data):
- self.data += ''.join(data)
-
- def makefile(self, mode, bufsize=None):
- if mode != 'r' and mode != 'rb':
- raise httplib.UnimplementedFileMode()
- return self.fileclass(self.text)
-
-class EPipeSocket(FakeSocket):
-
- def __init__(self, text, pipe_trigger):
- # When sendall() is called with pipe_trigger, raise EPIPE.
- FakeSocket.__init__(self, text)
- self.pipe_trigger = pipe_trigger
-
- def sendall(self, data):
- if self.pipe_trigger in data:
- raise socket.error(errno.EPIPE, "gotcha")
- self.data += data
-
- def close(self):
- pass
-
-class NoEOFStringIO(StringIO.StringIO):
- """Like StringIO, but raises AssertionError on EOF.
-
- This is used below to test that httplib doesn't try to read
- more from the underlying file than it should.
- """
- def read(self, n=-1):
- data = StringIO.StringIO.read(self, n)
- if data == '':
- raise AssertionError('caller tried to read past EOF')
- return data
-
- def readline(self, length=None):
- data = StringIO.StringIO.readline(self, length)
- if data == '':
- raise AssertionError('caller tried to read past EOF')
- return data
-
-
-class HeaderTests(TestCase):
- def test_auto_headers(self):
- # Some headers are added automatically, but should not be added by
- # .request() if they are explicitly set.
-
- class HeaderCountingBuffer(list):
- def __init__(self):
- self.count = {}
- def append(self, item):
- kv = item.split(':')
- if len(kv) > 1:
- # item is a 'Key: Value' header string
- lcKey = kv[0].lower()
- self.count.setdefault(lcKey, 0)
- self.count[lcKey] += 1
- list.append(self, item)
-
- for explicit_header in True, False:
- for header in 'Content-length', 'Host', 'Accept-encoding':
- conn = httplib.HTTPConnection('example.com')
- conn.sock = FakeSocket('blahblahblah')
- conn._buffer = HeaderCountingBuffer()
-
- body = 'spamspamspam'
- headers = {}
- if explicit_header:
- headers[header] = str(len(body))
- conn.request('POST', '/', body, headers)
- self.assertEqual(conn._buffer.count[header.lower()], 1)
-
- def test_putheader(self):
- conn = httplib.HTTPConnection('example.com')
- conn.sock = FakeSocket(None)
- conn.putrequest('GET','/')
- conn.putheader('Content-length',42)
- self.assertTrue('Content-length: 42' in conn._buffer)
-
- def test_ipv6host_header(self):
- # Default host header on IPv6 transaction should wrapped by [] if
- # its actual IPv6 address
- expected = 'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
- 'Accept-Encoding: identity\r\n\r\n'
- conn = httplib.HTTPConnection('[2001::]:81')
- sock = FakeSocket('')
- conn.sock = sock
- conn.request('GET', '/foo')
- self.assertTrue(sock.data.startswith(expected))
-
- expected = 'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
- 'Accept-Encoding: identity\r\n\r\n'
- conn = httplib.HTTPConnection('[2001:102A::]')
- sock = FakeSocket('')
- conn.sock = sock
- conn.request('GET', '/foo')
- self.assertTrue(sock.data.startswith(expected))
-
-
-class BasicTest(TestCase):
- def test_status_lines(self):
- # Test HTTP status lines
-
- body = "HTTP/1.1 200 Ok\r\n\r\nText"
- sock = FakeSocket(body)
- resp = httplib.HTTPResponse(sock)
- resp.begin()
- self.assertEqual(resp.read(), 'Text')
- self.assertTrue(resp.isclosed())
-
- body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
- sock = FakeSocket(body)
- resp = httplib.HTTPResponse(sock)
- self.assertRaises(httplib.BadStatusLine, resp.begin)
-
- def test_bad_status_repr(self):
- exc = httplib.BadStatusLine('')
- self.assertEqual(repr(exc), '''BadStatusLine("\'\'",)''')
-
- def test_partial_reads(self):
- # if we have a lenght, the system knows when to close itself
- # same behaviour than when we read the whole thing with read()
- body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
- sock = FakeSocket(body)
- resp = httplib.HTTPResponse(sock)
- resp.begin()
- self.assertEqual(resp.read(2), 'Te')
- self.assertFalse(resp.isclosed())
- self.assertEqual(resp.read(2), 'xt')
- self.assertTrue(resp.isclosed())
-
- def test_host_port(self):
- # Check invalid host_port
-
- # Note that httplib does not accept user:password@ in the host-port.
- for hp in ("www.python.org:abc", "user:password at www.python.org"):
- self.assertRaises(httplib.InvalidURL, httplib.HTTP, hp)
-
- for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000", "fe80::207:e9ff:fe9b",
- 8000),
- ("www.python.org:80", "www.python.org", 80),
- ("www.python.org", "www.python.org", 80),
- ("www.python.org:", "www.python.org", 80),
- ("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80)):
- http = httplib.HTTP(hp)
- c = http._conn
- if h != c.host:
- self.fail("Host incorrectly parsed: %s != %s" % (h, c.host))
- if p != c.port:
- self.fail("Port incorrectly parsed: %s != %s" % (p, c.host))
-
- def test_response_headers(self):
- # test response with multiple message headers with the same field name.
- text = ('HTTP/1.1 200 OK\r\n'
- 'Set-Cookie: Customer="WILE_E_COYOTE";'
- ' Version="1"; Path="/acme"\r\n'
- 'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
- ' Path="/acme"\r\n'
- '\r\n'
- 'No body\r\n')
- hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
- ', '
- 'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
- s = FakeSocket(text)
- r = httplib.HTTPResponse(s)
- r.begin()
- cookies = r.getheader("Set-Cookie")
- if cookies != hdr:
- self.fail("multiple headers not combined properly")
-
- def test_read_head(self):
- # Test that the library doesn't attempt to read any data
- # from a HEAD request. (Tickles SF bug #622042.)
- sock = FakeSocket(
- 'HTTP/1.1 200 OK\r\n'
- 'Content-Length: 14432\r\n'
- '\r\n',
- NoEOFStringIO)
- resp = httplib.HTTPResponse(sock, method="HEAD")
- resp.begin()
- if resp.read() != "":
- self.fail("Did not expect response from HEAD request")
-
- @unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
- def test_send_file(self):
- expected = 'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
- 'Accept-Encoding: identity\r\nContent-Length:'
-
- body = open(__file__, 'rb')
- conn = httplib.HTTPConnection('example.com')
- sock = FakeSocket(body)
- conn.sock = sock
- conn.request('GET', '/foo', body)
- self.assertTrue(sock.data.startswith(expected))
-
- def test_send(self):
- expected = 'this is a test this is only a test'
- conn = httplib.HTTPConnection('example.com')
- sock = FakeSocket(None)
- conn.sock = sock
- conn.send(expected)
- self.assertEqual(expected, sock.data)
- sock.data = ''
- conn.send(array.array('c', expected))
- self.assertEqual(expected, sock.data)
- sock.data = ''
- conn.send(StringIO.StringIO(expected))
- self.assertEqual(expected, sock.data)
-
- def test_chunked(self):
- chunked_start = (
- 'HTTP/1.1 200 OK\r\n'
- 'Transfer-Encoding: chunked\r\n\r\n'
- 'a\r\n'
- 'hello worl\r\n'
- '1\r\n'
- 'd\r\n'
- )
- sock = FakeSocket(chunked_start + '0\r\n')
- resp = httplib.HTTPResponse(sock, method="GET")
- resp.begin()
- self.assertEqual(resp.read(), 'hello world')
- resp.close()
-
- for x in ('', 'foo\r\n'):
- sock = FakeSocket(chunked_start + x)
- resp = httplib.HTTPResponse(sock, method="GET")
- resp.begin()
- try:
- resp.read()
- except httplib.IncompleteRead, i:
- self.assertEqual(i.partial, 'hello world')
- self.assertEqual(repr(i),'IncompleteRead(11 bytes read)')
- self.assertEqual(str(i),'IncompleteRead(11 bytes read)')
- else:
- self.fail('IncompleteRead expected')
- finally:
- resp.close()
-
- def test_chunked_head(self):
- chunked_start = (
- 'HTTP/1.1 200 OK\r\n'
- 'Transfer-Encoding: chunked\r\n\r\n'
- 'a\r\n'
- 'hello world\r\n'
- '1\r\n'
- 'd\r\n'
- )
- sock = FakeSocket(chunked_start + '0\r\n')
- resp = httplib.HTTPResponse(sock, method="HEAD")
- resp.begin()
- self.assertEqual(resp.read(), '')
- self.assertEqual(resp.status, 200)
- self.assertEqual(resp.reason, 'OK')
- self.assertTrue(resp.isclosed())
-
- def test_negative_content_length(self):
- sock = FakeSocket('HTTP/1.1 200 OK\r\n'
- 'Content-Length: -1\r\n\r\nHello\r\n')
- resp = httplib.HTTPResponse(sock, method="GET")
- resp.begin()
- self.assertEqual(resp.read(), 'Hello\r\n')
- resp.close()
-
- def test_incomplete_read(self):
- sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
- resp = httplib.HTTPResponse(sock, method="GET")
- resp.begin()
- try:
- resp.read()
- except httplib.IncompleteRead as i:
- self.assertEqual(i.partial, 'Hello\r\n')
- self.assertEqual(repr(i),
- "IncompleteRead(7 bytes read, 3 more expected)")
- self.assertEqual(str(i),
- "IncompleteRead(7 bytes read, 3 more expected)")
- else:
- self.fail('IncompleteRead expected')
- finally:
- resp.close()
-
- def test_epipe(self):
- sock = EPipeSocket(
- "HTTP/1.0 401 Authorization Required\r\n"
- "Content-type: text/html\r\n"
- "WWW-Authenticate: Basic realm=\"example\"\r\n",
- b"Content-Length")
- conn = httplib.HTTPConnection("example.com")
- conn.sock = sock
- self.assertRaises(socket.error,
- lambda: conn.request("PUT", "/url", "body"))
- resp = conn.getresponse()
- self.assertEqual(401, resp.status)
- self.assertEqual("Basic realm=\"example\"",
- resp.getheader("www-authenticate"))
-
- def test_filenoattr(self):
- # Just test the fileno attribute in the HTTPResponse Object.
- body = "HTTP/1.1 200 Ok\r\n\r\nText"
- sock = FakeSocket(body)
- resp = httplib.HTTPResponse(sock)
- self.assertTrue(hasattr(resp,'fileno'),
- 'HTTPResponse should expose a fileno attribute')
-
- # Test lines overflowing the max line size (_MAXLINE in http.client)
-
- def test_overflowing_status_line(self):
- self.skipTest("disabled for HTTP 0.9 support")
- body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
- resp = httplib.HTTPResponse(FakeSocket(body))
- self.assertRaises((httplib.LineTooLong, httplib.BadStatusLine), resp.begin)
-
- def test_overflowing_header_line(self):
- body = (
- 'HTTP/1.1 200 OK\r\n'
- 'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
- )
- resp = httplib.HTTPResponse(FakeSocket(body))
- self.assertRaises(httplib.LineTooLong, resp.begin)
-
- def test_overflowing_chunked_line(self):
- body = (
- 'HTTP/1.1 200 OK\r\n'
- 'Transfer-Encoding: chunked\r\n\r\n'
- + '0' * 65536 + 'a\r\n'
- 'hello world\r\n'
- '0\r\n'
- )
- resp = httplib.HTTPResponse(FakeSocket(body))
- resp.begin()
- self.assertRaises(httplib.LineTooLong, resp.read)
-
-
-class OfflineTest(TestCase):
- def test_responses(self):
- self.assertEqual(httplib.responses[httplib.NOT_FOUND], "Not Found")
-
-
-class SourceAddressTest(TestCase):
- def setUp(self):
- self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.port = test_support.bind_port(self.serv)
- self.source_port = test_support.find_unused_port()
- self.serv.listen(5)
- self.conn = None
-
- def tearDown(self):
- if self.conn:
- self.conn.close()
- self.conn = None
- self.serv.close()
- self.serv = None
-
- def testHTTPConnectionSourceAddress(self):
- self.conn = httplib.HTTPConnection(HOST, self.port,
- source_address=('', self.source_port))
- self.conn.connect()
- self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
-
- @unittest.skipIf(not hasattr(httplib, 'HTTPSConnection'),
- 'httplib.HTTPSConnection not defined')
- def testHTTPSConnectionSourceAddress(self):
- self.conn = httplib.HTTPSConnection(HOST, self.port,
- source_address=('', self.source_port))
- # We don't test anything here other the constructor not barfing as
- # this code doesn't deal with setting up an active running SSL server
- # for an ssl_wrapped connect() to actually return from.
-
-
-class TimeoutTest(TestCase):
- PORT = None
-
- def setUp(self):
- self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- TimeoutTest.PORT = test_support.bind_port(self.serv)
- self.serv.listen(5)
-
- def tearDown(self):
- self.serv.close()
- self.serv = None
-
- def testTimeoutAttribute(self):
- '''This will prove that the timeout gets through
- HTTPConnection and into the socket.
- '''
- # default -- use global socket timeout
- self.assertTrue(socket.getdefaulttimeout() is None)
- socket.setdefaulttimeout(30)
- try:
- httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT)
- httpConn.connect()
- finally:
- socket.setdefaulttimeout(None)
- self.assertEqual(httpConn.sock.gettimeout(), 30)
- httpConn.close()
-
- # no timeout -- do not use global socket default
- self.assertTrue(socket.getdefaulttimeout() is None)
- socket.setdefaulttimeout(30)
- try:
- httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT,
- timeout=None)
- httpConn.connect()
- finally:
- socket.setdefaulttimeout(None)
- self.assertEqual(httpConn.sock.gettimeout(), None)
- httpConn.close()
-
- # a value
- httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
- httpConn.connect()
- self.assertEqual(httpConn.sock.gettimeout(), 30)
- httpConn.close()
-
-
-class HTTPSTimeoutTest(TestCase):
-# XXX Here should be tests for HTTPS, there isn't any right now!
-
- def test_attributes(self):
- # simple test to check it's storing it
- if hasattr(httplib, 'HTTPSConnection'):
- h = httplib.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
- self.assertEqual(h.timeout, 30)
-
- @unittest.skipIf(not hasattr(httplib, 'HTTPS'), 'httplib.HTTPS not available')
- def test_host_port(self):
- # Check invalid host_port
-
- # Note that httplib does not accept user:password@ in the host-port.
- for hp in ("www.python.org:abc", "user:password at www.python.org"):
- self.assertRaises(httplib.InvalidURL, httplib.HTTP, hp)
-
- for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000", "fe80::207:e9ff:fe9b",
- 8000),
- ("pypi.python.org:443", "pypi.python.org", 443),
- ("pypi.python.org", "pypi.python.org", 443),
- ("pypi.python.org:", "pypi.python.org", 443),
- ("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443)):
- http = httplib.HTTPS(hp)
- c = http._conn
- if h != c.host:
- self.fail("Host incorrectly parsed: %s != %s" % (h, c.host))
- if p != c.port:
- self.fail("Port incorrectly parsed: %s != %s" % (p, c.host))
-
-
-def test_main(verbose=None):
- test_support.run_unittest(HeaderTests, OfflineTest, BasicTest, TimeoutTest,
- HTTPSTimeoutTest, SourceAddressTest)
-
-if __name__ == '__main__':
- test_main()
diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py
deleted file mode 100644
--- a/Lib/test/test_httpservers.py
+++ /dev/null
@@ -1,543 +0,0 @@
-"""Unittests for the various HTTPServer modules.
-
-Written by Cody A.W. Somerville <cody-somerville at ubuntu.com>,
-Josip Dzolonga, and Michael Otteneder for the 2007/08 GHOP contest.
-"""
-
-import os
-import sys
-import re
-import base64
-import shutil
-import urllib
-import httplib
-import tempfile
-import unittest
-import CGIHTTPServer
-
-
-from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
-from SimpleHTTPServer import SimpleHTTPRequestHandler
-from CGIHTTPServer import CGIHTTPRequestHandler
-from StringIO import StringIO
-from test import test_support
-
-
-threading = test_support.import_module('threading')
-
-
-class NoLogRequestHandler:
- def log_message(self, *args):
- # don't write log messages to stderr
- pass
-
-class SocketlessRequestHandler(SimpleHTTPRequestHandler):
- def __init__(self):
- self.get_called = False
- self.protocol_version = "HTTP/1.1"
-
- def do_GET(self):
- self.get_called = True
- self.send_response(200)
- self.send_header('Content-Type', 'text/html')
- self.end_headers()
- self.wfile.write(b'<html><body>Data</body></html>\r\n')
-
- def log_message(self, fmt, *args):
- pass
-
-
-class TestServerThread(threading.Thread):
- def __init__(self, test_object, request_handler):
- threading.Thread.__init__(self)
- self.request_handler = request_handler
- self.test_object = test_object
-
- def run(self):
- self.server = HTTPServer(('', 0), self.request_handler)
- self.test_object.PORT = self.server.socket.getsockname()[1]
- self.test_object.server_started.set()
- self.test_object = None
- try:
- self.server.serve_forever(0.05)
- finally:
- self.server.server_close()
-
- def stop(self):
- self.server.shutdown()
-
-
-class BaseTestCase(unittest.TestCase):
- def setUp(self):
- self._threads = test_support.threading_setup()
- os.environ = test_support.EnvironmentVarGuard()
- self.server_started = threading.Event()
- self.thread = TestServerThread(self, self.request_handler)
- self.thread.start()
- self.server_started.wait()
-
- def tearDown(self):
- self.thread.stop()
- os.environ.__exit__()
- test_support.threading_cleanup(*self._threads)
-
- def request(self, uri, method='GET', body=None, headers={}):
- self.connection = httplib.HTTPConnection('localhost', self.PORT)
- self.connection.request(method, uri, body, headers)
- return self.connection.getresponse()
-
-class BaseHTTPRequestHandlerTestCase(unittest.TestCase):
- """Test the functionality of the BaseHTTPServer focussing on
- BaseHTTPRequestHandler.
- """
-
- HTTPResponseMatch = re.compile('HTTP/1.[0-9]+ 200 OK')
-
- def setUp (self):
- self.handler = SocketlessRequestHandler()
-
- def send_typical_request(self, message):
- input_msg = StringIO(message)
- output = StringIO()
- self.handler.rfile = input_msg
- self.handler.wfile = output
- self.handler.handle_one_request()
- output.seek(0)
- return output.readlines()
-
- def verify_get_called(self):
- self.assertTrue(self.handler.get_called)
-
- def verify_expected_headers(self, headers):
- for fieldName in 'Server: ', 'Date: ', 'Content-Type: ':
- self.assertEqual(sum(h.startswith(fieldName) for h in headers), 1)
-
- def verify_http_server_response(self, response):
- match = self.HTTPResponseMatch.search(response)
- self.assertTrue(match is not None)
-
- def test_http_1_1(self):
- result = self.send_typical_request('GET / HTTP/1.1\r\n\r\n')
- self.verify_http_server_response(result[0])
- self.verify_expected_headers(result[1:-1])
- self.verify_get_called()
- self.assertEqual(result[-1], '<html><body>Data</body></html>\r\n')
-
- def test_http_1_0(self):
- result = self.send_typical_request('GET / HTTP/1.0\r\n\r\n')
- self.verify_http_server_response(result[0])
- self.verify_expected_headers(result[1:-1])
- self.verify_get_called()
- self.assertEqual(result[-1], '<html><body>Data</body></html>\r\n')
-
- def test_http_0_9(self):
- result = self.send_typical_request('GET / HTTP/0.9\r\n\r\n')
- self.assertEqual(len(result), 1)
- self.assertEqual(result[0], '<html><body>Data</body></html>\r\n')
- self.verify_get_called()
-
- def test_with_continue_1_0(self):
- result = self.send_typical_request('GET / HTTP/1.0\r\nExpect: 100-continue\r\n\r\n')
- self.verify_http_server_response(result[0])
- self.verify_expected_headers(result[1:-1])
- self.verify_get_called()
- self.assertEqual(result[-1], '<html><body>Data</body></html>\r\n')
-
- def test_request_length(self):
- # Issue #10714: huge request lines are discarded, to avoid Denial
- # of Service attacks.
- result = self.send_typical_request(b'GET ' + b'x' * 65537)
- self.assertEqual(result[0], b'HTTP/1.1 414 Request-URI Too Long\r\n')
- self.assertFalse(self.handler.get_called)
-
-
-class BaseHTTPServerTestCase(BaseTestCase):
- class request_handler(NoLogRequestHandler, BaseHTTPRequestHandler):
- protocol_version = 'HTTP/1.1'
- default_request_version = 'HTTP/1.1'
-
- def do_TEST(self):
- self.send_response(204)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Connection', 'close')
- self.end_headers()
-
- def do_KEEP(self):
- self.send_response(204)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Connection', 'keep-alive')
- self.end_headers()
-
- def do_KEYERROR(self):
- self.send_error(999)
-
- def do_CUSTOM(self):
- self.send_response(999)
- self.send_header('Content-Type', 'text/html')
- self.send_header('Connection', 'close')
- self.end_headers()
-
- def setUp(self):
- BaseTestCase.setUp(self)
- self.con = httplib.HTTPConnection('localhost', self.PORT)
- self.con.connect()
-
- def test_command(self):
- self.con.request('GET', '/')
- res = self.con.getresponse()
- self.assertEqual(res.status, 501)
-
- def test_request_line_trimming(self):
- self.con._http_vsn_str = 'HTTP/1.1\n'
- self.con.putrequest('XYZBOGUS', '/')
- self.con.endheaders()
- res = self.con.getresponse()
- self.assertEqual(res.status, 501)
-
- def test_version_bogus(self):
- self.con._http_vsn_str = 'FUBAR'
- self.con.putrequest('GET', '/')
- self.con.endheaders()
- res = self.con.getresponse()
- self.assertEqual(res.status, 400)
-
- def test_version_digits(self):
- self.con._http_vsn_str = 'HTTP/9.9.9'
- self.con.putrequest('GET', '/')
- self.con.endheaders()
- res = self.con.getresponse()
- self.assertEqual(res.status, 400)
-
- def test_version_none_get(self):
- self.con._http_vsn_str = ''
- self.con.putrequest('GET', '/')
- self.con.endheaders()
- res = self.con.getresponse()
- self.assertEqual(res.status, 501)
-
- def test_version_none(self):
- # Test that a valid method is rejected when not HTTP/1.x
- self.con._http_vsn_str = ''
- self.con.putrequest('CUSTOM', '/')
- self.con.endheaders()
- res = self.con.getresponse()
- self.assertEqual(res.status, 400)
-
- def test_version_invalid(self):
- self.con._http_vsn = 99
- self.con._http_vsn_str = 'HTTP/9.9'
- self.con.putrequest('GET', '/')
- self.con.endheaders()
- res = self.con.getresponse()
- self.assertEqual(res.status, 505)
-
- def test_send_blank(self):
- self.con._http_vsn_str = ''
- self.con.putrequest('', '')
- self.con.endheaders()
- res = self.con.getresponse()
- self.assertEqual(res.status, 400)
-
- def test_header_close(self):
- self.con.putrequest('GET', '/')
- self.con.putheader('Connection', 'close')
- self.con.endheaders()
- res = self.con.getresponse()
- self.assertEqual(res.status, 501)
-
- def test_head_keep_alive(self):
- self.con._http_vsn_str = 'HTTP/1.1'
- self.con.putrequest('GET', '/')
- self.con.putheader('Connection', 'keep-alive')
- self.con.endheaders()
- res = self.con.getresponse()
- self.assertEqual(res.status, 501)
-
- def test_handler(self):
- self.con.request('TEST', '/')
- res = self.con.getresponse()
- self.assertEqual(res.status, 204)
-
- def test_return_header_keep_alive(self):
- self.con.request('KEEP', '/')
- res = self.con.getresponse()
- self.assertEqual(res.getheader('Connection'), 'keep-alive')
- self.con.request('TEST', '/')
- self.addCleanup(self.con.close)
-
- def test_internal_key_error(self):
- self.con.request('KEYERROR', '/')
- res = self.con.getresponse()
- self.assertEqual(res.status, 999)
-
- def test_return_custom_status(self):
- self.con.request('CUSTOM', '/')
- res = self.con.getresponse()
- self.assertEqual(res.status, 999)
-
-
-class SimpleHTTPServerTestCase(BaseTestCase):
- class request_handler(NoLogRequestHandler, SimpleHTTPRequestHandler):
- pass
-
- def setUp(self):
- BaseTestCase.setUp(self)
- self.cwd = os.getcwd()
- basetempdir = tempfile.gettempdir()
- os.chdir(basetempdir)
- self.data = 'We are the knights who say Ni!'
- self.tempdir = tempfile.mkdtemp(dir=basetempdir)
- self.tempdir_name = os.path.basename(self.tempdir)
- temp = open(os.path.join(self.tempdir, 'test'), 'wb')
- temp.write(self.data)
- temp.close()
-
- def tearDown(self):
- try:
- os.chdir(self.cwd)
- try:
- shutil.rmtree(self.tempdir)
- except OSError:
- pass
- finally:
- BaseTestCase.tearDown(self)
-
- def check_status_and_reason(self, response, status, data=None):
- body = response.read()
- self.assertTrue(response)
- self.assertEqual(response.status, status)
- self.assertIsNotNone(response.reason)
- if data:
- self.assertEqual(data, body)
-
- def test_get(self):
- #constructs the path relative to the root directory of the HTTPServer
- response = self.request(self.tempdir_name + '/test')
- self.check_status_and_reason(response, 200, data=self.data)
- response = self.request(self.tempdir_name + '/')
- self.check_status_and_reason(response, 200)
- response = self.request(self.tempdir_name)
- self.check_status_and_reason(response, 301)
- response = self.request('/ThisDoesNotExist')
- self.check_status_and_reason(response, 404)
- response = self.request('/' + 'ThisDoesNotExist' + '/')
- self.check_status_and_reason(response, 404)
- f = open(os.path.join(self.tempdir_name, 'index.html'), 'w')
- response = self.request('/' + self.tempdir_name + '/')
- self.check_status_and_reason(response, 200)
-
- # chmod() doesn't work as expected on Windows, and filesystem
- # permissions are ignored by root on Unix.
- if os.name == 'posix' and os.geteuid() != 0:
- os.chmod(self.tempdir, 0)
- response = self.request(self.tempdir_name + '/')
- self.check_status_and_reason(response, 404)
- os.chmod(self.tempdir, 0755)
-
- def test_head(self):
- response = self.request(
- self.tempdir_name + '/test', method='HEAD')
- self.check_status_and_reason(response, 200)
- self.assertEqual(response.getheader('content-length'),
- str(len(self.data)))
- self.assertEqual(response.getheader('content-type'),
- 'application/octet-stream')
-
- def test_invalid_requests(self):
- response = self.request('/', method='FOO')
- self.check_status_and_reason(response, 501)
- # requests must be case sensitive,so this should fail too
- response = self.request('/', method='get')
- self.check_status_and_reason(response, 501)
- response = self.request('/', method='GETs')
- self.check_status_and_reason(response, 501)
-
-
-cgi_file1 = """\
-#!%s
-
-print "Content-type: text/html"
-print
-print "Hello World"
-"""
-
-cgi_file2 = """\
-#!%s
-import cgi
-
-print "Content-type: text/html"
-print
-
-form = cgi.FieldStorage()
-print "%%s, %%s, %%s" %% (form.getfirst("spam"), form.getfirst("eggs"),
- form.getfirst("bacon"))
-"""
-
-
- at unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
- "This test can't be run reliably as root (issue #13308).")
-class CGIHTTPServerTestCase(BaseTestCase):
- class request_handler(NoLogRequestHandler, CGIHTTPRequestHandler):
- pass
-
- def setUp(self):
- BaseTestCase.setUp(self)
- self.parent_dir = tempfile.mkdtemp()
- self.cgi_dir = os.path.join(self.parent_dir, 'cgi-bin')
- os.mkdir(self.cgi_dir)
-
- # The shebang line should be pure ASCII: use symlink if possible.
- # See issue #7668.
- if hasattr(os, 'symlink'):
- self.pythonexe = os.path.join(self.parent_dir, 'python')
- os.symlink(sys.executable, self.pythonexe)
- else:
- self.pythonexe = sys.executable
-
- self.file1_path = os.path.join(self.cgi_dir, 'file1.py')
- with open(self.file1_path, 'w') as file1:
- file1.write(cgi_file1 % self.pythonexe)
- os.chmod(self.file1_path, 0777)
-
- self.file2_path = os.path.join(self.cgi_dir, 'file2.py')
- with open(self.file2_path, 'w') as file2:
- file2.write(cgi_file2 % self.pythonexe)
- os.chmod(self.file2_path, 0777)
-
- self.cwd = os.getcwd()
- os.chdir(self.parent_dir)
-
- def tearDown(self):
- try:
- os.chdir(self.cwd)
- if self.pythonexe != sys.executable:
- os.remove(self.pythonexe)
- os.remove(self.file1_path)
- os.remove(self.file2_path)
- os.rmdir(self.cgi_dir)
- os.rmdir(self.parent_dir)
- finally:
- BaseTestCase.tearDown(self)
-
- def test_url_collapse_path(self):
- # verify tail is the last portion and head is the rest on proper urls
- test_vectors = {
- '': '//',
- '..': IndexError,
- '/.//..': IndexError,
- '/': '//',
- '//': '//',
- '/\\': '//\\',
- '/.//': '//',
- 'cgi-bin/file1.py': '/cgi-bin/file1.py',
- '/cgi-bin/file1.py': '/cgi-bin/file1.py',
- 'a': '//a',
- '/a': '//a',
- '//a': '//a',
- './a': '//a',
- './C:/': '/C:/',
- '/a/b': '/a/b',
- '/a/b/': '/a/b/',
- '/a/b/.': '/a/b/',
- '/a/b/c/..': '/a/b/',
- '/a/b/c/../d': '/a/b/d',
- '/a/b/c/../d/e/../f': '/a/b/d/f',
- '/a/b/c/../d/e/../../f': '/a/b/f',
- '/a/b/c/../d/e/.././././..//f': '/a/b/f',
- '../a/b/c/../d/e/.././././..//f': IndexError,
- '/a/b/c/../d/e/../../../f': '/a/f',
- '/a/b/c/../d/e/../../../../f': '//f',
- '/a/b/c/../d/e/../../../../../f': IndexError,
- '/a/b/c/../d/e/../../../../f/..': '//',
- '/a/b/c/../d/e/../../../../f/../.': '//',
- }
- for path, expected in test_vectors.iteritems():
- if isinstance(expected, type) and issubclass(expected, Exception):
- self.assertRaises(expected,
- CGIHTTPServer._url_collapse_path, path)
- else:
- actual = CGIHTTPServer._url_collapse_path(path)
- self.assertEqual(expected, actual,
- msg='path = %r\nGot: %r\nWanted: %r' %
- (path, actual, expected))
-
- @unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
- def test_headers_and_content(self):
- res = self.request('/cgi-bin/file1.py')
- self.assertEqual(('Hello World\n', 'text/html', 200),
- (res.read(), res.getheader('Content-type'), res.status))
-
- @unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
- def test_post(self):
- params = urllib.urlencode({'spam' : 1, 'eggs' : 'python', 'bacon' : 123456})
- headers = {'Content-type' : 'application/x-www-form-urlencoded'}
- res = self.request('/cgi-bin/file2.py', 'POST', params, headers)
-
- self.assertEqual(res.read(), '1, python, 123456\n')
-
- def test_invaliduri(self):
- res = self.request('/cgi-bin/invalid')
- res.read()
- self.assertEqual(res.status, 404)
-
- @unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
- def test_authorization(self):
- headers = {'Authorization' : 'Basic %s' %
- base64.b64encode('username:pass')}
- res = self.request('/cgi-bin/file1.py', 'GET', headers=headers)
- self.assertEqual(('Hello World\n', 'text/html', 200),
- (res.read(), res.getheader('Content-type'), res.status))
-
- @unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
- def test_no_leading_slash(self):
- # http://bugs.python.org/issue2254
- res = self.request('cgi-bin/file1.py')
- self.assertEqual(('Hello World\n', 'text/html', 200),
- (res.read(), res.getheader('Content-type'), res.status))
-
- @unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
- def test_os_environ_is_not_altered(self):
- signature = "Test CGI Server"
- os.environ['SERVER_SOFTWARE'] = signature
- res = self.request('/cgi-bin/file1.py')
- self.assertEqual((b'Hello World\n', 'text/html', 200),
- (res.read(), res.getheader('Content-type'), res.status))
- self.assertEqual(os.environ['SERVER_SOFTWARE'], signature)
-
-
-class SimpleHTTPRequestHandlerTestCase(unittest.TestCase):
- """ Test url parsing """
- def setUp(self):
- self.translated = os.getcwd()
- self.translated = os.path.join(self.translated, 'filename')
- self.handler = SocketlessRequestHandler()
-
- def test_query_arguments(self):
- path = self.handler.translate_path('/filename')
- self.assertEqual(path, self.translated)
- path = self.handler.translate_path('/filename?foo=bar')
- self.assertEqual(path, self.translated)
- path = self.handler.translate_path('/filename?a=b&spam=eggs#zot')
- self.assertEqual(path, self.translated)
-
- def test_start_with_double_slash(self):
- path = self.handler.translate_path('//filename')
- self.assertEqual(path, self.translated)
- path = self.handler.translate_path('//filename?foo=bar')
- self.assertEqual(path, self.translated)
-
-
-def test_main(verbose=None):
- try:
- cwd = os.getcwd()
- test_support.run_unittest(BaseHTTPRequestHandlerTestCase,
- SimpleHTTPRequestHandlerTestCase,
- BaseHTTPServerTestCase,
- SimpleHTTPServerTestCase,
- CGIHTTPServerTestCase
- )
- finally:
- os.chdir(cwd)
-
-if __name__ == '__main__':
- test_main()
diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py
--- a/Lib/test/test_logging.py
+++ b/Lib/test/test_logging.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
#
-# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
+# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
@@ -18,7 +18,7 @@
"""Test harness for the logging module. Run all tests.
-Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
+Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
"""
import logging
@@ -31,6 +31,7 @@
import gc
import json
import os
+import random
import re
import select
import socket
@@ -40,6 +41,7 @@
import tempfile
from test.test_support import captured_stdout, run_with_locale, run_unittest
import textwrap
+import time
import unittest
import warnings
import weakref
@@ -272,6 +274,8 @@
('INF.BADPARENT', 'INFO', '4'),
])
+ def test_invalid_name(self):
+ self.assertRaises(TypeError, logging.getLogger, any)
class BasicFilterTest(BaseTest):
@@ -562,6 +566,38 @@
datefmt=
"""
+ # config1a moves the handler to the root.
+ config1a = """
+ [loggers]
+ keys=root,parser
+
+ [handlers]
+ keys=hand1
+
+ [formatters]
+ keys=form1
+
+ [logger_root]
+ level=WARNING
+ handlers=hand1
+
+ [logger_parser]
+ level=DEBUG
+ handlers=
+ propagate=1
+ qualname=compiler.parser
+
+ [handler_hand1]
+ class=StreamHandler
+ level=NOTSET
+ formatter=form1
+ args=(sys.stdout,)
+
+ [formatter_form1]
+ format=%(levelname)s ++ %(message)s
+ datefmt=
+ """
+
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
@@ -640,6 +676,44 @@
datefmt=
"""
+ # config7 adds a compiler logger.
+ config7 = """
+ [loggers]
+ keys=root,parser,compiler
+
+ [handlers]
+ keys=hand1
+
+ [formatters]
+ keys=form1
+
+ [logger_root]
+ level=WARNING
+ handlers=hand1
+
+ [logger_compiler]
+ level=DEBUG
+ handlers=
+ propagate=1
+ qualname=compiler
+
+ [logger_parser]
+ level=DEBUG
+ handlers=
+ propagate=1
+ qualname=compiler.parser
+
+ [handler_hand1]
+ class=StreamHandler
+ level=NOTSET
+ formatter=form1
+ args=(sys.stdout,)
+
+ [formatter_form1]
+ format=%(levelname)s ++ %(message)s
+ datefmt=
+ """
+
def apply_config(self, conf):
file = cStringIO.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file)
@@ -703,6 +777,49 @@
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
+ def test_config7_ok(self):
+ with captured_stdout() as output:
+ self.apply_config(self.config1a)
+ logger = logging.getLogger("compiler.parser")
+ # See issue #11424. compiler-hyphenated sorts
+ # between compiler and compiler.xyz and this
+ # was preventing compiler.xyz from being included
+ # in the child loggers of compiler because of an
+ # overzealous loop termination condition.
+ hyphenated = logging.getLogger('compiler-hyphenated')
+ # All will output a message
+ logger.info(self.next_message())
+ logger.error(self.next_message())
+ hyphenated.critical(self.next_message())
+ self.assert_log_lines([
+ ('INFO', '1'),
+ ('ERROR', '2'),
+ ('CRITICAL', '3'),
+ ], stream=output)
+ # Original logger output is empty.
+ self.assert_log_lines([])
+ with captured_stdout() as output:
+ self.apply_config(self.config7)
+ logger = logging.getLogger("compiler.parser")
+ self.assertFalse(logger.disabled)
+ # Both will output a message
+ logger.info(self.next_message())
+ logger.error(self.next_message())
+ logger = logging.getLogger("compiler.lexer")
+ # Both will output a message
+ logger.info(self.next_message())
+ logger.error(self.next_message())
+ # Will not appear
+ hyphenated.critical(self.next_message())
+ self.assert_log_lines([
+ ('INFO', '4'),
+ ('ERROR', '5'),
+ ('INFO', '6'),
+ ('ERROR', '7'),
+ ], stream=output)
+ # Original logger output is empty.
+ self.assert_log_lines([])
+
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request. It saves the log message in the
@@ -755,14 +872,6 @@
self.finished = threading.Event()
def serve_until_stopped(self):
- if sys.platform.startswith('java'):
- # XXX: There's a problem using cpython_compatibile_select
- # here: it seems to be due to the fact that
- # cpython_compatible_select switches blocking mode on while
- # a separate thread is reading from the same socket, causing
- # a read of 0 in LogRecordStreamHandler.handle (which
- # deadlocks this test)
- self.socket.setblocking(0)
while not self.abort:
rd, wr, ex = select.select([self.socket.fileno()], [], [],
self.timeout)
@@ -1766,6 +1875,47 @@
self.assertTrue(c2 is c3)
+class HandlerTest(BaseTest):
+
+ @unittest.skipIf(os.name in ('java', 'nt'), 'WatchedFileHandler not appropriate for Jython or Windows.')
+ @unittest.skipUnless(threading, 'Threading required for this test.')
+ def test_race(self):
+ # Issue #14632 refers.
+ def remove_loop(fname, tries):
+ for _ in range(tries):
+ try:
+ os.unlink(fname)
+ except OSError:
+ pass
+ time.sleep(0.004 * random.randint(0, 4))
+
+ del_count = 500
+ log_count = 500
+
+ for delay in (False, True):
+ fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
+ os.close(fd)
+ remover = threading.Thread(target=remove_loop, args=(fn, del_count))
+ remover.daemon = True
+ remover.start()
+ h = logging.handlers.WatchedFileHandler(fn, delay=delay)
+ f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
+ h.setFormatter(f)
+ try:
+ for _ in range(log_count):
+ time.sleep(0.005)
+ r = logging.makeLogRecord({'msg': 'testing' })
+ h.handle(r)
+ finally:
+ remover.join()
+ try:
+ h.close()
+ except ValueError:
+ pass
+ if os.path.exists(fn):
+ os.unlink(fn)
+
+
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@@ -1775,7 +1925,7 @@
CustomLevelsAndFiltersTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, MemoryTest,
EncodingTest, WarningsTest, ConfigDictTest, ManagerTest,
- ChildLoggerTest)
+ ChildLoggerTest, HandlerTest)
if __name__ == "__main__":
test_main()
diff --git a/Lib/test/test_mhlib.py b/Lib/test/test_mhlib.py
--- a/Lib/test/test_mhlib.py
+++ b/Lib/test/test_mhlib.py
@@ -7,10 +7,10 @@
### mhlib. It should.
import unittest
-from test.test_support import is_jython, run_unittest, TESTFN, TestSkipped
+from test.test_support import is_jython, run_unittest, TESTFN, import_module
import os, StringIO
import sys
-import mhlib
+mhlib = import_module('mhlib', deprecated=True)
if (sys.platform.startswith("win") or sys.platform=="riscos" or
sys.platform.startswith("atheos") or (is_jython and os._name != 'posix')):
@@ -21,8 +21,8 @@
# link counts, and that causes test_listfolders() here to get back
# an empty list from its call of listallfolders().
# The other tests here pass on Windows.
- raise TestSkipped("skipped on %s -- " % sys.platform +
- "too many Unix assumptions")
+ raise unittest.SkipTest("skipped on %s -- " % sys.platform +
+ "too many Unix assumptions")
_mhroot = TESTFN+"_MH"
_mhpath = os.path.join(_mhroot, "MH")
@@ -148,7 +148,7 @@
writeCurMessage('inbox', 2)
mh = getMH()
- eq = self.assertEquals
+ eq = self.assertEqual
eq(mh.getprofile('Editor'), 'emacs')
eq(mh.getprofile('not-set'), None)
eq(mh.getpath(), os.path.abspath(_mhpath))
@@ -171,37 +171,34 @@
def test_listfolders(self):
mh = getMH()
- eq = self.assertEquals
+ eq = self.assertEqual
folders = mh.listfolders()
folders.sort()
eq(folders, ['deep', 'inbox', 'wide'])
- #link counts from os.stat always return 0 in jython, which causes
- #lisallfolders and listsubfolders to return empty lists.
- if not sys.platform.startswith("java"):
- folders = mh.listallfolders()
- folders.sort()
- tfolders = map(normF, ['deep', 'deep/f1', 'deep/f2', 'deep/f2/f3',
+ folders = mh.listallfolders()
+ folders.sort()
+ tfolders = map(normF, ['deep', 'deep/f1', 'deep/f2', 'deep/f2/f3',
'inbox', 'wide'])
- tfolders.sort()
- eq(folders, tfolders)
+ tfolders.sort()
+ eq(folders, tfolders)
- folders = mh.listsubfolders('deep')
- folders.sort()
- eq(folders, map(normF, ['deep/f1', 'deep/f2']))
+ folders = mh.listsubfolders('deep')
+ folders.sort()
+ eq(folders, map(normF, ['deep/f1', 'deep/f2']))
- folders = mh.listallsubfolders('deep')
- folders.sort()
- eq(folders, map(normF, ['deep/f1', 'deep/f2', 'deep/f2/f3']))
- eq(mh.listsubfolders(normF('deep/f2')), [normF('deep/f2/f3')])
+ folders = mh.listallsubfolders('deep')
+ folders.sort()
+ eq(folders, map(normF, ['deep/f1', 'deep/f2', 'deep/f2/f3']))
+ eq(mh.listsubfolders(normF('deep/f2')), [normF('deep/f2/f3')])
- eq(mh.listsubfolders('inbox'), [])
- eq(mh.listallsubfolders('inbox'), [])
+ eq(mh.listsubfolders('inbox'), [])
+ eq(mh.listallsubfolders('inbox'), [])
def test_sequence(self):
mh = getMH()
- eq = self.assertEquals
+ eq = self.assertEqual
writeCurMessage('wide', 55)
f = mh.openfolder('wide')
@@ -256,12 +253,12 @@
def test_modify(self):
mh = getMH()
- eq = self.assertEquals
+ eq = self.assertEqual
mh.makefolder("dummy1")
- self.assert_("dummy1" in mh.listfolders())
+ self.assertIn("dummy1", mh.listfolders())
path = os.path.join(_mhpath, "dummy1")
- self.assert_(os.path.exists(path))
+ self.assertTrue(os.path.exists(path))
f = mh.openfolder('dummy1')
def create(n):
@@ -313,12 +310,12 @@
mh.deletefolder('dummy1')
mh.deletefolder('dummy2')
- self.assert_('dummy1' not in mh.listfolders())
- self.assert_(not os.path.exists(path))
+ self.assertNotIn('dummy1', mh.listfolders())
+ self.assertTrue(not os.path.exists(path))
def test_read(self):
mh = getMH()
- eq = self.assertEquals
+ eq = self.assertEqual
f = mh.openfolder('inbox')
msg = f.openmessage(1)
diff --git a/Lib/test/test_select.py b/Lib/test/test_select.py
--- a/Lib/test/test_select.py
+++ b/Lib/test/test_select.py
@@ -134,16 +134,11 @@
else:
self.fail("Unregistering socket that is not registered should have raised KeyError")
-#
-# using the test_socket thread based server/client management, for convenience.
-#
-class ThreadedPollClientSocket(test_socket.ThreadedTCPSocketTest):
- HOST = HOST
- PORT = PORT
+class ThreadedPollClientSocket(test_socket.SocketConnectedTest):
def testSocketRegisteredBeforeConnected(self):
- self.cli_conn = self.serv.accept()
+ pass
def _testSocketRegisteredBeforeConnected(self):
timeout = 1000 # milliseconds
@@ -152,7 +147,7 @@
poll_object.register(self.cli, select.POLLOUT)
result_list = poll_object.poll(timeout)
result_sockets = [r[0] for r in result_list]
- self.failIf(self.cli in result_sockets, "Unconnected client socket should not have been selectable")
+ self.failUnless(self.cli in result_sockets, "Unconnected client socket should be selectable")
# Now connect the socket, but DO NOT register it again
self.cli.setblocking(0)
self.cli.connect( (self.HOST, self.PORT) )
@@ -161,25 +156,8 @@
result_sockets = [r[0] for r in result_list]
self.failUnless(self.cli in result_sockets, "Connected client socket should have been selectable")
- def testSocketMustBeNonBlocking(self):
- self.cli_conn = self.serv.accept()
-
- def _testSocketMustBeNonBlocking(self):
- self.cli.setblocking(1)
- self.cli.connect( (self.HOST, self.PORT) )
- timeout = 1000 # milliseconds
- poll_object = select.poll()
- try:
- poll_object.register(self.cli)
- except select.error, se:
- self.failUnlessEqual(se[0], errno.ESOCKISBLOCKING)
- except Exception, x:
- self.fail("Registering blocking socket should have raised select.error, not %s" % str(x))
- else:
- self.fail("Registering blocking socket should have raised select.error")
-
def testSelectOnSocketFileno(self):
- self.cli_conn = self.serv.accept()
+ pass
def _testSelectOnSocketFileno(self):
self.cli.setblocking(0)
@@ -190,42 +168,13 @@
except Exception, x:
self.fail("Selecting on socket.fileno() should not have raised exception: %s" % str(x))
-class TestPipes(unittest.TestCase):
-
- verbose = 1
-
- def test(self):
- import sys
- from test.test_support import verbose
- if sys.platform[:3] in ('win', 'mac', 'os2', 'riscos'):
- if verbose:
- print "Can't test select easily on", sys.platform
- return
- cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
- p = os.popen(cmd, 'r')
- for tout in (0, 1, 2, 4, 8, 16) + (None,)*10:
- if verbose:
- print 'timeout =', tout
- rfd, wfd, xfd = select.select([p], [], [], tout)
- if (rfd, wfd, xfd) == ([], [], []):
- continue
- if (rfd, wfd, xfd) == ([p], [], []):
- line = p.readline()
- if verbose:
- print repr(line)
- if not line:
- if verbose:
- print 'EOF'
- break
- continue
- self.fail('Unexpected return values from select(): %s' % str(rfd, wfd, xfd))
- p.close()
def test_main():
- if test_support.is_jython:
- del TestPipes.test
- test_support.run_unittest(__name__)
+ tests = [TestSelectInvalidParameters, TestSelectClientSocket, TestPollClientSocket, ThreadedPollClientSocket]
+ suites = [unittest.makeSuite(klass, 'test') for klass in tests]
+ test_support._run_suite(unittest.TestSuite(suites))
+
if __name__ == "__main__":
test_main()
diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py
--- a/Lib/test/test_socket.py
+++ b/Lib/test/test_socket.py
@@ -7,6 +7,7 @@
import jarray
import Queue
import platform
+import pprint
import select
import socket
import struct
@@ -15,6 +16,7 @@
import thread, threading
from weakref import proxy
from StringIO import StringIO
+from _socket import _check_threadpool_for_pending_threads
PORT = 50100
HOST = 'localhost'
@@ -124,15 +126,26 @@
if not self.server_ready.isSet():
self.server_ready.set()
self.client_ready.wait()
-
+
def _tearDown(self):
- self.done.wait()
+ self.done.wait() # wait for the client to exit
self.__tearDown()
+ msg = None
if not self.queue.empty():
msg = self.queue.get()
- self.fail(msg)
+ # Wait up to one second for there not to be pending threads
+ for i in xrange(10):
+ pending_threads = _check_threadpool_for_pending_threads()
+ if len(pending_threads) == 0:
+ break
+ time.sleep(0.1)
+
+ if pending_threads or msg:
+ self.fail("msg={} Pending threads in Netty pool={}".format(msg, pprint.pformat(pending_threads)))
+
+
def clientRun(self, test_func):
self.server_ready.wait()
self.client_ready.set()
@@ -150,10 +163,6 @@
def clientTearDown(self):
self.done.set()
- if sys.platform[:4] != 'java':
- # This causes the whole process to exit on jython
- # Probably related to problems with daemon status of threads
- thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
@@ -275,23 +284,6 @@
socket.SOL_SOCKET
socket.SO_REUSEADDR
- def testConstantToNameMapping(self):
- # Testing for mission critical constants
- for name, expected_name_starts in [
- ('IPPROTO_ICMP', ['IPPROTO_']),
- ('IPPROTO_TCP', ['IPPROTO_']),
- ('IPPROTO_UDP', ['IPPROTO_']),
- ('SO_BROADCAST', ['SO_', 'TCP_']),
- ('SO_KEEPALIVE', ['SO_', 'TCP_']),
- ('SO_ACCEPTCONN', ['SO_', 'TCP_']),
- ('SO_DEBUG', ['SO_', 'TCP_']),
- ('SOCK_DGRAM', ['SOCK_']),
- ('SOCK_RAW', ['SOCK_']),
- ('SOL_SOCKET', ['SOL_', 'IPPROTO_']),
- ('TCP_NODELAY', ['SO_', 'TCP_']),
- ]:
- self.failUnlessEqual(socket._constant_to_name(getattr(socket, name), expected_name_starts), name)
-
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
@@ -715,7 +707,7 @@
for expected_value in values:
sock.setsockopt(level, option, expected_value)
retrieved_value = sock.getsockopt(level, option)
- msg = "Retrieved option(%s, %s) value %s != %s(value set)" % (level, option, retrieved_value, expected_value)
+ msg = "TCP Retrieved option(%s, %s) value %s != %s(value set)" % (level, option, retrieved_value, expected_value)
if option == socket.SO_RCVBUF:
self.assert_(retrieved_value >= expected_value, msg)
else:
@@ -729,7 +721,7 @@
sock.bind( (HOST, PORT) )
retrieved_option_value = sock.getsockopt(level, option)
self.failUnlessEqual(retrieved_option_value, values[-1], \
- "Option value '(%s, %s)'='%s' did not propagate to implementation socket: got %s" % (level, option, values[-1], retrieved_option_value) )
+ "UDP Option value '(%s, %s)'='%s' did not propagate to implementation socket: got %s" % (level, option, values[-1], retrieved_option_value) )
self._testSetAndGetOption(sock, level, option, values)
finally:
sock.close()
@@ -747,10 +739,10 @@
self._testSetAndGetOption(sock, level, option, values)
# now connect the socket i.e. cause the implementation socket to be created
# First bind, so that the SO_REUSEADDR setting propagates
- sock.bind( (HOST, PORT+1) )
+ #sock.bind( (HOST, PORT+1) )
sock.connect( (HOST, PORT) )
retrieved_option_value = sock.getsockopt(level, option)
- msg = "Option value '%s'='%s' did not propagate to implementation socket: got %s" % (option, values[-1], retrieved_option_value)
+ msg = "TCP client option value '%s'='%s' did not propagate to implementation socket: got %s" % (option, values[-1], retrieved_option_value)
if option in (socket.SO_RCVBUF, socket.SO_SNDBUF):
# NOTE: there's no guarantee that bufsize will be the
# exact setsockopt value, particularly after
@@ -765,6 +757,7 @@
server_sock.close()
if sock:
sock.close()
+ pass
def _testTCPClientInheritedOption(self, level, option, values):
cli_sock = accepted_sock = None
@@ -780,7 +773,7 @@
cli_sock.connect( (HOST, PORT) )
accepted_sock = server_sock.accept()[0]
retrieved_option_value = accepted_sock.getsockopt(level, option)
- msg = "Option value '(%s,%s)'='%s' did not propagate to accepted socket: got %s" % (level, option, values[-1], retrieved_option_value)
+ msg = "TCP client inherited option value '(%s,%s)'='%s' did not propagate to accepted socket: got %s" % (level, option, values[-1], retrieved_option_value)
if option == socket.SO_RCVBUF:
# NOTE: see similar bsd/solaris workaround above
self.assert_(retrieved_option_value >= values[-1], msg)
@@ -789,6 +782,7 @@
self._testSetAndGetOption(accepted_sock, level, option, values)
finally:
server_sock.close()
+ time.sleep(1)
if cli_sock:
cli_sock.close()
if accepted_sock:
@@ -803,7 +797,7 @@
sock.bind( (HOST, PORT) )
sock.listen(50)
retrieved_option_value = sock.getsockopt(level, option)
- msg = "Option value '(%s,%s)'='%s' did not propagate to implementation socket. Got %s" % (level, option, values[-1], retrieved_option_value)
+ msg = "TCP server option value '(%s,%s)'='%s' did not propagate to implementation socket. Got %s" % (level, option, values[-1], retrieved_option_value)
if option == socket.SO_RCVBUF:
# NOTE: see similar bsd/solaris workaround above
self.assert_(retrieved_option_value >= values[-1], msg)
@@ -849,19 +843,20 @@
self._testOption(socket.SOL_SOCKET, socket.SO_KEEPALIVE, [0, 1])
self._testInheritedOption(socket.SOL_SOCKET, socket.SO_KEEPALIVE, [0, 1])
- def testSO_LINGER(self):
- self.test_tcp_client = 1
- self.test_tcp_server = 1
- off = struct.pack('ii', 0, 0)
- on_2_seconds = struct.pack('ii', 1, 2)
- self._testOption(socket.SOL_SOCKET, socket.SO_LINGER, [off, on_2_seconds])
- self._testInheritedOption(socket.SOL_SOCKET, socket.SO_LINGER, [off, on_2_seconds])
+ # def testSO_LINGER(self):
+ # self.test_tcp_client = 1
+ # self.test_tcp_server = 1
+ # off = struct.pack('ii', 0, 0)
+ # on_2_seconds = struct.pack('ii', 1, 2)
+ # self._testOption(socket.SOL_SOCKET, socket.SO_LINGER, [off, on_2_seconds])
+ # self._testInheritedOption(socket.SOL_SOCKET, socket.SO_LINGER, [off, on_2_seconds])
- def testSO_OOBINLINE(self):
- self.test_tcp_client = 1
- self.test_tcp_server = 1
- self._testOption(socket.SOL_SOCKET, socket.SO_OOBINLINE, [0, 1])
- self._testInheritedOption(socket.SOL_SOCKET, socket.SO_OOBINLINE, [0, 1])
+ # # WILL NOT FIX
+ # def testSO_OOBINLINE(self):
+ # self.test_tcp_client = 1
+ # self.test_tcp_server = 1
+ # self._testOption(socket.SOL_SOCKET, socket.SO_OOBINLINE, [0, 1])
+ # self._testInheritedOption(socket.SOL_SOCKET, socket.SO_OOBINLINE, [0, 1])
def testSO_RCVBUF(self):
self.test_udp = 1
@@ -889,8 +884,7 @@
self.test_tcp_client = 1
self.test_tcp_server = 1
self._testOption(socket.SOL_SOCKET, socket.SO_TIMEOUT, [0, 1, 1000])
- # We don't test inheritance here because both server and client sockets have SO_TIMEOUT
- # but it doesn't inherit.
+ self._testInheritedOption(socket.SOL_SOCKET, socket.SO_TIMEOUT, [0, 1, 1000])
def testTCP_NODELAY(self):
self.test_tcp_client = 1
@@ -921,18 +915,26 @@
self.fail("getsocket(SO_ACCEPTCONN) on valid socket type should not have raised exception: %s" % (str(se)))
def testSO_ERROR(self):
- for socket_type in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:
- s = socket.socket(socket.AF_INET, socket_type)
- self.failUnlessEqual(s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR), 0)
- try:
- # Now cause an error
- s.connect(("localhost", 100000))
- self.fail("Operation '%s' that should have failed to generate SO_ERROR did not" % operation)
- except socket.error, se:
- so_error = s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- self.failUnlessEqual(so_error, se[0])
- # Now retrieve the option again - it should be zero
- self.failUnlessEqual(s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR), 0)
+ good = bad = None
+
+ try:
+ good = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ good.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ good.bind((HOST, PORT))
+ good.listen(1)
+ bad = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ bad.bind((HOST, PORT))
+ bad.listen(1)
+ self.fail("Listen operation against same port did not generate an expected error")
+ except socket.error, se:
+ self.failUnlessEqual(bad.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR), se[0])
+ # try again, should now be reset
+ self.failUnlessEqual(bad.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR), 0)
+ finally:
+ if good is not None:
+ good.close()
+ if bad is not None:
+ bad.close()
def testSO_TYPE(self):
for socket_type in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:
@@ -1041,7 +1043,7 @@
def testFromFd(self):
# Testing fromfd()
if not hasattr(socket, "fromfd"):
- return # On Windows, this doesn't exist
+ return # On Windows or Jython, this doesn't exist
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
msg = sock.recv(1024)
@@ -1081,6 +1083,7 @@
dup_conn = self.cli_conn.dup()
msg = dup_conn.recv(len('and ' + MSG))
self.assertEqual(msg, 'and ' + MSG)
+ dup_conn.close() # need to ensure all sockets are closed
def _testDup(self):
self.serv_conn.send(MSG)
@@ -1127,7 +1130,7 @@
def testSendtoAndRecvTimeoutMode(self):
# Need to test again in timeout mode, which follows
# a different code path
- self.serv.settimeout(10)
+ self.serv.settimeout(1)
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
@@ -1147,14 +1150,15 @@
def testSendAndRecvTimeoutMode(self):
# Need to test again in timeout mode, which follows
# a different code path
- self.serv.settimeout(10)
+ self.serv.settimeout(5)
# Testing send() and recv() over connect'ed UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendAndRecvTimeoutMode(self):
self.cli.connect( (self.HOST, self.PORT) )
- self.cli.settimeout(10)
+ self.cli.settimeout(5)
+ time.sleep(1)
self.cli.send(MSG, 0)
def testRecvFrom(self):
@@ -1168,12 +1172,12 @@
def testRecvFromTimeoutMode(self):
# Need to test again in timeout mode, which follows
# a different code path
- self.serv.settimeout(10)
+ self.serv.settimeout(1)
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFromTimeoutMode(self):
- self.cli.settimeout(10)
+ self.cli.settimeout(1)
self.cli.sendto(MSG, 0, (self.HOST, self.PORT))
def testSendtoEightBitSafe(self):
@@ -1242,13 +1246,6 @@
end = time.time()
self.assert_((end - start) < 1.0, "Error setting non-blocking mode.")
- def testGetBlocking(self):
- # Testing whether set blocking works
- self.serv.setblocking(0)
- self.failUnless(not self.serv.getblocking(), "Getblocking return true instead of false")
- self.serv.setblocking(1)
- self.failUnless(self.serv.getblocking(), "Getblocking return false instead of true")
-
def testAcceptNoConnection(self):
# Testing non-blocking accept returns immediately when no connection
self.serv.setblocking(0)
@@ -1267,21 +1264,22 @@
def testAcceptConnection(self):
# Testing non-blocking accept works when connection present
self.serv.setblocking(0)
- read, write, err = select.select([self.serv], [], [])
+
+ # this can potentially race with the client, so we need to loop
+ while True:
+ read, write, err = select.select([self.serv], [], [], 0.1)
+ if read or write or err:
+ break
if self.serv in read:
conn, addr = self.serv.accept()
+ conn.close()
else:
self.fail("Error trying to do accept after select: server socket was not in 'read'able list")
def _testAcceptConnection(self):
# Make a connection to the server
self.cli.connect((self.HOST, self.PORT))
-
- #
- # AMAK: 20070311
- # Introduced a new test for non-blocking connect
- # Renamed old testConnect to testBlockingConnect
- #
+ time.sleep(1)
def testBlockingConnect(self):
# Testing blocking connect
@@ -1300,21 +1298,20 @@
# Testing non-blocking connect
self.cli.setblocking(0)
result = self.cli.connect_ex((self.HOST, self.PORT))
- rfds, wfds, xfds = select.select([], [self.cli], [])
+ while True:
+ rfds, wfds, xfds = select.select([self.cli], [self.cli], [], 0.1)
+ if rfds or wfds or xfds:
+ break
self.failUnless(self.cli in wfds)
try:
self.cli.send(MSG)
except socket.error:
self.fail("Sending on connected socket should not have raised socket.error")
- #
- # AMAK: 20070518
- # Introduced a new test for connect with bind to specific local address
- #
-
def testConnectWithLocalBind(self):
# Test blocking connect
conn, addr = self.serv.accept()
+ conn.close() # Closing the server socket does not close this client socket
def _testConnectWithLocalBind(self):
# Testing blocking connect with local bind
@@ -1342,18 +1339,28 @@
def testRecvData(self):
# Testing non-blocking recv
- conn, addr = self.serv.accept()
- conn.setblocking(0)
- rfds, wfds, xfds = select.select([conn], [], [])
- if conn in rfds:
- msg = conn.recv(len(MSG))
- self.assertEqual(msg, MSG)
- else:
- self.fail("Non-blocking socket with data should been in read list.")
+ conn, addr = self.serv.accept() # server socket is blocking
+ conn.setblocking(0) # but now the child socket is not
+
+ try:
+ # this can potentially race with the client, so we need to loop
+ while True:
+ rfds, wfds, xfds = select.select([conn], [], [], 0.1)
+ if rfds or wfds or xfds:
+ break
+
+ if conn in rfds:
+ msg = conn.recv(len(MSG))
+ self.assertEqual(msg, MSG)
+ else:
+ self.fail("Non-blocking socket with data should been in read list.")
+ finally:
+ conn.close()
def _testRecvData(self):
self.cli.connect((self.HOST, self.PORT))
self.cli.send(MSG)
+ #time.sleep(0.5)
def testRecvNoData(self):
# Testing non-blocking recv
@@ -1365,10 +1372,13 @@
pass
else:
self.fail("Non-blocking recv of no data should have raised socket.error.")
+ finally:
+ conn.close()
def _testRecvNoData(self):
self.cli.connect((self.HOST, self.PORT))
- time.sleep(0.1)
+ time.sleep(1) # Without a sleep, we may not see the connect, because the channel will be closed
+
class NonBlockingUDPTests(ThreadedUDPSocketTest): pass
@@ -1453,6 +1463,7 @@
self.failUnlessEqual(se[0], errno.EBADF)
else:
self.fail("Original socket did not close")
+
try:
self.dup_conn1.send(MSG)
except socket.error, se:
@@ -1541,7 +1552,7 @@
def testClosedAttr(self):
self.assert_(not self.serv_file.closed)
-
+
def _testClosedAttr(self):
self.assert_(not self.cli_file.closed)
@@ -1627,7 +1638,7 @@
def testConnectTimeout(self):
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cli.settimeout(0.1)
- host = '192.168.192.168'
+ host = '192.0.2.42' # address in TEST-NET-1, guaranteed to not be routeable
try:
cli.connect((host, 5000))
except socket.timeout, st:
@@ -1643,7 +1654,7 @@
_saved_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.1)
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- host = '192.168.192.168'
+ host = '192.0.2.42' # address in TEST-NET-1, guaranteed to not be routeable
try:
cli.connect((host, 5000))
except socket.timeout, st:
@@ -1654,24 +1665,22 @@
self.fail('''Client socket timeout should have raised
socket.timeout. This tries to connect to %s in the assumption that it isn't
used, but if it is on your network this failure is bogus.''' % host)
- socket.setdefaulttimeout(_saved_timeout)
+ finally:
+ socket.setdefaulttimeout(_saved_timeout)
def testRecvTimeout(self):
def raise_timeout(*args, **kwargs):
cli_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- cli_sock.connect( (HOST, PORT) )
+ cli_sock.connect( (self.HOST, self.PORT) )
cli_sock.settimeout(1)
cli_sock.recv(1024)
self.failUnlessRaises(socket.timeout, raise_timeout,
"TCP socket recv failed to generate a timeout exception (TCP)")
- # Disable this test, but leave it present for documentation purposes
- # socket timeouts only work for read and accept, not for write
- # http://java.sun.com/j2se/1.4.2/docs/api/java/net/SocketTimeoutException.html
def estSendTimeout(self):
def raise_timeout(*args, **kwargs):
cli_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- cli_sock.connect( (HOST, PORT) )
+ cli_sock.connect( (self.HOST, self.PORT) )
# First fill the socket
cli_sock.settimeout(1)
sent = 0
@@ -1683,7 +1692,7 @@
def testSwitchModes(self):
cli_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- cli_sock.connect( (HOST, PORT) )
+ cli_sock.connect( (self.HOST, self.PORT) )
# set non-blocking mode
cli_sock.setblocking(0)
# then set timeout mode
@@ -1695,11 +1704,6 @@
else:
pass
-#
-# AMAK: 20070307
-# Corrected the superclass of UDPTimeoutTest
-#
-
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
@@ -1812,15 +1816,6 @@
self.failUnlessEqual(expected_canonname, canonname, "For hostname '%s' and flags %d, canonname '%s' != '%s'" % (host_param, flags, expected_canonname, canonname) )
self.failUnlessEqual(expected_sockaddr, sockaddr[0], "For hostname '%s' and flags %d, sockaddr '%s' != '%s'" % (host_param, flags, expected_sockaddr, sockaddr[0]) )
- def testIPV4AddressesOnly(self):
- socket._use_ipv4_addresses_only(True)
- def doAddressTest(addrinfos):
- for family, socktype, proto, canonname, sockaddr in addrinfos:
- self.failIf(":" in sockaddr[0], "Incorrectly received IPv6 address '%s'" % (sockaddr[0]) )
- doAddressTest(socket.getaddrinfo("localhost", 0, socket.AF_INET6, socket.SOCK_STREAM, 0, 0))
- doAddressTest(socket.getaddrinfo("localhost", 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, 0))
- socket._use_ipv4_addresses_only(False)
-
def testAddrTupleTypes(self):
ipv4_address_tuple = socket.getaddrinfo("localhost", 80, socket.AF_INET, socket.SOCK_STREAM, 0, 0)[0][4]
self.failUnlessEqual(ipv4_address_tuple[0], "127.0.0.1")
@@ -1966,21 +1961,28 @@
for address, flags, expected in [
( ("127.0.0.1", 25), 0, "smtp" ),
( ("127.0.0.1", 25), socket.NI_NUMERICSERV, 25 ),
- ( ("127.0.0.1", 513), socket.NI_DGRAM, "who" ),
- ( ("127.0.0.1", 513), 0, "login"),
+
+ # This portion of the test does not succeed on OS X;
+ # the above entries probably suffice
+ # ( ("127.0.0.1", 513), socket.NI_DGRAM, "who" ),
+ # ( ("127.0.0.1", 513), 0, "login"),
]:
result = socket.getnameinfo(address, flags)
self.failUnlessEqual(result[1], expected)
- def testHost(self):
- for address, flags, expected in [
- ( ("www.python.org", 80), 0, "dinsdale.python.org"),
- ( ("www.python.org", 80), socket.NI_NUMERICHOST, "82.94.164.162" ),
- ( ("www.python.org", 80), socket.NI_NAMEREQD, "dinsdale.python.org"),
- ( ("82.94.164.162", 80), socket.NI_NAMEREQD, "dinsdale.python.org"),
- ]:
- result = socket.getnameinfo(address, flags)
- self.failUnlessEqual(result[0], expected)
+
+ # This test currently fails due to the recent changes (as of March 2014) at python.org:
+ # TBD perhaps there are well-known addresses that guarantee stable resolution
+
+ # def testHost(self):
+ # for address, flags, expected in [
+ # ( ("www.python.org", 80), 0, "dinsdale.python.org"),
+ # ( ("www.python.org", 80), socket.NI_NUMERICHOST, "82.94.164.162" ),
+ # ( ("www.python.org", 80), socket.NI_NAMEREQD, "dinsdale.python.org"),
+ # ( ("82.94.164.162", 80), socket.NI_NAMEREQD, "dinsdale.python.org"),
+ # ]:
+ # result = socket.getnameinfo(address, flags)
+ # self.failUnlessEqual(result[0], expected)
def testNI_NAMEREQD(self):
# This test may delay for some seconds
@@ -2069,7 +2071,7 @@
( socket.AF_INET, 0, socket.AI_PASSIVE, ("", 80), [socket.INADDR_ANY]),
( socket.AF_INET6, 0, 0, ("", 80), ["localhost"]),
( socket.AF_INET6, 0, socket.AI_PASSIVE, ("", 80), [socket.IN6ADDR_ANY_INIT, "0:0:0:0:0:0:0:0"]),
- ( socket.AF_INET, socket.SOCK_DGRAM, 0, ("<broadcast>", 80), [socket.INADDR_BROADCAST]),
+ ( socket.AF_INET, socket.SOCK_DGRAM, 0, ("<broadcast>", 80), ["broadcasthost"]),
]:
sockaddr = socket._get_jsockaddr(addr_tuple, family, sock_type, 0, flags)
self.failUnless(sockaddr.hostName in expected, "_get_jsockaddr returned wrong hostname '%s' for special hostname '%s'(family=%d)" % (sockaddr.hostName, addr_tuple[0], family))
@@ -2152,16 +2154,6 @@
else:
self.fail("Send on unconnected socket raised exception")
- def testSocketNotBound(self):
- try:
- result = self.s.recv(1024)
- except socket.error, se:
- self.failUnlessEqual(se[0], errno.ENOTCONN)
- except Exception, x:
- self.fail("Receive on unbound socket raised wrong exception: %s" % x)
- else:
- self.fail("Receive on unbound socket raised exception")
-
def testClosedSocket(self):
self.s.close()
try:
@@ -2215,6 +2207,17 @@
else:
self.fail("Binding to already bound host/port should have raised exception")
+ def testSocketNotBound(self):
+ try:
+ result = self.s.recv(1024)
+ except socket.error, se:
+ self.failUnlessEqual(se[0], errno.ENOTCONN)
+ except Exception, x:
+ self.fail("Receive on unbound socket raised wrong exception: %s" % x)
+ else:
+ self.fail("Receive on unbound socket raised exception")
+
+
class TestJythonUDPExceptions(TestJythonExceptionsShared, unittest.TestCase):
def setUp(self):
@@ -2358,7 +2361,7 @@
try:
self.s.getsockname()
except socket.error, se:
- if se[0] == errno.EINVAL:
+ if se[0] == errno.ENOTCONN:
return
self.fail("getsockname() on unconnected socket should have raised socket.error")
@@ -2451,6 +2454,9 @@
try:
self.s.getpeername()
except socket.error, se:
+ # FIXME Apparently Netty doesn't set remoteAddress, even if connected, for datagram channels
+ # so we may have to shadow
+ print "\n\n\ngetpeername()", self.s._sock.channel
self.fail("getpeername() on connected UDP socket should not have raised socket.error")
self.failUnlessEqual(self.s.getpeername(), self._udp_peer.getsockname())
finally:
@@ -2491,12 +2497,15 @@
TestGetSockAndPeerNameTCPServer,
TestGetSockAndPeerNameUDP,
]
+
if hasattr(socket, "socketpair"):
tests.append(BasicSocketPairTest)
+
if sys.platform[:4] == 'java':
tests.append(TestJythonTCPExceptions)
tests.append(TestJythonUDPExceptions)
tests.append(TestJython_get_jsockaddr)
+
# TODO: Broadcast requires permission, and is blocked by some firewalls
# Need some way to discover the network setup on the test machine
if False:
diff --git a/Lib/test/test_socket_ssl.py b/Lib/test/test_socket_ssl.py
deleted file mode 100644
--- a/Lib/test/test_socket_ssl.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Test just the SSL support in the socket module, in a moderately bogus way.
-
-from test import test_support
-import socket
-import time
-
-# Optionally test SSL support. This requires the 'network' resource as given
-# on the regrtest command line.
-skip_expected = not (test_support.is_resource_enabled('network') and
- hasattr(socket, "ssl"))
-
-def test_basic():
- test_support.requires('network')
-
- import urllib
-
- socket.RAND_status()
- try:
- socket.RAND_egd(1)
- except TypeError:
- pass
- else:
- print "didn't raise TypeError"
- socket.RAND_add("this is a random string", 75.0)
-
- f = urllib.urlopen('https://sf.net')
- buf = f.read()
- f.close()
-
-def test_rude_shutdown():
- # This test deadlocks, see http://bugs.jython.org/issue1049
- if test_support.is_jython:
- return
- try:
- import thread
- except ImportError:
- return
-
- # some random port to connect to
- PORT = 9934
- def listener():
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.bind(('', PORT))
- s.listen(5)
- s.accept()
- del s
- thread.exit()
-
- def connector():
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.connect(('localhost', PORT))
- try:
- ssl_sock = socket.ssl(s)
- except socket.sslerror:
- pass
- else:
- raise test_support.TestFailed, \
- 'connecting to closed SSL socket failed'
-
- thread.start_new_thread(listener, ())
- time.sleep(1)
- connector()
-
-def test_https_socket():
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.connect(('www.verisign.com', 443))
- ssl_sock = socket.ssl(s)
- ssl_sock.server()
- ssl_sock.issuer()
- s.close()
-
-def test_main():
- if not hasattr(socket, "ssl"):
- raise test_support.TestSkipped("socket module has no ssl support")
- test_rude_shutdown()
- test_basic()
- test_https_socket()
-
-if __name__ == "__main__":
- test_main()
diff --git a/Lib/test/test_socketserver.py b/Lib/test/test_socketserver.py
--- a/Lib/test/test_socketserver.py
+++ b/Lib/test/test_socketserver.py
@@ -33,8 +33,7 @@
signal.alarm(n)
select_fn = select.select
-if test.test_support.is_jython:
- select_fn = select.cpython_compatible_select
+
def receive(sock, n, timeout=20):
r, w, x = select_fn([sock], [], [], timeout)
diff --git a/Lib/test/test_urllib2_localnet.py b/Lib/test/test_urllib2_localnet.py
deleted file mode 100644
--- a/Lib/test/test_urllib2_localnet.py
+++ /dev/null
@@ -1,557 +0,0 @@
-#!/usr/bin/env python
-
-import urlparse
-import urllib2
-import BaseHTTPServer
-import unittest
-import hashlib
-
-from test import test_support
-
-if test_support.is_jython:
- import socket
- # Working around an IPV6 problem on Windows
- socket._use_ipv4_addresses_only(True)
-
-mimetools = test_support.import_module('mimetools', deprecated=True)
-threading = test_support.import_module('threading')
-
-# Loopback http server infrastructure
-
-class LoopbackHttpServer(BaseHTTPServer.HTTPServer):
- """HTTP server w/ a few modifications that make it useful for
- loopback testing purposes.
- """
-
- def __init__(self, server_address, RequestHandlerClass):
- BaseHTTPServer.HTTPServer.__init__(self,
- server_address,
- RequestHandlerClass,
- True)
-
- host, port = self.socket.getsockname()[:2]
- self.server_name = socket.getfqdn(host)
- self.server_port = port
-
- # Set the timeout of our listening socket really low so
- # that we can stop the server easily.
- self.socket.settimeout(1.0)
-
- def get_request(self):
- """BaseHTTPServer method, overridden."""
-
- request, client_address = self.socket.accept()
-
- # It's a loopback connection, so setting the timeout
- # really low shouldn't affect anything, but should make
- # deadlocks less likely to occur.
- request.settimeout(10.0)
-
- return (request, client_address)
-
-class LoopbackHttpServerThread(threading.Thread):
- """Stoppable thread that runs a loopback http server."""
-
- def __init__(self, request_handler):
- threading.Thread.__init__(self)
- self._stop = False
- self.ready = threading.Event()
- request_handler.protocol_version = "HTTP/1.0"
- self.httpd = LoopbackHttpServer(('127.0.0.1', 0),
- request_handler)
- #print "Serving HTTP on %s port %s" % (self.httpd.server_name,
- # self.httpd.server_port)
- self.port = self.httpd.server_port
-
- def stop(self):
- """Stops the webserver if it's currently running."""
-
- # Set the stop flag.
- self._stop = True
-
- self.join()
-
- def run(self):
- self.ready.set()
- while not self._stop:
- self.httpd.handle_request()
-
-# Authentication infrastructure
-
-class DigestAuthHandler:
- """Handler for performing digest authentication."""
-
- def __init__(self):
- self._request_num = 0
- self._nonces = []
- self._users = {}
- self._realm_name = "Test Realm"
- self._qop = "auth"
-
- def set_qop(self, qop):
- self._qop = qop
-
- def set_users(self, users):
- assert isinstance(users, dict)
- self._users = users
-
- def set_realm(self, realm):
- self._realm_name = realm
-
- def _generate_nonce(self):
- self._request_num += 1
- nonce = hashlib.md5(str(self._request_num)).hexdigest()
- self._nonces.append(nonce)
- return nonce
-
- def _create_auth_dict(self, auth_str):
- first_space_index = auth_str.find(" ")
- auth_str = auth_str[first_space_index+1:]
-
- parts = auth_str.split(",")
-
- auth_dict = {}
- for part in parts:
- name, value = part.split("=")
- name = name.strip()
- if value[0] == '"' and value[-1] == '"':
- value = value[1:-1]
- else:
- value = value.strip()
- auth_dict[name] = value
- return auth_dict
-
- def _validate_auth(self, auth_dict, password, method, uri):
- final_dict = {}
- final_dict.update(auth_dict)
- final_dict["password"] = password
- final_dict["method"] = method
- final_dict["uri"] = uri
- HA1_str = "%(username)s:%(realm)s:%(password)s" % final_dict
- HA1 = hashlib.md5(HA1_str).hexdigest()
- HA2_str = "%(method)s:%(uri)s" % final_dict
- HA2 = hashlib.md5(HA2_str).hexdigest()
- final_dict["HA1"] = HA1
- final_dict["HA2"] = HA2
- response_str = "%(HA1)s:%(nonce)s:%(nc)s:" \
- "%(cnonce)s:%(qop)s:%(HA2)s" % final_dict
- response = hashlib.md5(response_str).hexdigest()
-
- return response == auth_dict["response"]
-
- def _return_auth_challenge(self, request_handler):
- request_handler.send_response(407, "Proxy Authentication Required")
- request_handler.send_header("Content-Type", "text/html")
- request_handler.send_header(
- 'Proxy-Authenticate', 'Digest realm="%s", '
- 'qop="%s",'
- 'nonce="%s", ' % \
- (self._realm_name, self._qop, self._generate_nonce()))
- # XXX: Not sure if we're supposed to add this next header or
- # not.
- #request_handler.send_header('Connection', 'close')
- request_handler.end_headers()
- request_handler.wfile.write("Proxy Authentication Required.")
- return False
-
- def handle_request(self, request_handler):
- """Performs digest authentication on the given HTTP request
- handler. Returns True if authentication was successful, False
- otherwise.
-
- If no users have been set, then digest auth is effectively
- disabled and this method will always return True.
- """
-
- if len(self._users) == 0:
- return True
-
- if 'Proxy-Authorization' not in request_handler.headers:
- return self._return_auth_challenge(request_handler)
- else:
- auth_dict = self._create_auth_dict(
- request_handler.headers['Proxy-Authorization']
- )
- if auth_dict["username"] in self._users:
- password = self._users[ auth_dict["username"] ]
- else:
- return self._return_auth_challenge(request_handler)
- if not auth_dict.get("nonce") in self._nonces:
- return self._return_auth_challenge(request_handler)
- else:
- self._nonces.remove(auth_dict["nonce"])
-
- auth_validated = False
-
- # MSIE uses short_path in its validation, but Python's
- # urllib2 uses the full path, so we're going to see if
- # either of them works here.
-
- for path in [request_handler.path, request_handler.short_path]:
- if self._validate_auth(auth_dict,
- password,
- request_handler.command,
- path):
- auth_validated = True
-
- if not auth_validated:
- return self._return_auth_challenge(request_handler)
- return True
-
-# Proxy test infrastructure
-
-class FakeProxyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
- """This is a 'fake proxy' that makes it look like the entire
- internet has gone down due to a sudden zombie invasion. It main
- utility is in providing us with authentication support for
- testing.
- """
-
- def __init__(self, digest_auth_handler, *args, **kwargs):
- # This has to be set before calling our parent's __init__(), which will
- # try to call do_GET().
- self.digest_auth_handler = digest_auth_handler
- BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
-
- def log_message(self, format, *args):
- # Uncomment the next line for debugging.
- #sys.stderr.write(format % args)
- pass
-
- def do_GET(self):
- (scm, netloc, path, params, query, fragment) = urlparse.urlparse(
- self.path, 'http')
- self.short_path = path
- if self.digest_auth_handler.handle_request(self):
- self.send_response(200, "OK")
- self.send_header("Content-Type", "text/html")
- self.end_headers()
- self.wfile.write("You've reached %s!<BR>" % self.path)
- self.wfile.write("Our apologies, but our server is down due to "
- "a sudden zombie invasion.")
-
-# Test cases
-
-class BaseTestCase(unittest.TestCase):
- def setUp(self):
- self._threads = test_support.threading_setup()
-
- def tearDown(self):
- test_support.threading_cleanup(*self._threads)
-
-
-class ProxyAuthTests(BaseTestCase):
- URL = "http://localhost"
-
- USER = "tester"
- PASSWD = "test123"
- REALM = "TestRealm"
-
- def setUp(self):
- super(ProxyAuthTests, self).setUp()
- self.digest_auth_handler = DigestAuthHandler()
- self.digest_auth_handler.set_users({self.USER: self.PASSWD})
- self.digest_auth_handler.set_realm(self.REALM)
- def create_fake_proxy_handler(*args, **kwargs):
- return FakeProxyHandler(self.digest_auth_handler, *args, **kwargs)
-
- self.server = LoopbackHttpServerThread(create_fake_proxy_handler)
- self.server.start()
- self.server.ready.wait()
- proxy_url = "http://127.0.0.1:%d" % self.server.port
- handler = urllib2.ProxyHandler({"http" : proxy_url})
- self.proxy_digest_handler = urllib2.ProxyDigestAuthHandler()
- self.opener = urllib2.build_opener(handler, self.proxy_digest_handler)
-
- def tearDown(self):
- self.server.stop()
- super(ProxyAuthTests, self).tearDown()
-
- def test_proxy_with_bad_password_raises_httperror(self):
- self.proxy_digest_handler.add_password(self.REALM, self.URL,
- self.USER, self.PASSWD+"bad")
- self.digest_auth_handler.set_qop("auth")
- self.assertRaises(urllib2.HTTPError,
- self.opener.open,
- self.URL)
-
- def test_proxy_with_no_password_raises_httperror(self):
- self.digest_auth_handler.set_qop("auth")
- self.assertRaises(urllib2.HTTPError,
- self.opener.open,
- self.URL)
-
- def test_proxy_qop_auth_works(self):
- self.proxy_digest_handler.add_password(self.REALM, self.URL,
- self.USER, self.PASSWD)
- self.digest_auth_handler.set_qop("auth")
- result = self.opener.open(self.URL)
- while result.read():
- pass
- result.close()
-
- def test_proxy_qop_auth_int_works_or_throws_urlerror(self):
- self.proxy_digest_handler.add_password(self.REALM, self.URL,
- self.USER, self.PASSWD)
- self.digest_auth_handler.set_qop("auth-int")
- try:
- result = self.opener.open(self.URL)
- except urllib2.URLError:
- # It's okay if we don't support auth-int, but we certainly
- # shouldn't receive any kind of exception here other than
- # a URLError.
- result = None
- if result:
- while result.read():
- pass
- result.close()
-
-
-def GetRequestHandler(responses):
-
- class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
-
- server_version = "TestHTTP/"
- requests = []
- headers_received = []
- port = 80
-
- def do_GET(self):
- body = self.send_head()
- if body:
- self.wfile.write(body)
-
- def do_POST(self):
- content_length = self.headers['Content-Length']
- post_data = self.rfile.read(int(content_length))
- self.do_GET()
- self.requests.append(post_data)
-
- def send_head(self):
- FakeHTTPRequestHandler.headers_received = self.headers
- self.requests.append(self.path)
- response_code, headers, body = responses.pop(0)
-
- self.send_response(response_code)
-
- for (header, value) in headers:
- self.send_header(header, value % self.port)
- if body:
- self.send_header('Content-type', 'text/plain')
- self.end_headers()
- return body
- self.end_headers()
-
- def log_message(self, *args):
- pass
-
-
- return FakeHTTPRequestHandler
-
-
-class TestUrlopen(BaseTestCase):
- """Tests urllib2.urlopen using the network.
-
- These tests are not exhaustive. Assuming that testing using files does a
- good job overall of some of the basic interface features. There are no
- tests exercising the optional 'data' and 'proxies' arguments. No tests
- for transparent redirection have been written.
- """
-
- def setUp(self):
- proxy_handler = urllib2.ProxyHandler({})
- opener = urllib2.build_opener(proxy_handler)
- urllib2.install_opener(opener)
- super(TestUrlopen, self).setUp()
-
- def start_server(self, responses):
- handler = GetRequestHandler(responses)
-
- self.server = LoopbackHttpServerThread(handler)
- self.server.start()
- self.server.ready.wait()
- port = self.server.port
- handler.port = port
- return handler
-
-
- def test_redirection(self):
- expected_response = 'We got here...'
- responses = [
- (302, [('Location', 'http://localhost:%s/somewhere_else')], ''),
- (200, [], expected_response)
- ]
-
- handler = self.start_server(responses)
-
- try:
- f = urllib2.urlopen('http://localhost:%s/' % handler.port)
- data = f.read()
- f.close()
-
- self.assertEqual(data, expected_response)
- self.assertEqual(handler.requests, ['/', '/somewhere_else'])
- finally:
- self.server.stop()
-
-
- def test_404(self):
- expected_response = 'Bad bad bad...'
- handler = self.start_server([(404, [], expected_response)])
-
- try:
- try:
- urllib2.urlopen('http://localhost:%s/weeble' % handler.port)
- except urllib2.URLError, f:
- pass
- else:
- self.fail('404 should raise URLError')
-
- data = f.read()
- f.close()
-
- self.assertEqual(data, expected_response)
- self.assertEqual(handler.requests, ['/weeble'])
- finally:
- self.server.stop()
-
-
- def test_200(self):
- expected_response = 'pycon 2008...'
- handler = self.start_server([(200, [], expected_response)])
-
- try:
- f = urllib2.urlopen('http://localhost:%s/bizarre' % handler.port)
- data = f.read()
- f.close()
-
- self.assertEqual(data, expected_response)
- self.assertEqual(handler.requests, ['/bizarre'])
- finally:
- self.server.stop()
-
- def test_200_with_parameters(self):
- expected_response = 'pycon 2008...'
- handler = self.start_server([(200, [], expected_response)])
-
- try:
- f = urllib2.urlopen('http://localhost:%s/bizarre' % handler.port, 'get=with_feeling')
- data = f.read()
- f.close()
-
- self.assertEqual(data, expected_response)
- self.assertEqual(handler.requests, ['/bizarre', 'get=with_feeling'])
- finally:
- self.server.stop()
-
-
- def test_sending_headers(self):
- handler = self.start_server([(200, [], "we don't care")])
-
- try:
- req = urllib2.Request("http://localhost:%s/" % handler.port,
- headers={'Range': 'bytes=20-39'})
- urllib2.urlopen(req)
- self.assertEqual(handler.headers_received['Range'], 'bytes=20-39')
- finally:
- self.server.stop()
-
- def test_basic(self):
- handler = self.start_server([(200, [], "we don't care")])
-
- try:
- open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
- for attr in ("read", "close", "info", "geturl"):
- self.assertTrue(hasattr(open_url, attr), "object returned from "
- "urlopen lacks the %s attribute" % attr)
- try:
- self.assertTrue(open_url.read(), "calling 'read' failed")
- finally:
- open_url.close()
- finally:
- self.server.stop()
-
- def test_info(self):
- handler = self.start_server([(200, [], "we don't care")])
-
- try:
- open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
- info_obj = open_url.info()
- self.assertIsInstance(info_obj, mimetools.Message,
- "object returned by 'info' is not an "
- "instance of mimetools.Message")
- self.assertEqual(info_obj.getsubtype(), "plain")
- finally:
- self.server.stop()
-
- def test_geturl(self):
- # Make sure same URL as opened is returned by geturl.
- handler = self.start_server([(200, [], "we don't care")])
-
- try:
- open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
- url = open_url.geturl()
- self.assertEqual(url, "http://localhost:%s" % handler.port)
- finally:
- self.server.stop()
-
-
- def test_bad_address(self):
- # Make sure proper exception is raised when connecting to a bogus
- # address.
- self.assertRaises(IOError,
- # Given that both VeriSign and various ISPs have in
- # the past or are presently hijacking various invalid
- # domain name requests in an attempt to boost traffic
- # to their own sites, finding a domain name to use
- # for this test is difficult. RFC2606 leads one to
- # believe that '.invalid' should work, but experience
- # seemed to indicate otherwise. Single character
- # TLDs are likely to remain invalid, so this seems to
- # be the best choice. The trailing '.' prevents a
- # related problem: The normal DNS resolver appends
- # the domain names from the search path if there is
- # no '.' the end and, and if one of those domains
- # implements a '*' rule a result is returned.
- # However, none of this will prevent the test from
- # failing if the ISP hijacks all invalid domain
- # requests. The real solution would be to be able to
- # parameterize the framework with a mock resolver.
- urllib2.urlopen, "http://sadflkjsasf.i.nvali.d./")
-
- def test_iteration(self):
- expected_response = "pycon 2008..."
- handler = self.start_server([(200, [], expected_response)])
- try:
- data = urllib2.urlopen("http://localhost:%s" % handler.port)
- for line in data:
- self.assertEqual(line, expected_response)
- finally:
- self.server.stop()
-
- def ztest_line_iteration(self):
- lines = ["We\n", "got\n", "here\n", "verylong " * 8192 + "\n"]
- expected_response = "".join(lines)
- handler = self.start_server([(200, [], expected_response)])
- try:
- data = urllib2.urlopen("http://localhost:%s" % handler.port)
- for index, line in enumerate(data):
- self.assertEqual(line, lines[index],
- "Fetched line number %s doesn't match expected:\n"
- " Expected length was %s, got %s" %
- (index, len(lines[index]), len(line)))
- finally:
- self.server.stop()
- self.assertEqual(index + 1, len(lines))
-
-def test_main():
- # We will NOT depend on the network resource flag
- # (Lib/test/regrtest.py -u network) since all tests here are only
- # localhost. However, if this is a bad rationale, then uncomment
- # the next line.
- #test_support.requires("network")
-
- test_support.run_unittest(ProxyAuthTests, TestUrlopen)
-
-if __name__ == "__main__":
- test_main()
diff --git a/Lib/uu.py b/Lib/uu.py
deleted file mode 100755
--- a/Lib/uu.py
+++ /dev/null
@@ -1,211 +0,0 @@
-#! /usr/bin/env python
-
-# Copyright 1994 by Lance Ellinghouse
-# Cathedral City, California Republic, United States of America.
-# All Rights Reserved
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose and without fee is hereby granted,
-# provided that the above copyright notice appear in all copies and that
-# both that copyright notice and this permission notice appear in
-# supporting documentation, and that the name of Lance Ellinghouse
-# not be used in advertising or publicity pertaining to distribution
-# of the software without specific, written prior permission.
-# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
-# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
-# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-#
-# Modified by Jack Jansen, CWI, July 1995:
-# - Use binascii module to do the actual line-by-line conversion
-# between ascii and binary. This results in a 1000-fold speedup. The C
-# version is still 5 times faster, though.
-# - Arguments more compliant with python standard
-
-"""Implementation of the UUencode and UUdecode functions.
-
-encode(in_file, out_file [,name, mode])
-decode(in_file [, out_file, mode])
-"""
-
-import binascii
-import os
-import sys
-
-__all__ = ["Error", "encode", "decode"]
-
-class Error(Exception):
- pass
-
-def encode(in_file, out_file, name=None, mode=None):
- """Uuencode file"""
- #
- # If in_file is a pathname open it and change defaults
- #
-
- close_in_file = False
- close_out_file = False
-
- if in_file == '-':
- in_file = sys.stdin
- elif isinstance(in_file, basestring):
- if name is None:
- name = os.path.basename(in_file)
- if mode is None:
- try:
- mode = os.stat(in_file).st_mode
- except AttributeError:
- pass
- in_file = open(in_file, 'rb')
- close_in_file = True
- #
- # Open out_file if it is a pathname
- #
- if out_file == '-':
- out_file = sys.stdout
- elif isinstance(out_file, basestring):
- out_file = open(out_file, 'w')
- close_out_file = True
- #
- # Set defaults for name and mode
- #
- if name is None:
- name = '-'
- if mode is None:
- mode = 0666
- #
- # Write the data
- #
- out_file.write('begin %o %s\n' % ((mode&0777),name))
- data = in_file.read(45)
- while len(data) > 0:
- out_file.write(binascii.b2a_uu(data))
- data = in_file.read(45)
- out_file.write(' \nend\n')
-
- # Jython and other implementations require files to be explicitly
- # closed if we don't want to wait for GC
- if close_in_file:
- in_file.close()
- if close_out_file:
- out_file.close()
-
-def decode(in_file, out_file=None, mode=None, quiet=0):
- """Decode uuencoded file"""
-
- close_in_file = False
- close_out_file = False
-
- #
- # Open the input file, if needed.
- #
- if in_file == '-':
- in_file = sys.stdin
- elif isinstance(in_file, basestring):
- close_in_file = True
- in_file = open(in_file)
- #
- # Read until a begin is encountered or we've exhausted the file
- #
- while True:
- hdr = in_file.readline()
- if not hdr:
- raise Error('No valid begin line found in input file')
- if not hdr.startswith('begin'):
- continue
- hdrfields = hdr.split(' ', 2)
- if len(hdrfields) == 3 and hdrfields[0] == 'begin':
- try:
- int(hdrfields[1], 8)
- break
- except ValueError:
- pass
- if out_file is None:
- out_file = hdrfields[2].rstrip()
- if os.path.exists(out_file):
- raise Error('Cannot overwrite existing file: %s' % out_file)
- if mode is None:
- mode = int(hdrfields[1], 8)
- #
- # Open the output file
- #
- opened = False
- if out_file == '-':
- out_file = sys.stdout
- elif isinstance(out_file, basestring):
- close_out_file = True
- fp = open(out_file, 'wb')
- try:
- os.path.chmod(out_file, mode)
- except AttributeError:
- pass
- out_file = fp
- opened = True
- #
- # Main decoding loop
- #
- s = in_file.readline()
- while s and s.strip() != 'end':
- try:
- data = binascii.a2b_uu(s)
- except binascii.Error, v:
- # Workaround for broken uuencoders by /Fredrik Lundh
- nbytes = (((ord(s[0])-32) & 63) * 4 + 5) // 3
- data = binascii.a2b_uu(s[:nbytes])
- if not quiet:
- sys.stderr.write("Warning: %s\n" % v)
- out_file.write(data)
- s = in_file.readline()
- if not s:
- raise Error('Truncated input file')
- if opened:
- out_file.close()
-
- # Jython and other implementations require files to be explicitly
- # closed if we don't want to wait for GC
- if close_in_file:
- in_file.close()
- if close_out_file:
- out_file.close()
-
-def test():
- """uuencode/uudecode main program"""
-
- import optparse
- parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]')
- parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true')
- parser.add_option('-t', '--text', dest='text', help='data is text, encoded format unix-compatible text?', default=False, action='store_true')
-
- (options, args) = parser.parse_args()
- if len(args) > 2:
- parser.error('incorrect number of arguments')
- sys.exit(1)
-
- input = sys.stdin
- output = sys.stdout
- if len(args) > 0:
- input = args[0]
- if len(args) > 1:
- output = args[1]
-
- if options.decode:
- if options.text:
- if isinstance(output, basestring):
- output = open(output, 'w')
- else:
- print sys.argv[0], ': cannot do -t to stdout'
- sys.exit(1)
- decode(input, output)
- else:
- if options.text:
- if isinstance(input, basestring):
- input = open(input, 'r')
- else:
- print sys.argv[0], ': cannot do -t from stdin'
- sys.exit(1)
- encode(input, output)
-
-if __name__ == '__main__':
- test()
diff --git a/Lib/zlib.py b/Lib/zlib.py
--- a/Lib/zlib.py
+++ b/Lib/zlib.py
@@ -16,11 +16,11 @@
import array
import binascii
import jarray
+from cStringIO import StringIO
+from java.lang import Long, String, System
from java.util.zip import Adler32, Deflater, Inflater, DataFormatException
-from java.lang import Long, String
-from cStringIO import StringIO
class error(Exception):
pass
@@ -56,7 +56,6 @@
def crc32(string, value=0):
return binascii.crc32(string, value)
-
def compress(string, level=6):
if level < Z_BEST_SPEED or level > Z_BEST_COMPRESSION:
raise error, "Bad compression level"
@@ -77,7 +76,8 @@
finally:
inflater.end()
-class compressobj:
+
+class compressobj(object):
# all jython uses wbits for is deciding whether to skip the header if it's negative
def __init__(self, level=6, method=DEFLATED, wbits=MAX_WBITS,
memLevel=0, strategy=0):
@@ -108,14 +108,26 @@
self._ended = True
return last
-class decompressobj:
- # all jython uses wbits for is deciding whether to skip the header if it's negative
+
+class decompressobj(object):
+
def __init__(self, wbits=MAX_WBITS):
- if abs(wbits) > MAX_WBITS or abs(wbits) < 8:
+
+ # Jython only uses wbits to determine whether to skip the header if it's
+ # negative; but apparently there are some tests around this, so we do some
+ # bogus param checking
+
+ if abs(wbits) < 8:
raise ValueError, "Invalid initialization option"
+ if abs(wbits) > 16: # NOTE apparently this also implies being negative in CPython/zlib
+ wbits = -1
+
self.inflater = Inflater(wbits < 0)
+ self._ended = False
self.unused_data = ""
- self._ended = False
+ self.unconsumed_tail = ""
+ self.gzip = wbits < 0
+ self.gzip_header_skipped = False
def decompress(self, string, max_length=0):
if self._ended:
@@ -132,7 +144,13 @@
if max_length < 0:
raise ValueError("max_length must be a positive integer")
+ # Suppress gzip header if present and wbits < 0
+ if self.gzip and not self.gzip_header_skipped:
+ string = _skip_gzip_header(string)
+ self.gzip_header_skipped = True
+
string = _to_input(string)
+
self.inflater.setInput(string)
inflated = _get_inflate_data(self.inflater, max_length)
@@ -146,6 +164,7 @@
return inflated
def flush(self, length=None):
+ # FIXME close input streams if gzip
if self._ended:
raise error("decompressobj may not be used after flush()")
if length is None:
@@ -193,3 +212,52 @@
break
s.seek(0)
return s.read()
+
+
+
+FTEXT = 1
+FHCRC = 2
+FEXTRA = 4
+FNAME = 8
+FCOMMENT = 16
+
+def _skip_gzip_header(string):
+ # per format specified in http://tools.ietf.org/html/rfc1952
+
+ # could we use bytearray instead?
+ s = array.array("B", string)
+
+ id1 = s[0]
+ id2 = s[1]
+
+ # Check gzip magic
+ if id1 != 31 or id2 != 139:
+ return string
+
+ cm = s[2]
+ flg = s[3]
+ mtime = s[4:8]
+ xfl = s[8]
+ os = s[9]
+
+ # skip fixed header, then figure out variable parts
+ s = s[10:]
+
+ if flg & FEXTRA:
+ # skip extra field
+ xlen = s[0] + s[1] * 256 # MSB ordering
+ s = s[2 + xlen:]
+ if flg & FNAME:
+ # skip filename
+ s = s[s.find("\x00")+1:]
+ if flg & FCOMMENT:
+ # skip comment
+ s = s[s.find("\x00")+1:]
+ if flg & FHCRC:
+ # skip CRC16 for the header - might be nice to check of course
+ s = s[2:]
+
+ return s.tostring()
+
+
+
diff --git a/build.xml b/build.xml
--- a/build.xml
+++ b/build.xml
@@ -573,12 +573,26 @@
<zipfileset src="extlibs/asm-commons-4.0.jar"/>
<zipfileset src="extlibs/asm-util-4.0.jar"/>
<rule pattern="org.objectweb.asm.**" result="org.python.objectweb.asm. at 1"/>
+ <zipfileset src="extlibs/bcpkix-jdk15on-150.jar" excludes="META-INF/*.SF"/>
+ <rule pattern="org.bouncycastle.**" result="org.python.bouncycastle. at 1"/>
+ <zipfileset src="extlibs/bcprov-jdk15on-150.jar" excludes="META-INF/*.SF"/>
+ <rule pattern="org.bouncycastle.**" result="org.python.bouncycastle. at 1"/>
<zipfileset src="extlibs/commons-compress-1.4.1.jar"/>
<rule pattern="org.apache.**" result="org.python.apache. at 1"/>
<zipfileset src="extlibs/guava-13.0.1.jar"/>
<rule pattern="com.google.**" result="org.python.google. at 1"/>
<zipfileset src="extlibs/icu4j-52_1.jar"/>
<rule pattern="com.ibm.icu.**" result="org.python.icu. at 1"/>
+ <zipfileset src="extlibs/netty-buffer-4.0.18.Final.jar"/>
+ <rule pattern="io.netty.**" result="org.python.netty. at 1"/>
+ <zipfileset src="extlibs/netty-codec-4.0.18.Final.jar"/>
+ <rule pattern="io.netty.**" result="org.python.netty. at 1"/>
+ <zipfileset src="extlibs/netty-common-4.0.18.Final.jar"/>
+ <rule pattern="io.netty.**" result="org.python.netty. at 1"/>
+ <zipfileset src="extlibs/netty-handler-4.0.18.Final.jar"/>
+ <rule pattern="io.netty.**" result="org.python.netty. at 1"/>
+ <zipfileset src="extlibs/netty-transport-4.0.18.Final.jar"/>
+ <rule pattern="io.netty.**" result="org.python.netty. at 1"/>
<zipfileset src="extlibs/jffi-arm-Linux.jar"/>
<zipfileset src="extlibs/jffi-Darwin.jar"/>
<zipfileset src="extlibs/jffi-i386-FreeBSD.jar"/>
diff --git a/extlibs/bcpkix-jdk15on-150.jar b/extlibs/bcpkix-jdk15on-150.jar
new file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..5dc125fc4945a73e628fec34001503899e9c0b10
GIT binary patch
[stripped]
diff --git a/extlibs/bcprov-jdk15on-150.jar b/extlibs/bcprov-jdk15on-150.jar
new file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..d4b510d7894afa3775fdfb7ab2b3a704baf82c99
GIT binary patch
[stripped]
diff --git a/extlibs/netty-buffer-4.0.18.Final.jar b/extlibs/netty-buffer-4.0.18.Final.jar
new file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4ef63f516ba9c0dbb951c85a7b1382f9d3c1f23f
GIT binary patch
[stripped]
diff --git a/extlibs/netty-codec-4.0.18.Final.jar b/extlibs/netty-codec-4.0.18.Final.jar
new file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..248d26950900a16fa5612b06aaaf0a58d6b000fc
GIT binary patch
[stripped]
diff --git a/extlibs/netty-common-4.0.18.Final.jar b/extlibs/netty-common-4.0.18.Final.jar
new file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..ee7491c13be9228430aecbe358297f2aada76db1
GIT binary patch
[stripped]
diff --git a/extlibs/netty-handler-4.0.18.Final.jar b/extlibs/netty-handler-4.0.18.Final.jar
new file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..22eb114f569f9c6f5ca05cf4a935c9913f5c29d8
GIT binary patch
[stripped]
diff --git a/extlibs/netty-transport-4.0.18.Final.jar b/extlibs/netty-transport-4.0.18.Final.jar
new file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..133c0fa65c71555ed90c960bf95233167aacc4c1
GIT binary patch
[stripped]
--
Repository URL: http://hg.python.org/jython
More information about the Jython-checkins
mailing list