[Jython-checkins] jython: codecs.lookup will use Java encodings (via java.nio.charset.Charset)

jim.baker jython-checkins at python.org
Sat Jun 14 02:34:43 CEST 2014


http://hg.python.org/jython/rev/6c718e5e9ae9
changeset:   7290:6c718e5e9ae9
user:        Jim Baker <jim.baker at rackspace.com>
date:        Fri Jun 13 18:35:06 2014 -0600
summary:
  codecs.lookup will use Java encodings (via java.nio.charset.Charset)
if the Python equivalent is not available or fails to load.

Fixes http://bugs.jython.org/issue1066 by adding support for multibyte
codecs, including CJK codecs such as big5, shift_jis, and johab. Note
that some codecs, such as hz and euc_jisx0213, remain unsupported
because Java does not provide them.
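
To make the new behavior concrete, here is a minimal sketch of a Jython
session after this change (it assumes the JVM provides the Big5 charset,
as standard JDKs do):

    import codecs

    # Previously this raised LookupError on Jython: encodings.big5
    # fails to import (there is no _codecs_tw). It now falls back to
    # the Java charset through the new encodings._java bridge.
    info = codecs.lookup('big5')
    text = u'\u4e2d\u6587'
    data = text.encode('big5')
    assert data.decode('big5') == text   # round-trips through Java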

files:
  Lib/encodings/__init__.py               |  167 +++++
  Lib/encodings/_java.py                  |  248 +++++++
  Lib/test/test_codecs.py                 |   57 +-
  Lib/test/test_multibytecodec_support.py |  375 ++++++++++++
  4 files changed, 820 insertions(+), 27 deletions(-)


diff --git a/Lib/encodings/__init__.py b/Lib/encodings/__init__.py
new file mode 100644
--- /dev/null
+++ b/Lib/encodings/__init__.py
@@ -0,0 +1,167 @@
+""" Standard "encodings" Package
+
+    Standard Python encoding modules are stored in this package
+    directory.
+
+    Codec modules must have names corresponding to normalized encoding
+    names as defined in the normalize_encoding() function below, e.g.
+    'utf-8' must be implemented by the module 'utf_8.py'.
+
+    Each codec module must export the following interface:
+
+    * getregentry() -> codecs.CodecInfo object
+    The getregentry() API must return a CodecInfo object with encoder,
+    decoder, incrementalencoder, incrementaldecoder, streamwriter and
+    streamreader attributes which adhere to the Python Codec Interface Standard.
+
+    In addition, a module may optionally also define the following
+    APIs which are then used by the package's codec search function:
+
+    * getaliases() -> sequence of encoding name strings to use as aliases
+
+    Alias names returned by getaliases() must be normalized encoding
+    names as defined by normalize_encoding().
+
+Written by Marc-Andre Lemburg (mal at lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""#"
+
+import codecs
+from encodings import aliases, _java
+import __builtin__
+
+_cache = {}
+_unknown = '--unknown--'
+_import_tail = ['*']
+_norm_encoding_map = ('                                              . '
+                      '0123456789       ABCDEFGHIJKLMNOPQRSTUVWXYZ     '
+                      ' abcdefghijklmnopqrstuvwxyz                     '
+                      '                                                '
+                      '                                                '
+                      '                ')
+_aliases = aliases.aliases
+
+class CodecRegistryError(LookupError, SystemError):
+    pass
+
+def normalize_encoding(encoding):
+
+    """ Normalize an encoding name.
+
+        Normalization works as follows: all non-alphanumeric
+        characters except the dot used for Python package names are
+        collapsed and replaced with a single underscore, e.g. '  -;#'
+        becomes '_'. Leading and trailing underscores are removed.
+
+        Note that encoding names should be ASCII only; if they do use
+        non-ASCII characters, these must be Latin-1 compatible.
+
+    """
+    # Make sure we have an 8-bit string, because .translate() works
+    # differently for Unicode strings.
+    if hasattr(__builtin__, "unicode") and isinstance(encoding, unicode):
+        # Note that .encode('latin-1') does *not* use the codec
+        # registry, so this call doesn't recurse. (See unicodeobject.c
+        # PyUnicode_AsEncodedString() for details)
+        encoding = encoding.encode('latin-1')
+    return '_'.join(encoding.translate(_norm_encoding_map).split())
+
+def search_function(encoding):
+
+    # Cache lookup
+    entry = _cache.get(encoding, _unknown)
+    if entry is not _unknown:
+        return entry
+
+    # Import the module:
+    #
+    # First try to find an alias for the normalized encoding
+    # name and lookup the module using the aliased name, then try to
+    # lookup the module using the standard import scheme, i.e. first
+    # try in the encodings package, then at top-level.
+    #
+    norm_encoding = normalize_encoding(encoding)
+    aliased_encoding = _aliases.get(norm_encoding) or \
+                       _aliases.get(norm_encoding.replace('.', '_'))
+    if aliased_encoding is not None:
+        modnames = [aliased_encoding,
+                    norm_encoding]
+    else:
+        modnames = [norm_encoding]
+    for modname in modnames:
+        if not modname or '.' in modname:
+            continue
+        try:
+            # Import is absolute to prevent the possibly malicious import of a
+            # module with side-effects that is not in the 'encodings' package.
+            mod = __import__('encodings.' + modname, fromlist=_import_tail,
+                             level=0)
+        except ImportError:
+            pass
+        else:
+            break
+    else:
+        mod = None
+
+    try:
+        getregentry = mod.getregentry
+    except AttributeError:
+        # Not a codec module
+        mod = None
+
+    if mod is None:
+        # First, see if we can load the encoding using java.nio.Charset;
+        # FIXME this could expose encodings not known to CPython, so we should test that case as well
+        entry, codecaliases = _java._java_factory(encoding)
+        if entry is not None:
+            _cache[encoding] = entry
+            for alias in codecaliases:
+                if alias not in _aliases:
+                    _aliases[alias] = modname
+            return entry
+
+        # Cache misses
+        _cache[encoding] = None
+        return None
+
+    # Now ask the module for the registry entry
+    entry = getregentry()
+    if not isinstance(entry, codecs.CodecInfo):
+        if not 4 <= len(entry) <= 7:
+            raise CodecRegistryError,\
+                 'module "%s" (%s) failed to register' % \
+                  (mod.__name__, mod.__file__)
+        if not hasattr(entry[0], '__call__') or \
+           not hasattr(entry[1], '__call__') or \
+           (entry[2] is not None and not hasattr(entry[2], '__call__')) or \
+           (entry[3] is not None and not hasattr(entry[3], '__call__')) or \
+           (len(entry) > 4 and entry[4] is not None and not hasattr(entry[4], '__call__')) or \
+           (len(entry) > 5 and entry[5] is not None and not hasattr(entry[5], '__call__')):
+            raise CodecRegistryError,\
+                'incompatible codecs in module "%s" (%s)' % \
+                (mod.__name__, mod.__file__)
+        if len(entry)<7 or entry[6] is None:
+            entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
+        entry = codecs.CodecInfo(*entry)
+
+    # Cache the codec registry entry
+    _cache[encoding] = entry
+
+    # Register its aliases (without overwriting previously registered
+    # aliases)
+    try:
+        codecaliases = mod.getaliases()
+    except AttributeError:
+        pass
+    else:
+        for alias in codecaliases:
+            if alias not in _aliases:
+                _aliases[alias] = modname
+
+    # Return the registry entry
+    return entry
+
+# Register the search_function in the Python codec registry
+codecs.register(search_function)
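
Lookup hinges on normalize_encoding(); a quick sketch of what it does to
common spellings (run under Jython; the results follow directly from the
translation table above):

    from encodings import normalize_encoding

    # Runs of non-alphanumeric characters collapse to one underscore;
    # dots survive so package-style names still work.
    assert normalize_encoding('utf-8') == 'utf_8'
    assert normalize_encoding('ISO 8859-1') == 'ISO_8859_1'
    assert normalize_encoding('latin--1') == 'latin_1'
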
diff --git a/Lib/encodings/_java.py b/Lib/encodings/_java.py
new file mode 100644
--- /dev/null
+++ b/Lib/encodings/_java.py
@@ -0,0 +1,248 @@
+# Implements a factory to create codec instances for a given Java charset
+
+import codecs
+
+from array import array
+from functools import partial
+from java.lang import StringBuilder
+from java.nio import ByteBuffer, CharBuffer
+from java.nio.charset import Charset, IllegalCharsetNameException
+from StringIO import StringIO
+
+
+python_to_java = {
+    'cp932': 'cp942',
+    'iso2022_jp': 'ISO-2022-JP',
+    'iso2022_jp_2': 'ISO-2022-JP-2',
+    'iso2022_kr': 'ISO-2022-KR',
+    'shift_jisx0213': 'x-SJIS_0213',
+}
+
+
+
+def _java_factory(encoding):
+    encoding = python_to_java.get(encoding, encoding)
+
+    supported = False
+    try:
+        supported = Charset.isSupported(encoding)
+    except IllegalCharsetNameException:
+        pass
+    if not supported:
+        return None, set()
+
+    charset = Charset.forName(encoding)  # FIXME should we return this canonical name? could be best... TBD
+    entry = codecs.CodecInfo(
+        name=encoding,
+        encode=Codec(encoding).encode,
+        decode=Codec(encoding).decode,
+        incrementalencoder=partial(IncrementalEncoder, encoding=encoding),
+        incrementaldecoder=partial(IncrementalDecoder, encoding=encoding),
+        streamreader=partial(StreamReader, encoding=encoding),
+        streamwriter=partial(StreamWriter, encoding=encoding)
+    )
+    return entry, charset.aliases()
+
+
+class Codec(object):  # (codecs.Codec):
+
+    def __init__(self, encoding):
+        self.encoding = encoding
+
+    def decode(self, input, errors='strict', final=True):
+        error_function = codecs.lookup_error(errors)
+        input_buffer = ByteBuffer.wrap(array('b', input))
+        decoder = Charset.forName(self.encoding).newDecoder()
+        output_buffer = CharBuffer.allocate(min(max(int(len(input) / 2), 256), 1024))
+        builder = StringBuilder(int(decoder.averageCharsPerByte() * len(input)))
+
+        while True:
+            result = decoder.decode(input_buffer, output_buffer, False)
+            pos = output_buffer.position()
+            output_buffer.rewind()
+            builder.append(output_buffer.subSequence(0, pos))
+            if result.isUnderflow():
+                if final:
+                    _process_incomplete_decode(self.encoding, input, error_function, input_buffer, builder)
+                break
+            _process_decode_errors(self.encoding, input, result, error_function, input_buffer, builder)
+
+        return builder.toString(), input_buffer.position()
+
+    def encode(self, input, errors='strict'):
+        error_function = codecs.lookup_error(errors)
+        # Work around non-BMP issues: we need the exact count of UTF-16 chars, not codepoints
+        input_buffer = CharBuffer.allocate(StringBuilder(input).length())
+        input_buffer.put(input)
+        input_buffer.rewind()
+        encoder = Charset.forName(self.encoding).newEncoder()
+        output_buffer = ByteBuffer.allocate(min(max(len(input) * 2, 256), 1024))
+        builder = StringIO()
+
+        while True:
+            result = encoder.encode(input_buffer, output_buffer, True)
+            pos = output_buffer.position()
+            output_buffer.rewind()
+            builder.write(output_buffer.array()[0:pos].tostring())
+            if result.isUnderflow():
+                break
+            _process_encode_errors(self.encoding, input, result, error_function, input_buffer, builder)
+
+        return builder.getvalue(), len(input)
+
+
+class NonfinalCodec(Codec):
+
+    def decode(self, input, errors='strict'):
+        return Codec.decode(self, input, errors, final=False)
+
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+
+    def __init__(self, errors='strict', encoding=None):
+        assert encoding
+        self.encoding = encoding
+        self.errors = errors
+        self.encoder = Charset.forName(self.encoding).newEncoder()
+        self.output_buffer = ByteBuffer.allocate(1024)
+
+    def encode(self, input, final=False):
+        error_function = codecs.lookup_error(self.errors)
+        # Work around non-BMP issues: we need the exact count of UTF-16 chars, not codepoints
+        input_buffer = CharBuffer.allocate(StringBuilder(input).length())
+        input_buffer.put(input)
+        input_buffer.rewind()
+        self.output_buffer.rewind()
+        builder = StringIO()
+
+        while True:
+            result = self.encoder.encode(input_buffer, self.output_buffer, final)
+            pos = self.output_buffer.position()
+            self.output_buffer.rewind()
+            builder.write(self.output_buffer.array()[0:pos].tostring())
+            if result.isUnderflow():
+                break
+            _process_encode_errors(self.encoding, input, result, error_function, input_buffer, builder)
+
+        return builder.getvalue()
+
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+
+    def __init__(self, errors='strict', encoding=None):
+        assert encoding
+        self.encoding = encoding
+        self.errors = errors
+        self.decoder = Charset.forName(self.encoding).newDecoder()
+        self.output_buffer = CharBuffer.allocate(1024)
+        self.buffer = ''
+
+    def decode(self, input, final=False):
+        error_function = codecs.lookup_error(self.errors)
+        input_array = array('b', self.buffer + str(input))
+        input_buffer = ByteBuffer.wrap(input_array)
+        builder = StringBuilder(int(self.decoder.averageCharsPerByte() * len(input)))
+        self.output_buffer.rewind()
+
+        while True:
+            result = self.decoder.decode(input_buffer, self.output_buffer, final)
+            pos = self.output_buffer.position()
+            self.output_buffer.rewind()
+            builder.append(self.output_buffer.subSequence(0, pos))
+            if result.isUnderflow():
+                if not final:
+                    # Keep around any remaining input for next call to decode
+                    self.buffer = input_array[input_buffer.position():input_buffer.limit()].tostring()
+                else:
+                    _process_incomplete_decode(self.encoding, input, error_function, input_buffer, builder)
+                break
+            _process_decode_errors(self.encoding, input, result, error_function, input_buffer, builder)
+
+        return builder.toString()
+
+    def reset(self):
+        self.buffer = ""
+
+    def getstate(self):
+        return self.buffer or 0
+
+    def setstate(self, state):
+        self.buffer = state or ""
+
+
+class StreamWriter(NonfinalCodec, codecs.StreamWriter):
+
+    def __init__(self, stream, errors='strict', encoding=None):
+        NonfinalCodec.__init__(self, encoding)
+        codecs.StreamWriter.__init__(self, stream, errors)
+
+
+class StreamReader(NonfinalCodec, codecs.StreamReader):
+
+    def __init__(self, stream, errors='strict', encoding=None):
+        NonfinalCodec.__init__(self, encoding)
+        codecs.StreamReader.__init__(self, stream, errors)
+
+
+def _process_decode_errors(encoding, input, result, error_function, input_buffer, builder):
+    if result.isError():
+        e = UnicodeDecodeError(
+            encoding,
+            input, 
+            input_buffer.position(),
+            input_buffer.position() + result.length(),
+            'illegal multibyte sequence')
+        replacement, pos = error_function(e)
+        if not isinstance(replacement, unicode):
+            raise TypeError()
+        pos = int(pos)
+        if pos < 0:
+            pos = input_buffer.limit() + pos
+        if pos > input_buffer.limit():
+            raise IndexError()
+        builder.append(replacement)
+        input_buffer.position(pos)
+
+
+def _process_incomplete_decode(encoding, input, error_function, input_buffer, builder):
+    if input_buffer.position() < input_buffer.limit():
+        e = UnicodeDecodeError(
+            encoding,
+            input, 
+            input_buffer.position(),
+            input_buffer.limit(),
+            'illegal multibyte sequence')
+        replacement, pos = error_function(e)
+        if not isinstance(replacement, unicode):
+            raise TypeError()
+        pos = int(pos)
+        if pos < 0:
+            pos = input_buffer.limit() + pos
+        if pos > input_buffer.limit():
+            raise IndexError()
+        builder.append(replacement)
+        input_buffer.position(pos)
+
+
+def _get_unicode(input_buffer, result):
+    return input_buffer.subSequence(0, result.length()).toString()
+
+
+def _process_encode_errors(encoding, input, result, error_function, input_buffer, builder):
+    if result.isError():
+        e = UnicodeEncodeError(
+            encoding,
+            input, 
+            input_buffer.position(),
+            input_buffer.position() + result.length(),
+            'illegal multibyte sequence')
+        replacement, pos = error_function(e)
+        if not isinstance(replacement, unicode):
+            raise TypeError()
+        pos = int(pos)
+        if pos < 0:
+            pos = input_buffer.limit() + pos
+        if pos > input_buffer.limit():
+            raise IndexError()
+        builder.write(str(replacement))
+        input_buffer.position(pos)
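
The IncrementalDecoder above keeps unconsumed bytes in self.buffer
between calls, which is what lets callers split input at arbitrary byte
boundaries. A sketch, assuming the JVM supplies a Shift_JIS charset:

    from encodings._java import IncrementalDecoder

    dec = IncrementalDecoder(encoding='shift_jis')
    data = '\x82\xa0'   # u'\u3042' (HIRAGANA LETTER A) in Shift_JIS
    out = u''
    for i, byte in enumerate(data):
        # A lone lead byte underflows and is stashed in dec.buffer;
        # decoding resumes when the trail byte arrives. final=True
        # makes a trailing incomplete sequence an error instead.
        out += dec.decode(byte, final=(i == len(data) - 1))
    assert out == u'\u3042'
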
diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py
--- a/Lib/test/test_codecs.py
+++ b/Lib/test/test_codecs.py
@@ -1258,8 +1258,8 @@
 all_unicode_encodings = [
     "ascii",
     "base64_codec",
-# FIXME: Jython issue 1066:    "big5",
-# FIXME: Jython issue 1066:    "big5hkscs",
+    "big5",
+    "big5hkscs",
     "charmap",
     "cp037",
     "cp1006",
@@ -1296,27 +1296,27 @@
     "cp869",
     "cp874",
     "cp875",
-# FIXME: Jython issue 1066:    "cp932",
-# FIXME: Jython issue 1066:    "cp949",
-# FIXME: Jython issue 1066:    "cp950",
-# FIXME: Jython issue 1066:    "euc_jis_2004",
-# FIXME: Jython issue 1066:    'euc_jisx0213',
-# FIXME: Jython issue 1066:    'euc_jp',
-# FIXME: Jython issue 1066:    'euc_kr',
-# FIXME: Jython issue 1066:    'gb18030',
-# FIXME: Jython issue 1066:    'gb2312',
-# FIXME: Jython issue 1066:    'gbk',
+    "cp932",
+    "cp949",
+    "cp950",
+    # "euc_jis_2004",  # Not available on Java
+    # 'euc_jisx0213',  # Not available on Java
+    'euc_jp',
+    'euc_kr',
+    'gb18030',
+    'gb2312',
+    'gbk',
     "hex_codec",
     "hp_roman8",
-# FIXME: Jython issue 1066:    'hz',
-# FIXME: Jython issue 1066:    "idna",
-# FIXME: Jython issue 1066:    'iso2022_jp',
-# FIXME: Jython issue 1066:    'iso2022_jp_1',
-# FIXME: Jython issue 1066:    'iso2022_jp_2',
-# FIXME: Jython issue 1066:    'iso2022_jp_2004',
-# FIXME: Jython issue 1066:    'iso2022_jp_3',
-# FIXME: Jython issue 1066:    'iso2022_jp_ext',
-# FIXME: Jython issue 1066:    'iso2022_kr',
+    # 'hz', # Not available on Java
+    "idna",
+    'iso2022_jp',
+    # 'iso2022_jp_1', # Not available on Java
+    'iso2022_jp_2',
+    # 'iso2022_jp_2004', # Not available on Java
+    # 'iso2022_jp_3', # Not available on Java
+    # 'iso2022_jp_ext', # Not available on Java
+    'iso2022_kr',
     "iso8859_1",
     "iso8859_10",
     "iso8859_11",
@@ -1332,7 +1332,7 @@
     "iso8859_7",
     "iso8859_8",
     "iso8859_9",
-# FIXME: Jython issue 1066:    'johab',
+    'johab',
     "koi8_r",
     "koi8_u",
     "latin_1",
@@ -1347,9 +1347,9 @@
     "punycode",
     "raw_unicode_escape",
     "rot_13",
-# FIXME: Jython issue 1066:    'shift_jis',
-# FIXME: Jython issue 1066:    'shift_jis_2004',
-# FIXME: Jython issue 1066:    'shift_jisx0213',
+    "shift_jis",
+    #'shift_jis_2004', # Not available on Java
+    'shift_jisx0213',
     "tis_620",
     "unicode_escape",
     "unicode_internal",
@@ -1499,11 +1499,14 @@
                         self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
 
     def test_seek(self):
-        # all codecs should be able to encode these
-        s = u"%s\n%s\n" % (100*u"abc123", 100*u"def456")
+        # all codecs - except idna on Java - should be able to encode these
+        s1 = u"%s\n%s\n" % (100*u"abc123", 100*u"def456")
         for encoding in all_unicode_encodings:
+            s = s1
             if encoding in broken_unicode_with_streams:
                 continue
+            if encoding == "idna":
+                s = u"%s\n%s\n" % (5*u"abc123", 5*u"def456")  # idna rejects labels longer than 63 chars
             reader = codecs.getreader(encoding)(StringIO.StringIO(s.encode(encoding)))
             for t in xrange(5):
                 # Test that calling seek resets the internal codec state and buffers
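
The idna special case above reflects a limit in the codec itself rather
than in the streams machinery: each label must encode to fewer than 64
characters, so the original 600-character test lines cannot pass through
idna at all. A quick check of that limit:

    (u'a' * 63).encode('idna')     # a maximal single label: fine
    try:
        (u'a' * 64).encode('idna')
    except UnicodeError:
        pass                       # rejected: label too long
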
diff --git a/Lib/test/test_multibytecodec_support.py b/Lib/test/test_multibytecodec_support.py
new file mode 100644
--- /dev/null
+++ b/Lib/test/test_multibytecodec_support.py
@@ -0,0 +1,375 @@
+#!/usr/bin/env python
+#
+# test_multibytecodec_support.py
+#   Common Unittest Routines for CJK codecs
+#
+
+import codecs
+import os
+import re
+import sys
+import unittest
+from httplib import HTTPException
+from test import test_support
+from StringIO import StringIO
+
+class TestBase:
+    encoding        = ''   # codec name
+    codec           = None # codec tuple (with 4 elements)
+    tstring         = ''   # string to test StreamReader
+
+    codectests      = None # must set. codec test tuple
+    roundtriptest   = 1    # set if roundtrip is possible with unicode
+    has_iso10646    = 0    # set if this encoding contains whole iso10646 map
+    xmlcharnametest = None # string to test xmlcharrefreplace
+    unmappedunicode = u'\ufffe' # a unicode codepoint that is not mapped
+                                # CPython uses an isolated surrogate, which will not work on Jython
+
+    def setUp(self):
+        if self.codec is None:
+            self.codec = codecs.lookup(self.encoding)
+        self.encode = self.codec.encode
+        self.decode = self.codec.decode
+        self.reader = self.codec.streamreader
+        self.writer = self.codec.streamwriter
+        self.incrementalencoder = self.codec.incrementalencoder
+        self.incrementaldecoder = self.codec.incrementaldecoder
+
+    def test_chunkcoding(self):
+        for native, utf8 in zip(*[StringIO(f).readlines()
+                                  for f in self.tstring]):
+            u = self.decode(native)[0]
+            self.assertEqual(u, utf8.decode('utf-8'))
+            if self.roundtriptest:
+                self.assertEqual(native, self.encode(u)[0])
+
+    def test_errorhandle(self):
+        for source, scheme, expected in self.codectests:
+            if isinstance(source, bytes):
+                func = self.decode
+            else:
+                func = self.encode
+            if expected:
+                result = func(source, scheme)[0]
+                if func is self.decode:
+                    self.assertTrue(type(result) is unicode, type(result))
+                    self.assertEqual(result, expected,
+                                     '%r.decode(%r, %r)=%r != %r'
+                                     % (source, self.encoding, scheme, result,
+                                        expected))
+                else:
+                    self.assertTrue(type(result) is bytes, type(result))
+                    self.assertEqual(result, expected,
+                                     '%r.encode(%r, %r)=%r != %r'
+                                     % (source, self.encoding, scheme, result,
+                                        expected))
+            else:
+                self.assertRaises(UnicodeError, func, source, scheme)
+
+    def test_xmlcharrefreplace(self):
+        if self.has_iso10646:
+            return
+
+        s = u"\u0b13\u0b23\u0b60 nd eggs"
+        self.assertEqual(
+            self.encode(s, "xmlcharrefreplace")[0],
+            "ଓଣୠ nd eggs"
+        )
+
+    def test_customreplace_encode(self):
+        if self.has_iso10646:
+            return
+
+        from htmlentitydefs import codepoint2name
+
+        def xmlcharnamereplace(exc):
+            if not isinstance(exc, UnicodeEncodeError):
+                raise TypeError("don't know how to handle %r" % exc)
+            l = []
+            for c in exc.object[exc.start:exc.end]:
+                if ord(c) in codepoint2name:
+                    l.append(u"&%s;" % codepoint2name[ord(c)])
+                else:
+                    l.append(u"&#%d;" % ord(c))
+            return (u"".join(l), exc.end)
+
+        codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace)
+
+        if self.xmlcharnametest:
+            sin, sout = self.xmlcharnametest
+        else:
+            sin = u"\xab\u211c\xbb = \u2329\u1234\u232a"
+            sout = "«ℜ» = ⟨ሴ⟩"
+        self.assertEqual(self.encode(sin,
+                                    "test.xmlcharnamereplace")[0], sout)
+
+    def test_callback_wrong_objects(self):
+        def myreplace(exc):
+            return (ret, exc.end)
+        codecs.register_error("test.cjktest", myreplace)
+
+        for ret in ([1, 2, 3], [], None, object(), 'string', ''):
+            self.assertRaises(TypeError, self.encode, self.unmappedunicode,
+                              'test.cjktest')
+
+    def test_callback_long_index(self):
+        def myreplace(exc):
+            return (u'x', long(exc.end))
+        codecs.register_error("test.cjktest", myreplace)
+        self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
+                                     'test.cjktest'), ('abcdxefgh', 9))
+
+        def myreplace(exc):
+            return (u'x', sys.maxint + 1)
+        codecs.register_error("test.cjktest", myreplace)
+        self.assertRaises(IndexError, self.encode, self.unmappedunicode,
+                          'test.cjktest')
+
+    def test_callback_None_index(self):
+        def myreplace(exc):
+            return (u'x', None)
+        codecs.register_error("test.cjktest", myreplace)
+        self.assertRaises(TypeError, self.encode, self.unmappedunicode,
+                          'test.cjktest')
+
+    def test_callback_backward_index(self):
+        def myreplace(exc):
+            if myreplace.limit > 0:
+                myreplace.limit -= 1
+                return (u'REPLACED', 0)
+            else:
+                return (u'TERMINAL', exc.end)
+        myreplace.limit = 3
+        codecs.register_error("test.cjktest", myreplace)
+        self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
+                                     'test.cjktest'),
+                ('abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9))
+
+    def test_callback_forward_index(self):
+        def myreplace(exc):
+            return (u'REPLACED', exc.end + 2)
+        codecs.register_error("test.cjktest", myreplace)
+        self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
+                                     'test.cjktest'), ('abcdREPLACEDgh', 9))
+
+    def test_callback_index_outofbound(self):
+        def myreplace(exc):
+            return (u'TERM', 100)
+        codecs.register_error("test.cjktest", myreplace)
+        self.assertRaises(IndexError, self.encode, self.unmappedunicode,
+                          'test.cjktest')
+
+    def test_incrementalencoder(self):
+        UTF8Reader = codecs.getreader('utf-8')
+        for sizehint in [None] + range(1, 33) + \
+                        [64, 128, 256, 512, 1024]:
+            istream = UTF8Reader(StringIO(self.tstring[1]))
+            ostream = StringIO()
+            encoder = self.incrementalencoder()
+            while 1:
+                if sizehint is not None:
+                    data = istream.read(sizehint)
+                else:
+                    data = istream.read()
+
+                if not data:
+                    break
+                e = encoder.encode(data)
+                ostream.write(e)
+
+            self.assertEqual(ostream.getvalue(), self.tstring[0])
+
+    def test_incrementaldecoder(self):
+        UTF8Writer = codecs.getwriter('utf-8')
+        for sizehint in [None, -1] + range(1, 33) + \
+                        [64, 128, 256, 512, 1024]:
+            istream = StringIO(self.tstring[0])
+            ostream = UTF8Writer(StringIO())
+            decoder = self.incrementaldecoder()
+            while 1:
+                data = istream.read(sizehint)
+                if not data:
+                    break
+                else:
+                    u = decoder.decode(data)
+                    ostream.write(u)
+
+            self.assertEqual(ostream.getvalue(), self.tstring[1])
+
+    def test_incrementalencoder_error_callback(self):
+        inv = self.unmappedunicode
+
+        e = self.incrementalencoder()
+        self.assertRaises(UnicodeEncodeError, e.encode, inv, True)
+
+        e.errors = 'ignore'
+        self.assertEqual(e.encode(inv, True), '')
+
+        e.reset()
+        def tempreplace(exc):
+            return (u'called', exc.end)
+        codecs.register_error('test.incremental_error_callback', tempreplace)
+        e.errors = 'test.incremental_error_callback'
+        self.assertEqual(e.encode(inv, True), 'called')
+
+        # again
+        e.errors = 'ignore'
+        self.assertEqual(e.encode(inv, True), '')
+
+    def test_streamreader(self):
+        UTF8Writer = codecs.getwriter('utf-8')
+        for name in ["read", "readline", "readlines"]:
+            for sizehint in [None, -1] + range(1, 33) + \
+                            [64, 128, 256, 512, 1024]:
+                istream = self.reader(StringIO(self.tstring[0]))
+                ostream = UTF8Writer(StringIO())
+                func = getattr(istream, name)
+                while 1:
+                    data = func(sizehint)
+                    if not data:
+                        break
+                    if name == "readlines":
+                        ostream.writelines(data)
+                    else:
+                        ostream.write(data)
+
+                self.assertEqual(ostream.getvalue(), self.tstring[1])
+
+    def test_streamwriter(self):
+        readfuncs = ('read', 'readline', 'readlines')
+        UTF8Reader = codecs.getreader('utf-8')
+        for name in readfuncs:
+            for sizehint in [None] + range(1, 33) + \
+                            [64, 128, 256, 512, 1024]:
+                istream = UTF8Reader(StringIO(self.tstring[1]))
+                ostream = self.writer(StringIO())
+                func = getattr(istream, name)
+                while 1:
+                    if sizehint is not None:
+                        data = func(sizehint)
+                    else:
+                        data = func()
+
+                    if not data:
+                        break
+                    if name == "readlines":
+                        ostream.writelines(data)
+                    else:
+                        ostream.write(data)
+
+                self.assertEqual(ostream.getvalue(), self.tstring[0])
+
+class TestBase_Mapping(unittest.TestCase):
+    pass_enctest = []
+    pass_dectest = []
+    supmaps = []
+    codectests = []
+
+    def __init__(self, *args, **kw):
+        unittest.TestCase.__init__(self, *args, **kw)
+        try:
+            self.open_mapping_file().close() # test it to report the error early
+        except (IOError, HTTPException):
+            self.skipTest("Could not retrieve "+self.mapfileurl)
+
+    def open_mapping_file(self):
+        return test_support.open_urlresource(self.mapfileurl)
+
+    def test_mapping_file(self):
+        if self.mapfileurl.endswith('.xml'):
+            self._test_mapping_file_ucm()
+        else:
+            self._test_mapping_file_plain()
+
+    def _test_mapping_file_plain(self):
+        _unichr = lambda c: eval("u'\\U%08x'" % int(c, 16))
+        unichrs = lambda s: u''.join(_unichr(c) for c in s.split('+'))
+        urt_wa = {}
+
+        with self.open_mapping_file() as f:
+            for line in f:
+                if not line:
+                    break
+                data = line.split('#')[0].strip().split()
+                if len(data) != 2:
+                    continue
+
+                csetval = eval(data[0])
+                if csetval <= 0x7F:
+                    csetch = chr(csetval & 0xff)
+                elif csetval >= 0x1000000:
+                    csetch = chr(csetval >> 24) + chr((csetval >> 16) & 0xff) + \
+                             chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
+                elif csetval >= 0x10000:
+                    csetch = chr(csetval >> 16) + \
+                             chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
+                elif csetval >= 0x100:
+                    csetch = chr(csetval >> 8) + chr(csetval & 0xff)
+                else:
+                    continue
+
+                unich = unichrs(data[1])
+                if unich == u'\ufffd' or unich in urt_wa:
+                    continue
+                urt_wa[unich] = csetch
+
+                self._testpoint(csetch, unich)
+
+    def _test_mapping_file_ucm(self):
+        with self.open_mapping_file() as f:
+            ucmdata = f.read()
+        uc = re.findall('<a u="([A-F0-9]{4})" b="([0-9A-F ]+)"/>', ucmdata)
+        for uni, coded in uc:
+            unich = unichr(int(uni, 16))
+            codech = ''.join(chr(int(c, 16)) for c in coded.split())
+            self._testpoint(codech, unich)
+
+    def test_mapping_supplemental(self):
+        for mapping in self.supmaps:
+            self._testpoint(*mapping)
+
+    def _testpoint(self, csetch, unich):
+        if (csetch, unich) not in self.pass_enctest:
+            try:
+                self.assertEqual(unich.encode(self.encoding), csetch)
+            except UnicodeError, exc:
+                self.fail('Encoding failed while testing %s -> %s: %s' % (
+                            repr(unich), repr(csetch), exc.reason))
+        if (csetch, unich) not in self.pass_dectest:
+            try:
+                self.assertEqual(csetch.decode(self.encoding), unich)
+            except UnicodeError, exc:
+                self.fail('Decoding failed while testing %s -> %s: %s' % (
+                            repr(csetch), repr(unich), exc.reason))
+
+    def test_errorhandle(self):
+        for source, scheme, expected in self.codectests:
+            if isinstance(source, bytes):
+                func = source.decode
+            else:
+                func = source.encode
+            if expected:
+                if isinstance(source, bytes):
+                    result = func(self.encoding, scheme)
+                    self.assertTrue(type(result) is unicode, type(result))
+                    self.assertEqual(result, expected,
+                                     '%r.decode(%r, %r)=%r != %r'
+                                     % (source, self.encoding, scheme, result,
+                                        expected))
+                else:
+                    result = func(self.encoding, scheme)
+                    self.assertTrue(type(result) is bytes, type(result))
+                    self.assertEqual(result, expected,
+                                     '%r.encode(%r, %r)=%r != %r'
+                                     % (source, self.encoding, scheme, result,
+                                        expected))
+            else:
+                self.assertRaises(UnicodeError, func, self.encoding, scheme)
+
+def load_teststring(name):
+    dir = os.path.join(os.path.dirname(__file__), 'cjkencodings')
+    with open(os.path.join(dir, name + '.txt'), 'rb') as f:
+        encoded = f.read()
+    with open(os.path.join(dir, name + '-utf8.txt'), 'rb') as f:
+        utf8 = f.read()
+    return encoded, utf8
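
A concrete codec test module would build on these helpers roughly as
follows (a hypothetical sketch after CPython's test_codecencodings_*
pattern; the error rows are illustrative only, since Java decoders may
report malformed input differently than CPython's):

    from test import test_multibytecodec_support, test_support
    import unittest

    class TestBig5(test_multibytecodec_support.TestBase, unittest.TestCase):
        encoding = 'big5'
        tstring = test_multibytecodec_support.load_teststring('big5')
        codectests = (
            # (source, error scheme, expected; None means "must raise")
            ('abc\x80\x80', 'strict', None),
            ('abc\x80\x80', 'ignore', u'abc'),
        )

    def test_main():
        test_support.run_unittest(TestBig5)

    if __name__ == '__main__':
        test_main()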

-- 
Repository URL: http://hg.python.org/jython