Python codecs.getwriter() Examples

The following are 30 code examples of codecs.getwriter(). The original project and source file for each example are listed above it. You may also want to check out all other available functions and classes of the codecs module.
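Before the project examples, here is a minimal self-contained sketch (assuming Python 3 and an in-memory io.BytesIO as the target stream; not taken from any of the projects below) of what codecs.getwriter() returns and does:

import codecs
import io

# codecs.getwriter() returns the StreamWriter class registered for the codec;
# calling that class with a byte stream wraps it in a writer that accepts text.
buf = io.BytesIO()
writer = codecs.getwriter("utf-8")(buf)
writer.write("caf\u00e9")      # text in, UTF-8 bytes out
print(buf.getvalue())          # b'caf\xc3\xa9'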
Example #1
Source File: util.py    From FuYiSpider with Apache License 2.0
def write_exports(exports, stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream) 
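As a usage note (not part of the FuYiSpider source): ConfigParser.write() requires a text stream, which is why the snippet wraps a binary stream with codecs.getwriter('utf-8') on Python 3. A hedged sketch of calling it, assuming write_exports from the snippet above is in scope and using a hypothetical Entry namedtuple in place of whatever export-entry objects the project really uses (only the name, prefix, suffix and flags attributes matter here):

from collections import namedtuple

# Hypothetical stand-in for the entry objects that write_exports iterates over.
Entry = namedtuple("Entry", "name prefix suffix flags")

exports = {
    "console_scripts": {
        "mytool": Entry("mytool", "mypkg.cli", "main", []),
    },
}

with open("exports.cfg", "wb") as f:   # binary stream; write_exports wraps it itself
    write_exports(exports, f)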
Example #2
Source File: cli.py    From pdfplumber with MIT License
def to_json(pdf, types, encoding):
    data = { "metadata": pdf.metadata }

    def get_page_data(page):
        d = dict((t + "s", getattr(page, t + "s"))
            for t in types)
        d["width"] = page.width
        d["height"] = page.height
        return d

    data["pages"] = list(map(get_page_data, pdf.pages))

    if hasattr(sys.stdout, "buffer"):
        sys.stdout = codecs.getwriter("utf-8")(sys.stdout.buffer, "strict")
        json.dump(data, sys.stdout, cls=DecimalEncoder)
    else:
        json.dump(data, sys.stdout, cls=DecimalEncoder, encoding=encoding) 
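The pattern used here, rebinding sys.stdout to a UTF-8 StreamWriter over sys.stdout.buffer so that json.dump() emits UTF-8 regardless of the terminal's configured encoding, can be tried on its own. A minimal sketch, assuming Python 3 (where sys.stdout exposes a .buffer attribute) and a made-up data dict rather than pdfplumber output:

import codecs
import json
import sys

data = {"title": "r\u00e9sum\u00e9", "pages": 3}

# Encode json.dump() output as UTF-8 bytes on the underlying buffer,
# bypassing whatever text encoding the console stream was created with.
if hasattr(sys.stdout, "buffer"):
    sys.stdout = codecs.getwriter("utf-8")(sys.stdout.buffer, "strict")

json.dump(data, sys.stdout, ensure_ascii=False)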
Example #3
Source File: util.py    From Mastering-Elasticsearch-7.0 with MIT License
def write_exports(exports, stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream) 
Example #4
Source File: util.py    From recruit with Apache License 2.0
def write_exports(exports, stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream) 
Example #5
Source File: util.py    From Mastering-Elasticsearch-7.0 with MIT License
def write_exports(exports, stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream) 
Example #6
Source File: test_multibytecodec_support.py    From BinderFilter with MIT License
def test_streamreader(self):
        UTF8Writer = codecs.getwriter('utf-8')
        for name in ["read", "readline", "readlines"]:
            for sizehint in [None, -1] + range(1, 33) + \
                            [64, 128, 256, 512, 1024]:
                istream = self.reader(StringIO(self.tstring[0]))
                ostream = UTF8Writer(StringIO())
                func = getattr(istream, name)
                while 1:
                    data = func(sizehint)
                    if not data:
                        break
                    if name == "readlines":
                        ostream.writelines(data)
                    else:
                        ostream.write(data)

                self.assertEqual(ostream.getvalue(), self.tstring[1]) 
Example #7
Source File: util.py    From jbox with MIT License
def write_exports(exports, stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream) 
Example #8
Source File: file_util.py    From tools with GNU General Public License v2.0
def in_out(args,multiple_files=False):
    """Open the input/output data streams. If multiple_files is set to
    True, returns an iterator over lines. If set to False, returns an open file.
    This distinction is needed because validator.py checks the newlines property and
    needs to get the input as a file, but the other scripts just need the lines
    so they can work with several files.
    """
    #Decide where to get the data from
    if args.input is None or args.input=="-": #Stdin
        inp=codecs.getreader("utf-8")(os.fdopen(0,"U")) #Switched universal newlines on
    else: #File name given
        if multiple_files:
            inp_raw=fileinput.input(files=args.input,mode="U")
            inp=(line.decode("utf-8") for line in inp_raw)
        else:
            inp_raw=open(args.input,mode="U")
            inp=codecs.getreader("utf-8")(inp_raw)
    #inp is now an iterator over lines, giving unicode strings

    if args.output is None or args.output=="-": #stdout
        out=codecs.getwriter("utf-8")(sys.stdout)
    else: #File name given
        out=codecs.open(args.output,"w","utf-8")
    return inp,out 
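A hedged usage sketch for in_out() above (Python 2, since the snippet relies on str.decode and mode="U"; the file name and the argparse-style namespace are made up for illustration and assume in_out is in scope):

import argparse

args = argparse.Namespace(input="data.conllu", output="-")
inp, out = in_out(args)        # UTF-8 reader over the file, UTF-8 writer over stdout
for line in inp:
    out.write(line)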
Example #9
Source File: test_logging.py    From BinderFilter with MIT License
def test_encoding_cyrillic_unicode(self):
        log = logging.getLogger("test")
        #Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
        message = u'\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
        #Ensure it's written in a Cyrillic encoding
        writer_class = codecs.getwriter('cp1251')
        writer_class.encoding = 'cp1251'
        stream = cStringIO.StringIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        # check we wrote exactly those bytes, ignoring trailing \n etc
        s = stream.getvalue()
        #Compare against what the data should be when encoded in CP-1251
        self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n') 
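The pattern exercised by this test, handing logging.StreamHandler a codec StreamWriter so that records are encoded on the way to the underlying stream, works the same way outside the test suite. A rough Python 3 sketch, with an in-memory io.BytesIO standing in for a real byte stream:

import codecs
import io
import logging

buf = io.BytesIO()
handler = logging.StreamHandler(codecs.getwriter("utf-8")(buf))
log = logging.getLogger("demo")
log.addHandler(handler)
log.warning(u"\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f")
log.removeHandler(handler)
print(buf.getvalue())          # the message as UTF-8 bytes plus a trailing b'\n'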
Example #10
Source File: exportxml.py    From gprime with GNU General Public License v2.0
def write_handle(self, handle):
        """
        Write the database to the specified file handle.
        """

        if self.compress and _gzip_ok:
            try:
                g = gzip.GzipFile(mode="wb", fileobj=handle)
            except:
                g = handle
        else:
            g = handle

        self.g = codecs.getwriter("utf8")(g)

        self.write_xml_data()
        g.close()
        return 1 
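The layering shown here, a UTF-8 StreamWriter over a GzipFile so that text is encoded and compressed in one pass before reaching the file handle, is useful on its own. A standalone sketch, not using the exporter class above (the output path is made up):

import codecs
import gzip

with open("notes.xml.gz", "wb") as raw:
    gz = gzip.GzipFile(mode="wb", fileobj=raw)
    writer = codecs.getwriter("utf8")(gz)
    writer.write(u"<note>\u4f60\u597d</note>\n")   # text -> UTF-8 -> gzip -> file
    gz.close()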
Example #11
Source File: test_multibytecodec.py    From BinderFilter with MIT License
def test_gb18030(self):
            s = StringIO.StringIO()
            c = codecs.getwriter('gb18030')(s)
            c.write(u'123')
            self.assertEqual(s.getvalue(), '123')
            c.write(u'\U00012345')
            self.assertEqual(s.getvalue(), '123\x907\x959')
            c.write(u'\U00012345'[0])
            self.assertEqual(s.getvalue(), '123\x907\x959')
            c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
            self.assertEqual(s.getvalue(),
                    '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
            c.write(u'\U00012345'[0])
            self.assertEqual(s.getvalue(),
                    '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
            self.assertRaises(UnicodeError, c.reset)
            self.assertEqual(s.getvalue(),
                    '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851') 
Example #12
Source File: test_multibytecodec_support.py    From oss-ftp with MIT License
def test_streamreader(self):
        UTF8Writer = codecs.getwriter('utf-8')
        for name in ["read", "readline", "readlines"]:
            for sizehint in [None, -1] + range(1, 33) + \
                            [64, 128, 256, 512, 1024]:
                istream = self.reader(StringIO(self.tstring[0]))
                ostream = UTF8Writer(StringIO())
                func = getattr(istream, name)
                while 1:
                    data = func(sizehint)
                    if not data:
                        break
                    if name == "readlines":
                        ostream.writelines(data)
                    else:
                        ostream.write(data)

                self.assertEqual(ostream.getvalue(), self.tstring[1]) 
Example #13
Source File: test_logging.py    From oss-ftp with MIT License
def test_encoding_utf16_unicode(self):
        # Issue #19267
        log = logging.getLogger("test")
        message = u'b\u0142\u0105d'
        writer_class = codecs.getwriter('utf-16-le')
        writer_class.encoding = 'utf-16-le'
        stream = cStringIO.StringIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        s = stream.getvalue()
        self.assertEqual(s, 'b\x00B\x01\x05\x01d\x00\n\x00') 
Example #14
Source File: test_logging.py    From oss-ftp with MIT License
def test_encoding_cyrillic_unicode(self):
        log = logging.getLogger("test")
        #Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
        message = u'\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
        #Ensure it's written in a Cyrillic encoding
        writer_class = codecs.getwriter('cp1251')
        writer_class.encoding = 'cp1251'
        stream = cStringIO.StringIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        # check we wrote exactly those bytes, ignoring trailing \n etc
        s = stream.getvalue()
        #Compare against what the data should be when encoded in CP-1251
        self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n') 
Example #15
Source File: multibytecodec_support.py    From ironpython2 with Apache License 2.0
def test_incrementaldecoder(self):
        UTF8Writer = codecs.getwriter('utf-8')
        for sizehint in [None, -1] + range(1, 33) + \
                        [64, 128, 256, 512, 1024]:
            istream = StringIO(self.tstring[0])
            ostream = UTF8Writer(StringIO())
            decoder = self.incrementaldecoder()
            while 1:
                data = istream.read(sizehint)
                if not data:
                    break
                else:
                    u = decoder.decode(data)
                    ostream.write(u)

            self.assertEqual(ostream.getvalue(), self.tstring[1]) 
Example #16
Source File: multibytecodec_support.py    From ironpython2 with Apache License 2.0
def test_streamreader(self):
        UTF8Writer = codecs.getwriter('utf-8')
        for name in ["read", "readline", "readlines"]:
            for sizehint in [None, -1] + range(1, 33) + \
                            [64, 128, 256, 512, 1024]:
                istream = self.reader(StringIO(self.tstring[0]))
                ostream = UTF8Writer(StringIO())
                func = getattr(istream, name)
                while 1:
                    data = func(sizehint)
                    if not data:
                        break
                    if name == "readlines":
                        ostream.writelines(data)
                    else:
                        ostream.write(data)

                self.assertEqual(ostream.getvalue(), self.tstring[1]) 
Example #17
Source File: test_multibytecodec.py    From ironpython2 with Apache License 2.0
def test_gb18030(self):
        s = StringIO.StringIO()
        c = codecs.getwriter('gb18030')(s)
        c.write(u'123')
        self.assertEqual(s.getvalue(), '123')
        c.write(u'\U00012345')
        self.assertEqual(s.getvalue(), '123\x907\x959')
        c.write(u'\U00012345'[0])
        self.assertEqual(s.getvalue(), '123\x907\x959')
        c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
        self.assertEqual(s.getvalue(),
                '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
        c.write(u'\U00012345'[0])
        self.assertEqual(s.getvalue(),
                '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
        self.assertRaises(UnicodeError, c.reset)
        self.assertEqual(s.getvalue(),
                '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851') 
Example #18
Source File: test_logging.py    From ironpython2 with Apache License 2.0
def test_encoding_cyrillic_unicode(self):
        log = logging.getLogger("test")
        #Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
        message = u'\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
        #Ensure it's written in a Cyrillic encoding
        writer_class = codecs.getwriter('cp1251')
        writer_class.encoding = 'cp1251'
        stream = cStringIO.StringIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        # check we wrote exactly those bytes, ignoring trailing \n etc
        s = stream.getvalue()
        #Compare against what the data should be when encoded in CP-1251
        self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n') 
Example #19
Source File: test_logging.py    From ironpython2 with Apache License 2.0
def test_encoding_utf16_unicode(self):
        # Issue #19267
        log = logging.getLogger("test")
        message = u'b\u0142\u0105d'
        writer_class = codecs.getwriter('utf-16-le')
        writer_class.encoding = 'utf-16-le'
        stream = cStringIO.StringIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        s = stream.getvalue()
        self.assertEqual(s, 'b\x00B\x01\x05\x01d\x00\n\x00') 
Example #20
Source File: test_multibytecodec.py    From oss-ftp with MIT License
def test_gb18030(self):
        s = StringIO.StringIO()
        c = codecs.getwriter('gb18030')(s)
        c.write(u'123')
        self.assertEqual(s.getvalue(), '123')
        c.write(u'\U00012345')
        self.assertEqual(s.getvalue(), '123\x907\x959')
        c.write(u'\U00012345'[0])
        self.assertEqual(s.getvalue(), '123\x907\x959')
        c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
        self.assertEqual(s.getvalue(),
                '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
        c.write(u'\U00012345'[0])
        self.assertEqual(s.getvalue(),
                '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
        self.assertRaises(UnicodeError, c.reset)
        self.assertEqual(s.getvalue(),
                '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851') 
Example #21
Source File: io.py    From Computable with MIT License
def unicode_std_stream(stream='stdout'):
    r"""Get a wrapper to write unicode to stdout/stderr as UTF-8.

    This ignores environment variables and default encodings, to reliably write
    unicode to stdout or stderr.

    ::

        unicode_std_stream().write(u'\u0142@e\xb6\u0167\u2190')
    """
    assert stream in ('stdout', 'stderr')
    stream  = getattr(sys, stream)
    if PY3:
        try:
            stream_b = stream.buffer
        except AttributeError:
            # sys.stdout has been replaced - use it directly
            return stream
    else:
        stream_b = stream

    return codecs.getwriter('utf-8')(stream_b) 
Example #22
Source File: util.py    From Python24 with MIT License
def write_exports(exports, stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream) 
Example #23
Source File: util.py    From kobo-predict with BSD 2-Clause "Simplified" License
def write_exports(exports, stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream) 
Example #24
Source File: util.py    From oss-ftp with MIT License
def write_exports(exports, stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream) 
Example #25
Source File: util.py    From vnpy_crypto with MIT License
def write_exports(exports, stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream) 
Example #26
Source File: util.py    From FuYiSpider with Apache License 2.0
def write_exports(exports, stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream) 
Example #27
Source File: test_multibytecodec_support.py    From oss-ftp with MIT License
def test_incrementaldecoder(self):
        UTF8Writer = codecs.getwriter('utf-8')
        for sizehint in [None, -1] + range(1, 33) + \
                        [64, 128, 256, 512, 1024]:
            istream = StringIO(self.tstring[0])
            ostream = UTF8Writer(StringIO())
            decoder = self.incrementaldecoder()
            while 1:
                data = istream.read(sizehint)
                if not data:
                    break
                else:
                    u = decoder.decode(data)
                    ostream.write(u)

            self.assertEqual(ostream.getvalue(), self.tstring[1]) 
Example #28
Source File: test_multibytecodec.py    From BinderFilter with MIT License
def test_streamwriter_strwrite(self):
        s = StringIO.StringIO()
        wr = codecs.getwriter('gb18030')(s)
        wr.write('abcd')
        self.assertEqual(s.getvalue(), 'abcd') 
Example #29
Source File: test_codecs.py    From BinderFilter with MIT License
def test_readlinequeue(self):
        q = Queue()
        writer = codecs.getwriter(self.encoding)(q)
        reader = codecs.getreader(self.encoding)(q)

        # No lineends
        writer.write(u"foo\r")
        self.assertEqual(reader.readline(keepends=False), u"foo")
        writer.write(u"\nbar\r")
        self.assertEqual(reader.readline(keepends=False), u"")
        self.assertEqual(reader.readline(keepends=False), u"bar")
        writer.write(u"baz")
        self.assertEqual(reader.readline(keepends=False), u"baz")
        self.assertEqual(reader.readline(keepends=False), u"")

        # Lineends
        writer.write(u"foo\r")
        self.assertEqual(reader.readline(keepends=True), u"foo\r")
        writer.write(u"\nbar\r")
        self.assertEqual(reader.readline(keepends=True), u"\n")
        self.assertEqual(reader.readline(keepends=True), u"bar\r")
        writer.write(u"baz")
        self.assertEqual(reader.readline(keepends=True), u"baz")
        self.assertEqual(reader.readline(keepends=True), u"")
        writer.write(u"foo\r\n")
        self.assertEqual(reader.readline(keepends=True), u"foo\r\n") 
Example #30
Source File: test_main.py    From Computable with MIT License
def test_unencodable_diff(self):
        input_stream = StringIO.StringIO(u"print 'nothing'\nprint u'über'\n")
        out = StringIO.StringIO()
        out_enc = codecs.getwriter("ascii")(out)
        err = StringIO.StringIO()
        ret = self.run_2to3_capture(["-"], input_stream, out_enc, err)
        self.assertEqual(ret, 0)
        output = out.getvalue()
        self.assertTrue("-print 'nothing'" in output)
        self.assertTrue("WARNING: couldn't encode <stdin>'s diff for "
                        "your terminal" in err.getvalue())