Python google.protobuf.text_format.Tokenizer() Examples
The following are 11 code examples of google.protobuf.text_format.Tokenizer().
All of them come from text_format_test.py, the unit-test file for the text_format module, as vendored into a number of open-source projects; the projects in which each example appears, with their licenses, are listed above it.
Each example is a method of a unittest.TestCase subclass and assumes from google.protobuf import text_format.
You may also want to check out the other functions and classes available in the module google.protobuf.text_format.
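Before the examples, a minimal usage sketch based on the patterns the examples share: the tokenizer is constructed from an iterable of lines (typically text.splitlines()), the Consume* methods return the parsed value or raise text_format.ParseError on a mismatch, and AtEnd() reports whether the input is exhausted. The input string here is an illustrative placeholder, not taken from the test suite.

from google.protobuf import text_format

text = 'some_field: 42'
tokenizer = text_format.Tokenizer(text.splitlines())
print(tokenizer.ConsumeIdentifier())  # -> 'some_field'
# The current token is exposed as tokenizer.token; plain punctuation such
# as ':' is skipped with NextToken() rather than a Consume* call.
assert tokenizer.token == ':'
tokenizer.NextToken()
print(tokenizer.ConsumeInteger())  # -> 42
assert tokenizer.AtEnd()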
Example #1
Source File: text_format_test.py From auto-alt-text-lambda-api (MIT License), coremltools (BSD 3-Clause "New" or "Revised" License), keras-lambda (MIT License), and go2mapillary (GNU General Public License v3.0)
def testConsumeAbstractIntegers(self):
  # This test only tests the failures in the integer parsing methods as well
  # as the '0' special cases.
  int64_max = (1 << 63) - 1
  uint32_max = (1 << 32) - 1
  text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
  tokenizer = text_format.Tokenizer(text.splitlines())
  self.assertEqual(-1, tokenizer.ConsumeInteger())
  self.assertEqual(uint32_max + 1, tokenizer.ConsumeInteger())
  self.assertEqual(int64_max + 1, tokenizer.ConsumeInteger())
  self.assertTrue(tokenizer.AtEnd())

  text = '-0 0'
  tokenizer = text_format.Tokenizer(text.splitlines())
  self.assertEqual(0, tokenizer.ConsumeInteger())
  self.assertEqual(0, tokenizer.ConsumeInteger())
  self.assertTrue(tokenizer.AtEnd())
Example #2
Source File: text_format_test.py From coremltools (BSD 3-Clause "New" or "Revised" License), Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License), auto-alt-text-lambda-api (MIT License), lambda-packs (MIT License), keras-lambda (MIT License), and go2mapillary (GNU General Public License v3.0)
def testConsumeByteString(self):
  text = '"string1\''
  tokenizer = text_format.Tokenizer(text.splitlines())
  self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)

  text = 'string1"'
  tokenizer = text_format.Tokenizer(text.splitlines())
  self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)

  text = '\n"\\xt"'
  tokenizer = text_format.Tokenizer(text.splitlines())
  self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)

  text = '\n"\\"'
  tokenizer = text_format.Tokenizer(text.splitlines())
  self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)

  text = '\n"\\x"'
  tokenizer = text_format.Tokenizer(text.splitlines())
  self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
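All five inputs above are malformed: an unterminated string, a string with no opening quote, a backslash that escapes the would-be closing quote, and \x escapes with no hex digits. For contrast, a sketch of the success path; since the test only exercises failures, the return value shown (the unescaped contents, as bytes on Python 3) is an assumption:

from google.protobuf import text_format

text = '"hello"'
tokenizer = text_format.Tokenizer(text.splitlines())
# A well-formed quoted string parses cleanly instead of raising ParseError.
assert tokenizer.ConsumeByteString() == b'hello'
assert tokenizer.AtEnd()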
Example #3
Source File: text_format_test.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License) and lambda-packs (MIT License)
def testConsumeAbstractIntegers(self):
  # This test only tests the failures in the integer parsing methods as well
  # as the '0' special cases.
  int64_max = (1 << 63) - 1
  uint32_max = (1 << 32) - 1
  text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
  tokenizer = text_format.Tokenizer(text.splitlines())
  self.assertEqual(-1, tokenizer.ConsumeInteger())
  self.assertEqual(uint32_max + 1, tokenizer.ConsumeInteger())
  self.assertEqual(int64_max + 1, tokenizer.ConsumeInteger())
  self.assertTrue(tokenizer.AtEnd())

  text = '-0 0 0 1.2'
  tokenizer = text_format.Tokenizer(text.splitlines())
  self.assertEqual(0, tokenizer.ConsumeInteger())
  self.assertEqual(0, tokenizer.ConsumeInteger())
  self.assertEqual(True, tokenizer.TryConsumeInteger())
  self.assertEqual(False, tokenizer.TryConsumeInteger())
  with self.assertRaises(text_format.ParseError):
    tokenizer.ConsumeInteger()
  self.assertEqual(1.2, tokenizer.ConsumeFloat())
  self.assertTrue(tokenizer.AtEnd())
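The TryConsume* calls exercised above are the non-raising counterparts of the Consume* methods: on a mismatch they return False and leave the current token in place instead of raising ParseError, which is what lets the test retry the same token with ConsumeFloat. A small sketch of that pattern; the input string is illustrative, not from the test:

from google.protobuf import text_format

text = '1 2 not_a_number'
tokenizer = text_format.Tokenizer(text.splitlines())
while not tokenizer.AtEnd():
    if tokenizer.TryConsumeInteger():
        continue  # the token was an integer and has been consumed
    # Not an integer: the token is still current, so consume it as an
    # identifier instead.
    print(tokenizer.ConsumeIdentifier())  # -> 'not_a_number'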
Example #4
Source File: text_format_test.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License), lambda-packs (MIT License), and coremltools (BSD 3-Clause "New" or "Revised" License)
def testConsumeAndCheckTrailingComment(self):
  text = 'some_number: 4 # some comment'  # trailing comment on the same line
  tokenizer = text_format.Tokenizer(text.splitlines(),
                                    skip_comments=False)
  self.assertRaises(text_format.ParseError,
                    tokenizer.ConsumeCommentOrTrailingComment)

  self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
  self.assertEqual(tokenizer.token, ':')
  tokenizer.NextToken()
  self.assertRaises(text_format.ParseError,
                    tokenizer.ConsumeCommentOrTrailingComment)
  self.assertEqual(4, tokenizer.ConsumeInteger())
  self.assertFalse(tokenizer.AtEnd())

  self.assertEqual((True, '# some comment'),
                   tokenizer.ConsumeCommentOrTrailingComment())
  self.assertTrue(tokenizer.AtEnd())
Example #5
Source File: text_format_test.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License) and go2mapillary (GNU General Public License v3.0)
def testConsumeTrailingComment(self):
  text = 'some_number: 4\n# some comment'
  tokenizer = text_format.Tokenizer(text.splitlines(),
                                    skip_comments=False)
  self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)

  self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
  self.assertEqual(tokenizer.token, ':')
  tokenizer.NextToken()
  self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
  self.assertEqual(4, tokenizer.ConsumeInteger())
  self.assertFalse(tokenizer.AtEnd())

  self.assertEqual('# some comment', tokenizer.ConsumeComment())
  self.assertTrue(tokenizer.AtEnd())
Example #6
Source File: text_format_test.py From go2mapillary (GNU General Public License v3.0), Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License), and keras-lambda (MIT License)
def testConsumeTwoComments(self):
  text = '# some comment\n# another comment'
  tokenizer = text_format.Tokenizer(text.splitlines(),
                                    skip_comments=False)
  self.assertEqual('# some comment', tokenizer.ConsumeComment())
  self.assertFalse(tokenizer.AtEnd())
  self.assertEqual('# another comment', tokenizer.ConsumeComment())
  self.assertTrue(tokenizer.AtEnd())
Example #7
Source File: text_format_test.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License) and keras-lambda (MIT License)
def testConsumeIntegers(self):
  # This test only tests the failures in the integer parsing methods as well
  # as the '0' special cases.
  int64_max = (1 << 63) - 1
  uint32_max = (1 << 32) - 1
  text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
  tokenizer = text_format.Tokenizer(text.splitlines())
  self.assertRaises(text_format.ParseError,
                    text_format._ConsumeUint32, tokenizer)
  self.assertRaises(text_format.ParseError,
                    text_format._ConsumeUint64, tokenizer)
  self.assertEqual(-1, text_format._ConsumeInt32(tokenizer))
  self.assertRaises(text_format.ParseError,
                    text_format._ConsumeUint32, tokenizer)
  self.assertRaises(text_format.ParseError,
                    text_format._ConsumeInt32, tokenizer)
  self.assertEqual(uint32_max + 1, text_format._ConsumeInt64(tokenizer))
  self.assertRaises(text_format.ParseError,
                    text_format._ConsumeInt64, tokenizer)
  self.assertEqual(int64_max + 1, text_format._ConsumeUint64(tokenizer))
  self.assertTrue(tokenizer.AtEnd())

  text = '-0 -0 0 0'
  tokenizer = text_format.Tokenizer(text.splitlines())
  self.assertEqual(0, text_format._ConsumeUint32(tokenizer))
  self.assertEqual(0, text_format._ConsumeUint64(tokenizer))
  self.assertEqual(0, text_format._ConsumeUint32(tokenizer))
  self.assertEqual(0, text_format._ConsumeUint64(tokenizer))
  self.assertTrue(tokenizer.AtEnd())
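The underscore-prefixed helpers used here (_ConsumeInt32, _ConsumeUint32, _ConsumeInt64, _ConsumeUint64) are module-level functions that wrap a tokenizer and enforce the width and sign bounds of the corresponding wire types, whereas Tokenizer.ConsumeInteger (Examples #1 and #3) accepts an integer of any size. A minimal sketch of the difference; note that the leading underscore marks these helpers as private to text_format, so code outside this test suite should not depend on them:

from google.protobuf import text_format

text = '4294967296'  # uint32_max + 1: one too wide for a uint32
tokenizer = text_format.Tokenizer(text.splitlines())
try:
    text_format._ConsumeUint32(tokenizer)  # private, bounds-checked helper
except text_format.ParseError:
    pass  # out of range for a 32-bit unsigned value

# ConsumeInteger imposes no width restriction on the same token.
tokenizer = text_format.Tokenizer(text.splitlines())
assert tokenizer.ConsumeInteger() == 4294967296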
Example #8
Source File: text_format_test.py From go2mapillary (GNU General Public License v3.0), Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License), and keras-lambda (MIT License)
def testSkipComment(self):
  tokenizer = text_format.Tokenizer('# some comment'.splitlines())
  self.assertTrue(tokenizer.AtEnd())
  self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
Example #9
Source File: text_format_test.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License) and keras-lambda (MIT License)
def testConsumeBool(self):
  text = 'not-a-bool'
  tokenizer = text_format.Tokenizer(text.splitlines())
  self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool)
Example #10
Source File: text_format_test.py From coremltools (BSD 3-Clause "New" or "Revised" License) and Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License)
def testConsumeTwoLineComments(self):
  text = '# some comment\n# another comment'
  tokenizer = text_format.Tokenizer(text.splitlines(),
                                    skip_comments=False)
  self.assertEqual((False, '# some comment'),
                   tokenizer.ConsumeCommentOrTrailingComment())
  self.assertFalse(tokenizer.AtEnd())
  self.assertEqual((False, '# another comment'),
                   tokenizer.ConsumeCommentOrTrailingComment())
  self.assertTrue(tokenizer.AtEnd())
Example #11
Source File: text_format_test.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License)
def testConsumeLineComment(self):
  tokenizer = text_format.Tokenizer('# some comment'.splitlines(),
                                    skip_comments=False)
  self.assertFalse(tokenizer.AtEnd())
  self.assertEqual((False, '# some comment'),
                   tokenizer.ConsumeCommentOrTrailingComment())
  self.assertTrue(tokenizer.AtEnd())
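Taken together, the comment examples show the two modes of the skip_comments keyword: with the default (skip_comments=True, as in Example #8) comments never become tokens at all, while with skip_comments=False they can be consumed explicitly, and ConsumeCommentOrTrailingComment reports in the first element of its result whether the comment trailed other tokens on its line. A short recap sketch using the same input as Examples #8 and #11:

from google.protobuf import text_format

text = '# some comment'

# Default mode: the comment is dropped during tokenization, so the
# token stream is empty from the start.
tokenizer = text_format.Tokenizer(text.splitlines())
assert tokenizer.AtEnd()

# skip_comments=False: the comment is a real token and can be consumed.
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
trailing, comment = tokenizer.ConsumeCommentOrTrailingComment()
assert (trailing, comment) == (False, '# some comment')
assert tokenizer.AtEnd()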