Python google.protobuf.descriptor_pb2.FileDescriptorSet() Examples
The following are 12 code examples of google.protobuf.descriptor_pb2.FileDescriptorSet().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module google.protobuf.descriptor_pb2, or try the search function.
Example #1
Source File: test_descriptor.py From artman with Apache License 2.0 | 6 votes |
def test_valid_rst(self):
    """Convert a descriptor set's docs to reST and lint every comment.

    Runs the Python-docs conversion task over a checked-in descriptor
    set, then verifies that each converted comment passes
    restructuredtext_lint and that the expected number of comments
    was seen.
    """
    source_path = 'test/tasks/data/test_descriptor/descriptor_set'
    converter = descriptor_set_tasks.PythonDocsConvertionTask()
    converted_path = converter.execute(source_path)
    parsed_set = desc.FileDescriptorSet()
    with open(converted_path, 'rb') as stream:
        parsed_set.ParseFromString(stream.read())
    errors = []
    total_comments = 0
    for doc_comment in gather_comments_from_descriptor_set(parsed_set):
        errors.extend(restructuredtext_lint.lint(doc_comment))
        total_comments += 1
    print(errors)
    assert len(errors) == 0
    assert total_comments == 1913
Example #2
Source File: data.py From ranking with Apache License 2.0 | 6 votes |
def _get_descriptor_set():
  """Returns a FileDescriptorSet proto to be used by tf.io.decode_proto."""
  descriptor_set = pb.FileDescriptorSet()
  # Describe the file that declares
  # tensorflow.ranking.internal.ExampleListWithContext.
  file_descriptor = descriptor_set.file.add(
      name=_FILE_NAME, package=_PACKAGE, syntax="proto3")
  message = file_descriptor.message_type.add(name=_MESSAGE_NAME)
  # Field 1: repeated bytes holding the serialized examples.
  message.field.add(
      name=_EXAMPLES_FIELD_NAME,
      number=1,
      type=pb.FieldDescriptorProto.TYPE_BYTES,
      label=pb.FieldDescriptorProto.LABEL_REPEATED)
  # Field 2: singular bytes holding the serialized context.
  message.field.add(
      name=_CONTEXT_FIELD_NAME,
      number=2,
      type=pb.FieldDescriptorProto.TYPE_BYTES)
  return descriptor_set
Example #3
Source File: input_generator.py From lingvo with Apache License 2.0 | 5 votes |
def _GetDescriptorSetForTextInput():
  """Returns a string for tf.io.decode_proto's descriptor_source."""
  # Serialize the text-input proto file's descriptor into a one-file set.
  fds = descriptor_pb2.FileDescriptorSet()
  entry = fds.file.add()
  text_input_pb2.DESCRIPTOR.CopyToProto(entry)
  serialized = fds.SerializeToString()
  # decode_proto expects the literal bytes prefixed with 'bytes://'.
  return b'bytes://' + serialized
Example #4
Source File: descriptor_set_tasks.py From artman with Apache License 2.0 | 5 votes |
def execute(self, descriptor_set):
    """Rewrite every comment in a descriptor set from Markdown to reST.

    Reads the serialized FileDescriptorSet at ``descriptor_set``,
    converts all leading/trailing/detached comments with ``md2rst``,
    and writes the result next to the input with an
    ``_updated_py_docs`` suffix. Returns the new file's path.
    """
    parsed = desc.FileDescriptorSet()
    with open(descriptor_set, 'rb') as src:
        parsed.ParseFromString(src.read())
    for fdp in parsed.file:
        if fdp.source_code_info:
            for loc in fdp.source_code_info.location:
                loc.leading_comments = md2rst(loc.leading_comments)
                loc.trailing_comments = md2rst(loc.trailing_comments)
                # Repeated scalar fields cannot be assigned in place:
                # convert, clear, then re-extend.
                converted = [md2rst(c) for c in loc.leading_detached_comments]
                del loc.leading_detached_comments[:]
                loc.leading_detached_comments.extend(converted)
    stem, extension = os.path.splitext(descriptor_set)
    output_path = stem + '_updated_py_docs' + extension
    with open(output_path, mode='wb') as dst:
        dst.write(parsed.SerializeToString())
    return output_path
Example #5
Source File: analyzers.py From transform with Apache License 2.0 | 5 votes |
def _maybe_annotate_vocab_metadata(vocab_filename, unfiltered_vocabulary_size):
  """Annotates a vocabulary (by filename) with its unfiltered size.

  Encodes a VocabularyMetadata proto containing the vocabulary's name and
  unfiltered size and registers it as a global schema annotation. No-op
  when the annotations proto is unavailable in this build.

  Args:
    vocab_filename: The name of the vocabulary.
    unfiltered_vocabulary_size: A tf.int32 tensor containing the unfiltered
      vocab size.
  """
  # Nothing to do if the optional annotations_pb2 module is not present.
  if not common.IS_ANNOTATIONS_PB_AVAILABLE:
    return
  from tensorflow_transform import annotations_pb2  # pylint: disable=g-import-not-at-top
  message_type = annotations_pb2.VocabularyMetadata.DESCRIPTOR.full_name
  # _encode_proto expects batched tensors: add a leading dim of size 1.
  unfiltered_vocabulary_size = tf.expand_dims(unfiltered_vocabulary_size, 0)
  file_name = tf.convert_to_tensor([vocab_filename])
  # Build a serialized FileDescriptorSet so decode/encode_proto can resolve
  # the VocabularyMetadata message type at runtime.
  descriptor_source = descriptor_pb2.FileDescriptorSet()
  annotations_pb2.VocabularyMetadata.DESCRIPTOR.file.CopyToProto(
      descriptor_source.file.add())
  descriptor_source_str = b'bytes://' + descriptor_source.SerializeToString()
  message_proto = tf_utils._encode_proto(  # pylint: disable=protected-access
      {
          'unfiltered_vocabulary_size': unfiltered_vocabulary_size,
          'file_name': file_name,
      }, message_type, descriptor_source=descriptor_source_str)
  # One input row was provided, so exactly one encoded message comes back.
  assert message_proto.shape == [1]
  message_proto = message_proto[0]
  # Note: we annotate globally here (tied to a vocabulary by filename) rather
  # than attaching to a tensor, because this annotation is tied to an analysis
  # output not a final tensor produced by a mapper.
  type_url = os.path.join(common.ANNOTATION_PREFIX_URL, message_type)
  schema_inference.annotate(type_url, message_proto)
Example #6
Source File: service_test.py From luci-py with Apache License 2.0 | 5 votes |
def test(self):
    """Discovery service describes a registered service correctly.

    Registers TestService, asks Describe for its description, and
    checks the service list plus the (source-info-stripped) file
    descriptors against the expected text proto.
    """
    serv = service.Discovery()
    serv.add_service(TestService.DESCRIPTION)
    res = serv.Describe(None, None)
    # assertEqual, not the deprecated assertEquals alias (removed in 3.12).
    self.assertEqual(res.services, ['discovery_test.TestService'])
    for f in res.description.file:
        # Source info is environment-dependent; strip it before comparing,
        # but verify it was present.
        self.assertTrue(f.HasField('source_code_info'))
        f.ClearField('source_code_info')
    expected_description = descriptor_pb2.FileDescriptorSet()
    text_format.Merge(EXPECTED_DESCRIPTION, expected_description)
    self.assertEqual(expected_description, res.description)
Example #7
Source File: service_test.py From luci-py with Apache License 2.0 | 5 votes |
def test(self):
    """Discovery service describes a registered service correctly.

    Registers TestService, asks Describe for its description, and
    checks the service list plus the (source-info-stripped) file
    descriptors against the expected text proto.
    """
    serv = service.Discovery()
    serv.add_service(TestService.DESCRIPTION)
    res = serv.Describe(None, None)
    # assertEqual, not the deprecated assertEquals alias (removed in 3.12).
    self.assertEqual(res.services, ['discovery_test.TestService'])
    for f in res.description.file:
        # Source info is environment-dependent; strip it before comparing,
        # but verify it was present.
        self.assertTrue(f.HasField('source_code_info'))
        f.ClearField('source_code_info')
    expected_description = descriptor_pb2.FileDescriptorSet()
    text_format.Merge(EXPECTED_DESCRIPTION, expected_description)
    self.assertEqual(expected_description, res.description)
Example #8
Source File: service_test.py From luci-py with Apache License 2.0 | 5 votes |
def test(self):
    """Discovery service describes a registered service correctly.

    Registers TestService, asks Describe for its description, and
    checks the service list plus the (source-info-stripped) file
    descriptors against the expected text proto.
    """
    serv = service.Discovery()
    serv.add_service(TestService.DESCRIPTION)
    res = serv.Describe(None, None)
    # assertEqual, not the deprecated assertEquals alias (removed in 3.12).
    self.assertEqual(res.services, ['discovery_test.TestService'])
    for f in res.description.file:
        # Source info is environment-dependent; strip it before comparing,
        # but verify it was present.
        self.assertTrue(f.HasField('source_code_info'))
        f.ClearField('source_code_info')
    expected_description = descriptor_pb2.FileDescriptorSet()
    text_format.Merge(EXPECTED_DESCRIPTION, expected_description)
    self.assertEqual(expected_description, res.description)
Example #9
Source File: service_test.py From luci-py with Apache License 2.0 | 5 votes |
def test(self):
    """Discovery service describes a registered service correctly.

    Registers TestService, asks Describe for its description, and
    checks the service list plus the (source-info-stripped) file
    descriptors against the expected text proto.
    """
    serv = service.Discovery()
    serv.add_service(TestService.DESCRIPTION)
    res = serv.Describe(None, None)
    # assertEqual, not the deprecated assertEquals alias (removed in 3.12).
    self.assertEqual(res.services, ['discovery_test.TestService'])
    for f in res.description.file:
        # Source info is environment-dependent; strip it before comparing,
        # but verify it was present.
        self.assertTrue(f.HasField('source_code_info'))
        f.ClearField('source_code_info')
    expected_description = descriptor_pb2.FileDescriptorSet()
    text_format.Merge(EXPECTED_DESCRIPTION, expected_description)
    self.assertEqual(expected_description, res.description)
Example #10
Source File: service_test.py From luci-py with Apache License 2.0 | 5 votes |
def test(self):
    """Discovery service describes a registered service correctly.

    Registers TestService, asks Describe for its description, and
    checks the service list plus the (source-info-stripped) file
    descriptors against the expected text proto.
    """
    serv = service.Discovery()
    serv.add_service(TestService.DESCRIPTION)
    res = serv.Describe(None, None)
    # assertEqual, not the deprecated assertEquals alias (removed in 3.12).
    self.assertEqual(res.services, ['discovery_test.TestService'])
    for f in res.description.file:
        # Source info is environment-dependent; strip it before comparing,
        # but verify it was present.
        self.assertTrue(f.HasField('source_code_info'))
        f.ClearField('source_code_info')
    expected_description = descriptor_pb2.FileDescriptorSet()
    text_format.Merge(EXPECTED_DESCRIPTION, expected_description)
    self.assertEqual(expected_description, res.description)
Example #11
Source File: descriptor_parser.py From voltha with Apache License 2.0 | 5 votes |
def load_meta_descriptor(self):
    """
    Load the protobuf version of descriptor.proto to use it in
    decoding protobuf paths.

    Returns the single FileDescriptorProto contained in the bundled
    'descriptor.desc' file (located next to this module).
    """
    fpath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                         'descriptor.desc'))
    # Open in binary mode: the file holds serialized protobuf bytes, and
    # text mode ('r') would hand ParseFromString a str on Python 3 and
    # corrupt bytes via newline translation on Windows.
    with open(fpath, 'rb') as f:
        blob = f.read()
    proto = descriptor_pb2.FileDescriptorSet()
    proto.ParseFromString(blob)
    # The bundled set is expected to contain exactly descriptor.proto.
    assert len(proto.file) == 1
    return proto.file[0]
Example #12
Source File: descriptor_parser.py From voltha with Apache License 2.0 | 5 votes |
def load_meta_descriptor(self):
    """
    Load the protobuf version of descriptor.proto to use it in
    decoding protobuf paths.

    Returns the single FileDescriptorProto contained in the bundled
    'descriptor.desc' file (located next to this module).
    """
    fpath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                         'descriptor.desc'))
    # Open in binary mode: the file holds serialized protobuf bytes, and
    # text mode ('r') would hand ParseFromString a str on Python 3 and
    # corrupt bytes via newline translation on Windows.
    with open(fpath, 'rb') as f:
        blob = f.read()
    proto = descriptor_pb2.FileDescriptorSet()
    proto.ParseFromString(blob)
    # The bundled set is expected to contain exactly descriptor.proto.
    assert len(proto.file) == 1
    return proto.file[0]